/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/string_store/layout.h */

/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* layout.h -- example from introduction part 1
*/
#define LAYOUT_NAME "intro_1"
#define MAX_BUF_LEN 10
struct my_root {
size_t len;
char buf[MAX_BUF_LEN];
};
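
/*
 * Illustrative sketch (added, not part of the original header): how this
 * layout is typically consumed, in the spirit of the part 1 writer
 * example. The pool-path handling and the truncation policy here are
 * assumptions made for the sketch only.
 */
#include <string.h>
#include <libpmemobj.h>

static inline int
example_write(const char *path, const char *text)
{
	PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;
	PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
	struct my_root *rootp = pmemobj_direct(root);
	size_t len = strlen(text) + 1;
	if (len > MAX_BUF_LEN)
		len = MAX_BUF_LEN; /* buffer holds at most MAX_BUF_LEN bytes */
	/* persist the length first, then the buffer contents */
	rootp->len = len;
	pmemobj_persist(pop, &rootp->len, sizeof(rootp->len));
	pmemobj_memcpy_persist(pop, rootp->buf, text, len);
	pmemobj_close(pop);
	return 0;
}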
/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/ctree_map.c */

/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctree_map.c -- Crit-bit trie implementation
*/
#include <ex_common.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "ctree_map.h"
#define BIT_IS_SET(n, i) (!!((n) & (1ULL << (i))))
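/*
 * Example (added): for n == 6 (binary 110), BIT_IS_SET(6, 1) == 1 and
 * BIT_IS_SET(6, 0) == 0.
 */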
TOID_DECLARE(struct tree_map_node, CTREE_MAP_TYPE_OFFSET + 1);
struct tree_map_entry {
uint64_t key;
PMEMoid slot;
};
struct tree_map_node {
int diff; /* most significant differing bit */
struct tree_map_entry entries[2];
};
struct ctree_map {
struct tree_map_entry root;
};
/*
* find_crit_bit -- (internal) finds the most significant differing bit
*/
static int
find_crit_bit(uint64_t lhs, uint64_t rhs)
{
return find_last_set_64(lhs ^ rhs);
}
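/*
 * Worked example (added): find_crit_bit(12, 10) computes 12 ^ 10 == 6
 * (binary 110); the most significant set bit of 6 is bit 2, so the two
 * keys first diverge at bit 2 and that becomes the node's "diff" value.
 */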
/*
* ctree_map_create -- allocates a new crit-bit tree instance
*/
int
ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct ctree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_clear_node -- (internal) clears this node and its children
*/
static void
ctree_map_clear_node(PMEMoid p)
{
if (OID_IS_NULL(p))
return;
if (OID_INSTANCEOF(p, struct tree_map_node)) {
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, p);
ctree_map_clear_node(D_RW(node)->entries[0].slot);
ctree_map_clear_node(D_RW(node)->entries[1].slot);
}
pmemobj_tx_free(p);
}
/*
* ctree_map_clear -- removes all elements from the map
*/
int
ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
TX_BEGIN(pop) {
ctree_map_clear_node(D_RW(map)->root.slot);
TX_ADD_FIELD(map, root);
D_RW(map)->root.slot = OID_NULL;
} TX_END
return 0;
}
/*
 * ctree_map_destroy -- cleans up and frees the crit-bit tree instance
*/
int
ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
ctree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct ctree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_insert_leaf -- (internal) inserts a new leaf at the position
*/
static void
ctree_map_insert_leaf(struct tree_map_entry *p,
struct tree_map_entry e, int diff)
{
TOID(struct tree_map_node) new_node = TX_NEW(struct tree_map_node);
D_RW(new_node)->diff = diff;
int d = BIT_IS_SET(e.key, D_RO(new_node)->diff);
/* insert the leaf on the side selected by the critical bit */
D_RW(new_node)->entries[d] = e;
/* find the appropriate position in the tree to insert the node */
TOID(struct tree_map_node) node;
while (OID_INSTANCEOF(p->slot, struct tree_map_node)) {
TOID_ASSIGN(node, p->slot);
/* the critical bits have to be sorted */
if (D_RO(node)->diff < D_RO(new_node)->diff)
break;
p = &D_RW(node)->entries[BIT_IS_SET(e.key, D_RO(node)->diff)];
}
/* insert the found destination in the other slot */
D_RW(new_node)->entries[!d] = *p;
pmemobj_tx_add_range_direct(p, sizeof(*p));
p->key = 0;
p->slot = new_node.oid;
}
/*
* ctree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
ctree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_insert -- inserts a new key-value pair into the map
*/
int
ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, PMEMoid value)
{
struct tree_map_entry *p = &D_RW(map)->root;
int ret = 0;
/* descend the path until a best matching key is found */
TOID(struct tree_map_node) node;
while (!OID_IS_NULL(p->slot) &&
OID_INSTANCEOF(p->slot, struct tree_map_node)) {
TOID_ASSIGN(node, p->slot);
p = &D_RW(node)->entries[BIT_IS_SET(key, D_RW(node)->diff)];
}
struct tree_map_entry e = {key, value};
TX_BEGIN(pop) {
if (p->key == 0 || p->key == key) {
pmemobj_tx_add_range_direct(p, sizeof(*p));
*p = e;
} else {
ctree_map_insert_leaf(&D_RW(map)->root, e,
find_crit_bit(p->key, key));
}
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
 * ctree_map_get_leaf -- (internal) searches for the leaf containing the key
*/
static struct tree_map_entry *
ctree_map_get_leaf(TOID(struct ctree_map) map, uint64_t key,
struct tree_map_entry **parent)
{
struct tree_map_entry *n = &D_RW(map)->root;
struct tree_map_entry *p = NULL;
TOID(struct tree_map_node) node;
while (!OID_IS_NULL(n->slot) &&
OID_INSTANCEOF(n->slot, struct tree_map_node)) {
TOID_ASSIGN(node, n->slot);
p = n;
n = &D_RW(node)->entries[BIT_IS_SET(key, D_RW(node)->diff)];
}
if (n->key == key) {
if (parent)
*parent = p;
return n;
}
return NULL;
}
/*
* ctree_map_remove_free -- removes and frees an object from the tree
*/
int
ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = ctree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_remove -- removes key-value pair from the map
*/
PMEMoid
ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key)
{
struct tree_map_entry *parent = NULL;
struct tree_map_entry *leaf = ctree_map_get_leaf(map, key, &parent);
if (leaf == NULL)
return OID_NULL;
PMEMoid ret = leaf->slot;
if (parent == NULL) { /* root */
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(leaf, sizeof(*leaf));
leaf->key = 0;
leaf->slot = OID_NULL;
} TX_END
} else {
/*
* In this situation:
* parent
* / \
* LEFT RIGHT
* there's no point in leaving the parent internal node
* so it's swapped with the remaining node and then also freed.
*/
TX_BEGIN(pop) {
struct tree_map_entry *dest = parent;
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, parent->slot);
pmemobj_tx_add_range_direct(dest, sizeof(*dest));
*dest = D_RW(node)->entries[
D_RO(node)->entries[0].key == leaf->key];
TX_FREE(node);
} TX_END
}
return ret;
}
/*
* ctree_map_get -- searches for a value of the key
*/
PMEMoid
ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key)
{
struct tree_map_entry *entry = ctree_map_get_leaf(map, key, NULL);
return entry ? entry->slot : OID_NULL;
}
/*
 * ctree_map_lookup -- checks whether a key exists
*/
int
ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key)
{
struct tree_map_entry *entry = ctree_map_get_leaf(map, key, NULL);
return entry != NULL;
}
/*
* ctree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
ctree_map_foreach_node(struct tree_map_entry e,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
int ret = 0;
if (OID_INSTANCEOF(e.slot, struct tree_map_node)) {
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, e.slot);
if (ctree_map_foreach_node(D_RO(node)->entries[0],
cb, arg) == 0)
ctree_map_foreach_node(D_RO(node)->entries[1], cb, arg);
} else { /* leaf */
ret = cb(e.key, e.slot, arg);
}
return ret;
}
/*
* ctree_map_foreach -- initiates recursive traversal
*/
int
ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
if (OID_IS_NULL(D_RO(map)->root.slot))
return 0;
return ctree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
* ctree_map_is_empty -- checks whether the tree map is empty
*/
int
ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
return D_RO(map)->root.key == 0;
}
/*
* ctree_map_check -- check if given persistent object is a tree map
*/
int
ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/ctree_map.h */

/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctree_map.h -- TreeMap sorted collection implementation
*/
#ifndef CTREE_MAP_H
#define CTREE_MAP_H
#include <libpmemobj.h>
#ifndef CTREE_MAP_TYPE_OFFSET
#define CTREE_MAP_TYPE_OFFSET 1008
#endif
struct ctree_map;
TOID_DECLARE(struct ctree_map, CTREE_MAP_TYPE_OFFSET + 0);
int ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map);
int ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg);
int ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map);
int ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, PMEMoid value);
int ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map);
PMEMoid ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map);
#endif /* CTREE_MAP_H */
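
/*
 * Illustrative sketch (added, not part of the original header): a minimal
 * round trip through the crit-bit tree API. The pool "pop" and the value
 * object are assumed to be managed by the caller.
 */
#include <assert.h>

static inline void
ctree_example(PMEMobjpool *pop, PMEMoid value)
{
	TOID(struct ctree_map) map;
	if (ctree_map_create(pop, &map, NULL))
		return;
	/* key 0 is reserved: it marks an empty slot in the implementation */
	if (ctree_map_insert(pop, map, 42, value) == 0)
		assert(ctree_map_lookup(pop, map, 42));
	(void) ctree_map_remove(pop, map, 42);
	assert(!ctree_map_lookup(pop, map, 42));
	ctree_map_destroy(pop, &map);
}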
/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/rtree_map.c */

/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * rtree_map.c -- radix tree implementation
*/
#include <ex_common.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <stdbool.h>
#include "rtree_map.h"
TOID_DECLARE(struct tree_map_node, RTREE_MAP_TYPE_OFFSET + 1);
/* Good values: 0x10 and 0x100, but the implementation is bound to 0x100 */
#ifndef ALPHABET_SIZE
#define ALPHABET_SIZE 0x100
#endif
struct tree_map_node {
TOID(struct tree_map_node) slots[ALPHABET_SIZE];
unsigned has_value;
PMEMoid value;
uint64_t key_size;
unsigned char key[];
};
struct rtree_map {
TOID(struct tree_map_node) root;
};
/*
* rtree_map_create -- allocates a new rtree instance
*/
int
rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
TX_ADD_DIRECT(map);
*map = TX_ZNEW(struct rtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_clear_node -- (internal) removes all elements from the node
*/
static void
rtree_map_clear_node(TOID(struct tree_map_node) node)
{
	if (TOID_IS_NULL(node))
		return;
	for (unsigned i = 0; i < ALPHABET_SIZE; i++) {
rtree_map_clear_node(D_RO(node)->slots[i]);
}
pmemobj_tx_add_range(node.oid, 0,
sizeof(struct tree_map_node) + D_RO(node)->key_size);
TX_FREE(node);
}
/*
* rtree_map_clear -- removes all elements from the map
*/
int
rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
int ret = 0;
TX_BEGIN(pop) {
rtree_map_clear_node(D_RO(map)->root);
TX_ADD_FIELD(map, root);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
 * rtree_map_destroy -- cleans up and frees the rtree instance
*/
int
rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
rtree_map_clear(pop, *map);
TX_ADD_DIRECT(map);
TX_FREE(*map);
*map = TOID_NULL(struct rtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
 * rtree_new_node -- (internal) allocates and initializes a new node
*/
static TOID(struct tree_map_node)
rtree_new_node(const unsigned char *key, uint64_t key_size,
PMEMoid value, unsigned has_value)
{
TOID(struct tree_map_node) node;
node = TX_ZALLOC(struct tree_map_node,
sizeof(struct tree_map_node) + key_size);
	/*
	 * Note: the node was just allocated within this transaction, so
	 * these stores need no explicit snapshot before modification.
	 */
D_RW(node)->value = value;
D_RW(node)->has_value = has_value;
D_RW(node)->key_size = key_size;
memcpy(D_RW(node)->key, key, key_size);
return node;
}
/*
* rtree_map_insert_empty -- (internal) inserts a node into an empty map
*/
static void
rtree_map_insert_empty(TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
TX_ADD_FIELD(map, root);
D_RW(map)->root = rtree_new_node(key, key_size, value, 1);
}
/*
 * key_comm_len -- (internal) calculates the length of the common key prefix
*/
static unsigned
key_comm_len(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
for (i = 0;
i < MIN(key_size, D_RO(node)->key_size) &&
key[i] == D_RO(node)->key[i];
i++)
;
return i;
}
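/*
 * Example (added): node key "abcd" (key_size 4) vs. search key "abxy"
 * (key_size 4): the loop stops at i == 2, the common-prefix length.
 */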
/*
* rtree_map_insert_value -- (internal) inserts a pair into a tree
*/
static void
rtree_map_insert_value(TOID(struct tree_map_node) *node,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
unsigned i;
if (TOID_IS_NULL(*node)) {
TX_ADD_DIRECT(node);
*node = rtree_new_node(key, key_size, value, 1);
return;
}
i = key_comm_len(*node, key, key_size);
if (i != D_RO(*node)->key_size) {
		/* the node's label diverges from the key -- split the node */
TOID(struct tree_map_node) orig_node = *node;
TX_ADD_DIRECT(node);
if (i != key_size) {
*node = rtree_new_node(D_RO(orig_node)->key, i,
OID_NULL, 0);
} else {
*node = rtree_new_node(D_RO(orig_node)->key, i,
value, 1);
}
D_RW(*node)->slots[D_RO(orig_node)->key[i]] = orig_node;
TX_ADD_FIELD(orig_node, key_size);
D_RW(orig_node)->key_size -= i;
pmemobj_tx_add_range_direct(D_RW(orig_node)->key,
D_RO(orig_node)->key_size);
memmove(D_RW(orig_node)->key, D_RO(orig_node)->key + i,
D_RO(orig_node)->key_size);
if (i != key_size) {
D_RW(*node)->slots[key[i]] =
rtree_new_node(key + i, key_size - i, value, 1);
}
return;
}
if (i == key_size) {
if (OID_IS_NULL(D_RO(*node)->value) || D_RO(*node)->has_value) {
/* Just replace old value with new */
TX_ADD_FIELD(*node, value);
TX_ADD_FIELD(*node, has_value);
D_RW(*node)->value = value;
D_RW(*node)->has_value = 1;
} else {
			/*
			 * Ignore: the existing value is expected to have
			 * been removed beforehand or handled another way.
			 */
}
} else {
/* Recurse deeply */
return rtree_map_insert_value(&D_RW(*node)->slots[key[i]],
key + i, key_size - i, value);
}
}
/*
* rtree_map_is_empty -- checks whether the tree map is empty
*/
int
rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
return TOID_IS_NULL(D_RO(map)->root);
}
/*
* rtree_map_insert -- inserts a new key-value pair into the map
*/
int
rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
int ret = 0;
TX_BEGIN(pop) {
if (rtree_map_is_empty(pop, map)) {
rtree_map_insert_empty(map, key, key_size, value);
} else {
rtree_map_insert_value(&D_RW(map)->root,
key, key_size, value);
}
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size,
size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
rtree_map_insert(pop, map, key, key_size, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
 * is_leaf -- (internal) checks whether a node has no children
*/
static bool
is_leaf(TOID(struct tree_map_node) node)
{
unsigned j;
for (j = 0;
j < ALPHABET_SIZE &&
TOID_IS_NULL(D_RO(node)->slots[j]);
j++)
;
return (j == ALPHABET_SIZE);
}
/*
 * has_only_one_child -- (internal) checks whether a node has exactly one child
*/
static bool
has_only_one_child(TOID(struct tree_map_node) node, unsigned *child_idx)
{
unsigned j, child_qty;
for (j = 0, child_qty = 0; j < ALPHABET_SIZE; j++)
if (!TOID_IS_NULL(D_RO(node)->slots[j])) {
child_qty++;
*child_idx = j;
}
return (1 == child_qty);
}
/*
 * remove_extra_node -- (internal) collapses a node that has a single child
*/
static void
remove_extra_node(TOID(struct tree_map_node) *node)
{
unsigned child_idx;
TOID(struct tree_map_node) tmp, tmp_child;
	/* the node pointed to has exactly one child */
tmp = *node;
has_only_one_child(tmp, &child_idx);
tmp_child = D_RO(tmp)->slots[child_idx];
	/*
	 * The child's incoming label is appended to our incoming label
	 * and the child is removed.
	 */
uint64_t new_key_size = D_RO(tmp)->key_size + D_RO(tmp_child)->key_size;
unsigned char *new_key = (unsigned char *)malloc(new_key_size);
assert(new_key != NULL);
memcpy(new_key, D_RO(tmp)->key, D_RO(tmp)->key_size);
memcpy(new_key + D_RO(tmp)->key_size,
D_RO(tmp_child)->key,
D_RO(tmp_child)->key_size);
TX_ADD_DIRECT(node);
*node = rtree_new_node(new_key, new_key_size,
D_RO(tmp_child)->value, D_RO(tmp_child)->has_value);
free(new_key);
TX_FREE(tmp);
memcpy(D_RW(*node)->slots,
D_RO(tmp_child)->slots,
sizeof(D_RO(tmp_child)->slots));
TX_FREE(tmp_child);
}
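/*
 * Example (added): a node labeled "ab" whose single child is labeled "cd"
 * collapses into one node labeled "abcd" that inherits the child's value,
 * has_value flag and slot array.
 */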
/*
* rtree_map_remove_node -- (internal) removes node from tree
*/
static PMEMoid
rtree_map_remove_node(TOID(struct rtree_map) map,
TOID(struct tree_map_node) *node,
const unsigned char *key, uint64_t key_size,
bool *check_for_child)
{
bool c4c;
unsigned i, child_idx;
PMEMoid ret = OID_NULL;
*check_for_child = false;
if (TOID_IS_NULL(*node))
return OID_NULL;
i = key_comm_len(*node, key, key_size);
if (i != D_RO(*node)->key_size)
/* Node does not exist */
return OID_NULL;
if (i == key_size) {
if (0 == D_RO(*node)->has_value)
return OID_NULL;
/* Node is found */
ret = D_RO(*node)->value;
/* delete node from tree */
TX_ADD_FIELD((*node), value);
TX_ADD_FIELD((*node), has_value);
D_RW(*node)->value = OID_NULL;
D_RW(*node)->has_value = 0;
if (is_leaf(*node)) {
pmemobj_tx_add_range(node->oid, 0,
sizeof(*node) + D_RO(*node)->key_size);
TX_FREE(*node);
TX_ADD_DIRECT(node);
(*node) = TOID_NULL(struct tree_map_node);
}
return ret;
}
/* Recurse deeply */
ret = rtree_map_remove_node(map,
&D_RW(*node)->slots[key[i]],
key + i, key_size - i,
&c4c);
if (c4c) {
		/* the child node has only one child of its own -- collapse it */
remove_extra_node(&D_RW(*node)->slots[key[i]]);
return ret;
}
if (has_only_one_child(*node, &child_idx) &&
(0 == D_RO(*node)->has_value)) {
*check_for_child = true;
}
return ret;
}
/*
* rtree_map_remove -- removes key-value pair from the map
*/
PMEMoid
rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
PMEMoid ret = OID_NULL;
bool check_for_child;
if (TOID_IS_NULL(map))
return OID_NULL;
TX_BEGIN(pop) {
ret = rtree_map_remove_node(map,
&D_RW(map)->root, key, key_size,
&check_for_child);
if (check_for_child) {
			/* the root node has only one child -- collapse it */
remove_extra_node(&D_RW(map)->root);
}
} TX_END
return ret;
}
/*
* rtree_map_remove_free -- removes and frees an object from the tree
*/
int
rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
int ret = 0;
if (TOID_IS_NULL(map))
return 1;
TX_BEGIN(pop) {
pmemobj_tx_free(rtree_map_remove(pop, map, key, key_size));
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_get_in_node -- (internal) searches for a value in the node
*/
static PMEMoid
rtree_map_get_in_node(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
if (TOID_IS_NULL(node))
return OID_NULL;
i = key_comm_len(node, key, key_size);
if (i != D_RO(node)->key_size)
/* Node does not exist */
return OID_NULL;
if (i == key_size) {
/* Node is found */
return D_RO(node)->value;
} else {
/* Recurse deeply */
return rtree_map_get_in_node(D_RO(node)->slots[key[i]],
key + i, key_size - i);
}
}
/*
* rtree_map_get -- searches for a value of the key
*/
PMEMoid
rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
if (TOID_IS_NULL(D_RO(map)->root))
return OID_NULL;
return rtree_map_get_in_node(D_RO(map)->root, key, key_size);
}
/*
 * rtree_map_lookup_in_node -- (internal) checks whether the key exists in the node
*/
static int
rtree_map_lookup_in_node(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
if (TOID_IS_NULL(node))
return 0;
i = key_comm_len(node, key, key_size);
if (i != D_RO(node)->key_size)
/* Node does not exist */
return 0;
if (i == key_size) {
/* Node is found */
return 1;
}
/* Recurse deeply */
return rtree_map_lookup_in_node(D_RO(node)->slots[key[i]],
key + i, key_size - i);
}
/*
 * rtree_map_lookup -- checks whether a key exists
*/
int
rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
if (TOID_IS_NULL(D_RO(map)->root))
return 0;
return rtree_map_lookup_in_node(D_RO(map)->root, key, key_size);
}
/*
* rtree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
rtree_map_foreach_node(const TOID(struct tree_map_node) node,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid, void *arg),
void *arg)
{
unsigned i;
if (TOID_IS_NULL(node))
return 0;
for (i = 0; i < ALPHABET_SIZE; i++) {
if (rtree_map_foreach_node(D_RO(node)->slots[i], cb, arg) != 0)
return 1;
}
if (NULL != cb) {
if (cb(D_RO(node)->key, D_RO(node)->key_size,
D_RO(node)->value, arg) != 0)
return 1;
}
return 0;
}
/*
* rtree_map_foreach -- initiates recursive traversal
*/
int
rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid value, void *arg),
void *arg)
{
return rtree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
 * rtree_map_check -- checks whether the given persistent object is a tree map
*/
int
rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
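
/*
 * Illustrative sketch (added, not part of the original file): exercising
 * the radix tree with a string key. Pool and value management are assumed
 * to happen elsewhere; error handling is elided.
 */
static void
rtree_example(PMEMobjpool *pop, PMEMoid value)
{
	TOID(struct rtree_map) map;
	if (rtree_map_create(pop, &map, NULL))
		return;
	const unsigned char key[] = "hello";
	rtree_map_insert(pop, map, key, sizeof(key), value);
	if (rtree_map_lookup(pop, map, key, sizeof(key)))
		(void) rtree_map_remove(pop, map, key, sizeof(key));
	rtree_map_destroy(pop, &map);
}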
/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/btree_map.c */

/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * btree_map.c -- textbook implementation of a B-tree with preemptive splitting
*/
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include "btree_map.h"
TOID_DECLARE(struct tree_map_node, BTREE_MAP_TYPE_OFFSET + 1);
#define BTREE_ORDER 8 /* can't be odd */
#define BTREE_MIN ((BTREE_ORDER / 2) - 1) /* min number of keys per node */
struct tree_map_node_item {
uint64_t key;
PMEMoid value;
};
struct tree_map_node {
int n; /* number of occupied slots */
struct tree_map_node_item items[BTREE_ORDER - 1];
TOID(struct tree_map_node) slots[BTREE_ORDER];
};
struct btree_map {
TOID(struct tree_map_node) root;
};
/*
 * set_empty_item -- (internal) clears the item (zero key, null value)
*/
static void
set_empty_item(struct tree_map_node_item *item)
{
item->key = 0;
item->value = OID_NULL;
}
/*
* btree_map_create -- allocates a new btree instance
*/
int
btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct btree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_clear_node -- (internal) removes all elements from the node
*/
static void
btree_map_clear_node(TOID(struct tree_map_node) node)
{
	if (TOID_IS_NULL(node))
		return;
	/* a node with n items has n + 1 child slots */
	for (int i = 0; i <= D_RO(node)->n; ++i) {
		btree_map_clear_node(D_RO(node)->slots[i]);
	}
TX_FREE(node);
}
/*
* btree_map_clear -- removes all elements from the map
*/
int
btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map)
{
int ret = 0;
TX_BEGIN(pop) {
btree_map_clear_node(D_RO(map)->root);
TX_ADD_FIELD(map, root);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
 * btree_map_destroy -- cleans up and frees the btree instance
*/
int
btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
btree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct btree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_insert_item_at -- (internal) inserts an item at position
*/
static void
btree_map_insert_item_at(TOID(struct tree_map_node) node, int pos,
struct tree_map_node_item item)
{
D_RW(node)->items[pos] = item;
D_RW(node)->n += 1;
}
/*
* btree_map_insert_empty -- (internal) inserts an item into an empty node
*/
static void
btree_map_insert_empty(TOID(struct btree_map) map,
struct tree_map_node_item item)
{
TX_ADD_FIELD(map, root);
D_RW(map)->root = TX_ZNEW(struct tree_map_node);
btree_map_insert_item_at(D_RO(map)->root, 0, item);
}
/*
* btree_map_insert_node -- (internal) inserts and makes space for new node
*/
static void
btree_map_insert_node(TOID(struct tree_map_node) node, int p,
struct tree_map_node_item item,
TOID(struct tree_map_node) left, TOID(struct tree_map_node) right)
{
TX_ADD(node);
if (D_RO(node)->items[p].key != 0) { /* move all existing data */
memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p],
sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p)));
memmove(&D_RW(node)->slots[p + 1], &D_RW(node)->slots[p],
sizeof(TOID(struct tree_map_node)) * ((BTREE_ORDER - 1 - p)));
}
D_RW(node)->slots[p] = left;
D_RW(node)->slots[p + 1] = right;
btree_map_insert_item_at(node, p, item);
}
/*
* btree_map_create_split_node -- (internal) splits a node into two
*/
static TOID(struct tree_map_node)
btree_map_create_split_node(TOID(struct tree_map_node) node,
struct tree_map_node_item *m)
{
TOID(struct tree_map_node) right = TX_ZNEW(struct tree_map_node);
int c = (BTREE_ORDER / 2);
*m = D_RO(node)->items[c - 1]; /* select median item */
TX_ADD(node);
set_empty_item(&D_RW(node)->items[c - 1]);
/* move everything right side of median to the new node */
for (int i = c; i < BTREE_ORDER; ++i) {
if (i != BTREE_ORDER - 1) {
D_RW(right)->items[D_RW(right)->n++] =
D_RO(node)->items[i];
set_empty_item(&D_RW(node)->items[i]);
}
D_RW(right)->slots[i - c] = D_RO(node)->slots[i];
D_RW(node)->slots[i] = TOID_NULL(struct tree_map_node);
}
D_RW(node)->n = c - 1;
return right;
}
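/*
 * Worked example (added): with BTREE_ORDER == 8 a full node holds 7
 * items. c == 4, so items[3] becomes the median handed up to the parent,
 * items[0..2] stay in the original node (n drops to 3) and items[4..6]
 * plus slots[4..7] move to the new right sibling.
 */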
/*
 * btree_map_find_dest_node -- (internal) finds the node to insert the new key into
*/
static TOID(struct tree_map_node)
btree_map_find_dest_node(TOID(struct btree_map) map,
TOID(struct tree_map_node) n, TOID(struct tree_map_node) parent,
uint64_t key, int *p)
{
if (D_RO(n)->n == BTREE_ORDER - 1) { /* node is full, perform a split */
struct tree_map_node_item m;
TOID(struct tree_map_node) right =
btree_map_create_split_node(n, &m);
if (!TOID_IS_NULL(parent)) {
btree_map_insert_node(parent, *p, m, n, right);
if (key > m.key) /* select node to continue search */
n = right;
} else { /* replacing root node, the tree grows in height */
TOID(struct tree_map_node) up =
TX_ZNEW(struct tree_map_node);
D_RW(up)->n = 1;
D_RW(up)->items[0] = m;
D_RW(up)->slots[0] = n;
D_RW(up)->slots[1] = right;
TX_ADD_FIELD(map, root);
D_RW(map)->root = up;
n = up;
}
}
int i;
for (i = 0; i < BTREE_ORDER - 1; ++i) {
*p = i;
/*
* The key either fits somewhere in the middle or at the
* right edge of the node.
*/
if (D_RO(n)->n == i || D_RO(n)->items[i].key > key) {
return TOID_IS_NULL(D_RO(n)->slots[i]) ? n :
btree_map_find_dest_node(map,
D_RO(n)->slots[i], n, key, p);
}
}
/*
* The key is bigger than the last node element, go one level deeper
* in the rightmost child.
*/
return btree_map_find_dest_node(map, D_RO(n)->slots[i], n, key, p);
}
/*
* btree_map_insert_item -- (internal) inserts and makes space for new item
*/
static void
btree_map_insert_item(TOID(struct tree_map_node) node, int p,
struct tree_map_node_item item)
{
TX_ADD(node);
if (D_RO(node)->items[p].key != 0) {
memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p],
sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p)));
}
btree_map_insert_item_at(node, p, item);
}
/*
* btree_map_is_empty -- checks whether the tree map is empty
*/
int
btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map)
{
return TOID_IS_NULL(D_RO(map)->root) || D_RO(D_RO(map)->root)->n == 0;
}
/*
* btree_map_insert -- inserts a new key-value pair into the map
*/
int
btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value)
{
struct tree_map_node_item item = {key, value};
TX_BEGIN(pop) {
if (btree_map_is_empty(pop, map)) {
btree_map_insert_empty(map, item);
} else {
int p; /* position at the dest node to insert */
TOID(struct tree_map_node) parent =
TOID_NULL(struct tree_map_node);
TOID(struct tree_map_node) dest =
btree_map_find_dest_node(map, D_RW(map)->root,
parent, key, &p);
btree_map_insert_item(dest, p, item);
}
} TX_END
return 0;
}
/*
* btree_map_rotate_right -- (internal) takes one element from right sibling
*/
static void
btree_map_rotate_right(TOID(struct tree_map_node) rsb,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
/* move the separator from parent to the deficient node */
struct tree_map_node_item sep = D_RO(parent)->items[p];
btree_map_insert_item(node, D_RO(node)->n, sep);
/* the first element of the right sibling is the new separator */
TX_ADD_FIELD(parent, items[p]);
D_RW(parent)->items[p] = D_RO(rsb)->items[0];
	/* the nodes are not necessarily leaves, so copy the slot as well */
TX_ADD_FIELD(node, slots[D_RO(node)->n]);
D_RW(node)->slots[D_RO(node)->n] = D_RO(rsb)->slots[0];
TX_ADD(rsb);
D_RW(rsb)->n -= 1; /* it loses one element, but still > min */
/* move all existing elements back by one array slot */
memmove(D_RW(rsb)->items, D_RO(rsb)->items + 1,
sizeof(struct tree_map_node_item) * (D_RO(rsb)->n));
memmove(D_RW(rsb)->slots, D_RO(rsb)->slots + 1,
sizeof(TOID(struct tree_map_node)) * (D_RO(rsb)->n + 1));
}
/*
* btree_map_rotate_left -- (internal) takes one element from left sibling
*/
static void
btree_map_rotate_left(TOID(struct tree_map_node) lsb,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
/* move the separator from parent to the deficient node */
struct tree_map_node_item sep = D_RO(parent)->items[p - 1];
btree_map_insert_item(node, 0, sep);
/* the last element of the left sibling is the new separator */
TX_ADD_FIELD(parent, items[p - 1]);
D_RW(parent)->items[p - 1] = D_RO(lsb)->items[D_RO(lsb)->n - 1];
/* rotate the node children */
memmove(D_RW(node)->slots + 1, D_RO(node)->slots,
sizeof(TOID(struct tree_map_node)) * (D_RO(node)->n));
	/* the nodes are not necessarily leaves, so copy the slot as well */
D_RW(node)->slots[0] = D_RO(lsb)->slots[D_RO(lsb)->n];
TX_ADD_FIELD(lsb, n);
D_RW(lsb)->n -= 1; /* it loses one element, but still > min */
}
/*
* btree_map_merge -- (internal) merges node and right sibling
*/
static void
btree_map_merge(TOID(struct btree_map) map, TOID(struct tree_map_node) rn,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
struct tree_map_node_item sep = D_RO(parent)->items[p];
TX_ADD(node);
/* add separator to the deficient node */
D_RW(node)->items[D_RW(node)->n++] = sep;
/* copy right sibling data to node */
memcpy(&D_RW(node)->items[D_RO(node)->n], D_RO(rn)->items,
sizeof(struct tree_map_node_item) * D_RO(rn)->n);
memcpy(&D_RW(node)->slots[D_RO(node)->n], D_RO(rn)->slots,
sizeof(TOID(struct tree_map_node)) * (D_RO(rn)->n + 1));
D_RW(node)->n += D_RO(rn)->n;
TX_FREE(rn); /* right node is now empty */
TX_ADD(parent);
D_RW(parent)->n -= 1;
/* move everything to the right of the separator by one array slot */
memmove(D_RW(parent)->items + p, D_RW(parent)->items + p + 1,
sizeof(struct tree_map_node_item) * (D_RO(parent)->n - p));
memmove(D_RW(parent)->slots + p + 1, D_RW(parent)->slots + p + 2,
sizeof(TOID(struct tree_map_node)) * (D_RO(parent)->n - p + 1));
/* if the parent is empty then the tree shrinks in height */
if (D_RO(parent)->n == 0 && TOID_EQUALS(parent, D_RO(map)->root)) {
TX_ADD(map);
TX_FREE(D_RO(map)->root);
D_RW(map)->root = node;
}
}
/*
* btree_map_rebalance -- (internal) performs tree rebalance
*/
static void
btree_map_rebalance(TOID(struct btree_map) map, TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
TOID(struct tree_map_node) rsb = p >= D_RO(parent)->n ?
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p + 1];
TOID(struct tree_map_node) lsb = p == 0 ?
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p - 1];
if (!TOID_IS_NULL(rsb) && D_RO(rsb)->n > BTREE_MIN)
btree_map_rotate_right(rsb, node, parent, p);
else if (!TOID_IS_NULL(lsb) && D_RO(lsb)->n > BTREE_MIN)
btree_map_rotate_left(lsb, node, parent, p);
else if (TOID_IS_NULL(rsb)) /* always merge with rightmost node */
btree_map_merge(map, node, lsb, parent, p - 1);
else
btree_map_merge(map, rsb, node, parent, p);
}
/*
* btree_map_get_leftmost_leaf -- (internal) searches for the successor
*/
static TOID(struct tree_map_node)
btree_map_get_leftmost_leaf(TOID(struct btree_map) map,
TOID(struct tree_map_node) n, TOID(struct tree_map_node) *p)
{
if (TOID_IS_NULL(D_RO(n)->slots[0]))
return n;
*p = n;
return btree_map_get_leftmost_leaf(map, D_RO(n)->slots[0], p);
}
/*
* btree_map_remove_from_node -- (internal) removes element from node
*/
static void
btree_map_remove_from_node(TOID(struct btree_map) map,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
if (TOID_IS_NULL(D_RO(node)->slots[0])) { /* leaf */
TX_ADD(node);
if (D_RO(node)->n == 1 || p == BTREE_ORDER - 2) {
set_empty_item(&D_RW(node)->items[p]);
} else if (D_RO(node)->n != 1) {
memmove(&D_RW(node)->items[p],
&D_RW(node)->items[p + 1],
sizeof(struct tree_map_node_item) *
(D_RO(node)->n - p));
}
D_RW(node)->n -= 1;
return;
}
/* can't delete from non-leaf nodes, remove successor */
TOID(struct tree_map_node) rchild = D_RW(node)->slots[p + 1];
TOID(struct tree_map_node) lp = node;
TOID(struct tree_map_node) lm =
btree_map_get_leftmost_leaf(map, rchild, &lp);
TX_ADD_FIELD(node, items[p]);
D_RW(node)->items[p] = D_RO(lm)->items[0];
btree_map_remove_from_node(map, lm, lp, 0);
if (D_RO(lm)->n < BTREE_MIN) /* right child can be deficient now */
btree_map_rebalance(map, lm, lp,
TOID_EQUALS(lp, node) ? p + 1 : 0);
}
#define NODE_CONTAINS_ITEM(_n, _i, _k)\
((_i) != D_RO(_n)->n && D_RO(_n)->items[_i].key == (_k))
#define NODE_CHILD_CAN_CONTAIN_ITEM(_n, _i, _k)\
((_i) == D_RO(_n)->n || D_RO(_n)->items[_i].key > (_k)) &&\
!TOID_IS_NULL(D_RO(_n)->slots[_i])
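/*
 * Example (added): for a node with items {3, 7, 9} (n == 3),
 * NODE_CONTAINS_ITEM(node, 1, 7) holds, while a search for key 5 descends
 * through slots[1] because NODE_CHILD_CAN_CONTAIN_ITEM(node, 1, 5) holds
 * (items[1].key == 7 > 5 and the slot is non-null).
 */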
/*
* btree_map_remove_item -- (internal) removes item from node
*/
static PMEMoid
btree_map_remove_item(TOID(struct btree_map) map,
TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent,
uint64_t key, int p)
{
PMEMoid ret = OID_NULL;
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key)) {
ret = D_RO(node)->items[i].value;
btree_map_remove_from_node(map, node, parent, i);
break;
} else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key)) {
ret = btree_map_remove_item(map, D_RO(node)->slots[i],
node, key, i);
break;
}
}
/* check for deficient nodes walking up */
if (!TOID_IS_NULL(parent) && D_RO(node)->n < BTREE_MIN)
btree_map_rebalance(map, node, parent, p);
return ret;
}
/*
* btree_map_remove -- removes key-value pair from the map
*/
PMEMoid
btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
PMEMoid ret = OID_NULL;
TX_BEGIN(pop) {
ret = btree_map_remove_item(map, D_RW(map)->root,
TOID_NULL(struct tree_map_node), key, 0);
} TX_END
return ret;
}
/*
* btree_map_get_in_node -- (internal) searches for a value in the node
*/
static PMEMoid
btree_map_get_in_node(TOID(struct tree_map_node) node, uint64_t key)
{
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key))
return D_RO(node)->items[i].value;
else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key))
return btree_map_get_in_node(D_RO(node)->slots[i], key);
}
return OID_NULL;
}
/*
* btree_map_get -- searches for a value of the key
*/
PMEMoid
btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
if (TOID_IS_NULL(D_RO(map)->root))
return OID_NULL;
return btree_map_get_in_node(D_RO(map)->root, key);
}
/*
 * btree_map_lookup_in_node -- (internal) checks whether the key exists in the node
*/
static int
btree_map_lookup_in_node(TOID(struct tree_map_node) node, uint64_t key)
{
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key))
return 1;
else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key))
return btree_map_lookup_in_node(
D_RO(node)->slots[i], key);
}
return 0;
}
/*
 * btree_map_lookup -- checks whether a key exists
*/
int
btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
if (TOID_IS_NULL(D_RO(map)->root))
return 0;
return btree_map_lookup_in_node(D_RO(map)->root, key);
}
/*
* btree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
btree_map_foreach_node(const TOID(struct tree_map_node) p,
int (*cb)(uint64_t key, PMEMoid, void *arg), void *arg)
{
if (TOID_IS_NULL(p))
return 0;
for (int i = 0; i <= D_RO(p)->n; ++i) {
if (btree_map_foreach_node(D_RO(p)->slots[i], cb, arg) != 0)
return 1;
if (i != D_RO(p)->n && D_RO(p)->items[i].key != 0) {
if (cb(D_RO(p)->items[i].key, D_RO(p)->items[i].value,
arg) != 0)
return 1;
}
}
return 0;
}
/*
* btree_map_foreach -- initiates recursive traversal
*/
int
btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
return btree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
 * btree_map_check -- checks whether the given persistent object is a tree map
*/
int
btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/*
* btree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
btree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_remove_free -- removes and frees an object from the tree
*/
int
btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = btree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
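
/*
 * Illustrative sketch (added, not part of the original file): a minimal
 * insert/lookup/remove round trip. The map is assumed to have been
 * created with btree_map_create and the value allocated from "pop".
 */
static void
btree_example(PMEMobjpool *pop, TOID(struct btree_map) map, PMEMoid value)
{
	btree_map_insert(pop, map, 7, value);
	assert(btree_map_lookup(pop, map, 7));
	(void) btree_map_remove(pop, map, 7);
	assert(!btree_map_lookup(pop, map, 7));
}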
/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/rtree_map.h */

/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rtree_map.h -- Radix TreeMap collection implementation
*/
#ifndef RTREE_MAP_H
#define RTREE_MAP_H
#include <libpmemobj.h>
#ifndef RTREE_MAP_TYPE_OFFSET
#define RTREE_MAP_TYPE_OFFSET 1020
#endif
struct rtree_map;
TOID_DECLARE(struct rtree_map, RTREE_MAP_TYPE_OFFSET + 0);
int rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map);
int rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg);
int rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map);
int rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value);
int rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size,
size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map);
PMEMoid rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid value, void *arg),
void *arg);
int rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map);
#endif /* RTREE_MAP_H */
/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/rbtree_map.h */

/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rbtree_map.h -- TreeMap sorted collection implementation
*/
#ifndef RBTREE_MAP_H
#define RBTREE_MAP_H
#include <libpmemobj.h>
#ifndef RBTREE_MAP_TYPE_OFFSET
#define RBTREE_MAP_TYPE_OFFSET 1016
#endif
struct rbtree_map;
TOID_DECLARE(struct rbtree_map, RBTREE_MAP_TYPE_OFFSET + 0);
int rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map);
int rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map,
void *arg);
int rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map);
int rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value);
int rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map);
PMEMoid rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map);
#endif /* RBTREE_MAP_H */
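
/*
 * Illustrative sketch (added, not part of the original header): the API
 * mirrors the other tree maps, so a typical round trip looks like this.
 * Pool and value management are assumed to happen elsewhere.
 */
static inline void
rbtree_example(PMEMobjpool *pop, PMEMoid value)
{
	TOID(struct rbtree_map) map;
	if (rbtree_map_create(pop, &map, NULL))
		return;
	rbtree_map_insert(pop, map, 1, value);
	if (rbtree_map_lookup(pop, map, 1))
		(void) rbtree_map_remove(pop, map, 1);
	rbtree_map_destroy(pop, &map);
}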
/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/btree_map.h */

/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* btree_map.h -- TreeMap sorted collection implementation
*/
#ifndef BTREE_MAP_H
#define BTREE_MAP_H
#include <libpmemobj.h>
#ifndef BTREE_MAP_TYPE_OFFSET
#define BTREE_MAP_TYPE_OFFSET 1012
#endif
struct btree_map;
TOID_DECLARE(struct btree_map, BTREE_MAP_TYPE_OFFSET + 0);
int btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map);
int btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg);
int btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map);
int btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value);
int btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map);
PMEMoid btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map);
#endif /* BTREE_MAP_H */
/* File: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/rbtree_map.c */

/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * rbtree_map.c -- red-black tree implementation with sentinel nodes
*/
#include <assert.h>
#include <errno.h>
#include "rbtree_map.h"
TOID_DECLARE(struct tree_map_node, RBTREE_MAP_TYPE_OFFSET + 1);
#define NODE_P(_n)\
D_RW(_n)->parent
#define NODE_GRANDP(_n)\
NODE_P(NODE_P(_n))
#define NODE_PARENT_AT(_n, _rbc)\
D_RW(NODE_P(_n))->slots[_rbc]
#define NODE_PARENT_RIGHT(_n)\
NODE_PARENT_AT(_n, RB_RIGHT)
#define NODE_IS(_n, _rbc)\
TOID_EQUALS(_n, NODE_PARENT_AT(_n, _rbc))
#define NODE_IS_RIGHT(_n)\
TOID_EQUALS(_n, NODE_PARENT_RIGHT(_n))
#define NODE_LOCATION(_n)\
NODE_IS_RIGHT(_n)
#define RB_FIRST(_m)\
D_RW(D_RW(_m)->root)->slots[RB_LEFT]
#define NODE_IS_NULL(_n)\
TOID_EQUALS(_n, s)
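/*
 * Note on the helpers above: NODE_LOCATION() evaluates to RB_RIGHT (1) when
 * the node is the right child of its parent and to RB_LEFT (0) otherwise,
 * so its result can be used directly as a slot index. NODE_IS_NULL()
 * compares against a local variable 's', so every function that uses it
 * must first load the sentinel into such a variable.
 */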
enum rb_color {
COLOR_BLACK,
COLOR_RED,
MAX_COLOR
};
enum rb_children {
RB_LEFT,
RB_RIGHT,
MAX_RB
};
struct tree_map_node {
uint64_t key;
PMEMoid value;
enum rb_color color;
TOID(struct tree_map_node) parent;
TOID(struct tree_map_node) slots[MAX_RB];
};
struct rbtree_map {
TOID(struct tree_map_node) sentinel;
TOID(struct tree_map_node) root;
};
/*
* rbtree_map_create -- allocates a new red-black tree instance
*/
int
rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct rbtree_map);
TOID(struct tree_map_node) s = TX_ZNEW(struct tree_map_node);
D_RW(s)->color = COLOR_BLACK;
D_RW(s)->parent = s;
D_RW(s)->slots[RB_LEFT] = s;
D_RW(s)->slots[RB_RIGHT] = s;
TOID(struct tree_map_node) r = TX_ZNEW(struct tree_map_node);
D_RW(r)->color = COLOR_BLACK;
D_RW(r)->parent = s;
D_RW(r)->slots[RB_LEFT] = s;
D_RW(r)->slots[RB_RIGHT] = s;
D_RW(*map)->sentinel = s;
D_RW(*map)->root = r;
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_clear_node -- (internal) clears this node and its children
*/
static void
rbtree_map_clear_node(TOID(struct rbtree_map) map, TOID(struct tree_map_node) p)
{
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
if (!NODE_IS_NULL(D_RO(p)->slots[RB_LEFT]))
rbtree_map_clear_node(map, D_RO(p)->slots[RB_LEFT]);
if (!NODE_IS_NULL(D_RO(p)->slots[RB_RIGHT]))
rbtree_map_clear_node(map, D_RO(p)->slots[RB_RIGHT]);
TX_FREE(p);
}
/*
* rbtree_map_clear -- removes all elements from the map
*/
int
rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
TX_BEGIN(pop) {
rbtree_map_clear_node(map, D_RW(map)->root);
TX_ADD_FIELD(map, root);
TX_ADD_FIELD(map, sentinel);
TX_FREE(D_RW(map)->sentinel);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
D_RW(map)->sentinel = TOID_NULL(struct tree_map_node);
} TX_END
return 0;
}
/*
 * rbtree_map_destroy -- cleans up and frees the red-black tree instance
*/
int
rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
rbtree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct rbtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_rotate -- (internal) performs a left/right rotation around a node
*/
static void
rbtree_map_rotate(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) node, enum rb_children c)
{
TOID(struct tree_map_node) child = D_RO(node)->slots[!c];
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
TX_ADD(node);
TX_ADD(child);
D_RW(node)->slots[!c] = D_RO(child)->slots[c];
if (!TOID_EQUALS(D_RO(child)->slots[c], s))
TX_SET(D_RW(child)->slots[c], parent, node);
NODE_P(child) = NODE_P(node);
TX_SET(NODE_P(node), slots[NODE_LOCATION(node)], child);
D_RW(child)->slots[c] = node;
D_RW(node)->parent = child;
}
/*
* rbtree_map_insert_bst -- (internal) inserts a node in regular BST fashion
*/
static void
rbtree_map_insert_bst(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
TOID(struct tree_map_node) parent = D_RO(map)->root;
TOID(struct tree_map_node) *dst = &RB_FIRST(map);
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
D_RW(n)->slots[RB_LEFT] = s;
D_RW(n)->slots[RB_RIGHT] = s;
while (!NODE_IS_NULL(*dst)) {
parent = *dst;
dst = &D_RW(*dst)->slots[D_RO(n)->key > D_RO(*dst)->key];
}
TX_SET(n, parent, parent);
pmemobj_tx_add_range_direct(dst, sizeof(*dst));
*dst = n;
}
/*
* rbtree_map_recolor -- (internal) restores red-black tree properties
*/
static TOID(struct tree_map_node)
rbtree_map_recolor(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) n, enum rb_children c)
{
TOID(struct tree_map_node) uncle = D_RO(NODE_GRANDP(n))->slots[!c];
if (D_RO(uncle)->color == COLOR_RED) {
TX_SET(uncle, color, COLOR_BLACK);
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(NODE_GRANDP(n), color, COLOR_RED);
return NODE_GRANDP(n);
} else {
if (NODE_IS(n, !c)) {
n = NODE_P(n);
rbtree_map_rotate(map, n, c);
}
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(NODE_GRANDP(n), color, COLOR_RED);
rbtree_map_rotate(map, NODE_GRANDP(n), (enum rb_children)!c);
}
return n;
}
/*
* rbtree_map_insert -- inserts a new key-value pair into the map
*/
int
rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value)
{
int ret = 0;
TX_BEGIN(pop) {
TOID(struct tree_map_node) n = TX_ZNEW(struct tree_map_node);
D_RW(n)->key = key;
D_RW(n)->value = value;
rbtree_map_insert_bst(map, n);
D_RW(n)->color = COLOR_RED;
while (D_RO(NODE_P(n))->color == COLOR_RED)
n = rbtree_map_recolor(map, n, (enum rb_children)
NODE_LOCATION(NODE_P(n)));
TX_SET(RB_FIRST(map), color, COLOR_BLACK);
} TX_END
return ret;
}
/*
* rbtree_map_successor -- (internal) returns the successor of a node
*/
static TOID(struct tree_map_node)
rbtree_map_successor(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
TOID(struct tree_map_node) dst = D_RO(n)->slots[RB_RIGHT];
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
if (!TOID_EQUALS(s, dst)) {
while (!NODE_IS_NULL(D_RO(dst)->slots[RB_LEFT]))
dst = D_RO(dst)->slots[RB_LEFT];
} else {
dst = D_RO(n)->parent;
while (TOID_EQUALS(n, D_RO(dst)->slots[RB_RIGHT])) {
n = dst;
dst = NODE_P(dst);
}
if (TOID_EQUALS(dst, D_RO(map)->root))
return s;
}
return dst;
}
/*
* rbtree_map_find_node -- (internal) returns the node that contains the key
*/
static TOID(struct tree_map_node)
rbtree_map_find_node(TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) dst = RB_FIRST(map);
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
while (!NODE_IS_NULL(dst)) {
if (D_RO(dst)->key == key)
return dst;
dst = D_RO(dst)->slots[key > D_RO(dst)->key];
}
return TOID_NULL(struct tree_map_node);
}
/*
* rbtree_map_repair_branch -- (internal) restores red-black tree in one branch
*/
static TOID(struct tree_map_node)
rbtree_map_repair_branch(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) n, enum rb_children c)
{
TOID(struct tree_map_node) sb = NODE_PARENT_AT(n, !c); /* sibling */
if (D_RO(sb)->color == COLOR_RED) {
TX_SET(sb, color, COLOR_BLACK);
TX_SET(NODE_P(n), color, COLOR_RED);
rbtree_map_rotate(map, NODE_P(n), c);
sb = NODE_PARENT_AT(n, !c);
}
if (D_RO(D_RO(sb)->slots[RB_RIGHT])->color == COLOR_BLACK &&
D_RO(D_RO(sb)->slots[RB_LEFT])->color == COLOR_BLACK) {
TX_SET(sb, color, COLOR_RED);
return D_RO(n)->parent;
} else {
if (D_RO(D_RO(sb)->slots[!c])->color == COLOR_BLACK) {
TX_SET(D_RW(sb)->slots[c], color, COLOR_BLACK);
TX_SET(sb, color, COLOR_RED);
rbtree_map_rotate(map, sb, (enum rb_children)!c);
sb = NODE_PARENT_AT(n, !c);
}
TX_SET(sb, color, D_RO(NODE_P(n))->color);
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(D_RW(sb)->slots[!c], color, COLOR_BLACK);
rbtree_map_rotate(map, NODE_P(n), c);
return RB_FIRST(map);
}
}
/*
* rbtree_map_repair -- (internal) restores red-black tree properties
* after remove
*/
static void
rbtree_map_repair(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
/* if left, repair right sibling, otherwise repair left sibling. */
while (!TOID_EQUALS(n, RB_FIRST(map)) && D_RO(n)->color == COLOR_BLACK)
n = rbtree_map_repair_branch(map, n, (enum rb_children)
NODE_LOCATION(n));
TX_SET(n, color, COLOR_BLACK);
}
/*
* rbtree_map_remove -- removes key-value pair from the map
*/
PMEMoid
rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
PMEMoid ret = OID_NULL;
TOID(struct tree_map_node) n = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(n))
return ret;
ret = D_RO(n)->value;
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
TOID(struct tree_map_node) r = D_RO(map)->root;
TOID(struct tree_map_node) y = (NODE_IS_NULL(D_RO(n)->slots[RB_LEFT]) ||
NODE_IS_NULL(D_RO(n)->slots[RB_RIGHT]))
? n : rbtree_map_successor(map, n);
TOID(struct tree_map_node) x = NODE_IS_NULL(D_RO(y)->slots[RB_LEFT]) ?
D_RO(y)->slots[RB_RIGHT] : D_RO(y)->slots[RB_LEFT];
TX_BEGIN(pop) {
TX_SET(x, parent, NODE_P(y));
if (TOID_EQUALS(NODE_P(x), r)) {
TX_SET(r, slots[RB_LEFT], x);
} else {
TX_SET(NODE_P(y), slots[NODE_LOCATION(y)], x);
}
if (D_RO(y)->color == COLOR_BLACK)
rbtree_map_repair(map, x);
if (!TOID_EQUALS(y, n)) {
TX_ADD(y);
D_RW(y)->slots[RB_LEFT] = D_RO(n)->slots[RB_LEFT];
D_RW(y)->slots[RB_RIGHT] = D_RO(n)->slots[RB_RIGHT];
D_RW(y)->parent = D_RO(n)->parent;
D_RW(y)->color = D_RO(n)->color;
TX_SET(D_RW(n)->slots[RB_LEFT], parent, y);
TX_SET(D_RW(n)->slots[RB_RIGHT], parent, y);
TX_SET(NODE_P(n), slots[NODE_LOCATION(n)], y);
}
TX_FREE(n);
} TX_END
return ret;
}
/*
 * rbtree_map_get -- searches for the value of the given key
*/
PMEMoid
rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) node = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(node))
return OID_NULL;
return D_RO(node)->value;
}
/*
 * rbtree_map_lookup -- checks whether the given key exists
*/
int
rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) node = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(node))
return 0;
return 1;
}
/*
* rbtree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
rbtree_map_foreach_node(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) p,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
int ret = 0;
if (TOID_EQUALS(p, D_RO(map)->sentinel))
return 0;
if ((ret = rbtree_map_foreach_node(map,
D_RO(p)->slots[RB_LEFT], cb, arg)) == 0) {
if ((ret = cb(D_RO(p)->key, D_RO(p)->value, arg)) == 0)
			ret = rbtree_map_foreach_node(map,
				D_RO(p)->slots[RB_RIGHT], cb, arg);
}
return ret;
}
/*
* rbtree_map_foreach -- initiates recursive traversal
*/
int
rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
return rbtree_map_foreach_node(map, RB_FIRST(map), cb, arg);
}
/*
* rbtree_map_is_empty -- checks whether the tree map is empty
*/
int
rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
return TOID_IS_NULL(RB_FIRST(map));
}
/*
* rbtree_map_check -- check if given persistent object is a tree map
*/
int
rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/*
* rbtree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
rbtree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_remove_free -- removes and frees an object from the tree
*/
int
rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = rbtree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
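/*
 * Usage sketch: walking the tree with a callback. 'pop' stands for an
 * already-open pmemobj pool (hypothetical here); returning non-zero from
 * the callback stops the walk. The snippet assumes <stdio.h>:
 *
 *	static int
 *	print_entry(uint64_t key, PMEMoid value, void *arg)
 *	{
 *		printf("key %llu\n", (unsigned long long)key);
 *		return 0;
 *	}
 *
 *	TOID(struct rbtree_map) map;
 *	rbtree_map_create(pop, &map, NULL);
 *	rbtree_map_insert(pop, map, 1, OID_NULL);
 *	rbtree_map_foreach(pop, map, print_entry, NULL);
 *	rbtree_map_destroy(pop, &map);
 */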
| 13,791 | 23.761221 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmempool/manpage.c | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* manpage.c -- simple example for the libpmempool man page
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <libpmempool.h>
#define PATH "./pmem-fs/myfile"
#define CHECK_FLAGS (PMEMPOOL_CHECK_FORMAT_STR|PMEMPOOL_CHECK_REPAIR|\
PMEMPOOL_CHECK_VERBOSE)
int
main(int argc, char *argv[])
{
PMEMpoolcheck *ppc;
struct pmempool_check_status *status;
enum pmempool_check_result ret;
/* arguments for check */
struct pmempool_check_args args = {
.path = PATH,
.backup_path = NULL,
.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
.flags = CHECK_FLAGS
};
/* initialize check context */
if ((ppc = pmempool_check_init(&args, sizeof(args))) == NULL) {
perror("pmempool_check_init");
exit(EXIT_FAILURE);
}
/* perform check and repair, answer 'yes' for each question */
while ((status = pmempool_check(ppc)) != NULL) {
switch (status->type) {
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
printf("%s\n", status->str.msg);
status->str.answer = "yes";
break;
default:
pmempool_check_end(ppc);
exit(EXIT_FAILURE);
}
}
/* finalize the check and get the result */
ret = pmempool_check_end(ppc);
switch (ret) {
case PMEMPOOL_CHECK_RESULT_CONSISTENT:
case PMEMPOOL_CHECK_RESULT_REPAIRED:
return 0;
default:
return 1;
}
}
| 3,070 | 30.659794 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmem/simple_copy.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* simple_copy.c -- show how to use pmem_memcpy_persist()
*
* usage: simple_copy src-file dst-file
*
* Reads 4k from src-file and writes it to dst-file.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* just copying 4k to pmem for this example */
#define BUF_LEN 4096
int
main(int argc, char *argv[])
{
int srcfd;
char buf[BUF_LEN];
char *pmemaddr;
size_t mapped_len;
int is_pmem;
int cc;
if (argc != 3) {
fprintf(stderr, "usage: %s src-file dst-file\n", argv[0]);
exit(1);
}
/* open src-file */
if ((srcfd = open(argv[1], O_RDONLY)) < 0) {
perror(argv[1]);
exit(1);
}
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(argv[2], BUF_LEN,
PMEM_FILE_CREATE|PMEM_FILE_EXCL,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* read up to BUF_LEN from srcfd */
if ((cc = read(srcfd, buf, BUF_LEN)) < 0) {
pmem_unmap(pmemaddr, mapped_len);
perror("read");
exit(1);
}
/* write it to the pmem */
if (is_pmem) {
pmem_memcpy_persist(pmemaddr, buf, cc);
} else {
memcpy(pmemaddr, buf, cc);
pmem_msync(pmemaddr, cc);
}
close(srcfd);
pmem_unmap(pmemaddr, mapped_len);
exit(0);
}
| 2,975 | 26.813084 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmem/full_copy.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* full_copy.c -- show how to use pmem_memcpy_nodrain()
*
* usage: full_copy src-file dst-file
*
* Copies src-file to dst-file in 4k chunks.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* copying 4k at a time to pmem for this example */
#define BUF_LEN 4096
/*
* do_copy_to_pmem -- copy to pmem, postponing drain step until the end
*/
static void
do_copy_to_pmem(char *pmemaddr, int srcfd, off_t len)
{
char buf[BUF_LEN];
int cc;
/* copy the file, saving the last flush step to the end */
while ((cc = read(srcfd, buf, BUF_LEN)) > 0) {
pmem_memcpy_nodrain(pmemaddr, buf, cc);
pmemaddr += cc;
}
if (cc < 0) {
perror("read");
exit(1);
}
/* perform final flush step */
pmem_drain();
}
/*
* do_copy_to_non_pmem -- copy to a non-pmem memory mapped file
*/
static void
do_copy_to_non_pmem(char *addr, int srcfd, off_t len)
{
char *startaddr = addr;
char buf[BUF_LEN];
int cc;
/* copy the file, saving the last flush step to the end */
while ((cc = read(srcfd, buf, BUF_LEN)) > 0) {
memcpy(addr, buf, cc);
addr += cc;
}
if (cc < 0) {
perror("read");
exit(1);
}
/* flush it */
if (pmem_msync(startaddr, len) < 0) {
perror("pmem_msync");
exit(1);
}
}
int
main(int argc, char *argv[])
{
int srcfd;
struct stat stbuf;
char *pmemaddr;
size_t mapped_len;
int is_pmem;
if (argc != 3) {
fprintf(stderr, "usage: %s src-file dst-file\n", argv[0]);
exit(1);
}
/* open src-file */
if ((srcfd = open(argv[1], O_RDONLY)) < 0) {
perror(argv[1]);
exit(1);
}
/* find the size of the src-file */
if (fstat(srcfd, &stbuf) < 0) {
perror("fstat");
exit(1);
}
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(argv[2], stbuf.st_size,
PMEM_FILE_CREATE|PMEM_FILE_EXCL,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* determine if range is true pmem, call appropriate copy routine */
if (is_pmem)
do_copy_to_pmem(pmemaddr, srcfd, stbuf.st_size);
else
do_copy_to_non_pmem(pmemaddr, srcfd, stbuf.st_size);
close(srcfd);
pmem_unmap(pmemaddr, mapped_len);
exit(0);
}
| 3,894 | 24.129032 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmem/manpage.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* manpage.c -- simple example for the libpmem man page
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* using 4k of pmem for this example */
#define PMEM_LEN 4096
#define PATH "/pmem-fs/myfile"
int
main(int argc, char *argv[])
{
char *pmemaddr;
size_t mapped_len;
int is_pmem;
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(PATH, PMEM_LEN, PMEM_FILE_CREATE,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* store a string to the persistent memory */
strcpy(pmemaddr, "hello, persistent memory");
/* flush above strcpy to persistence */
if (is_pmem)
pmem_persist(pmemaddr, mapped_len);
else
pmem_msync(pmemaddr, mapped_len);
/*
* Delete the mappings. The region is also
* automatically unmapped when the process is
* terminated.
*/
pmem_unmap(pmemaddr, mapped_len);
}
| 2,656 | 29.895349 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libvmem/manpage.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* manpage.c -- simple example for the libvmem man page
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libvmem.h>
int
main(int argc, char *argv[])
{
VMEM *vmp;
char *ptr;
/* create minimum size pool of memory */
if ((vmp = vmem_create("/pmem-fs",
VMEM_MIN_POOL)) == NULL) {
perror("vmem_create");
exit(1);
}
if ((ptr = vmem_malloc(vmp, 100)) == NULL) {
perror("vmem_malloc");
exit(1);
}
strcpy(ptr, "hello, world");
/* give the memory back */
vmem_free(vmp, ptr);
/* ... */
vmem_delete(vmp);
}
| 2,158 | 30.289855 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libvmem/libart/art.h | /*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2012, Armon Dadgar. All rights reserved.
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ==========================================================================
*
* Filename: art.h
*
* Description: implement ART tree using libvmem based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
* ==========================================================================
*/
/*
* based on https://github.com/armon/libart/src/art.h
*/
#include <stdint.h>
#ifndef ART_H
#define ART_H
#ifdef __cplusplus
extern "C" {
#endif
#define NODE4 1
#define NODE16 2
#define NODE48 3
#define NODE256 4
#define MAX_PREFIX_LEN 10
#if defined(__GNUC__) && !defined(__clang__)
#if __STDC_VERSION__ >= 199901L && 402 == (__GNUC__ * 100 + __GNUC_MINOR__)
/*
* GCC 4.2.2's C99 inline keyword support is pretty broken; avoid. Introduced in
* GCC 4.2.something, fixed in 4.3.0. So checking for specific major.minor of
* 4.2 is fine.
*/
#define BROKEN_GCC_C99_INLINE
#endif
#endif
typedef int(*art_callback)(void *data, const unsigned char *key,
uint32_t key_len, const unsigned char *value,
uint32_t val_len);
/*
* This struct is included as part
* of all the various node sizes
*/
typedef struct {
uint8_t type;
uint8_t num_children;
uint32_t partial_len;
unsigned char partial[MAX_PREFIX_LEN];
} art_node;
/*
* Small node with only 4 children
*/
typedef struct {
art_node n;
unsigned char keys[4];
art_node *children[4];
} art_node4;
/*
* Node with 16 children
*/
typedef struct {
art_node n;
unsigned char keys[16];
art_node *children[16];
} art_node16;
/*
* Node with 48 children, but
* a full 256 byte field.
*/
typedef struct {
art_node n;
unsigned char keys[256];
art_node *children[48];
} art_node48;
/*
* Full node with 256 children
*/
typedef struct {
art_node n;
art_node *children[256];
} art_node256;
/*
* Represents a leaf. These are
* of arbitrary size, as they include the key.
*/
typedef struct {
uint32_t key_len;
uint32_t val_len;
unsigned char *key;
unsigned char *value;
unsigned char data[];
} art_leaf;
/*
* Main struct, points to root.
*/
typedef struct {
art_node *root;
uint64_t size;
} art_tree;
/*
* Initializes an ART tree
* @return 0 on success.
*/
int art_tree_init(art_tree *t);
/*
* DEPRECATED
* Initializes an ART tree
* @return 0 on success.
*/
#define init_art_tree(...) art_tree_init(__VA_ARGS__)
/*
* Destroys an ART tree
* @return 0 on success.
*/
int art_tree_destroy(VMEM *vmp, art_tree *t);
/*
* Returns the size of the ART tree.
*/
#ifdef BROKEN_GCC_C99_INLINE
#define art_size(t) ((t)->size)
#else
static inline uint64_t art_size(art_tree *t) {
return t->size;
}
#endif
/*
* Inserts a new value into the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @arg value Opaque value.
* @return NULL if the item was newly inserted, otherwise
* the old value pointer is returned.
*/
void *art_insert(VMEM *vmp, art_tree *t, const unsigned char *key,
int key_len, void *value, int val_len);
/*
* Deletes a value from the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @return NULL if the item was not found, otherwise
* the value pointer is returned.
*/
void *art_delete(VMEM *vmp, art_tree *t, const unsigned char *key,
int key_len);
/*
* Searches for a value in the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @return NULL if the item was not found, otherwise
* the value pointer is returned.
*/
void *art_search(const art_tree *t, const unsigned char *key, int key_len);
/*
* Returns the minimum valued leaf
* @return The minimum leaf or NULL
*/
art_leaf *art_minimum(art_tree *t);
/*
* Returns the maximum valued leaf
* @return The maximum leaf or NULL
*/
art_leaf *art_maximum(art_tree *t);
/*
* Iterates through the entries pairs in the map,
 * invoking a callback for each. The callback gets a
* key, value for each and returns an integer stop value.
* If the callback returns non-zero, then the iteration stops.
* @arg t The tree to iterate over
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int art_iter(art_tree *t, art_callback cb, void *data);
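/*
 * art_iter2 -- variant of art_iter that, judging by the fields of the
 * cb_data record below, also exposes per-node traversal state (node type,
 * child index, first-child flag) to the callback; see the implementation
 * for the exact contract.
 */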
typedef struct _cb_data {
int node_type;
int child_idx;
int first_child;
void *node;
} cb_data;
int art_iter2(art_tree *t, art_callback cb, void *data);
/*
* Iterates through the entries pairs in the map,
* invoking a callback for each that matches a given prefix.
 * The callback gets a key, value for each and returns an integer stop value.
* If the callback returns non-zero, then the iteration stops.
* @arg t The tree to iterate over
* @arg prefix The prefix of keys to read
* @arg prefix_len The length of the prefix
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int art_iter_prefix(art_tree *t, const unsigned char *prefix, int prefix_len,
art_callback cb, void *data);
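/*
 * Usage sketch: the tree allocates its nodes from a libvmem pool, so
 * include <libvmem.h> before this header and create a pool first. The
 * pool directory "/pmem-fs" is an assumption, and the key/value lengths
 * shown include the terminating NUL; adjust to your keying convention:
 *
 *	VMEM *vmp = vmem_create("/pmem-fs", VMEM_MIN_POOL);
 *	art_tree t;
 *	art_tree_init(&t);
 *	art_insert(vmp, &t, (const unsigned char *)"key", 4,
 *			(void *)"value", 6);
 *	void *val = art_search(&t, (const unsigned char *)"key", 4);
 *	art_tree_destroy(vmp, &t);
 *	vmem_delete(vmp);
 */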
#ifdef __cplusplus
}
#endif
#endif
| 6,963 | 25.279245 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libvmem/libart/arttree.h | /*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ==========================================================================
*
* Filename: arttree.h
*
* Description: implement ART tree using libvmem based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
* ==========================================================================
*/
#ifndef _ARTTREE_H
#define _ARTTREE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "art.h"
#ifdef __cplusplus
}
#endif
#endif /* _ARTTREE_H */
| 2,239 | 35.721311 | 77 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_ssh.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_ssh.h -- rpmem ssh transport layer header file
*/
#ifndef RPMEM_SSH_H
#define RPMEM_SSH_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_ssh;
struct rpmem_ssh *rpmem_ssh_open(const struct rpmem_target_info *info);
struct rpmem_ssh *rpmem_ssh_exec(const struct rpmem_target_info *info, ...);
struct rpmem_ssh *rpmem_ssh_execv(const struct rpmem_target_info *info,
const char **argv);
int rpmem_ssh_close(struct rpmem_ssh *rps);
int rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len);
int rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len);
int rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock);
const char *rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno);
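/*
 * Usage sketch, mirroring how rpmem_remove() in rpmem.c drives this
 * module. The target string and the remote arguments are placeholders;
 * rpmem_target_parse()/rpmem_target_free() come from rpmem_common.h and
 * the argument vector must be NULL-terminated:
 *
 *	struct rpmem_target_info *info = rpmem_target_parse("user@host");
 *	const char *argv[] = { "--remove", "pool.set", NULL };
 *	struct rpmem_ssh *ssh = rpmem_ssh_execv(info, argv);
 *	if (ssh) {
 *		rpmem_ssh_monitor(ssh, 0);
 *		rpmem_ssh_close(ssh);
 *	}
 *	rpmem_target_free(info);
 */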
#ifdef __cplusplus
}
#endif
#endif
| 2,381 | 36.21875 | 76 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_fip.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_fip.h -- rpmem libfabric provider module header file
*/
#ifndef RPMEM_FIP_H
#define RPMEM_FIP_H
#include <stdint.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_fip;
struct rpmem_fip_attr {
enum rpmem_provider provider;
enum rpmem_persist_method persist_method;
void *laddr;
size_t size;
size_t buff_size;
unsigned nlanes;
void *raddr;
uint64_t rkey;
};
struct rpmem_fip *rpmem_fip_init(const char *node, const char *service,
struct rpmem_fip_attr *attr, unsigned *nlanes);
void rpmem_fip_fini(struct rpmem_fip *fip);
int rpmem_fip_connect(struct rpmem_fip *fip);
int rpmem_fip_close(struct rpmem_fip *fip);
int rpmem_fip_process_start(struct rpmem_fip *fip);
int rpmem_fip_process_stop(struct rpmem_fip *fip);
int rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags);
int rpmem_fip_read(struct rpmem_fip *fip, void *buff,
size_t len, size_t off, unsigned lane);
void rpmem_fip_probe_fork_safety(int *fork_unsafe);
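/*
 * Lifecycle sketch: the expected call order, mirroring how librpmem's
 * rpmem.c drives this module. The node, service and attribute values are
 * placeholders taken from a hypothetical open/create response 'resp';
 * RPMEM_PROV_LIBFABRIC_VERBS, RPMEM_PM_GPSPM and RPMEM_PERSIST_SEND come
 * from the common rpmem headers:
 *
 *	unsigned nlanes = 4;
 *	struct rpmem_fip_attr attr = {
 *		.provider = RPMEM_PROV_LIBFABRIC_VERBS,
 *		.persist_method = RPMEM_PM_GPSPM,
 *		.laddr = pool_addr,
 *		.size = pool_size,
 *		.buff_size = 8192,
 *		.nlanes = nlanes,
 *		.raddr = (void *)resp.raddr,
 *		.rkey = resp.rkey,
 *	};
 *	struct rpmem_fip *fip = rpmem_fip_init("hostname", "7636",
 *			&attr, &nlanes);
 *	rpmem_fip_connect(fip);
 *	rpmem_fip_persist(fip, 4096, 64, 0, RPMEM_PERSIST_SEND);
 *	rpmem_fip_close(fip);
 *	rpmem_fip_fini(fip);
 */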
#ifdef __cplusplus
}
#endif
#endif
| 2,714 | 31.710843 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem.c -- main source file for librpmem
*/
#include <stdlib.h>
#include <netdb.h>
#include <stdio.h>
#include <errno.h>
#include <limits.h>
#include <inttypes.h>
#include "librpmem.h"
#include "out.h"
#include "os.h"
#include "os_thread.h"
#include "util.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#include "rpmem_fip.h"
#include "rpmem_fip_common.h"
#include "rpmem_ssh.h"
#include "rpmem_proto.h"
#define RPMEM_REMOVE_FLAGS_ALL (\
RPMEM_REMOVE_FORCE | \
RPMEM_REMOVE_POOL_SET \
)
#define RPMEM_CHECK_FORK() do {\
if (Rpmem_fork_unsafe) {\
ERR("libfabric is initialized without fork() support");\
return NULL;\
}\
} while (0)
/*
* rpmem_pool -- remote pool context
*/
struct rpmem_pool {
struct rpmem_obc *obc; /* out-of-band connection handle */
struct rpmem_fip *fip; /* fabric provider handle */
struct rpmem_target_info *info;
char fip_service[NI_MAXSERV];
enum rpmem_provider provider;
os_thread_t monitor;
int closing;
int no_headers;
/*
	 * Last error code; it needs to be volatile because it can
* be accessed by multiple threads.
*/
volatile int error;
};
/*
* env_get_bool -- parse value of specified environment variable as a bool
*
* Return values:
* 0 - defined, valp has value
* 1 - not defined
* -1 - parsing error
*/
static int
env_get_bool(const char *name, int *valp)
{
LOG(3, "name %s, valp %p", name, valp);
const char *env = os_getenv(name);
if (!env)
return 1;
char *endptr;
errno = 0;
long val = strtol(env, &endptr, 10);
if (*endptr != '\0' || errno)
goto err;
if (val < INT_MIN || val > INT_MAX)
goto err;
*valp = (int)val;
return 0;
err:
RPMEM_LOG(ERR, "!parsing '%s' environment variable failed", name);
return -1;
}
/*
* rpmem_get_provider -- returns provider based on node address and environment
*/
static enum rpmem_provider
rpmem_get_provider(const char *node)
{
LOG(3, "node %s", node);
struct rpmem_fip_probe probe;
enum rpmem_provider prov = RPMEM_PROV_UNKNOWN;
int ret = rpmem_fip_probe_get(node, &probe);
if (ret)
return prov;
/*
	 * The sockets provider can be used only if the corresponding
	 * environment variable is set to 1.
*/
if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_SOCKETS)) {
int enable;
ret = env_get_bool(RPMEM_PROV_SOCKET_ENV, &enable);
if (!ret && enable) {
prov = RPMEM_PROV_LIBFABRIC_SOCKETS;
}
}
/*
	 * The verbs provider is enabled by default. If the appropriate
	 * environment variable is set to 0, the verbs provider is disabled.
	 *
	 * The verbs provider has higher priority than the sockets provider.
*/
if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS)) {
int enable;
ret = env_get_bool(RPMEM_PROV_VERBS_ENV, &enable);
if (ret == 1 || (!ret && enable))
prov = RPMEM_PROV_LIBFABRIC_VERBS;
}
return prov;
}
/*
* rpmem_monitor_thread -- connection monitor background thread
*/
static void *
rpmem_monitor_thread(void *arg)
{
LOG(3, "arg %p", arg);
RPMEMpool *rpp = arg;
int ret = rpmem_obc_monitor(rpp->obc, 0);
if (ret && !rpp->closing) {
RPMEM_LOG(ERR, "unexpected data received");
rpp->error = errno;
}
return NULL;
}
/*
* rpmem_common_init -- common routine for initialization
*/
static RPMEMpool *
rpmem_common_init(const char *target)
{
LOG(3, "target %s", target);
int ret;
RPMEMpool *rpp = calloc(1, sizeof(*rpp));
if (!rpp) {
ERR("!calloc");
goto err_malloc_rpmem;
}
rpp->info = rpmem_target_parse(target);
if (!rpp->info) {
ERR("!parsing target node address failed");
goto err_target_split;
}
rpp->provider = rpmem_get_provider(rpp->info->node);
if (rpp->provider == RPMEM_PROV_UNKNOWN) {
errno = ENOMEDIUM;
ERR("cannot find provider");
goto err_provider;
}
RPMEM_LOG(NOTICE, "provider: %s", rpmem_provider_to_str(rpp->provider));
if (rpp->provider == RPMEM_PROV_LIBFABRIC_SOCKETS) {
/* libfabric's sockets provider does not support IPv6 */
RPMEM_LOG(NOTICE, "forcing using IPv4");
rpp->info->flags |= RPMEM_FLAGS_USE_IPV4;
}
rpp->obc = rpmem_obc_init();
if (!rpp->obc) {
ERR("!out-of-band connection initialization failed");
goto err_obc_init;
}
RPMEM_LOG(INFO, "establishing out-of-band connection");
ret = rpmem_obc_connect(rpp->obc, rpp->info);
if (ret) {
ERR("!out-of-band connection failed");
goto err_obc_connect;
}
RPMEM_LOG(NOTICE, "out-of-band connection established");
return rpp;
err_obc_connect:
rpmem_obc_fini(rpp->obc);
err_obc_init:
err_provider:
rpmem_target_free(rpp->info);
err_target_split:
free(rpp);
err_malloc_rpmem:
return NULL;
}
/*
 * rpmem_common_fini -- common routine for deinitialization
*/
static void
rpmem_common_fini(RPMEMpool *rpp, int join)
{
LOG(3, "rpp %p, join %d", rpp, join);
rpmem_obc_disconnect(rpp->obc);
if (join) {
int ret = os_thread_join(&rpp->monitor, NULL);
if (ret) {
errno = ret;
ERR("joining monitor thread failed");
}
}
rpmem_obc_fini(rpp->obc);
rpmem_target_free(rpp->info);
free(rpp);
}
/*
* rpmem_common_fip_init -- common routine for initializing fabric provider
*/
static int
rpmem_common_fip_init(RPMEMpool *rpp, struct rpmem_req_attr *req,
struct rpmem_resp_attr *resp, void *pool_addr, size_t pool_size,
unsigned *nlanes, size_t buff_size)
{
LOG(3, "rpp %p, req %p, resp %p, pool_addr %p, pool_size %zu, nlanes "
"%p", rpp, req, resp, pool_addr, pool_size, nlanes);
int ret;
struct rpmem_fip_attr fip_attr = {
.provider = req->provider,
.persist_method = resp->persist_method,
.laddr = pool_addr,
.size = pool_size,
.buff_size = buff_size,
.nlanes = min(*nlanes, resp->nlanes),
.raddr = (void *)resp->raddr,
.rkey = resp->rkey,
};
ret = snprintf(rpp->fip_service, sizeof(rpp->fip_service),
"%u", resp->port);
if (ret <= 0) {
ERR("snprintf: %d", ret);
goto err_port;
}
rpp->fip = rpmem_fip_init(rpp->info->node, rpp->fip_service,
&fip_attr, nlanes);
if (!rpp->fip) {
ERR("!in-band connection initialization failed");
ret = -1;
goto err_fip_init;
}
RPMEM_LOG(NOTICE, "final nlanes: %u", *nlanes);
RPMEM_LOG(INFO, "establishing in-band connection");
ret = rpmem_fip_connect(rpp->fip);
if (ret) {
ERR("!establishing in-band connection failed");
goto err_fip_connect;
}
RPMEM_LOG(NOTICE, "in-band connection established");
return 0;
err_fip_connect:
rpmem_fip_fini(rpp->fip);
err_fip_init:
err_port:
return ret;
}
/*
* rpmem_common_fip_fini -- common routine for deinitializing fabric provider
*/
static void
rpmem_common_fip_fini(RPMEMpool *rpp)
{
LOG(3, "rpp %p", rpp);
RPMEM_LOG(INFO, "closing in-band connection");
rpmem_fip_fini(rpp->fip);
RPMEM_LOG(NOTICE, "in-band connection closed");
}
/*
* rpmem_log_args -- log input arguments for rpmem_create and rpmem_open
*/
static void
rpmem_log_args(const char *req, const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned nlanes)
{
LOG(3, "req %s, target %s, pool_set_name %s, pool_addr %p, pool_size "
"%zu, nlanes %d", req, target, pool_set_name, pool_addr,
pool_size, nlanes);
RPMEM_LOG(NOTICE, "%s request:", req);
RPMEM_LOG(NOTICE, "\ttarget: %s", target);
RPMEM_LOG(NOTICE, "\tpool set: %s", pool_set_name);
RPMEM_LOG(INFO, "\tpool addr: %p", pool_addr);
RPMEM_LOG(INFO, "\tpool size: %lu", pool_size);
RPMEM_LOG(NOTICE, "\tnlanes: %u", nlanes);
}
/*
* rpmem_log_resp -- log response attributes
*/
static void
rpmem_log_resp(const char *req, const struct rpmem_resp_attr *resp)
{
LOG(3, "req %s, resp %p", req, resp);
RPMEM_LOG(NOTICE, "%s request response:", req);
RPMEM_LOG(NOTICE, "\tnlanes: %u", resp->nlanes);
RPMEM_LOG(NOTICE, "\tport: %u", resp->port);
RPMEM_LOG(NOTICE, "\tpersist method: %s",
rpmem_persist_method_to_str(resp->persist_method));
RPMEM_LOG(NOTICE, "\tremote addr: 0x%" PRIx64, resp->raddr);
}
/*
* rpmem_check_args -- validate user's arguments
*/
static int
rpmem_check_args(void *pool_addr, size_t pool_size, unsigned *nlanes)
{
LOG(3, "pool_addr %p, pool_size %zu, nlanes %p", pool_addr, pool_size,
nlanes);
if (!pool_addr) {
errno = EINVAL;
ERR("invalid pool address");
return -1;
}
if (!IS_PAGE_ALIGNED((uintptr_t)pool_addr)) {
errno = EINVAL;
ERR("Pool address must be aligned to page size (%llu)",
Pagesize);
return -1;
}
if (!IS_PAGE_ALIGNED(pool_size)) {
errno = EINVAL;
ERR("Pool size must be aligned to page size (%llu)",
Pagesize);
return -1;
}
if (!pool_size) {
errno = EINVAL;
ERR("invalid pool size");
return -1;
}
if (!nlanes) {
errno = EINVAL;
ERR("lanes pointer cannot be NULL");
return -1;
}
if (!(*nlanes)) {
errno = EINVAL;
ERR("number of lanes must be positive");
return -1;
}
return 0;
}
/*
* rpmem_create -- create remote pool on target node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* pool_addr -- local pool memory address which will be replicated
* pool_size -- required pool size
* nlanes -- number of lanes
* create_attr -- pool attributes used for creating the pool on remote node
*/
RPMEMpool *
rpmem_create(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
const struct rpmem_pool_attr *create_attr)
{
LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, "
"nlanes %p, create_attr %p", target, pool_set_name,
pool_addr, pool_size, nlanes, create_attr);
RPMEM_CHECK_FORK();
rpmem_log_args("create", target, pool_set_name,
pool_addr, pool_size, *nlanes);
if (rpmem_check_args(pool_addr, pool_size, nlanes))
return NULL;
RPMEMpool *rpp = rpmem_common_init(target);
if (!rpp)
goto err_common_init;
size_t buff_size = RPMEM_DEF_BUFF_SIZE;
struct rpmem_req_attr req = {
.pool_size = pool_size,
.nlanes = min(*nlanes, Rpmem_max_nlanes),
.provider = rpp->provider,
.pool_desc = pool_set_name,
.buff_size = buff_size,
};
struct rpmem_resp_attr resp;
int ret = rpmem_obc_create(rpp->obc, &req, &resp, create_attr);
if (ret) {
RPMEM_LOG(ERR, "!create request failed");
goto err_obc_create;
}
if (create_attr == NULL ||
util_is_zeroed(create_attr, sizeof(*create_attr)))
rpp->no_headers = 1;
rpmem_log_resp("create", &resp);
ret = rpmem_common_fip_init(rpp, &req, &resp,
pool_addr, pool_size, nlanes, buff_size);
if (ret)
goto err_fip_init;
ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp);
if (ret) {
errno = ret;
ERR("!starting monitor thread");
goto err_monitor;
}
return rpp;
err_monitor:
rpmem_common_fip_fini(rpp);
err_fip_init:
rpmem_obc_close(rpp->obc, RPMEM_CLOSE_FLAGS_REMOVE);
err_obc_create:
rpmem_common_fini(rpp, 0);
err_common_init:
return NULL;
}
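/*
 * Usage sketch: creating, using and closing a replicated pool. The target
 * and pool set name are placeholders; pool_addr must be a page-aligned
 * mapping of pool_size bytes (see rpmem_check_args() above). A zeroed
 * attribute struct makes librpmem skip the pool-header offset check
 * (see rpmem_persist() below), so offset 0 is valid here:
 *
 *	unsigned nlanes = 4;
 *	struct rpmem_pool_attr attr;
 *	memset(&attr, 0, sizeof(attr));
 *	RPMEMpool *rpp = rpmem_create("user@host", "pool.set",
 *			pool_addr, pool_size, &nlanes, &attr);
 *	if (rpp == NULL)
 *		exit(1);
 *	rpmem_persist(rpp, 0, pool_size, 0, 0);
 *	rpmem_close(rpp);
 */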
/*
* rpmem_open -- open remote pool on target node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* pool_addr -- local pool memory address which will be replicated
* pool_size -- required pool size
* nlanes -- number of lanes
* open_attr -- pool attributes, received from remote host
*/
RPMEMpool *
rpmem_open(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
struct rpmem_pool_attr *open_attr)
{
LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, "
"nlanes %p, create_attr %p", target, pool_set_name,
pool_addr, pool_size, nlanes, open_attr);
RPMEM_CHECK_FORK();
rpmem_log_args("open", target, pool_set_name,
pool_addr, pool_size, *nlanes);
if (rpmem_check_args(pool_addr, pool_size, nlanes))
return NULL;
RPMEMpool *rpp = rpmem_common_init(target);
if (!rpp)
goto err_common_init;
size_t buff_size = RPMEM_DEF_BUFF_SIZE;
struct rpmem_req_attr req = {
.pool_size = pool_size,
.nlanes = min(*nlanes, Rpmem_max_nlanes),
.provider = rpp->provider,
.pool_desc = pool_set_name,
.buff_size = buff_size,
};
struct rpmem_resp_attr resp;
int ret = rpmem_obc_open(rpp->obc, &req, &resp, open_attr);
if (ret) {
RPMEM_LOG(ERR, "!open request failed");
goto err_obc_create;
}
if (open_attr == NULL || util_is_zeroed(open_attr, sizeof(*open_attr)))
rpp->no_headers = 1;
rpmem_log_resp("open", &resp);
ret = rpmem_common_fip_init(rpp, &req, &resp,
pool_addr, pool_size, nlanes, buff_size);
if (ret)
goto err_fip_init;
ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp);
if (ret) {
errno = ret;
ERR("!starting monitor thread");
goto err_monitor;
}
return rpp;
err_monitor:
rpmem_common_fip_fini(rpp);
err_fip_init:
rpmem_obc_close(rpp->obc, 0);
err_obc_create:
rpmem_common_fini(rpp, 0);
err_common_init:
return NULL;
}
/*
* rpmem_close -- close remote pool on target node
*/
int
rpmem_close(RPMEMpool *rpp)
{
LOG(3, "rpp %p", rpp);
RPMEM_LOG(INFO, "closing out-of-band connection");
util_fetch_and_or32(&rpp->closing, 1);
rpmem_fip_close(rpp->fip);
int ret = rpmem_obc_close(rpp->obc, 0);
if (ret)
ERR("!close request failed");
RPMEM_LOG(NOTICE, "out-of-band connection closed");
rpmem_common_fip_fini(rpp);
rpmem_common_fini(rpp, 1);
return ret;
}
/*
* rpmem_persist -- persist operation on target node
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of persist operation
* lane -- lane number
*/
int
rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d, flags 0x%x",
rpp, offset, length, lane, flags);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (flags & RPMEM_FLAGS_MASK) {
ERR("invalid flags (0x%x)", flags);
errno = EINVAL;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
/*
* By default use RDMA SEND persist mode which has atomicity
* guarantees. For relaxed persist use RDMA WRITE.
*/
unsigned mode = RPMEM_PERSIST_SEND;
if (flags & RPMEM_PERSIST_RELAXED)
mode = RPMEM_PERSIST_WRITE;
int ret = rpmem_fip_persist(rpp->fip, offset, length,
lane, mode);
if (unlikely(ret)) {
ERR("persist operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
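/*
 * Usage sketch: a strict persist followed by a relaxed one on the same
 * lane. The offsets assume a pool created with non-zero attributes, so
 * user data starts at or above RPMEM_HDR_SIZE:
 *
 *	rpmem_persist(rpp, RPMEM_HDR_SIZE, 64, 0, 0);
 *	rpmem_persist(rpp, RPMEM_HDR_SIZE + 64, 64, 0,
 *			RPMEM_PERSIST_RELAXED);
 */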
/*
* rpmem_deep_persist -- deep flush operation on target node
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of deep flush operation
* lane -- lane number
*/
int
rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d", rpp, offset, length,
lane);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
int ret = rpmem_fip_persist(rpp->fip, offset, length,
lane, RPMEM_DEEP_PERSIST);
if (unlikely(ret)) {
ERR("persist operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
/*
 * rpmem_read -- read data from remote pool
*
* rpp -- remote pool handle
* buff -- output buffer
* offset -- offset in pool
* length -- length of read operation
*/
int
rpmem_read(RPMEMpool *rpp, void *buff, size_t offset,
size_t length, unsigned lane)
{
LOG(3, "rpp %p, buff %p, offset %zu, length %zu, lane %d", rpp, buff,
offset, length, lane);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE)
LOG(1, "reading from pool at offset (%zu) less than %d bytes",
offset, RPMEM_HDR_SIZE);
int ret = rpmem_fip_read(rpp->fip, buff, length, offset, lane);
if (unlikely(ret)) {
errno = ret;
ERR("!read operation failed");
rpp->error = ret;
return -1;
}
return 0;
}
/*
* rpmem_set_attr -- overwrite pool attributes on the remote node
*
* rpp -- remote pool handle
* attr -- new pool attributes for the pool on remote node
*/
int
rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr)
{
LOG(3, "rpp %p, attr %p", rpp, attr);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
int ret = rpmem_obc_set_attr(rpp->obc, attr);
if (ret) {
RPMEM_LOG(ERR, "!set attributes request failed");
}
return ret;
}
/*
* rpmem_remove -- remove pool from remote node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* flags -- bitwise OR of one or more of the following flags:
* - RPMEM_REMOVE_FORCE
* - RPMEM_REMOVE_POOL_SET
*/
int
rpmem_remove(const char *target, const char *pool_set, int flags)
{
LOG(3, "target %s, pool_set %s, flags %d", target, pool_set, flags);
if (flags & ~(RPMEM_REMOVE_FLAGS_ALL)) {
ERR("invalid flags specified");
errno = EINVAL;
return -1;
}
struct rpmem_target_info *info = rpmem_target_parse(target);
if (!info) {
ERR("!parsing target node address failed");
goto err_target;
}
const char *argv[5];
argv[0] = "--remove";
argv[1] = pool_set;
const char **cur = &argv[2];
if (flags & RPMEM_REMOVE_FORCE)
*cur++ = "--force";
if (flags & RPMEM_REMOVE_POOL_SET)
*cur++ = "--pool-set";
*cur = NULL;
struct rpmem_ssh *ssh = rpmem_ssh_execv(info, argv);
if (!ssh) {
ERR("!executing ssh command failed");
goto err_ssh_exec;
}
int ret;
ret = rpmem_ssh_monitor(ssh, 0);
if (ret) {
ERR("!waiting for remote command failed");
goto err_ssh_monitor;
}
ret = rpmem_ssh_close(ssh);
if (ret) {
errno = EINVAL;
ERR("remote command failed");
goto err_ssh_close;
}
rpmem_target_free(info);
return 0;
err_ssh_monitor:
rpmem_ssh_close(ssh);
err_ssh_close:
err_ssh_exec:
rpmem_target_free(info);
err_target:
return -1;
}
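/*
 * Usage sketch (illustrative only, compiled out): forcibly removing a
 * remote pool set. The target string and pool set name are hypothetical.
 */
#if 0
static int
remove_example(void)
{
	return rpmem_remove("user@example-target", "example.poolset",
			RPMEM_REMOVE_FORCE);
}
#endif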
| 19,528 | 22.557298 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem.h | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem.h -- internal definitions for librpmem
*/
#define RPMEM_LOG_PREFIX "librpmem"
#define RPMEM_LOG_LEVEL_VAR "RPMEM_LOG_LEVEL"
#define RPMEM_LOG_FILE_VAR "RPMEM_LOG_FILE"
| 1,784 | 43.625 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_util.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_util.h -- util functions for librpmem header file
*/
#ifndef RPMEM_UTIL_H
#define RPMEM_UTIL_H 1
#ifdef __cplusplus
extern "C" {
#endif
enum {
LERR = 1,
LWARN = 2,
LNOTICE = 3,
LINFO = 4,
_LDBG = 10,
};
#define RPMEM_LOG(level, fmt, args...) LOG(L##level, fmt, ## args)
#define RPMEM_DBG(fmt, args...) LOG(_LDBG, fmt, ## args)
#define RPMEM_FATAL(fmt, args...) FATAL(fmt, ## args)
#define RPMEM_ASSERT(cond) ASSERT(cond)
#define RPMEM_FLAGS_ALL RPMEM_PERSIST_RELAXED
#define RPMEM_FLAGS_MASK ((unsigned)(~RPMEM_FLAGS_ALL))
const char *rpmem_util_proto_errstr(enum rpmem_err err);
int rpmem_util_proto_errno(enum rpmem_err err);
void rpmem_util_cmds_init(void);
void rpmem_util_cmds_fini(void);
const char *rpmem_util_cmd_get(void);
void rpmem_util_get_env_max_nlanes(unsigned *max_nlanes);
#ifdef __cplusplus
}
#endif
#endif
| 2,455 | 32.643836 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_util.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_util.c -- util functions for librpmem source file
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <limits.h>
#include "out.h"
#include "os.h"
#include "librpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
static struct rpmem_err_str_errno {
int err;
const char *str;
} rpmem_err_str_errno[MAX_RPMEM_ERR] = {
[RPMEM_SUCCESS] = {
.err = 0,
.str = "Success",
},
[RPMEM_ERR_BADPROTO] = {
.err = EPROTONOSUPPORT,
.str = "Protocol version number mismatch",
},
[RPMEM_ERR_BADNAME] = {
.err = EINVAL,
.str = "Invalid pool descriptor",
},
[RPMEM_ERR_BADSIZE] = {
.err = EFBIG,
.str = "Invalid pool size",
},
[RPMEM_ERR_BADNLANES] = {
.err = EINVAL,
.str = "Invalid number of lanes",
},
[RPMEM_ERR_BADPROVIDER] = {
.err = EINVAL,
.str = "Invalid provider",
},
[RPMEM_ERR_FATAL] = {
.err = EREMOTEIO,
.str = "Fatal error",
},
[RPMEM_ERR_FATAL_CONN] = {
.err = ECONNABORTED,
.str = "Fatal in-band connection error",
},
[RPMEM_ERR_BUSY] = {
.err = EBUSY,
.str = "Pool already in use",
},
[RPMEM_ERR_EXISTS] = {
.err = EEXIST,
.str = "Pool already exists",
},
[RPMEM_ERR_PROVNOSUP] = {
.err = EMEDIUMTYPE,
.str = "Provider not supported",
},
[RPMEM_ERR_NOEXIST] = {
.err = ENOENT,
.str = "Pool set or its part doesn't exist or it is "
"unavailable",
},
[RPMEM_ERR_NOACCESS] = {
.err = EACCES,
.str = "Pool set permission denied",
},
[RPMEM_ERR_POOL_CFG] = {
.err = EINVAL,
.str = "Invalid pool set configuration",
},
};
static char *Rpmem_cmds;
static char **Rpmem_cmd_arr;
static size_t Rpmem_current_cmd;
static size_t Rpmem_ncmds;
#define RPMEM_CMD_SEPARATOR '|'
/*
* rpmem_util_proto_errstr -- return error string for error code
*/
const char *
rpmem_util_proto_errstr(enum rpmem_err err)
{
RPMEM_ASSERT(err < MAX_RPMEM_ERR);
const char *ret = rpmem_err_str_errno[err].str;
RPMEM_ASSERT(ret);
return ret;
}
/*
* rpmem_util_proto_errno -- return appropriate errno value for error code
*/
int
rpmem_util_proto_errno(enum rpmem_err err)
{
RPMEM_ASSERT(err < MAX_RPMEM_ERR);
return rpmem_err_str_errno[err].err;
}
/*
* rpmem_util_cmds_inc -- increase size of array for rpmem commands
*/
static void
rpmem_util_cmds_inc(void)
{
Rpmem_ncmds++;
Rpmem_cmd_arr = realloc(Rpmem_cmd_arr,
Rpmem_ncmds * sizeof(*Rpmem_cmd_arr));
if (!Rpmem_cmd_arr)
RPMEM_FATAL("!realloc");
}
/*
* rpmem_util_cmds_init -- read a RPMEM_CMD from the environment variable
*/
void
rpmem_util_cmds_init(void)
{
char *cmd = os_getenv(RPMEM_CMD_ENV);
if (!cmd)
cmd = RPMEM_DEF_CMD;
Rpmem_cmds = strdup(cmd);
if (!Rpmem_cmds)
RPMEM_FATAL("!strdup");
char *next = Rpmem_cmds;
while (next) {
rpmem_util_cmds_inc();
Rpmem_cmd_arr[Rpmem_ncmds - 1] = next;
next = strchr(next, RPMEM_CMD_SEPARATOR);
if (next) {
*next = '\0';
next++;
}
}
}
/*
 * rpmem_util_cmds_fini -- release RPMEM_CMD copy
*/
void
rpmem_util_cmds_fini(void)
{
RPMEM_ASSERT(Rpmem_cmds);
RPMEM_ASSERT(Rpmem_cmd_arr);
RPMEM_ASSERT(Rpmem_current_cmd < Rpmem_ncmds);
free(Rpmem_cmds);
Rpmem_cmds = NULL;
free(Rpmem_cmd_arr);
Rpmem_cmd_arr = NULL;
Rpmem_ncmds = 0;
Rpmem_current_cmd = 0;
}
/*
* rpmem_util_cmd_get -- get a next command from RPMEM_CMD
*
* RPMEM_CMD can contain multiple commands separated by RPMEM_CMD_SEPARATOR.
* Commands from RPMEM_CMD are read sequentially and used to establish out of
* band connections to remote nodes in the order read from a poolset file.
*
*/
const char *
rpmem_util_cmd_get(void)
{
RPMEM_ASSERT(Rpmem_cmds);
RPMEM_ASSERT(Rpmem_cmd_arr);
RPMEM_ASSERT(Rpmem_current_cmd < Rpmem_ncmds);
char *ret = Rpmem_cmd_arr[Rpmem_current_cmd];
Rpmem_current_cmd = (Rpmem_current_cmd + 1) % Rpmem_ncmds;
return ret;
}
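/*
 * Illustrative sketch (compiled out): with RPMEM_CMD set to the
 * hypothetical value "ssh -x|my-ssh-wrapper", consecutive calls return
 * the configured commands in round-robin order.
 */
#if 0
static void
cmd_round_robin_example(void)
{
	rpmem_util_cmds_init();
	const char *c0 = rpmem_util_cmd_get();	/* "ssh -x" */
	const char *c1 = rpmem_util_cmd_get();	/* "my-ssh-wrapper" */
	const char *c2 = rpmem_util_cmd_get();	/* "ssh -x" again */
	(void) c0; (void) c1; (void) c2;
	rpmem_util_cmds_fini();
}
#endif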
/*
* rpmem_util_get_env_max_nlanes -- read the maximum number of lanes from
* RPMEM_MAX_NLANES
*/
void
rpmem_util_get_env_max_nlanes(unsigned *max_nlanes)
{
char *env_nlanes = os_getenv(RPMEM_MAX_NLANES_ENV);
if (env_nlanes && env_nlanes[0] != '\0') {
char *endptr;
errno = 0;
long nlanes = strtol(env_nlanes, &endptr, 10);
if (endptr[0] != '\0' || nlanes <= 0 ||
(errno == ERANGE &&
(nlanes == LONG_MAX || nlanes == LONG_MIN))) {
RPMEM_LOG(ERR, "%s variable must be a positive integer",
RPMEM_MAX_NLANES_ENV);
} else {
*max_nlanes = (unsigned)nlanes;
}
}
}
| 6,009 | 22.944223 | 77 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_obc.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc.h -- rpmem out-of-band connection client header file
*/
#ifndef RPMEM_OBC_H
#define RPMEM_OBC_H 1
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_obc;
struct rpmem_obc *rpmem_obc_init(void);
void rpmem_obc_fini(struct rpmem_obc *rpc);
int rpmem_obc_connect(struct rpmem_obc *rpc,
const struct rpmem_target_info *info);
int rpmem_obc_disconnect(struct rpmem_obc *rpc);
int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock);
int rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr);
int rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_close(struct rpmem_obc *rpc, int flags);
#ifdef __cplusplus
}
#endif
#endif
| 2,615 | 32.974026 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/librpmem.c | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* librpmem.c -- entry points for librpmem
*/
#include <stdio.h>
#include <stdint.h>
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_fip.h"
#include "util.h"
#include "out.h"
/*
* librpmem_init -- load-time initialization for librpmem
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
librpmem_init(void)
{
util_init();
out_init(RPMEM_LOG_PREFIX, RPMEM_LOG_LEVEL_VAR, RPMEM_LOG_FILE_VAR,
RPMEM_MAJOR_VERSION, RPMEM_MINOR_VERSION);
LOG(3, NULL);
rpmem_util_cmds_init();
rpmem_util_get_env_max_nlanes(&Rpmem_max_nlanes);
rpmem_fip_probe_fork_safety(&Rpmem_fork_unsafe);
RPMEM_LOG(NOTICE, "Libfabric is %sfork safe",
Rpmem_fork_unsafe ? "not " : "");
}
/*
* librpmem_fini -- librpmem cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
librpmem_fini(void)
{
LOG(3, NULL);
rpmem_util_cmds_fini();
out_fini();
}
/*
* rpmem_check_version -- see if library meets application version requirements
*/
const char *
rpmem_check_version(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != RPMEM_MAJOR_VERSION) {
ERR("librpmem major version mismatch (need %u, found %u)",
major_required, RPMEM_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > RPMEM_MINOR_VERSION) {
ERR("librpmem minor version mismatch (need %u, found %u)",
minor_required, RPMEM_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
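/*
 * Usage sketch (illustrative only, compiled out): an application verifies
 * at startup that the loaded library is compatible with the headers it
 * was built against.
 */
#if 0
static int
check_version_example(void)
{
	const char *msg = rpmem_check_version(RPMEM_MAJOR_VERSION,
			RPMEM_MINOR_VERSION);
	if (msg) {
		fprintf(stderr, "librpmem: %s\n", msg);
		return -1;
	}
	return 0;
}
#endif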
/*
* rpmem_errormsg -- return the last error message
*/
const char *
rpmem_errormsg(void)
{
return out_get_errormsg();
}
| 3,326 | 27.681034 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_obc.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc.c -- rpmem out-of-band connection client source file
*/
#include <stdlib.h>
#include <netdb.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_obc.h"
#include "rpmem_proto.h"
#include "rpmem_util.h"
#include "rpmem_ssh.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
/*
* rpmem_obc -- rpmem out-of-band client connection handle
*/
struct rpmem_obc {
struct rpmem_ssh *ssh;
};
/*
* rpmem_obc_is_connected -- (internal) return non-zero value if client is
* connected
*/
static inline int
rpmem_obc_is_connected(struct rpmem_obc *rpc)
{
return rpc->ssh != NULL;
}
/*
* rpmem_obc_check_ibc_attr -- (internal) check in-band connection
* attributes
*/
static int
rpmem_obc_check_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
if (ibc->port == 0 || ibc->port > UINT16_MAX) {
ERR("invalid port number received -- %u", ibc->port);
errno = EPROTO;
return -1;
}
if (ibc->persist_method != RPMEM_PM_GPSPM &&
ibc->persist_method != RPMEM_PM_APM) {
ERR("invalid persistency method received -- %u",
ibc->persist_method);
errno = EPROTO;
return -1;
}
return 0;
}
/*
* rpmem_obc_check_port -- (internal) verify target node port number
*/
static int
rpmem_obc_check_port(const struct rpmem_target_info *info)
{
if (!(info->flags & RPMEM_HAS_SERVICE))
return 0;
if (*info->service == '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
errno = 0;
char *endptr;
long port = strtol(info->service, &endptr, 10);
if (errno || *endptr != '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
if (port < 1) {
ERR("port number must be positive -- '%s'", info->service);
goto err;
}
if (port > UINT16_MAX) {
ERR("port number too large -- '%s'", info->service);
goto err;
}
return 0;
err:
errno = EINVAL;
return -1;
}
/*
* rpmem_obc_close_conn -- (internal) close connection
*/
static void
rpmem_obc_close_conn(struct rpmem_obc *rpc)
{
rpmem_ssh_close(rpc->ssh);
(void) util_fetch_and_and64(&rpc->ssh, 0);
}
/*
 * rpmem_obc_set_msg_hdr -- (internal) initialize message header
*/
static void
rpmem_obc_set_msg_hdr(struct rpmem_msg_hdr *hdrp,
enum rpmem_msg_type type, size_t size)
{
hdrp->type = type;
hdrp->size = size;
}
/*
* rpmem_obc_set_pool_desc -- (internal) fill the pool descriptor field
*/
static void
rpmem_obc_set_pool_desc(struct rpmem_msg_pool_desc *pool_desc,
const char *desc, size_t size)
{
RPMEM_ASSERT(size <= UINT32_MAX);
RPMEM_ASSERT(size > 0);
pool_desc->size = (uint32_t)size;
memcpy(pool_desc->desc, desc, size);
pool_desc->desc[size - 1] = '\0';
}
/*
* rpmem_obc_alloc_create_msg -- (internal) allocate and fill create request
* message
*/
static struct rpmem_msg_create *
rpmem_obc_alloc_create_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
struct rpmem_msg_create *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate create request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_CREATE, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
if (pool_attr) {
pack_rpmem_pool_attr(pool_attr, &msg->pool_attr);
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg->pool_attr, 0, sizeof(msg->pool_attr));
}
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_req -- (internal) check request attributes
*/
static int
rpmem_obc_check_req(const struct rpmem_req_attr *req)
{
if (req->provider >= MAX_RPMEM_PROV) {
ERR("invalid provider specified -- %u", req->provider);
errno = EINVAL;
return -1;
}
return 0;
}
/*
 * rpmem_obc_check_hdr_resp -- (internal) check response message header
*/
static int
rpmem_obc_check_hdr_resp(struct rpmem_msg_hdr_resp *resp,
enum rpmem_msg_type type, size_t size)
{
if (resp->type != type) {
ERR("invalid message type received -- %u", resp->type);
errno = EPROTO;
return -1;
}
if (resp->size != size) {
ERR("invalid message size received -- %lu", resp->size);
errno = EPROTO;
return -1;
}
if (resp->status >= MAX_RPMEM_ERR) {
ERR("invalid status received -- %u", resp->status);
errno = EPROTO;
return -1;
}
if (resp->status) {
enum rpmem_err status = (enum rpmem_err)resp->status;
ERR("%s", rpmem_util_proto_errstr(status));
errno = rpmem_util_proto_errno(status);
return -1;
}
return 0;
}
/*
* rpmem_obc_check_create_resp -- (internal) check create response message
*/
static int
rpmem_obc_check_create_resp(struct rpmem_msg_create_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CREATE_RESP,
sizeof(struct rpmem_msg_create_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_get_res -- (internal) read response attributes
*/
static void
rpmem_obc_get_res(struct rpmem_resp_attr *res,
struct rpmem_msg_ibc_attr *ibc)
{
res->port = (unsigned short)ibc->port;
res->rkey = ibc->rkey;
res->raddr = ibc->raddr;
res->persist_method =
(enum rpmem_persist_method)ibc->persist_method;
res->nlanes = ibc->nlanes;
}
/*
* rpmem_obc_alloc_open_msg -- (internal) allocate and fill open request message
*/
static struct rpmem_msg_open *
rpmem_obc_alloc_open_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
struct rpmem_msg_open *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate open request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_OPEN, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_open_resp -- (internal) check open response message
*/
static int
rpmem_obc_check_open_resp(struct rpmem_msg_open_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_OPEN_RESP,
sizeof(struct rpmem_msg_open_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_check_close_resp -- (internal) check close response message
*/
static int
rpmem_obc_check_close_resp(struct rpmem_msg_close_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CLOSE_RESP,
sizeof(struct rpmem_msg_close_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_check_set_attr_resp -- (internal) check set attributes response
* message
*/
static int
rpmem_obc_check_set_attr_resp(struct rpmem_msg_set_attr_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_SET_ATTR_RESP,
sizeof(struct rpmem_msg_set_attr_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_init -- initialize rpmem obc handle
*/
struct rpmem_obc *
rpmem_obc_init(void)
{
struct rpmem_obc *rpc = calloc(1, sizeof(*rpc));
if (!rpc) {
RPMEM_LOG(ERR, "!allocation of rpmem obc failed");
return NULL;
}
return rpc;
}
/*
* rpmem_obc_fini -- destroy rpmem obc handle
*
* This function must be called with connection already closed - after calling
* the rpmem_obc_disconnect or after receiving relevant value from
* rpmem_obc_monitor.
*/
void
rpmem_obc_fini(struct rpmem_obc *rpc)
{
free(rpc);
}
/*
* rpmem_obc_connect -- connect to target node
*
* Connects to target node, the target must be in the following format:
* <addr>[:<port>]. If the port number is not specified the default
* ssh port will be used. The <addr> is translated into IP address.
*
* Returns an error if connection is already established.
*/
int
rpmem_obc_connect(struct rpmem_obc *rpc, const struct rpmem_target_info *info)
{
if (rpmem_obc_is_connected(rpc)) {
errno = EALREADY;
goto err_notconnected;
}
if (rpmem_obc_check_port(info))
goto err_port;
rpc->ssh = rpmem_ssh_open(info);
if (!rpc->ssh)
goto err_ssh_open;
return 0;
err_ssh_open:
err_port:
err_notconnected:
return -1;
}
/*
* rpmem_obc_disconnect -- close the connection to target node
*
* Returns error if socket is not connected.
*/
int
rpmem_obc_disconnect(struct rpmem_obc *rpc)
{
if (rpmem_obc_is_connected(rpc)) {
rpmem_obc_close_conn(rpc);
return 0;
}
errno = ENOTCONN;
return -1;
}
/*
* rpmem_obc_monitor -- monitor connection with target node
*
* The nonblock variable indicates whether this function should return
* immediately (= 1) or may block (= 0).
*
* If the function detects that socket was closed by remote peer it is
* closed on local side and set to -1, so there is no need to call
* rpmem_obc_disconnect function. Please take a look at functions'
* descriptions to see which functions cannot be used if the connection
* has been already closed.
*
 * This function expects there is no data pending on the socket; if any
 * data is pending, it returns an error and sets errno to EPROTO.
*
* Return values:
* 0 - not connected
* 1 - connected
* < 0 - error
*/
int
rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock)
{
if (!rpmem_obc_is_connected(rpc))
return 0;
return rpmem_ssh_monitor(rpc->ssh, nonblock);
}
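/*
 * Illustrative sketch (compiled out) of the out-of-band connection
 * life cycle: connect, poll the connection without blocking, then
 * disconnect. Initialization of the target info is elided.
 */
#if 0
static int
obc_lifecycle_example(const struct rpmem_target_info *info)
{
	struct rpmem_obc *rpc = rpmem_obc_init();
	if (!rpc)
		return -1;

	if (rpmem_obc_connect(rpc, info)) {
		rpmem_obc_fini(rpc);
		return -1;
	}

	/* returns 1 while connected, 0 when the peer closed, < 0 on error */
	int conn = rpmem_obc_monitor(rpc, 1 /* nonblock */);

	if (conn == 1)
		rpmem_obc_disconnect(rpc);

	rpmem_obc_fini(rpc);
	return conn == 1 ? 0 : -1;
}
#endif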
/*
* rpmem_obc_create -- perform create request operation
*
* Returns error if connection has not been established yet.
*/
int
rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_create *msg =
rpmem_obc_alloc_create_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending create request message");
rpmem_hton_msg_create(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending create request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "create request message sent");
RPMEM_LOG(INFO, "receiving create request response");
struct rpmem_msg_create_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving create request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "create request response received");
rpmem_ntoh_msg_create_resp(&resp);
if (rpmem_obc_check_create_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_open -- perform open request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_open *msg =
rpmem_obc_alloc_open_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending open request message");
rpmem_hton_msg_open(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending open request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "open request message sent");
RPMEM_LOG(INFO, "receiving open request response");
struct rpmem_msg_open_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) {
ERR("!receiving open request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "open request response received");
rpmem_ntoh_msg_open_resp(&resp);
if (rpmem_obc_check_open_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
if (pool_attr)
unpack_rpmem_pool_attr(&resp.pool_attr, pool_attr);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_set_attr -- perform set attributes request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
struct rpmem_msg_set_attr msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_SET_ATTR, sizeof(msg));
if (pool_attr) {
memcpy(&msg.pool_attr, pool_attr, sizeof(msg.pool_attr));
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg.pool_attr, 0, sizeof(msg.pool_attr));
}
RPMEM_LOG(INFO, "sending set attributes request message");
rpmem_hton_msg_set_attr(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
ERR("!sending set attributes request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "set attributes request message sent");
RPMEM_LOG(INFO, "receiving set attributes request response");
struct rpmem_msg_set_attr_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving set attributes request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "set attributes request response received");
rpmem_ntoh_msg_set_attr_resp(&resp);
if (rpmem_obc_check_set_attr_resp(&resp))
goto err_msg_resp;
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
err_notconnected:
return -1;
}
/*
* rpmem_obc_close -- perform close request operation
*
* Returns error if connection is not already established.
*
* NOTE: this function does not close the connection, but sends close request
* message to remote node and receives a response. The connection must be
* closed using rpmem_obc_disconnect function.
*/
int
rpmem_obc_close(struct rpmem_obc *rpc, int flags)
{
if (!rpmem_obc_is_connected(rpc)) {
errno = ENOTCONN;
return -1;
}
struct rpmem_msg_close msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_CLOSE, sizeof(msg));
msg.flags = (uint32_t)flags;
RPMEM_LOG(INFO, "sending close request message");
rpmem_hton_msg_close(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
RPMEM_LOG(ERR, "!sending close request failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request message sent");
RPMEM_LOG(INFO, "receiving close request response");
struct rpmem_msg_close_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
RPMEM_LOG(ERR, "!receiving close request response failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request response received");
rpmem_ntoh_msg_close_resp(&resp);
if (rpmem_obc_check_close_resp(&resp))
return -1;
return 0;
}
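/*
 * Illustrative sketch (compiled out): rpmem_obc_close() only performs the
 * protocol-level close handshake -- tearing the connection down is a
 * separate, explicit step.
 */
#if 0
static void
obc_shutdown_example(struct rpmem_obc *rpc)
{
	(void) rpmem_obc_close(rpc, 0);		/* close request/response */
	(void) rpmem_obc_disconnect(rpc);	/* actually close the channel */
	rpmem_obc_fini(rpc);
}
#endif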
| 16,926 | 22.908192 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_cmd.c | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_cmd.c -- simple interface for running an executable in child process
*/
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include "util.h"
#include "out.h"
#include "os.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_cmd.h"
/*
* rpmem_cmd_init -- initialize command
*/
struct rpmem_cmd *
rpmem_cmd_init(void)
{
struct rpmem_cmd *cmd = calloc(1, sizeof(*cmd));
if (!cmd) {
RPMEM_LOG(ERR, "allocating command buffer");
goto err_alloc_cmd;
}
return cmd;
err_alloc_cmd:
return NULL;
}
/*
* rpmem_cmd_fini -- deinitialize command
*/
void
rpmem_cmd_fini(struct rpmem_cmd *cmd)
{
for (int i = 0; i < cmd->args.argc; i++)
free(cmd->args.argv[i]);
free(cmd->args.argv);
free(cmd);
}
/*
* rpmem_cmd_push -- push back command's argument
*/
int
rpmem_cmd_push(struct rpmem_cmd *cmd, const char *arg)
{
size_t argv_count = (size_t)cmd->args.argc + 2;
char **argv = realloc(cmd->args.argv, argv_count * sizeof(char *));
if (!argv) {
RPMEM_LOG(ERR, "reallocating command argv");
goto err_realloc;
}
cmd->args.argv = argv;
char *arg_dup = strdup(arg);
if (!arg_dup) {
RPMEM_LOG(ERR, "allocating argument");
goto err_strdup;
}
cmd->args.argv[cmd->args.argc] = arg_dup;
cmd->args.argc++;
cmd->args.argv[cmd->args.argc] = NULL;
return 0;
err_strdup:
err_realloc:
return -1;
}
/*
* rpmem_cmd_log -- print executing command
*/
static void
rpmem_cmd_log(struct rpmem_cmd *cmd)
{
RPMEM_ASSERT(cmd->args.argc > 0);
size_t size = 0;
for (int i = 0; i < cmd->args.argc; i++) {
size += strlen(cmd->args.argv[i]) + 1;
}
char *buff = malloc(size);
if (!buff) {
RPMEM_LOG(ERR, "allocating log buffer for command");
return;
}
size_t pos = 0;
for (int i = 0; pos < size && i < cmd->args.argc; i++) {
int ret = snprintf(&buff[pos], size - pos, "%s%s",
cmd->args.argv[i], i == cmd->args.argc - 1 ?
"" : " ");
if (ret < 0) {
RPMEM_LOG(ERR, "printing command's argument failed");
goto out;
}
pos += (size_t)ret;
}
RPMEM_LOG(INFO, "executing command '%s'", buff);
out:
free(buff);
}
/*
* rpmem_cmd_run -- run command and connect with stdin, stdout and stderr
* using unix sockets.
*
 * The communication with the child process is done via socketpairs on
* stdin, stdout and stderr. The socketpairs are used instead of pipes
* because reading from disconnected pipe causes a SIGPIPE signal.
* When using socketpair it is possible to read data using recv(3)
* function with MSG_NOSIGNAL flag, which doesn't send a signal.
*/
int
rpmem_cmd_run(struct rpmem_cmd *cmd)
{
int fd_in[2];
int fd_out[2];
int fd_err[2];
rpmem_cmd_log(cmd);
/* socketpair for stdin */
int ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_in);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stdin");
goto err_pipe_in;
}
/* parent process stdin socket */
cmd->fd_in = fd_in[1];
/* socketpair for stdout */
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_out);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stdout");
goto err_pipe_out;
}
/* parent process stdout socket */
cmd->fd_out = fd_out[0];
/* socketpair for stderr */
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_err);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stderr");
goto err_pipe_err;
}
/* parent process stderr socket */
cmd->fd_err = fd_err[0];
cmd->pid = fork();
if (cmd->pid == -1) {
RPMEM_LOG(ERR, "forking command");
goto err_fork;
}
if (!cmd->pid) {
dup2(fd_in[0], 0);
dup2(fd_out[1], 1);
dup2(fd_err[1], 2);
execvp(cmd->args.argv[0], cmd->args.argv);
exit(EXIT_FAILURE);
}
os_close(fd_in[0]);
os_close(fd_out[1]);
os_close(fd_err[1]);
return 0;
err_fork:
os_close(fd_err[0]);
os_close(fd_err[1]);
err_pipe_err:
os_close(fd_out[0]);
os_close(fd_out[1]);
err_pipe_out:
os_close(fd_in[0]);
os_close(fd_in[1]);
err_pipe_in:
return -1;
}
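/*
 * Illustrative sketch (compiled out) of the full command life cycle:
 * build the argument vector, spawn the child, wait for it and clean up.
 * The command below is hypothetical; in real code the caller also closes
 * cmd->fd_in, cmd->fd_out and cmd->fd_err when done with them.
 */
#if 0
static int
cmd_example(void)
{
	struct rpmem_cmd *cmd = rpmem_cmd_init();
	if (!cmd)
		return -1;

	int status = -1;
	if (rpmem_cmd_push(cmd, "ssh") ||
	    rpmem_cmd_push(cmd, "example-target") ||
	    rpmem_cmd_push(cmd, "true"))
		goto out;
	if (rpmem_cmd_run(cmd))
		goto out;
	/* cmd->fd_in/fd_out/fd_err now talk to the child's stdio */
	(void) rpmem_cmd_wait(cmd, &status);
out:
	rpmem_cmd_fini(cmd);
	return status;
}
#endif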
/*
* rpmem_cmd_wait -- wait for process to change state
*/
int
rpmem_cmd_wait(struct rpmem_cmd *cmd, int *status)
{
if (cmd->pid <= 0)
return -1;
if (waitpid(cmd->pid, status, 0) != cmd->pid)
return -1;
return 0;
}
/*
* rpmem_cmd_term -- terminate process by sending SIGINT signal
*/
int
rpmem_cmd_term(struct rpmem_cmd *cmd)
{
os_close(cmd->fd_in);
os_close(cmd->fd_out);
os_close(cmd->fd_err);
RPMEM_ASSERT(cmd->pid > 0);
return kill(cmd->pid, SIGINT);
}
| 6,070 | 21.996212 | 77 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_cmd.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_cmd.h -- helper module for invoking separate process
*/
#ifndef RPMEM_CMD_H
#define RPMEM_CMD_H 1
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_cmd {
int fd_in; /* stdin */
int fd_out; /* stdout */
int fd_err; /* stderr */
struct {
char **argv;
int argc;
} args; /* command arguments */
pid_t pid; /* pid of process */
};
struct rpmem_cmd *rpmem_cmd_init(void);
int rpmem_cmd_push(struct rpmem_cmd *cmd, const char *arg);
int rpmem_cmd_run(struct rpmem_cmd *cmd);
int rpmem_cmd_term(struct rpmem_cmd *cmd);
int rpmem_cmd_wait(struct rpmem_cmd *cmd, int *status);
void rpmem_cmd_fini(struct rpmem_cmd *cmd);
#ifdef __cplusplus
}
#endif
#endif
| 2,304 | 32.405797 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemcto/cto.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cto.c -- memory pool & allocation entry points for libpmemcto
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <wchar.h>
#include "libpmemcto.h"
#include "libpmem.h"
#include "jemalloc.h"
#include "util.h"
#include "util_pmem.h"
#include "set.h"
#include "out.h"
#include "cto.h"
#include "mmap.h"
#include "sys_util.h"
#include "valgrind_internal.h"
#include "os_thread.h"
#include "os_deep.h"
/* default hint address for mmap() when PMEM_MMAP_HINT is not specified */
#define CTO_MMAP_HINT ((void *)0x10000000000)
static const struct pool_attr Cto_create_attr = {
CTO_HDR_SIG,
CTO_FORMAT_MAJOR,
CTO_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Cto_open_attr = {
CTO_HDR_SIG,
CTO_FORMAT_MAJOR,
CTO_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
static os_mutex_t Pool_lock; /* guards pmemcto_create and pmemcto_open */
/*
* cto_print_jemalloc_messages -- (internal) custom print function for jemalloc
*
* Prints traces from jemalloc. All traces from jemalloc are error messages.
*/
static void
cto_print_jemalloc_messages(void *ignore, const char *s)
{
ERR("%s", s);
}
/*
* cto_print_jemalloc_stats --(internal) print function for jemalloc statistics
*
* Prints statistics from jemalloc. All statistics are printed with level 0.
*/
static void
cto_print_jemalloc_stats(void *ignore, const char *s)
{
LOG_NONL(0, "%s", s);
}
/*
* cto_init -- load-time initialization for cto
*
* Called automatically by the run-time loader.
*/
void
cto_init(void)
{
COMPILE_ERROR_ON(offsetof(struct pmemcto, set) !=
POOL_HDR_SIZE + CTO_DSC_P_SIZE);
util_mutex_init(&Pool_lock);
/* set up jemalloc messages to a custom print function */
je_cto_malloc_message = cto_print_jemalloc_messages;
}
/*
* cto_fini -- libpmemcto cleanup routine
*
* Called automatically when the process terminates.
*/
void
cto_fini(void)
{
LOG(3, NULL);
util_mutex_destroy(&Pool_lock);
}
/*
* cto_descr_create -- (internal) create cto memory pool descriptor
*/
static int
cto_descr_create(PMEMctopool *pcp, const char *layout, size_t poolsize)
{
LOG(3, "pcp %p layout \"%s\" poolsize %zu", pcp, layout, poolsize);
ASSERTeq(poolsize % Pagesize, 0);
/* opaque info lives at the beginning of mapped memory pool */
void *dscp = (void *)((uintptr_t)pcp +
sizeof(struct pool_hdr));
/* create required metadata */
memset(dscp, 0, CTO_DSC_P_SIZE);
if (layout)
strncpy(pcp->layout, layout, PMEMCTO_MAX_LAYOUT - 1);
pcp->addr = (uint64_t)pcp;
pcp->size = poolsize;
pcp->root = (uint64_t)NULL;
pcp->consistent = 0;
/* store non-volatile part of pool's descriptor */
util_persist(pcp->is_pmem, dscp, CTO_DSC_P_SIZE);
return 0;
}
/*
* cto_descr_check -- (internal) validate cto pool descriptor
*/
static int
cto_descr_check(PMEMctopool *pcp, const char *layout, size_t poolsize)
{
LOG(3, "pcp %p layout \"%s\" poolsize %zu", pcp, layout, poolsize);
if (layout && strncmp(pcp->layout, layout, PMEMCTO_MAX_LAYOUT)) {
ERR("wrong layout (\"%s\") pool created with layout \"%s\"",
layout, pcp->layout);
errno = EINVAL;
return -1;
}
if (pcp->consistent == 0) {
ERR("inconsistent pool");
errno = EINVAL;
return -1;
}
if ((void *)pcp->addr == NULL) {
ERR("invalid mapping address");
errno = EINVAL;
return -1;
}
/*
* The pool could be created using older version of the library, when
* the minimum pool size was different.
*/
if (pcp->size < PMEMCTO_MIN_POOL) {
LOG(4, "mapping size is less than minimum (%zu < %zu)",
pcp->size, PMEMCTO_MIN_POOL);
}
if (pcp->size != poolsize) {
ERR("mapping size does not match pool size: %zu != %zu",
pcp->size, poolsize);
errno = EINVAL;
return -1;
}
if ((void *)pcp->root != NULL &&
((char *)pcp->root < ((char *)pcp->addr + CTO_DSC_SIZE_ALIGNED) ||
(char *)pcp->root >= ((char *)pcp->addr + pcp->size))) {
ERR("invalid root pointer");
errno = EINVAL;
return -1;
}
LOG(4, "addr %p size %zu root %p", (void *)pcp->addr, pcp->size,
(void *)pcp->root);
return 0;
}
/*
* cto_runtime_init -- (internal) initialize cto memory pool runtime data
*/
static int
cto_runtime_init(PMEMctopool *pcp, int rdonly, int is_pmem)
{
LOG(3, "pcp %p rdonly %d is_pmem %d", pcp, rdonly, is_pmem);
/* reset consistency flag */
pcp->consistent = 0;
os_part_deep_common(REP(pcp->set, 0), 0,
&pcp->consistent, sizeof(pcp->consistent), 1);
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
RANGE_NONE((void *)pcp->addr, sizeof(struct pool_hdr), pcp->is_dev_dax);
return 0;
}
/*
* pmemcto_create -- create a cto memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMctopool *
pmemcto_createU(const char *path, const char *layout, size_t poolsize,
mode_t mode)
{
LOG(3, "path \"%s\" layout \"%s\" poolsize %zu mode %o",
path, layout, poolsize, mode);
struct pool_set *set;
/* check length of layout */
if (layout && (strlen(layout) >= PMEMCTO_MAX_LAYOUT)) {
ERR("Layout too long");
errno = EINVAL;
return NULL;
}
util_mutex_lock(&Pool_lock);
/*
* Since pmemcto_create and pmemcto_open are guarded by the lock,
* we can safely modify the global Mmap_hint variable and restore
* it once the pool is created.
*/
int old_no_random = Mmap_no_random;
if (!Mmap_no_random) {
Mmap_no_random = 1;
Mmap_hint = CTO_MMAP_HINT; /* XXX: add randomization */
}
if (util_pool_create(&set, path, poolsize, PMEMCTO_MIN_POOL,
PMEMCTO_MIN_PART, &Cto_create_attr, NULL,
REPLICAS_DISABLED) != 0) {
LOG(2, "cannot create pool or pool set");
Mmap_no_random = old_no_random;
util_mutex_unlock(&Pool_lock);
return NULL;
}
Mmap_no_random = old_no_random;
util_mutex_unlock(&Pool_lock);
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMctopool *pcp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pcp->addr,
sizeof(struct pmemcto) -
((uintptr_t)&pcp->addr - (uintptr_t)&pcp->hdr));
pcp->set = set;
pcp->is_pmem = rep->is_pmem;
pcp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pcp->is_dev_dax || pcp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* create pool descriptor */
if (cto_descr_create(pcp, layout, rep->repsize) != 0) {
LOG(2, "descriptor creation failed");
goto err;
}
/* initialize runtime parts */
if (cto_runtime_init(pcp, 0, rep->is_pmem) != 0) {
ERR("pool initialization failed");
goto err;
}
/* Prepare pool for jemalloc - empty */
if (je_cto_pool_create(
(void *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
rep->repsize - CTO_DSC_SIZE_ALIGNED,
set->zeroed, 1) == NULL) {
ERR("pool creation failed");
goto err;
}
if (util_poolset_chmod(set, mode))
goto err;
util_poolset_fdclose(set);
LOG(3, "pcp %p", pcp);
return pcp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_mutex_lock(&Pool_lock);
util_poolset_close(set, DELETE_CREATED_PARTS);
util_mutex_unlock(&Pool_lock);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
 * pmemcto_create -- create a cto memory pool
*/
PMEMctopool *
pmemcto_create(const char *path, const char *layout, size_t poolsize,
mode_t mode)
{
return pmemcto_createU(path, layout, poolsize, mode);
}
#else
/*
 * pmemcto_createW -- create a cto memory pool
*/
PMEMctopool *
pmemcto_createW(const wchar_t *path, const wchar_t *layout, size_t poolsize,
mode_t mode)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
PMEMctopool *ret = pmemcto_createU(upath, ulayout, poolsize, mode);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
/*
* cto_open_noinit -- (internal) open a cto memory pool w/o initialization
*
 * This routine opens the pool, but does not perform any run-time
 * initialization.
*/
static PMEMctopool *
cto_open_noinit(const char *path, const char *layout, unsigned flags,
void *addr)
{
LOG(3, "path \"%s\" layout \"%s\" flags 0x%x addr %p",
path, layout, flags, addr);
struct pool_set *set;
if (util_pool_open(&set, path, PMEMCTO_MIN_POOL, &Cto_open_attr,
NULL, addr, flags) != 0) {
LOG(2, "cannot open pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMctopool *pcp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pcp->addr,
sizeof(struct pmemcto) -
((uintptr_t)&pcp->addr - (uintptr_t)&pcp->hdr));
ASSERTeq(pcp->size, rep->repsize);
pcp->set = set;
pcp->is_pmem = rep->is_pmem;
pcp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pcp->is_dev_dax || pcp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* validate pool descriptor */
if (cto_descr_check(pcp, layout, set->poolsize) != 0) {
LOG(2, "descriptor check failed");
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pcp %p", pcp);
return pcp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DO_NOT_DELETE_PARTS);
errno = oerrno;
return NULL;
}
/*
* cto_open_common -- (internal) open a cto memory pool
*
 * This routine does all the work, but takes a flags argument (e.g.
 * POOL_OPEN_COW) so internal calls can map a read-only pool if required.
*/
static PMEMctopool *
cto_open_common(const char *path, const char *layout, unsigned flags)
{
LOG(3, "path \"%s\" layout \"%s\" flags 0x%x", path, layout, flags);
PMEMctopool *pcp;
struct pool_set *set;
/*
* XXX: Opening/mapping the pool twice is not the coolest solution,
* but it makes it easier to support both single-file pools and
* pool sets.
*/
util_mutex_lock(&Pool_lock);
/* open pool set to check consistency and to get the mapping address */
if ((pcp = cto_open_noinit(path, layout, flags, NULL)) == NULL) {
LOG(2, "cannot open pool or pool set");
util_mutex_unlock(&Pool_lock);
return NULL;
}
/* get the last mapping address */
void *mapaddr = (void *)pcp->addr;
LOG(4, "mapping address: %p", mapaddr);
int oerrno = errno;
util_poolset_close(pcp->set, DO_NOT_DELETE_PARTS);
errno = oerrno;
/* open the pool once again using the mapping address as a hint */
if ((pcp = cto_open_noinit(path, layout, flags, mapaddr)) == NULL) {
LOG(2, "cannot open pool or pool set");
util_mutex_unlock(&Pool_lock);
return NULL;
}
util_mutex_unlock(&Pool_lock);
set = pcp->set;
if ((void *)pcp->addr != pcp) {
ERR("cannot mmap at the same address: %p != %p",
pcp, (void *)pcp->addr);
errno = ENOMEM;
goto err;
}
/* initialize runtime parts */
if (cto_runtime_init(pcp, set->rdonly, set->replica[0]->is_pmem) != 0) {
ERR("pool initialization failed");
goto err;
}
/*
* Initially, treat this memory region as undefined.
* Once jemalloc initializes its metadata, it will also mark
* registered free chunks (usable heap space) as unaddressable.
*/
VALGRIND_DO_MAKE_MEM_UNDEFINED(
(void *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
set->poolsize - CTO_DSC_SIZE_ALIGNED);
/* Prepare pool for jemalloc */
if (je_cto_pool_create(
(void *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
set->poolsize - CTO_DSC_SIZE_ALIGNED, 0, 0) == NULL) {
ERR("pool creation failed");
util_unmap((void *)pcp->addr, pcp->size);
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pcp %p", pcp);
return pcp;
err:
LOG(4, "error clean up");
oerrno = errno;
util_mutex_lock(&Pool_lock);
util_poolset_close(set, DO_NOT_DELETE_PARTS);
util_mutex_unlock(&Pool_lock);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
 * pmemcto_open -- open an existing cto memory pool
*/
PMEMctopool *
pmemcto_open(const char *path, const char *layout)
{
LOG(3, "path \"%s\" layout \"%s\"", path, layout);
return cto_open_common(path, layout, 0);
}
#else
/*
* pmemcto_openU -- open an existing cto memory pool
*/
PMEMctopool *
pmemcto_openU(const char *path, const char *layout)
{
LOG(3, "path \"%s\" layout \"%s\"", path, layout);
return cto_open_common(path, layout, 0);
}
/*
 * pmemcto_openW -- open an existing cto memory pool
*/
PMEMctopool *
pmemcto_openW(const wchar_t *path, const wchar_t *layout)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
PMEMctopool *ret = pmemcto_openU(upath, ulayout);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
/*
* pmemcto_close -- close a cto memory pool
*/
void
pmemcto_close(PMEMctopool *pcp)
{
LOG(3, "pcp %p", pcp);
int ret = je_cto_pool_delete(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED));
if (ret != 0) {
ERR("invalid pool handle: %p", pcp);
errno = EINVAL;
return;
}
/* deep flush the entire pool to persistence */
RANGE_RW((void *)pcp->addr, sizeof(struct pool_hdr), pcp->is_dev_dax);
VALGRIND_DO_MAKE_MEM_DEFINED(pcp->addr, pcp->size);
/* so far, there could be only one replica in CTO pool set */
struct pool_replica *rep = REP(pcp->set, 0);
for (unsigned p = 0; p < rep->nparts; p++) {
struct pool_set_part *part = PART(rep, p);
os_part_deep_common(rep, p, part->addr, part->size, 1);
}
/* set consistency flag */
pcp->consistent = 1;
os_part_deep_common(REP(pcp->set, 0), 0,
&pcp->consistent, sizeof(pcp->consistent), 1);
util_mutex_lock(&Pool_lock);
util_poolset_close(pcp->set, DO_NOT_DELETE_PARTS);
util_mutex_unlock(&Pool_lock);
}
/*
* pmemcto_set_root_pointer -- saves pointer to root object
*/
void
pmemcto_set_root_pointer(PMEMctopool *pcp, void *ptr)
{
LOG(3, "pcp %p ptr %p", pcp, ptr);
#ifdef DEBUG
/* XXX: an error also in non-debug build? (return 0 or -1) */
ASSERT(ptr == NULL ||
((char *)ptr >= ((char *)pcp->addr + CTO_DSC_SIZE_ALIGNED) &&
(char *)ptr < ((char *)pcp->addr + pcp->size)));
#endif
pcp->root = (uint64_t)ptr;
}
/*
* pmemcto_get_root_pointer -- returns pointer to root object
*/
void *
pmemcto_get_root_pointer(PMEMctopool *pcp)
{
LOG(3, "pcp %p", pcp);
LOG(4, "root ptr %p", (void *)pcp->root);
return (void *)pcp->root;
}
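/*
 * Usage sketch (illustrative only, kept out of the build): the root
 * pointer is the application's sole entry point into the pool after a
 * restart. A common pattern is to allocate the root object on first
 * use, register it with pmemcto_set_root_pointer(), and recover it
 * with pmemcto_get_root_pointer() on subsequent opens. The app_root
 * struct below is made up for the example.
 */
#if 0
struct app_root {
	int counter;
};

static struct app_root *
app_attach(PMEMctopool *pcp)
{
	struct app_root *root = pmemcto_get_root_pointer(pcp);
	if (root == NULL) {
		/* first run -- allocate and register the root object */
		root = pmemcto_calloc(pcp, 1, sizeof(*root));
		if (root == NULL)
			return NULL;
		pmemcto_set_root_pointer(pcp, root);
	}
	return root;
}
#endif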
/*
* pmemcto_checkU -- memory pool consistency check
*/
#ifndef _WIN32
static inline
#endif
int
pmemcto_checkU(const char *path, const char *layout)
{
LOG(3, "path \"%s\" layout \"%s\"", path, layout);
PMEMctopool *pcp = cto_open_common(path, layout, POOL_OPEN_COW);
if (pcp == NULL)
return -1; /* errno set by cto_open_common() */
int consistent = je_cto_pool_check(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED));
pmemcto_close(pcp);
if (consistent)
LOG(4, "pool consistency check OK");
return consistent;
}
#ifndef _WIN32
/*
* pmemcto_check -- cto memory pool consistency check
*
* Returns 1 if consistent, 0 if inconsistent, or -1 if the check
* cannot be performed due to other errors.
*/
int
pmemcto_check(const char *path, const char *layout)
{
return pmemcto_checkU(path, layout);
}
#else
/*
* pmemcto_checkW -- cto memory pool consistency check
*/
int
pmemcto_checkW(const wchar_t *path, const wchar_t *layout)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return -1;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return -1;
}
}
int ret = pmemcto_checkU(upath, ulayout);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
/*
* pmemcto_stats_print -- spew memory allocator stats for a pool
*/
void
pmemcto_stats_print(PMEMctopool *pcp, const char *opts)
{
LOG(3, "vmp %p opts \"%s\"", pcp, opts ? opts : "");
je_cto_pool_malloc_stats_print(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
cto_print_jemalloc_stats, NULL, opts);
}
/*
* pmemcto_malloc -- allocate memory
*/
void *
pmemcto_malloc(PMEMctopool *pcp, size_t size)
{
LOG(3, "pcp %p size %zu", pcp, size);
return je_cto_pool_malloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
size);
}
/*
* pmemcto_free -- free memory
*/
void
pmemcto_free(PMEMctopool *pcp, void *ptr)
{
LOG(3, "pcp %p ptr %p", pcp, ptr);
je_cto_pool_free((pool_t *)(
(uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED), ptr);
}
/*
* pmemcto_calloc -- allocate zeroed memory
*/
void *
pmemcto_calloc(PMEMctopool *pcp, size_t nmemb, size_t size)
{
LOG(3, "pcp %p nmemb %zu size %zu", pcp, nmemb, size);
return je_cto_pool_calloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
nmemb, size);
}
/*
* pmemcto_realloc -- resize a memory allocation
*/
void *
pmemcto_realloc(PMEMctopool *pcp, void *ptr, size_t size)
{
LOG(3, "pcp %p ptr %p size %zu", pcp, ptr, size);
return je_cto_pool_ralloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
ptr, size);
}
/*
* pmemcto_aligned_alloc -- allocate aligned memory
*/
void *
pmemcto_aligned_alloc(PMEMctopool *pcp, size_t alignment, size_t size)
{
LOG(3, "pcp %p alignment %zu size %zu", pcp, alignment, size);
return je_cto_pool_aligned_alloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
alignment, size);
}
/*
* pmemcto_strdup -- allocate memory for copy of string
*/
char *
pmemcto_strdup(PMEMctopool *pcp, const char *s)
{
LOG(3, "pcp %p s %p", pcp, s);
size_t size = strlen(s) + 1;
void *retaddr = je_cto_pool_malloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
size);
if (retaddr == NULL)
return NULL;
return (char *)memcpy(retaddr, s, size);
}
/*
* pmemcto_wcsdup -- allocate memory for copy of widechar string
*/
wchar_t *
pmemcto_wcsdup(PMEMctopool *pcp, const wchar_t *s)
{
LOG(3, "pcp %p s %p", pcp, s);
size_t size = (wcslen(s) + 1) * sizeof(wchar_t);
void *retaddr = je_cto_pool_malloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
size);
if (retaddr == NULL)
return NULL;
return (wchar_t *)memcpy(retaddr, s, size);
}
/*
* pmemcto_malloc_usable_size -- get usable size of allocation
*/
size_t
pmemcto_malloc_usable_size(PMEMctopool *pcp, void *ptr)
{
LOG(3, "pcp %p ptr %p", pcp, ptr);
return je_cto_pool_malloc_usable_size(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED), ptr);
}
| 20,165 | 22.503497 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemcto/cto.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cto.h -- internal definitions for libpmemcto module
*/
#ifndef LIBPMEMCTO_CTO_H
#define LIBPMEMCTO_CTO_H 1
#include "os_thread.h"
#include "util.h"
#include "pool_hdr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEMCTO_LOG_PREFIX "libpmemcto"
#define PMEMCTO_LOG_LEVEL_VAR "PMEMCTO_LOG_LEVEL"
#define PMEMCTO_LOG_FILE_VAR "PMEMCTO_LOG_FILE"
/* attributes of the cto memory pool format for the pool header */
#define CTO_HDR_SIG "PMEMCTO" /* must be 8 bytes including '\0' */
#define CTO_FORMAT_MAJOR 1
#define CTO_FORMAT_FEAT_DEFAULT \
{0x0000, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define CTO_FORMAT_FEAT_CHECK \
{0x0000, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t cto_format_feat_default = CTO_FORMAT_FEAT_DEFAULT;
/* size of the persistent part of PMEMCTO pool descriptor (2kB) */
#define CTO_DSC_P_SIZE 2048
/* size of unused part of the persistent part of PMEMCTO pool descriptor */
#define CTO_DSC_P_UNUSED (CTO_DSC_P_SIZE - PMEMCTO_MAX_LAYOUT - 28)
/*
* XXX: We don't care about portable data types, as the pool may only be open
* on the same platform.
* Assuming the shutdown state / consistent flag is updated in a fail-safe
* manner, there is no need to checksum the persistent part of the descriptor.
*/
struct pmemcto {
struct pool_hdr hdr; /* memory pool header */
/* persistent part of PMEMCTO pool descriptor (2kB) */
char layout[PMEMCTO_MAX_LAYOUT];
uint64_t addr; /* mapped region */
uint64_t size; /* size of mapped region */
uint64_t root; /* root pointer */
uint8_t consistent; /* successfully flushed before exit */
unsigned char unused[CTO_DSC_P_UNUSED]; /* must be zero */
/* some run-time state, allocated out of memory pool... */
struct pool_set *set; /* pool set info */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
int is_dev_dax; /* true if mapped on device dax */
};
/* data area starts at this alignment after the struct pmemcto above */
#define CTO_FORMAT_DATA_ALIGN ((uintptr_t)4096)
#define CTO_DSC_SIZE (sizeof(struct pmemcto) - sizeof(struct pool_hdr))
#define CTO_DSC_SIZE_ALIGNED\
roundup(sizeof(struct pmemcto), CTO_FORMAT_DATA_ALIGN)
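/*
 * Illustrative sketch (kept out of the build): the jemalloc-managed
 * heap starts CTO_DSC_SIZE_ALIGNED bytes past the mapped descriptor,
 * which is exactly how cto.c addresses the pool:
 */
#if 0
static inline void *
cto_heap_start(struct pmemcto *pcp)
{
	/* everything below this address is pool header/descriptor */
	return (void *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED);
}
#endif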
void cto_init(void);
void cto_fini(void);
#ifdef _WIN32
/*
* On Linux we have separate jemalloc builds for libvmem, libvmmalloc
* and libpmemcto, with different function name prefixes. This is to avoid
* symbol collisions in case of static linking of those libraries.
* On Windows we don't provide statically linked libraries, so there is
* no need to have separate jemalloc builds. However, since libpmemcto
* links to jemalloc symbols with "je_cto" prefix, we have to do renaming
* here (unless there is a better solution).
*/
#define je_cto_pool_create je_vmem_pool_create
#define je_cto_pool_delete je_vmem_pool_delete
#define je_cto_pool_malloc je_vmem_pool_malloc
#define je_cto_pool_calloc je_vmem_pool_calloc
#define je_cto_pool_ralloc je_vmem_pool_ralloc
#define je_cto_pool_aligned_alloc je_vmem_pool_aligned_alloc
#define je_cto_pool_free je_vmem_pool_free
#define je_cto_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
#define je_cto_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
#define je_cto_pool_extend je_vmem_pool_extend
#define je_cto_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
#define je_cto_pool_check je_vmem_pool_check
#define je_cto_malloc_message je_vmem_malloc_message
#endif
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMCTO_CTO_H */
| 5,089 | 37.270677 | 78 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemcto/libpmemcto_main.c | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemcto_main.c -- entry point for libpmemcto.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include <windows.h>
void libpmemcto_init(void);
void libpmemcto_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmemcto_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmemcto_fini();
break;
}
return TRUE;
}
| 2,184 | 34.241935 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemcto/libpmemcto.c | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemcto.c -- basic libpmemcto functions
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmemcto.h"
#include "pmemcommon.h"
#include "cto.h"
#include "jemalloc.h"
/*
* libpmemcto_init -- load-time initialization for libpmemcto
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemcto_init(void)
{
common_init(PMEMCTO_LOG_PREFIX, PMEMCTO_LOG_LEVEL_VAR,
PMEMCTO_LOG_FILE_VAR, PMEMCTO_MAJOR_VERSION,
PMEMCTO_MINOR_VERSION);
cto_init();
LOG(3, NULL);
}
/*
* libpmemcto_fini -- libpmemcto cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemcto_fini(void)
{
LOG(3, NULL);
cto_fini();
common_fini();
}
/*
* pmemcto_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemcto_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMCTO_MAJOR_VERSION) {
ERR("libpmemcto major version mismatch (need %u, found %u)",
major_required, PMEMCTO_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMCTO_MINOR_VERSION) {
ERR("libpmemcto minor version mismatch (need %u, found %u)",
minor_required, PMEMCTO_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemcto_check_version -- see if lib meets application version requirements
*/
const char *
pmemcto_check_version(unsigned major_required, unsigned minor_required)
{
return pmemcto_check_versionU(major_required, minor_required);
}
#else
/*
* pmemcto_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemcto_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemcto_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
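/*
 * Usage sketch (illustrative only, kept out of the build): applications
 * are expected to verify the library version before calling any other
 * entry point; a NULL return means the loaded library is compatible.
 */
#if 0
static int
check_cto_version(void)
{
	const char *msg = pmemcto_check_version(PMEMCTO_MAJOR_VERSION,
			PMEMCTO_MINOR_VERSION);
	if (msg != NULL) {
		fprintf(stderr, "libpmemcto: %s\n", msg);
		return -1;
	}
	return 0;
}
#endif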
/*
* pmemcto_set_funcs -- allow overriding libpmemcto's call to malloc, etc.
*/
void
pmemcto_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s),
void (*print_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func,
realloc_func, strdup_func);
out_set_print_func(print_func);
je_cto_pool_set_alloc_funcs(malloc_func, free_func);
}
/*
* pmemcto_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemcto_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemcto_errormsg -- return last error message
*/
const char *
pmemcto_errormsg(void)
{
return pmemcto_errormsgU();
}
#else
/*
* pmemcto_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemcto_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 4,501 | 24.725714 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/blk.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk.h -- internal definitions for libpmem blk module
*/
#ifndef BLK_H
#define BLK_H 1
#include <stddef.h>
#include "ctl.h"
#include "os_thread.h"
#include "pool_hdr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEMBLK_LOG_PREFIX "libpmemblk"
#define PMEMBLK_LOG_LEVEL_VAR "PMEMBLK_LOG_LEVEL"
#define PMEMBLK_LOG_FILE_VAR "PMEMBLK_LOG_FILE"
/* attributes of the blk memory pool format for the pool header */
#define BLK_HDR_SIG "PMEMBLK" /* must be 8 bytes including '\0' */
#define BLK_FORMAT_MAJOR 1
#define BLK_FORMAT_FEAT_DEFAULT \
{0x0000, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define BLK_FORMAT_FEAT_CHECK \
{0x0000, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t blk_format_feat_default = BLK_FORMAT_FEAT_DEFAULT;
struct pmemblk {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint32_t bsize; /* block size */
/* flag indicating if the pool was zero-initialized */
int is_zeroed;
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
void *data; /* post-header data area */
size_t datasize; /* size of data area */
size_t nlba; /* number of LBAs in pool */
struct btt *bttp; /* btt handle */
unsigned nlane; /* number of lanes */
unsigned next_lane; /* used to rotate through lanes */
os_mutex_t *locks; /* one per lane */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
#ifdef DEBUG
/* held during read/write mprotected sections */
os_mutex_t write_lock;
#endif
};
/* data area starts at this alignment after the struct pmemblk above */
#define BLK_FORMAT_DATA_ALIGN ((uintptr_t)4096)
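/*
 * Illustrative note: blk.c derives the data area from this constant as
 *
 *	pbp->data = (char *)pbp->addr +
 *		roundup(sizeof(struct pmemblk), BLK_FORMAT_DATA_ALIGN);
 */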
#ifdef __cplusplus
}
#endif
#endif
| 3,519 | 32.207547 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/libpmemblk.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemblk.c -- pmem entry points for libpmemblk
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmemblk.h"
#include "ctl_global.h"
#include "pmemcommon.h"
#include "blk.h"
/*
* The variable from which the config is directly loaded. The string
* cannot contain any comments or extraneous white characters.
*/
#define BLK_CONFIG_ENV_VARIABLE "PMEMBLK_CONF"
/*
* The variable that points to a config file from which the config is loaded.
*/
#define BLK_CONFIG_FILE_ENV_VARIABLE "PMEMBLK_CONF_FILE"
/*
* blk_ctl_init_and_load -- (static) initializes CTL and loads configuration
* from env variable and file
*/
static int
blk_ctl_init_and_load(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
if (pbp != NULL && (pbp->ctl = ctl_new()) == NULL) {
LOG(2, "!ctl_new");
return -1;
}
char *env_config = os_getenv(BLK_CONFIG_ENV_VARIABLE);
if (env_config != NULL) {
if (ctl_load_config_from_string(pbp ? pbp->ctl : NULL,
pbp, env_config) != 0) {
LOG(2, "unable to parse config stored in %s "
"environment variable",
BLK_CONFIG_ENV_VARIABLE);
goto err;
}
}
char *env_config_file = os_getenv(BLK_CONFIG_FILE_ENV_VARIABLE);
if (env_config_file != NULL && env_config_file[0] != '\0') {
if (ctl_load_config_from_file(pbp ? pbp->ctl : NULL,
pbp, env_config_file) != 0) {
LOG(2, "unable to parse config stored in %s "
"file (from %s environment variable)",
env_config_file,
BLK_CONFIG_FILE_ENV_VARIABLE);
goto err;
}
}
return 0;
err:
if (pbp)
ctl_delete(pbp->ctl);
return -1;
}
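/*
 * Usage sketch (illustrative only, kept out of the build): both config
 * sources above take semicolon-separated "<query>=<value>" strings; the
 * same queries can also be issued programmatically. The query name
 * below is a placeholder, not a real ctl entry point.
 */
#if 0
static int
set_example_knob(PMEMblkpool *pbp)
{
	int val = 1;
	return pmemblk_ctl_set(pbp, "some.ctl.knob", &val);
}
#endif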
/*
* libpmemblk_init -- (internal) load-time initialization for blk
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemblk_init(void)
{
ctl_global_register();
if (blk_ctl_init_and_load(NULL))
FATAL("error: %s", pmemblk_errormsg());
common_init(PMEMBLK_LOG_PREFIX, PMEMBLK_LOG_LEVEL_VAR,
PMEMBLK_LOG_FILE_VAR, PMEMBLK_MAJOR_VERSION,
PMEMBLK_MINOR_VERSION);
LOG(3, NULL);
}
/*
* libpmemblk_fini -- libpmemblk cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemblk_fini(void)
{
LOG(3, NULL);
common_fini();
}
/*
* pmemblk_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMBLK_MAJOR_VERSION) {
ERR("libpmemblk major version mismatch (need %u, found %u)",
major_required, PMEMBLK_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMBLK_MINOR_VERSION) {
ERR("libpmemblk minor version mismatch (need %u, found %u)",
minor_required, PMEMBLK_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_check_version -- see if lib meets application version requirements
*/
const char *
pmemblk_check_version(unsigned major_required, unsigned minor_required)
{
return pmemblk_check_versionU(major_required, minor_required);
}
#else
/*
* pmemblk_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemblk_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemblk_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmemblk_set_funcs -- allow overriding libpmemblk's call to malloc, etc.
*/
void
pmemblk_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
/*
* pmemblk_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemblk_errormsg -- return last error message
*/
const char *
pmemblk_errormsg(void)
{
return pmemblk_errormsgU();
}
#else
/*
* pmemblk_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemblk_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 5,833 | 24.365217 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/btt.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* btt.h -- btt module definitions
*/
#ifndef BTT_H
#define BTT_H 1
#ifdef __cplusplus
extern "C" {
#endif
/* callback functions passed to btt_init() */
struct ns_callback {
int (*nsread)(void *ns, unsigned lane,
void *buf, size_t count, uint64_t off);
int (*nswrite)(void *ns, unsigned lane,
const void *buf, size_t count, uint64_t off);
int (*nszero)(void *ns, unsigned lane, size_t count, uint64_t off);
ssize_t (*nsmap)(void *ns, unsigned lane, void **addrp,
size_t len, uint64_t off);
void (*nssync)(void *ns, unsigned lane, void *addr, size_t len);
int ns_is_zeroed;
};
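/*
 * Illustrative sketch (kept out of the build): a namespace passes its
 * I/O callbacks to btt_init(); libpmemblk wires its nsread/nswrite/
 * nszero/nsmap/nssync implementations this way in blk.c. The example_*
 * functions below are hypothetical.
 */
#if 0
static const struct ns_callback example_cb = {
	.nsread = example_nsread,
	.nswrite = example_nswrite,
	.nszero = example_nszero,
	.nsmap = example_nsmap,
	.nssync = example_nssync,
	.ns_is_zeroed = 0
};
#endif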
struct btt_info;
struct btt *btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[],
unsigned maxlane, void *ns, const struct ns_callback *ns_cbp);
unsigned btt_nlane(struct btt *bttp);
size_t btt_nlba(struct btt *bttp);
int btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf);
int btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf);
int btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_check(struct btt *bttp);
void btt_fini(struct btt *bttp);
uint64_t btt_flog_size(uint32_t nfree);
uint64_t btt_map_size(uint32_t external_nlba);
uint64_t btt_arena_datasize(uint64_t arena_size, uint32_t nfree);
int btt_info_set(struct btt_info *info, uint32_t external_lbasize,
uint32_t nfree, uint64_t arena_size, uint64_t space_left);
struct btt_flog *btt_flog_get_valid(struct btt_flog *flog_pair, int *next);
int map_entry_is_initial(uint32_t map_entry);
void btt_info_convert2h(struct btt_info *infop);
void btt_info_convert2le(struct btt_info *infop);
void btt_flog_convert2h(struct btt_flog *flogp);
void btt_flog_convert2le(struct btt_flog *flogp);
#ifdef __cplusplus
}
#endif
#endif
| 3,423 | 37.47191 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/btt_layout.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* btt_layout.h -- block translation table on-media layout definitions
*/
/*
* Layout of BTT info block. All integers are stored little-endian.
*/
#ifndef BTT_LAYOUT_H
#define BTT_LAYOUT_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define BTT_ALIGNMENT ((uintptr_t)4096) /* alignment of all BTT structures */
#define BTTINFO_SIG_LEN 16
#define BTTINFO_UUID_LEN 16
#define BTTINFO_UNUSED_LEN 3968
#define BTTINFO_SIG "BTT_ARENA_INFO\0"
struct btt_info {
char sig[BTTINFO_SIG_LEN]; /* must be "BTT_ARENA_INFO\0\0" */
uint8_t uuid[BTTINFO_UUID_LEN]; /* BTT UUID */
uint8_t parent_uuid[BTTINFO_UUID_LEN]; /* UUID of container */
uint32_t flags; /* see flag bits below */
uint16_t major; /* major version */
uint16_t minor; /* minor version */
uint32_t external_lbasize; /* advertised LBA size (bytes) */
uint32_t external_nlba; /* advertised LBAs in this arena */
uint32_t internal_lbasize; /* size of data area blocks (bytes) */
uint32_t internal_nlba; /* number of blocks in data area */
uint32_t nfree; /* number of free blocks */
uint32_t infosize; /* size of this info block */
/*
* The following offsets are relative to the beginning of
* the btt_info block.
*/
uint64_t nextoff; /* offset to next arena (or zero) */
uint64_t dataoff; /* offset to arena data area */
uint64_t mapoff; /* offset to area map */
uint64_t flogoff; /* offset to area flog */
uint64_t infooff; /* offset to backup info block */
char unused[BTTINFO_UNUSED_LEN]; /* must be zero */
uint64_t checksum; /* Fletcher64 of all fields */
};
/*
* Definitions for flags mask for btt_info structure above.
*/
#define BTTINFO_FLAG_ERROR 0x00000001 /* error state (read-only) */
#define BTTINFO_FLAG_ERROR_MASK 0x00000001 /* all error bits */
/*
* Current on-media format versions.
*/
#define BTTINFO_MAJOR_VERSION 1
#define BTTINFO_MINOR_VERSION 1
/*
* Layout of a BTT "flog" entry. All integers are stored little-endian.
*
* The "nfree" field in the BTT info block determines how many of these
* flog entries there are, and each entry consists of two of the following
* structs (entry updates alternate between the two structs), padded up
* to a cache line boundary to isolate adjacent updates.
*/
#define BTT_FLOG_PAIR_ALIGN ((uintptr_t)64)
struct btt_flog {
uint32_t lba; /* last pre-map LBA using this entry */
uint32_t old_map; /* old post-map LBA (the freed block) */
uint32_t new_map; /* new post-map LBA */
uint32_t seq; /* sequence number (01, 10, 11) */
};
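/*
 * Illustrative note: with entries kept in alternating pairs padded to
 * BTT_FLOG_PAIR_ALIGN, the flog area for an arena comes out to roughly
 *
 *	roundup(2 * sizeof(struct btt_flog), BTT_FLOG_PAIR_ALIGN) * nfree
 *
 * bytes; btt_flog_size() in the btt module is the authoritative
 * calculation.
 */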
/*
* Layout of a BTT "map" entry. 4-byte internal LBA offset, little-endian.
*/
#define BTT_MAP_ENTRY_SIZE 4
#define BTT_MAP_ENTRY_ERROR 0x40000000U
#define BTT_MAP_ENTRY_ZERO 0x80000000U
#define BTT_MAP_ENTRY_NORMAL 0xC0000000U
#define BTT_MAP_ENTRY_LBA_MASK 0x3fffffffU
#define BTT_MAP_LOCK_ALIGN ((uintptr_t)64)
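/*
 * Illustrative sketch (kept out of the build): the top two bits of a
 * map entry select the block state and the low 30 bits carry the
 * internal LBA, so decoding reduces to masking:
 */
#if 0
static inline uint32_t
map_entry_lba(uint32_t entry)
{
	return entry & BTT_MAP_ENTRY_LBA_MASK;
}

static inline int
map_entry_is_zero(uint32_t entry)
{
	return (entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ZERO;
}
#endif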
/*
* BTT layout properties...
*/
#define BTT_MIN_SIZE ((1u << 20) * 16)
#define BTT_MAX_ARENA (1ull << 39) /* 512GB per arena */
#define BTT_MIN_LBA_SIZE (size_t)512
#define BTT_INTERNAL_LBA_ALIGNMENT 256U
#define BTT_DEFAULT_NFREE 256
#ifdef __cplusplus
}
#endif
#endif
| 4,712 | 33.40146 | 77 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/libpmemblk_main.c | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemblk_main.c -- entry point for libpmemblk.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include <windows.h>
void libpmemblk_init(void);
void libpmemblk_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmemblk_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmemblk_fini();
break;
}
return TRUE;
}
| 2,184 | 34.241935 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/blk.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk.c -- block memory pool entry points for libpmem
*/
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <stdint.h>
#include <endian.h>
#include <stdbool.h>
#include "libpmem.h"
#include "libpmemblk.h"
#include "mmap.h"
#include "set.h"
#include "out.h"
#include "btt.h"
#include "blk.h"
#include "util.h"
#include "sys_util.h"
#include "util_pmem.h"
#include "valgrind_internal.h"
static const struct pool_attr Blk_create_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Blk_open_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
/*
* lane_enter -- (internal) acquire a unique lane number
*/
static void
lane_enter(PMEMblkpool *pbp, unsigned *lane)
{
unsigned mylane;
mylane = util_fetch_and_add32(&pbp->next_lane, 1) % pbp->nlane;
/* lane selected, grab the per-lane lock */
util_mutex_lock(&pbp->locks[mylane]);
*lane = mylane;
}
/*
* lane_exit -- (internal) drop lane lock
*/
static void
lane_exit(PMEMblkpool *pbp, unsigned mylane)
{
util_mutex_unlock(&pbp->locks[mylane]);
}
/*
* nsread -- (internal) read data from the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nsread(void *ns, unsigned lane, void *buf, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
memcpy(buf, (char *)pbp->data + off, count);
return 0;
}
/*
* nswrite -- (internal) write data to the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nswrite(void *ns, unsigned lane, const void *buf, size_t count,
uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
#ifdef DEBUG
/* grab debug write lock */
util_mutex_lock(&pbp->write_lock);
#endif
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
if (pbp->is_pmem)
pmem_memcpy_nodrain(dest, buf, count);
else
memcpy(dest, buf, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
#ifdef DEBUG
/* release debug write lock */
util_mutex_unlock(&pbp->write_lock);
#endif
if (pbp->is_pmem)
pmem_drain();
else
pmem_msync(dest, count);
return 0;
}
/*
* nsmap -- (internal) allow direct access to a range of a namespace
*
* The caller requests a range to be "mapped" but the return value
* may indicate a smaller amount (in which case the caller is expected
* to call back later for another mapping).
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static ssize_t
nsmap(void *ns, unsigned lane, void **addrp, size_t len, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u len %zu off %" PRIu64, pbp, lane, len, off);
ASSERT(((ssize_t)len) >= 0);
if (off + len >= pbp->datasize) {
ERR("offset + len (%zu) past end of data area (%zu)",
(size_t)off + len, pbp->datasize - 1);
errno = EINVAL;
return -1;
}
/*
* Since the entire file is memory-mapped, this callback
* can always provide the entire length requested.
*/
*addrp = (char *)pbp->data + off;
LOG(12, "returning addr %p", *addrp);
return (ssize_t)len;
}
/*
* nssync -- (internal) flush changes made to a namespace range
*
* This is used in conjunction with the addresses handed out by
* nsmap() above. There's no need to sync things written via
* nswrite() since those changes are flushed each time nswrite()
* is called.
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static void
nssync(void *ns, unsigned lane, void *addr, size_t len)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u addr %p len %zu", pbp, lane, addr, len);
if (pbp->is_pmem)
pmem_persist(addr, len);
else
pmem_msync(addr, len);
}
/*
* nszero -- (internal) zero data in the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* zero the memory pool containing the BTT layout.
*/
static int
nszero(void *ns, unsigned lane, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
pmem_memset_persist(dest, 0, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
return 0;
}
/* callbacks for btt_init() */
static struct ns_callback ns_cb = {
.nsread = nsread,
.nswrite = nswrite,
.nszero = nszero,
.nsmap = nsmap,
.nssync = nssync,
.ns_is_zeroed = 0
};
/*
* blk_descr_create -- (internal) create block memory pool descriptor
*/
static void
blk_descr_create(PMEMblkpool *pbp, uint32_t bsize, int zeroed)
{
LOG(3, "pbp %p bsize %u zeroed %d", pbp, bsize, zeroed);
/* create the required metadata */
pbp->bsize = htole32(bsize);
util_persist(pbp->is_pmem, &pbp->bsize, sizeof(bsize));
pbp->is_zeroed = zeroed;
util_persist(pbp->is_pmem, &pbp->is_zeroed, sizeof(pbp->is_zeroed));
}
/*
* blk_descr_check -- (internal) validate block memory pool descriptor
*/
static int
blk_descr_check(PMEMblkpool *pbp, size_t *bsize)
{
LOG(3, "pbp %p bsize %zu", pbp, *bsize);
size_t hdr_bsize = le32toh(pbp->bsize);
if (*bsize && *bsize != hdr_bsize) {
ERR("wrong bsize (%zu), pool created with bsize %zu",
*bsize, hdr_bsize);
errno = EINVAL;
return -1;
}
*bsize = hdr_bsize;
LOG(3, "using block size from header: %zu", *bsize);
return 0;
}
/*
* blk_runtime_init -- (internal) initialize block memory pool runtime data
*/
static int
blk_runtime_init(PMEMblkpool *pbp, size_t bsize, int rdonly)
{
LOG(3, "pbp %p bsize %zu rdonly %d",
pbp, bsize, rdonly);
/* remove volatile part of header */
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
sizeof(struct pool_hdr) -
sizeof(pbp->bsize) -
sizeof(pbp->is_zeroed));
/*
* Use some of the memory pool area for run-time info. This
* run-time state is never loaded from the file, it is always
* created here, so no need to worry about byte-order.
*/
pbp->rdonly = rdonly;
pbp->data = (char *)pbp->addr +
roundup(sizeof(*pbp), BLK_FORMAT_DATA_ALIGN);
ASSERT(((char *)pbp->addr + pbp->size) >= (char *)pbp->data);
pbp->datasize = (size_t)
(((char *)pbp->addr + pbp->size) - (char *)pbp->data);
LOG(4, "data area %p data size %zu bsize %zu",
pbp->data, pbp->datasize, bsize);
long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus < 1)
ncpus = 1;
ns_cb.ns_is_zeroed = pbp->is_zeroed;
/* things free by "goto err" if not NULL */
struct btt *bttp = NULL;
os_mutex_t *locks = NULL;
bttp = btt_init(pbp->datasize, (uint32_t)bsize, pbp->hdr.poolset_uuid,
(unsigned)ncpus * 2, pbp, &ns_cb);
if (bttp == NULL)
goto err; /* btt_init set errno, called LOG */
pbp->bttp = bttp;
pbp->nlane = btt_nlane(pbp->bttp);
pbp->next_lane = 0;
if ((locks = Malloc(pbp->nlane * sizeof(*locks))) == NULL) {
ERR("!Malloc for lane locks");
goto err;
}
for (unsigned i = 0; i < pbp->nlane; i++)
util_mutex_init(&locks[i]);
pbp->locks = locks;
#ifdef DEBUG
/* initialize debug lock */
util_mutex_init(&pbp->write_lock);
#endif
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
RANGE_NONE(pbp->addr, sizeof(struct pool_hdr), pbp->is_dev_dax);
/* the data area should be kept read-only for debug version */
RANGE_RO(pbp->data, pbp->datasize, pbp->is_dev_dax);
return 0;
err:
LOG(4, "error clean up");
int oerrno = errno;
if (bttp)
btt_fini(bttp);
errno = oerrno;
return -1;
}
/*
* pmemblk_createU -- create a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
LOG(3, "path %s bsize %zu poolsize %zu mode %o",
path, bsize, poolsize, mode);
/* check if bsize is valid */
if (bsize == 0) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
if (bsize > UINT32_MAX) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
struct pool_set *set;
if (util_pool_create(&set, path, poolsize, PMEMBLK_MIN_POOL,
PMEMBLK_MIN_PART, &Blk_create_attr, NULL,
REPLICAS_DISABLED) != 0) {
LOG(2, "cannot create pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
/* create pool descriptor */
blk_descr_create(pbp, (uint32_t)bsize, set->zeroed);
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, 0) != 0) {
ERR("pool initialization failed");
goto err;
}
if (util_poolset_chmod(set, mode))
goto err;
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DELETE_CREATED_PARTS);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_create -- create a block memory pool
*/
PMEMblkpool *
pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
return pmemblk_createU(path, bsize, poolsize, mode);
}
#else
/*
* pmemblk_createW -- create a block memory pool
*/
PMEMblkpool *
pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize,
mode_t mode)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_createU(upath, bsize, poolsize, mode);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* blk_open_common -- (internal) open a block memory pool
*
* This routine does all the work, but takes a cow flag so internal
* calls can map a read-only pool if required.
*
* Passing in bsize == 0 means a valid pool header must exist (which
* will supply the block size).
*/
static PMEMblkpool *
blk_open_common(const char *path, size_t bsize, unsigned flags)
{
LOG(3, "path %s bsize %zu flags 0x%x", path, bsize, flags);
struct pool_set *set;
if (util_pool_open(&set, path, PMEMBLK_MIN_PART, &Blk_open_attr,
NULL, NULL, flags) != 0) {
LOG(2, "cannot open pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* validate pool descriptor */
if (blk_descr_check(pbp, &bsize) != 0) {
LOG(2, "descriptor check failed");
goto err;
}
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, set->rdonly) != 0) {
ERR("pool initialization failed");
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DO_NOT_DELETE_PARTS);
errno = oerrno;
return NULL;
}
/*
* pmemblk_openU -- open a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_openU(const char *path, size_t bsize)
{
LOG(3, "path %s bsize %zu", path, bsize);
return blk_open_common(path, bsize, 0);
}
#ifndef _WIN32
/*
* pmemblk_open -- open a block memory pool
*/
PMEMblkpool *
pmemblk_open(const char *path, size_t bsize)
{
return pmemblk_openU(path, bsize);
}
#else
/*
* pmemblk_openW -- open a block memory pool
*/
PMEMblkpool *
pmemblk_openW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_openU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_close -- close a block memory pool
*/
void
pmemblk_close(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
btt_fini(pbp->bttp);
if (pbp->locks) {
for (unsigned i = 0; i < pbp->nlane; i++)
os_mutex_destroy(&pbp->locks[i]);
Free((void *)pbp->locks);
}
#ifdef DEBUG
/* destroy debug lock */
os_mutex_destroy(&pbp->write_lock);
#endif
util_poolset_close(pbp->set, DO_NOT_DELETE_PARTS);
}
/*
* pmemblk_bsize -- return size of block for specified pool
*/
size_t
pmemblk_bsize(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return le32toh(pbp->bsize);
}
/*
* pmemblk_nblock -- return number of usable blocks in a block memory pool
*/
size_t
pmemblk_nblock(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return btt_nlba(pbp->bttp);
}
/*
* pmemblk_read -- read a block in a block memory pool
*/
int
pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_read(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_write -- write a block (atomically) in a block memory pool
*/
int
pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_write(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
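/*
 * Usage sketch (illustrative only, kept out of the build): a typical
 * atomic-block round trip through the public API. The path, block size
 * and mode are made up for the example.
 */
#if 0
static int
blk_roundtrip(void)
{
	PMEMblkpool *pbp = pmemblk_create("/pmem/example.blk", 512,
			PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL)
		return -1;	/* errno set by pmemblk_create() */

	char buf[512];
	memset(buf, 'x', sizeof(buf));

	/* a successful write of a full block is atomic */
	if (pmemblk_write(pbp, buf, 0) < 0 ||
	    pmemblk_read(pbp, buf, 0) < 0) {
		pmemblk_close(pbp);
		return -1;
	}

	pmemblk_close(pbp);
	return 0;
}
#endif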
/*
* pmemblk_set_zero -- zero a block in a block memory pool
*/
int
pmemblk_set_zero(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_zero(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_set_error -- set the error state on a block in a block memory pool
*/
int
pmemblk_set_error(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_error(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_checkU -- block memory pool consistency check
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_checkU(const char *path, size_t bsize)
{
LOG(3, "path \"%s\" bsize %zu", path, bsize);
/* map the pool read-only */
PMEMblkpool *pbp = blk_open_common(path, bsize, POOL_OPEN_COW);
if (pbp == NULL)
return -1; /* errno set by blk_open_common() */
int retval = btt_check(pbp->bttp);
int oerrno = errno;
pmemblk_close(pbp);
errno = oerrno;
return retval;
}
#ifndef _WIN32
/*
* pmemblk_check -- block memory pool consistency check
*/
int
pmemblk_check(const char *path, size_t bsize)
{
return pmemblk_checkU(path, bsize);
}
#else
/*
* pmemblk_checkW -- block memory pool consistency check
*/
int
pmemblk_checkW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return -1;
int ret = pmemblk_checkU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_ctl_getU -- programmatically executes a read ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
/*
* pmemblk_ctl_setU -- programmatically executes a write ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
/*
* pmemblk_ctl_execU -- programmatically executes a runnable ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
#ifndef _WIN32
/*
* pmemblk_ctl_get -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_getU(pbp, name, arg);
}
/*
* pmemblk_ctl_set -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_setU(pbp, name, arg);
}
/*
* pmemblk_ctl_exec -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_execU(pbp, name, arg);
}
#else
/*
* pmemblk_ctl_getW -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_getU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_setW -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_setU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_execW -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_execU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
#endif
| 21,240 | 21.195402 | 77 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container_ravl.c | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container_ravl.c -- implementation of ravl-based block container
*/
#include "container_ravl.h"
#include "ravl.h"
#include "out.h"
#include "sys_util.h"
struct block_container_ravl {
struct block_container super;
struct ravl *tree;
};
/*
* container_compare_memblocks -- (internal) compares two memory blocks
*/
static int
container_compare_memblocks(const void *lhs, const void *rhs)
{
const struct memory_block *l = lhs;
const struct memory_block *r = rhs;
int64_t diff = (int64_t)l->size_idx - (int64_t)r->size_idx;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->block_off - (int64_t)r->block_off;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
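/*
 * Explanatory note (not from the original source): because size_idx is the
 * most significant key, a ravl search with the GREATER_EQUAL predicate on
 * a key that fills in only the requested size lands on the smallest block
 * that still fits; the zone/chunk/offset fields act as tie-breakers and
 * make every key unique.
 */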
/*
* container_ravl_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_ravl_insert_block(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct memory_block *e = m->m_ops->get_user_data(m);
VALGRIND_DO_MAKE_MEM_DEFINED(e, sizeof(*e));
VALGRIND_ADD_TO_TX(e, sizeof(*e));
*e = *m;
VALGRIND_SET_CLEAN(e, sizeof(*e));
VALGRIND_REMOVE_FROM_TX(e, sizeof(*e));
return ravl_insert(c->tree, e);
}
/*
* container_ravl_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_ravl_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL)
return ENOMEM;
struct memory_block *e = ravl_data(n);
*m = *e;
ravl_remove(c->tree, n);
return 0;
}
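/*
 * Worked example of the lookup above (illustrative sketch): assuming the
 * tree holds blocks with size_idx 2, 4 and 8, a request with size_idx 3
 * matches the size_idx == 4 block (the smallest one >= 3); the block is
 * copied into *m and removed from the tree in one step.
 */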
/*
* container_ravl_get_rm_block_exact --
* (internal) removes exact match memory block
*/
static int
container_ravl_get_rm_block_exact(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m, RAVL_PREDICATE_EQUAL);
if (n == NULL)
return ENOMEM;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_get_block_exact -- (internal) finds exact match memory block
*/
static int
container_ravl_get_block_exact(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
return ravl_find(c->tree, m, RAVL_PREDICATE_EQUAL) ? 0 : ENOMEM;
}
/*
* container_ravl_is_empty -- (internal) checks whether the container is empty
*/
static int
container_ravl_is_empty(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
return ravl_empty(c->tree);
}
/*
* container_ravl_rm_all -- (internal) removes all elements from the tree
*/
static void
container_ravl_rm_all(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_clear(c->tree);
}
/*
 * container_ravl_destroy -- (internal) destroys the container
*/
static void
container_ravl_destroy(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_delete(c->tree);
Free(bc);
}
/*
* Tree-based block container used to provide best-fit functionality to the
* bucket. The time complexity for this particular container is O(k) where k is
* the length of the key.
*
* The get methods also guarantee that the block with lowest possible address
* that best matches the requirements is provided.
*/
static struct block_container_ops container_ravl_ops = {
.insert = container_ravl_insert_block,
.get_rm_exact = container_ravl_get_rm_block_exact,
.get_rm_bestfit = container_ravl_get_rm_block_bestfit,
.get_exact = container_ravl_get_block_exact,
.is_empty = container_ravl_is_empty,
.rm_all = container_ravl_rm_all,
.destroy = container_ravl_destroy,
};
/*
* container_new_ravl -- allocates and initializes a ravl container
*/
struct block_container *
container_new_ravl(struct palloc_heap *heap)
{
struct block_container_ravl *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_ravl_ops;
bc->tree = ravl_new(container_compare_memblocks);
if (bc->tree == NULL)
goto error_ravl_new;
return (struct block_container *)&bc->super;
error_ravl_new:
Free(bc);
error_container_malloc:
return NULL;
}
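/*
 * Minimal usage sketch for this container (illustrative only; 'heap' is
 * assumed to be an already initialized palloc_heap):
 *
 *	struct block_container *bc = container_new_ravl(heap);
 *	if (bc != NULL) {
 *		// ... bc->c_ops->insert(bc, &m), lookups, etc. ...
 *		bc->c_ops->destroy(bc);
 *	}
 */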
| 6,213 | 25.784483 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/heap_layout.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* heap_layout.h -- internal definitions for heap layout
*/
#ifndef LIBPMEMOBJ_HEAP_LAYOUT_H
#define LIBPMEMOBJ_HEAP_LAYOUT_H 1
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_MAJOR 1
#define HEAP_MINOR 0
#define MAX_CHUNK (UINT16_MAX - 7) /* has to be multiple of 8 */
#define CHUNK_BASE_ALIGNMENT 1024
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define MAX_MEMORY_BLOCK_SIZE (MAX_CHUNK * CHUNKSIZE)
#define HEAP_SIGNATURE_LEN 16
#define HEAP_SIGNATURE "MEMORY_HEAP_HDR\0"
#define ZONE_HEADER_MAGIC 0xC3F0A2D2
#define ZONE_MIN_SIZE (sizeof(struct zone) + sizeof(struct chunk))
#define ZONE_MAX_SIZE (sizeof(struct zone) + sizeof(struct chunk) * MAX_CHUNK)
#define HEAP_MIN_SIZE (sizeof(struct heap_layout) + ZONE_MIN_SIZE)
/* Base bitmap values, relevant for both normal and flexible bitmaps */
#define RUN_BITS_PER_VALUE 64U
#define RUN_BASE_METADATA_VALUES\
((unsigned)(sizeof(struct chunk_run_header) / sizeof(uint64_t)))
#define RUN_BASE_METADATA_SIZE (sizeof(struct chunk_run_header))
#define RUN_CONTENT_SIZE (CHUNKSIZE - RUN_BASE_METADATA_SIZE)
/*
* Calculates the size in bytes of a single run instance, including bitmap
*/
#define RUN_CONTENT_SIZE_BYTES(size_idx)\
(RUN_CONTENT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
/* Default bitmap values, specific for old, non-flexible, bitmaps */
#define RUN_DEFAULT_METADATA_VALUES 40 /* in 8 byte words, 320 bytes total */
#define RUN_DEFAULT_BITMAP_VALUES \
(RUN_DEFAULT_METADATA_VALUES - RUN_BASE_METADATA_VALUES)
#define RUN_DEFAULT_BITMAP_SIZE (sizeof(uint64_t) * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_BITMAP_NBITS\
(RUN_BITS_PER_VALUE * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_SIZE \
(CHUNKSIZE - RUN_BASE_METADATA_SIZE - RUN_DEFAULT_BITMAP_SIZE)
/*
* Calculates the size in bytes of a single run instance, without bitmap,
* but only for the default fixed-bitmap algorithm
*/
#define RUN_DEFAULT_SIZE_BYTES(size_idx)\
(RUN_DEFAULT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
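/*
 * Worked example for the constants above (derived from the definitions in
 * this header, not present in the original): with CHUNKSIZE == 262144 and
 * sizeof(struct chunk_run_header) == 16:
 *	RUN_BASE_METADATA_VALUES  == 2
 *	RUN_DEFAULT_BITMAP_VALUES == 40 - 2 == 38
 *	RUN_DEFAULT_BITMAP_SIZE   == 38 * 8 == 304 bytes
 *	RUN_DEFAULT_BITMAP_NBITS  == 38 * 64 == 2432 bits
 *	RUN_DEFAULT_SIZE          == 262144 - 16 - 304 == 261824 bytes
 * so a single-chunk default run can track up to 2432 allocation units.
 */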
#define CHUNK_MASK ((CHUNKSIZE) - 1)
#define CHUNK_ALIGN_UP(value) ((((value) + CHUNK_MASK) & ~CHUNK_MASK))
enum chunk_flags {
CHUNK_FLAG_COMPACT_HEADER = 0x0001,
CHUNK_FLAG_HEADER_NONE = 0x0002,
CHUNK_FLAG_ALIGNED = 0x0004,
CHUNK_FLAG_FLEX_BITMAP = 0x0008,
};
#define CHUNK_FLAGS_ALL_VALID (\
CHUNK_FLAG_COMPACT_HEADER |\
CHUNK_FLAG_HEADER_NONE |\
CHUNK_FLAG_ALIGNED |\
CHUNK_FLAG_FLEX_BITMAP\
)
enum chunk_type {
CHUNK_TYPE_UNKNOWN,
CHUNK_TYPE_FOOTER, /* not actual chunk type */
CHUNK_TYPE_FREE,
CHUNK_TYPE_USED,
CHUNK_TYPE_RUN,
CHUNK_TYPE_RUN_DATA,
MAX_CHUNK_TYPE
};
struct chunk {
uint8_t data[CHUNKSIZE];
};
struct chunk_run_header {
uint64_t block_size;
uint64_t alignment; /* valid only /w CHUNK_FLAG_ALIGNED */
};
struct chunk_run {
struct chunk_run_header hdr;
uint8_t content[RUN_CONTENT_SIZE]; /* bitmap + data */
};
struct chunk_header {
uint16_t type;
uint16_t flags;
uint32_t size_idx;
};
struct zone_header {
uint32_t magic;
uint32_t size_idx;
uint8_t reserved[56];
};
struct zone {
struct zone_header header;
struct chunk_header chunk_headers[MAX_CHUNK];
struct chunk chunks[];
};
struct heap_header {
char signature[HEAP_SIGNATURE_LEN];
uint64_t major;
uint64_t minor;
uint64_t unused; /* might be garbage */
uint64_t chunksize;
uint64_t chunks_per_zone;
uint8_t reserved[960];
uint64_t checksum;
};
struct heap_layout {
struct heap_header header;
struct zone zone0; /* first element of zones array */
};
#define ALLOC_HDR_SIZE_SHIFT (48ULL)
#define ALLOC_HDR_FLAGS_MASK (((1ULL) << ALLOC_HDR_SIZE_SHIFT) - 1)
struct allocation_header_legacy {
uint8_t unused[8];
uint64_t size;
uint8_t unused2[32];
uint64_t root_size;
uint64_t type_num;
};
#define ALLOC_HDR_COMPACT_SIZE sizeof(struct allocation_header_compact)
struct allocation_header_compact {
uint64_t size;
uint64_t extra;
};
enum header_type {
HEADER_LEGACY,
HEADER_COMPACT,
HEADER_NONE,
MAX_HEADER_TYPES
};
static const size_t header_type_to_size[MAX_HEADER_TYPES] = {
sizeof(struct allocation_header_legacy),
sizeof(struct allocation_header_compact),
0
};
static const enum chunk_flags header_type_to_flag[MAX_HEADER_TYPES] = {
(enum chunk_flags)0,
CHUNK_FLAG_COMPACT_HEADER,
CHUNK_FLAG_HEADER_NONE
};
static inline struct zone *
ZID_TO_ZONE(struct heap_layout *layout, size_t zone_id)
{
return (struct zone *)
((uintptr_t)&layout->zone0 + ZONE_MAX_SIZE * zone_id);
}
static inline struct chunk_header *
GET_CHUNK_HDR(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunk_headers[chunk_id];
}
static inline struct chunk *
GET_CHUNK(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunks[chunk_id];
}
static inline struct chunk_run *
GET_CHUNK_RUN(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return (struct chunk_run *)GET_CHUNK(layout, zone_id, chunk_id);
}
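/*
 * Example of the address arithmetic implemented by the helpers above
 * (explanatory sketch): zone N starts N * ZONE_MAX_SIZE bytes past
 * &layout->zone0, i.e. past N copies of a zone header, MAX_CHUNK chunk
 * headers and MAX_CHUNK chunks; chunk_id then indexes into that zone's
 * arrays:
 *
 *	struct chunk_header *h = GET_CHUNK_HDR(layout, 1, 7);
 *	// equivalent to &ZID_TO_ZONE(layout, 1)->chunk_headers[7]
 */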
#ifdef __cplusplus
}
#endif
#endif
| 6,620 | 27.055085 | 78 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/alloc_class.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* alloc_class.h -- internal definitions for allocation classes
*/
#ifndef LIBPMEMOBJ_ALLOC_CLASS_H
#define LIBPMEMOBJ_ALLOC_CLASS_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "heap_layout.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_ALLOCATION_CLASSES (UINT8_MAX)
#define DEFAULT_ALLOC_CLASS_ID (0)
#define RUN_UNIT_MAX RUN_BITS_PER_VALUE
struct alloc_class_collection;
enum alloc_class_type {
CLASS_UNKNOWN,
CLASS_HUGE,
CLASS_RUN,
MAX_ALLOC_CLASS_TYPES
};
struct alloc_class {
uint8_t id;
uint16_t flags;
size_t unit_size;
enum header_type header_type;
enum alloc_class_type type;
/* run-specific data */
struct {
uint32_t size_idx; /* size index of a single run instance */
size_t alignment; /* required alignment of objects */
unsigned nallocs; /* number of allocs per run */
} run;
};
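/*
 * Explanatory note (not from the original source): for CLASS_RUN classes
 * the run sub-struct determines the layout -- a run spans run.size_idx
 * chunks, is carved into unit_size-byte blocks and can hand out at most
 * run.nallocs allocations; CLASS_HUGE classes ignore these fields and
 * allocate whole chunks instead.
 */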
struct alloc_class_collection *alloc_class_collection_new(void);
void alloc_class_collection_delete(struct alloc_class_collection *ac);
struct alloc_class *alloc_class_by_run(
struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx);
struct alloc_class *alloc_class_by_alloc_size(
struct alloc_class_collection *ac, size_t size);
struct alloc_class *alloc_class_by_id(
struct alloc_class_collection *ac, uint8_t id);
int alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id);
int alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot);
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size);
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx);
void alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c);
#ifdef __cplusplus
}
#endif
#endif
| 3,468 | 29.699115 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/recycler.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* recycler.c -- implementation of run recycler
*/
#include "heap.h"
#include "recycler.h"
#include "vec.h"
#include "out.h"
#include "util.h"
#include "sys_util.h"
#include "ravl.h"
#include "valgrind_internal.h"
#define THRESHOLD_MUL 4
/*
* recycler_element_cmp -- compares two recycler elements
*/
static int
recycler_element_cmp(const void *lhs, const void *rhs)
{
const struct recycler_element *l = lhs;
const struct recycler_element *r = rhs;
int64_t diff = (int64_t)l->max_free_block - (int64_t)r->max_free_block;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->free_space - (int64_t)r->free_space;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
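/*
 * Explanatory note (not from the original source): max_free_block is the
 * most significant key, so a GREATER_EQUAL search keyed only on the
 * required contiguous space finds the run whose largest free block is just
 * big enough -- allocations are steered towards already fragmented runs,
 * leaving emptier runs a chance to drain completely.
 */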
struct recycler {
struct ravl *runs;
struct palloc_heap *heap;
/*
* How many unaccounted units there *might* be inside of the memory
* blocks stored in the recycler.
* The value is not meant to be accurate, but rather a rough measure on
* how often should the memory block scores be recalculated.
*
* Per-chunk unaccounted units are shared for all zones, which might
* lead to some unnecessary recalculations.
*/
size_t unaccounted_units[MAX_CHUNK];
size_t unaccounted_total;
size_t nallocs;
size_t recalc_threshold;
VEC(, struct recycler_element) recalc;
VEC(, struct memory_block_reserved *) pending;
os_mutex_t lock;
};
/*
* recycler_new -- creates new recycler instance
*/
struct recycler *
recycler_new(struct palloc_heap *heap, size_t nallocs)
{
struct recycler *r = Malloc(sizeof(struct recycler));
if (r == NULL)
goto error_alloc_recycler;
r->runs = ravl_new_sized(recycler_element_cmp,
sizeof(struct recycler_element));
if (r->runs == NULL)
goto error_alloc_tree;
r->heap = heap;
r->nallocs = nallocs;
r->recalc_threshold = nallocs * THRESHOLD_MUL;
r->unaccounted_total = 0;
memset(&r->unaccounted_units, 0, sizeof(r->unaccounted_units));
VEC_INIT(&r->recalc);
VEC_INIT(&r->pending);
os_mutex_init(&r->lock);
return r;
error_alloc_tree:
Free(r);
error_alloc_recycler:
return NULL;
}
/*
* recycler_delete -- deletes recycler instance
*/
void
recycler_delete(struct recycler *r)
{
VEC_DELETE(&r->recalc);
struct memory_block_reserved *mr;
VEC_FOREACH(mr, &r->pending) {
Free(mr);
}
VEC_DELETE(&r->pending);
os_mutex_destroy(&r->lock);
ravl_delete(r->runs);
Free(r);
}
/*
 * recycler_element_new -- calculates how many free bytes a run has and
 *	what the largest request is that the run can handle; returns that
 *	as a recycler element struct
*/
struct recycler_element
recycler_element_new(struct palloc_heap *heap, const struct memory_block *m)
{
/*
* Counting of the clear bits can race with a concurrent deallocation
* that operates on the same run. This race is benign and has absolutely
* no effect on the correctness of this algorithm. Ideally, we would
* avoid grabbing the lock, but helgrind gets very confused if we
* try to disable reporting for this function.
*/
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
struct recycler_element e = {
.free_space = 0,
.max_free_block = 0,
.chunk_id = m->chunk_id,
.zone_id = m->zone_id,
};
m->m_ops->calc_free(m, &e.free_space, &e.max_free_block);
util_mutex_unlock(lock);
return e;
}
/*
* recycler_put -- inserts new run into the recycler
*/
int
recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element)
{
int ret = 0;
util_mutex_lock(&r->lock);
ret = ravl_emplace_copy(r->runs, &element);
util_mutex_unlock(&r->lock);
return ret;
}
/*
* recycler_pending_check -- iterates through pending memory blocks, checks
 *	the reservation status, and puts it in the recycler if there
* are no more unfulfilled reservations for the block.
*/
static void
recycler_pending_check(struct recycler *r)
{
struct memory_block_reserved *mr = NULL;
size_t pos;
VEC_FOREACH_BY_POS(pos, &r->pending) {
mr = VEC_ARR(&r->pending)[pos];
if (mr->nresv == 0) {
struct recycler_element e = recycler_element_new(
r->heap, &mr->m);
if (ravl_emplace_copy(r->runs, &e) != 0) {
ERR("unable to track run %u due to OOM",
mr->m.chunk_id);
}
Free(mr);
VEC_ERASE_BY_POS(&r->pending, pos);
}
}
}
/*
 * recycler_get -- retrieves a run memory block from the recycler
*/
int
recycler_get(struct recycler *r, struct memory_block *m)
{
int ret = 0;
util_mutex_lock(&r->lock);
recycler_pending_check(r);
struct recycler_element e = { .max_free_block = m->size_idx, 0, 0, 0};
struct ravl_node *n = ravl_find(r->runs, &e,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL) {
ret = ENOMEM;
goto out;
}
struct recycler_element *ne = ravl_data(n);
m->chunk_id = ne->chunk_id;
m->zone_id = ne->zone_id;
ravl_remove(r->runs, n);
struct chunk_header *hdr = heap_get_chunk_hdr(r->heap, m);
m->size_idx = hdr->size_idx;
memblock_rebuild_state(r->heap, m);
out:
util_mutex_unlock(&r->lock);
return ret;
}
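/*
 * Usage sketch for the get path above (illustrative; assumes a recycler
 * 'r' that was populated via recycler_put()):
 *
 *	struct memory_block m = MEMORY_BLOCK_NONE;
 *	m.size_idx = 1; // smallest acceptable max free block
 *	if (recycler_get(r, &m) == 0) {
 *		// m now describes a run with at least one free unit
 *	}
 */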
/*
* recycler_pending_put -- places the memory block in the pending container
*/
void
recycler_pending_put(struct recycler *r,
struct memory_block_reserved *m)
{
util_mutex_lock(&r->lock);
if (VEC_PUSH_BACK(&r->pending, m) != 0)
ASSERT(0); /* XXX: fix after refactoring */
util_mutex_unlock(&r->lock);
}
/*
* recycler_recalc -- recalculates the scores of runs in the recycler to match
* the updated persistent state
*/
struct empty_runs
recycler_recalc(struct recycler *r, int force)
{
struct empty_runs runs;
VEC_INIT(&runs);
uint64_t units = r->unaccounted_total;
if (units == 0 || (!force && units < (r->recalc_threshold)))
return runs;
if (util_mutex_trylock(&r->lock) != 0)
return runs;
/* If the search is forced, recalculate everything */
uint64_t search_limit = force ? UINT64_MAX : units;
uint64_t found_units = 0;
struct memory_block nm = MEMORY_BLOCK_NONE;
struct ravl_node *n;
struct recycler_element next = {0, 0, 0, 0};
enum ravl_predicate p = RAVL_PREDICATE_GREATER_EQUAL;
do {
if ((n = ravl_find(r->runs, &next, p)) == NULL)
break;
p = RAVL_PREDICATE_GREATER;
struct recycler_element *ne = ravl_data(n);
next = *ne;
uint64_t chunk_units = r->unaccounted_units[ne->chunk_id];
if (!force && chunk_units == 0)
continue;
uint32_t existing_free_space = ne->free_space;
nm.chunk_id = ne->chunk_id;
nm.zone_id = ne->zone_id;
memblock_rebuild_state(r->heap, &nm);
struct recycler_element e = recycler_element_new(r->heap, &nm);
ASSERT(e.free_space >= existing_free_space);
uint64_t free_space_diff = e.free_space - existing_free_space;
found_units += free_space_diff;
if (free_space_diff == 0)
continue;
/*
* Decrease the per chunk_id counter by the number of nallocs
* found, increased by the blocks potentially freed in the
* active memory block. Cap the sub value to prevent overflow.
*/
util_fetch_and_sub64(&r->unaccounted_units[nm.chunk_id],
MIN(chunk_units, free_space_diff + r->nallocs));
ravl_remove(r->runs, n);
if (e.free_space == r->nallocs) {
memblock_rebuild_state(r->heap, &nm);
if (VEC_PUSH_BACK(&runs, nm) != 0)
ASSERT(0); /* XXX: fix after refactoring */
} else {
VEC_PUSH_BACK(&r->recalc, e);
}
} while (found_units < search_limit);
struct recycler_element *e;
VEC_FOREACH_BY_PTR(e, &r->recalc) {
ravl_emplace_copy(r->runs, e);
}
VEC_CLEAR(&r->recalc);
util_mutex_unlock(&r->lock);
util_fetch_and_sub64(&r->unaccounted_total, units);
return runs;
}
/*
* recycler_inc_unaccounted -- increases the number of unaccounted units in the
* recycler
*/
void
recycler_inc_unaccounted(struct recycler *r, const struct memory_block *m)
{
util_fetch_and_add64(&r->unaccounted_total, m->size_idx);
util_fetch_and_add64(&r->unaccounted_units[m->chunk_id],
m->size_idx);
}
| 9,575 | 24.604278 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ctl_debug.c | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctl_debug.c -- implementation of the debug CTL namespace
*/
#include "ctl.h"
#include "ctl_debug.h"
#include "obj.h"
/*
* CTL_WRITE_HANDLER(alloc_pattern) -- sets the alloc_pattern field in heap
*/
static int
CTL_WRITE_HANDLER(alloc_pattern)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
pop->heap.alloc_pattern = arg_in;
return 0;
}
/*
* CTL_READ_HANDLER(alloc_pattern) -- returns alloc_pattern heap field
*/
static int
CTL_READ_HANDLER(alloc_pattern)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
*arg_out = pop->heap.alloc_pattern;
return 0;
}
static struct ctl_argument CTL_ARG(alloc_pattern) = CTL_ARG_LONG_LONG;
static const struct ctl_node CTL_NODE(heap)[] = {
CTL_LEAF_RW(alloc_pattern),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(debug)[] = {
CTL_CHILD(heap),
CTL_NODE_END
};
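/*
 * Illustrative sketch (not part of the original source): once registered,
 * the leaf above should be reachable programmatically under the path
 * "debug.heap.alloc_pattern", e.g.:
 *
 *	int pattern = 0xAB;
 *	pmemobj_ctl_set(pop, "debug.heap.alloc_pattern", &pattern);
 */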
/*
* debug_ctl_register -- registers ctl nodes for "debug" module
*/
void
debug_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, debug);
}
| 2,753 | 29.263736 | 75 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/alloc_class.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* alloc_class.c -- implementation of allocation classes
*/
#include <float.h>
#include <string.h>
#include "alloc_class.h"
#include "heap_layout.h"
#include "util.h"
#include "out.h"
#include "bucket.h"
#include "cuckoo.h"
#define RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s)\
((uint64_t)(map_idx_s) << 32 |\
(uint64_t)(flags_s) << 16 |\
(uint64_t)(size_idx_s))
/*
* Value used to mark a reserved spot in the bucket array.
*/
#define ACLASS_RESERVED ((void *)0xFFFFFFFFULL)
/*
* The last size that is handled by runs.
*/
#define MAX_RUN_SIZE (CHUNKSIZE * 10)
/*
* Maximum number of bytes the allocation class generation algorithm can decide
* to waste in a single run chunk.
*/
#define MAX_RUN_WASTED_BYTES 1024
/*
* Allocation categories are used for allocation classes generation. Each one
* defines the biggest handled size (in bytes) and step pct of the generation
* process. The step percentage defines maximum allowed external fragmentation
* for the category.
*/
#define MAX_ALLOC_CATEGORIES 9
/*
 * The first size (in bytes) which is actually used in the allocation
* class generation algorithm. All smaller sizes use the first predefined bucket
* with the smallest run unit size.
*/
#define FIRST_GENERATED_CLASS_SIZE 128
/*
* The granularity of the allocation class generation algorithm.
*/
#define ALLOC_BLOCK_SIZE_GEN 64
/*
* The first predefined allocation class size
*/
#define MIN_UNIT_SIZE 128
static struct {
size_t size;
float step;
} categories[MAX_ALLOC_CATEGORIES] = {
/* dummy category - the first allocation class is predefined */
{FIRST_GENERATED_CLASS_SIZE, 0.05f},
{1024, 0.05f},
{2048, 0.05f},
{4096, 0.05f},
{8192, 0.05f},
{16384, 0.05f},
{32768, 0.05f},
{131072, 0.05f},
{393216, 0.05f},
};
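/*
 * Worked example of the category step (derived from the generation loop in
 * alloc_class_collection_new(), not present in the original): for a class
 * of size n == 1024 in a category with step 0.05, the raw step is
 * ceil(1024 * 0.05) == 52 bytes, rounded up to the 64-byte generation
 * granularity -- the next candidate class is 1088 bytes, so consecutive
 * classes differ by roughly the step percentage.
 */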
#define RUN_UNIT_MAX_ALLOC 8U
/*
* Every allocation has to be a multiple of at least 8 because we need to
* ensure proper alignment of every pmem structure.
*/
#define ALLOC_BLOCK_SIZE 16
/*
* Converts size (in bytes) to number of allocation blocks.
*/
#define SIZE_TO_CLASS_MAP_INDEX(_s, _g) (1 + (((_s) - 1) / (_g)))
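/*
 * Worked example (not in the original): with _g == ALLOC_BLOCK_SIZE (16),
 * sizes 1..16 map to index 1 and sizes 17..32 to index 2 -- the index is
 * effectively ceil(_s / _g), with index 0 left unused.
 */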
/*
* Target number of allocations per run instance.
*/
#define RUN_MIN_NALLOCS 200
/*
* Hard limit of chunks per single run.
*/
#define RUN_SIZE_IDX_CAP (16)
#define ALLOC_CLASS_DEFAULT_FLAGS CHUNK_FLAG_FLEX_BITMAP
struct alloc_class_collection {
size_t granularity;
struct alloc_class *aclasses[MAX_ALLOCATION_CLASSES];
/*
* The last size (in bytes) that is handled by runs, everything bigger
* uses the default class.
*/
size_t last_run_max_size;
/* maps allocation classes to allocation sizes, excluding the header! */
uint8_t *class_map_by_alloc_size;
/* maps allocation classes to run unit sizes */
struct cuckoo *class_map_by_unit_size;
int fail_on_missing_class;
int autogenerate_on_missing_class;
};
/*
* alloc_class_find_first_free_slot -- searches for the
* first available allocation class slot
*
* This function must be thread-safe because allocation classes can be created
* at runtime.
*/
int
alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot)
{
LOG(10, NULL);
for (int n = 0; n < MAX_ALLOCATION_CLASSES; ++n) {
if (util_bool_compare_and_swap64(&ac->aclasses[n],
NULL, ACLASS_RESERVED)) {
*slot = (uint8_t)n;
return 0;
}
}
return -1;
}
/*
* alloc_class_reserve -- reserve the specified class id
*/
int
alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id)
{
LOG(10, NULL);
return util_bool_compare_and_swap64(&ac->aclasses[id],
NULL, ACLASS_RESERVED) ? 0 : -1;
}
/*
* alloc_class_reservation_clear -- removes the reservation on class id
*/
static void
alloc_class_reservation_clear(struct alloc_class_collection *ac, int id)
{
LOG(10, NULL);
int ret = util_bool_compare_and_swap64(&ac->aclasses[id],
ACLASS_RESERVED, NULL);
ASSERT(ret);
}
/*
* alloc_class_new -- creates a new allocation class
*/
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx)
{
LOG(10, NULL);
struct alloc_class *c = Malloc(sizeof(*c));
if (c == NULL)
goto error_class_alloc;
c->unit_size = unit_size;
c->header_type = htype;
c->type = type;
c->flags = (uint16_t)
(header_type_to_flag[c->header_type] |
(alignment ? CHUNK_FLAG_ALIGNED : 0)) |
ALLOC_CLASS_DEFAULT_FLAGS;
switch (type) {
case CLASS_HUGE:
id = DEFAULT_ALLOC_CLASS_ID;
break;
case CLASS_RUN:
c->run.alignment = alignment;
struct run_bitmap b;
memblock_run_bitmap(&size_idx, c->flags, unit_size,
alignment, NULL, &b);
c->run.nallocs = b.nbits;
c->run.size_idx = size_idx;
uint8_t slot = (uint8_t)id;
if (id < 0 && alloc_class_find_first_free_slot(ac,
&slot) != 0)
goto error_class_alloc;
id = slot;
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(c->unit_size,
ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)c->flags;
uint64_t k = RUN_CLASS_KEY_PACK(map_idx_s,
flags_s, size_idx_s);
if (cuckoo_insert(ac->class_map_by_unit_size,
k, c) != 0) {
ERR("unable to register allocation class");
goto error_map_insert;
}
break;
default:
ASSERT(0);
}
c->id = (uint8_t)id;
ac->aclasses[c->id] = c;
return c;
error_map_insert:
Free(c);
error_class_alloc:
if (id >= 0)
alloc_class_reservation_clear(ac, id);
return NULL;
}
/*
* alloc_class_delete -- (internal) deletes an allocation class
*/
void
alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c)
{
LOG(10, NULL);
ac->aclasses[c->id] = NULL;
Free(c);
}
/*
* alloc_class_find_or_create -- (internal) searches for the
 * biggest allocation class whose unit_size evenly divides n.
* If no such class exists, create one.
*/
static struct alloc_class *
alloc_class_find_or_create(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
COMPILE_ERROR_ON(MAX_ALLOCATION_CLASSES > UINT8_MAX);
uint64_t required_size_bytes = n * RUN_MIN_NALLOCS;
uint32_t required_size_idx = 1;
if (required_size_bytes > RUN_DEFAULT_SIZE) {
required_size_bytes -= RUN_DEFAULT_SIZE;
required_size_idx +=
CALC_SIZE_IDX(CHUNKSIZE, required_size_bytes);
if (required_size_idx > RUN_SIZE_IDX_CAP)
required_size_idx = RUN_SIZE_IDX_CAP;
}
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE ||
c->run.size_idx < required_size_idx)
continue;
if (n % c->unit_size == 0 &&
n / c->unit_size <= RUN_UNIT_MAX_ALLOC)
return c;
}
/*
* In order to minimize the wasted space at the end of the run the
* run data size must be divisible by the allocation class unit size
* with the smallest possible remainder, preferably 0.
*/
struct run_bitmap b;
size_t runsize_bytes = 0;
do {
if (runsize_bytes != 0) /* don't increase on first iteration */
n += ALLOC_BLOCK_SIZE_GEN;
uint32_t size_idx = required_size_idx;
memblock_run_bitmap(&size_idx, ALLOC_CLASS_DEFAULT_FLAGS, n, 0,
NULL, &b);
runsize_bytes = RUN_CONTENT_SIZE_BYTES(size_idx) - b.size;
} while ((runsize_bytes % n) > MAX_RUN_WASTED_BYTES);
/*
* Now that the desired unit size is found the existing classes need
* to be searched for possible duplicates. If a class that can handle
* the calculated size already exists, simply return that.
*/
for (int i = 1; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE)
continue;
if (n / c->unit_size <= RUN_UNIT_MAX_ALLOC &&
n % c->unit_size == 0)
return c;
if (c->unit_size == n)
return c;
}
return alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT, n, 0,
required_size_idx);
}
/*
* alloc_class_find_min_frag -- searches for an existing allocation
* class that will provide the smallest internal fragmentation for the given
* size.
*/
static struct alloc_class *
alloc_class_find_min_frag(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
struct alloc_class *best_c = NULL;
size_t lowest_waste = SIZE_MAX;
ASSERTne(n, 0);
/*
* Start from the largest buckets in order to minimize unit size of
* allocated memory blocks.
*/
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
/* can't use alloc classes /w no headers by default */
if (c == NULL || c->header_type == HEADER_NONE)
continue;
size_t real_size = n + header_type_to_size[c->header_type];
size_t units = CALC_SIZE_IDX(c->unit_size, real_size);
/* can't exceed the maximum allowed run unit max */
if (c->type == CLASS_RUN && units > RUN_UNIT_MAX_ALLOC)
continue;
if (c->unit_size * units == real_size)
return c;
size_t waste = (c->unit_size * units) - real_size;
/*
* If we assume that the allocation class is only ever going to
* be used with exactly one size, the effective internal
* fragmentation would be increased by the leftover
* memory at the end of the run.
*/
if (c->type == CLASS_RUN) {
size_t wasted_units = c->run.nallocs % units;
size_t wasted_bytes = wasted_units * c->unit_size;
size_t waste_avg_per_unit = wasted_bytes /
c->run.nallocs;
waste += waste_avg_per_unit;
}
if (best_c == NULL || lowest_waste > waste) {
best_c = c;
lowest_waste = waste;
}
}
ASSERTne(best_c, NULL);
return best_c;
}
/*
* alloc_class_collection_new -- creates a new collection of allocation classes
*/
struct alloc_class_collection *
alloc_class_collection_new()
{
LOG(10, NULL);
struct alloc_class_collection *ac = Zalloc(sizeof(*ac));
if (ac == NULL)
return NULL;
ac->granularity = ALLOC_BLOCK_SIZE;
ac->last_run_max_size = MAX_RUN_SIZE;
ac->fail_on_missing_class = 0;
ac->autogenerate_on_missing_class = 1;
size_t maps_size = (MAX_RUN_SIZE / ac->granularity) + 1;
if ((ac->class_map_by_alloc_size = Malloc(maps_size)) == NULL)
goto error;
if ((ac->class_map_by_unit_size = cuckoo_new()) == NULL)
goto error;
memset(ac->class_map_by_alloc_size, 0xFF, maps_size);
if (alloc_class_new(-1, ac, CLASS_HUGE, HEADER_COMPACT,
CHUNKSIZE, 0, 1) == NULL)
goto error;
struct alloc_class *predefined_class =
alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT,
MIN_UNIT_SIZE, 0, 1);
if (predefined_class == NULL)
goto error;
for (size_t i = 0; i < FIRST_GENERATED_CLASS_SIZE / ac->granularity;
++i) {
ac->class_map_by_alloc_size[i] = predefined_class->id;
}
/*
* Based on the defined categories, a set of allocation classes is
 * created. The unit size of those classes depends on the category's
 * initial size and step.
*/
size_t granularity_mask = ALLOC_BLOCK_SIZE_GEN - 1;
for (int c = 1; c < MAX_ALLOC_CATEGORIES; ++c) {
size_t n = categories[c - 1].size + ALLOC_BLOCK_SIZE_GEN;
do {
if (alloc_class_find_or_create(ac, n) == NULL)
goto error;
float stepf = (float)n * categories[c].step;
size_t stepi = (size_t)stepf;
stepi = (stepf - (float)stepi < FLT_EPSILON) ?
stepi : stepi + 1;
n += (stepi + (granularity_mask)) & ~granularity_mask;
} while (n <= categories[c].size);
}
/*
 * Find the largest alloc class and use its unit size as the run allocation
* threshold.
*/
uint8_t largest_aclass_slot;
for (largest_aclass_slot = MAX_ALLOCATION_CLASSES - 1;
largest_aclass_slot > 0 &&
ac->aclasses[largest_aclass_slot] == NULL;
--largest_aclass_slot) {
/* intentional NOP */
}
struct alloc_class *c = ac->aclasses[largest_aclass_slot];
/*
 * The actual run might contain fewer unit blocks than the theoretical
* unit max variable. This may be the case for very large unit sizes.
*/
size_t real_unit_max = c->run.nallocs < RUN_UNIT_MAX_ALLOC ?
c->run.nallocs : RUN_UNIT_MAX_ALLOC;
size_t theoretical_run_max_size = c->unit_size * real_unit_max;
ac->last_run_max_size = MAX_RUN_SIZE > theoretical_run_max_size ?
theoretical_run_max_size : MAX_RUN_SIZE;
#ifdef DEBUG
/*
 * Verify that the bucket map points each bucket's unit size back to that
 * bucket. This must be true for the default allocation classes,
* otherwise duplicate buckets will be created.
*/
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL && c->type == CLASS_RUN) {
ASSERTeq(i, c->id);
ASSERTeq(alloc_class_by_run(ac, c->unit_size,
c->flags, c->run.size_idx), c);
}
}
#endif
return ac;
error:
alloc_class_collection_delete(ac);
return NULL;
}
/*
* alloc_class_collection_delete -- deletes the allocation class collection and
* all of the classes within it
*/
void
alloc_class_collection_delete(struct alloc_class_collection *ac)
{
LOG(10, NULL);
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL) {
alloc_class_delete(ac, c);
}
}
cuckoo_delete(ac->class_map_by_unit_size);
Free(ac->class_map_by_alloc_size);
Free(ac);
}
/*
* alloc_class_assign_by_size -- (internal) chooses the allocation class that
* best approximates the provided size
*/
static struct alloc_class *
alloc_class_assign_by_size(struct alloc_class_collection *ac,
size_t size)
{
LOG(10, NULL);
size_t class_map_index = SIZE_TO_CLASS_MAP_INDEX(size,
ac->granularity);
struct alloc_class *c = alloc_class_find_min_frag(ac,
class_map_index * ac->granularity);
ASSERTne(c, NULL);
/*
* We don't lock this array because locking this section here and then
 * bailing out if someone else was faster would still be slower than
* just calculating the class and failing to assign the variable.
* We are using a compare and swap so that helgrind/drd don't complain.
*/
util_bool_compare_and_swap64(
&ac->class_map_by_alloc_size[class_map_index],
MAX_ALLOCATION_CLASSES, c->id);
return c;
}
/*
* alloc_class_by_alloc_size -- returns allocation class that is assigned
* to handle an allocation of the provided size
*/
struct alloc_class *
alloc_class_by_alloc_size(struct alloc_class_collection *ac, size_t size)
{
if (size < ac->last_run_max_size) {
uint8_t class_id = ac->class_map_by_alloc_size[
SIZE_TO_CLASS_MAP_INDEX(size, ac->granularity)];
if (class_id == MAX_ALLOCATION_CLASSES) {
if (ac->fail_on_missing_class)
return NULL;
else if (ac->autogenerate_on_missing_class)
return alloc_class_assign_by_size(ac, size);
else
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
return ac->aclasses[class_id];
} else {
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
}
/*
* alloc_class_by_run -- returns the allocation class that has the given
* unit size
*/
struct alloc_class *
alloc_class_by_run(struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx)
{
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(unit_size, ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
ASSERT(size_idx <= UINT16_MAX);
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)flags;
return cuckoo_get(ac->class_map_by_unit_size,
RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s));
}
/*
* alloc_class_by_id -- returns the allocation class with an id
*/
struct alloc_class *
alloc_class_by_id(struct alloc_class_collection *ac, uint8_t id)
{
return ac->aclasses[id];
}
/*
 * alloc_class_calc_size_idx -- calculates how many units the size requires
*/
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size)
{
uint32_t size_idx = CALC_SIZE_IDX(c->unit_size,
size + header_type_to_size[c->header_type]);
if (c->type == CLASS_RUN) {
if (c->header_type == HEADER_NONE && size_idx != 1)
return -1;
else if (size_idx > RUN_UNIT_MAX)
return -1;
else if (size_idx > c->run.nallocs)
return -1;
}
return size_idx;
}
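/*
 * Worked example for the calculation above (illustrative): for a run class
 * with unit_size == 128 and a compact header (16 bytes), a 200-byte request
 * needs CALC_SIZE_IDX(128, 216) == 2 units; for a HEADER_NONE run class any
 * request that does not fit a single unit yields -1.
 */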
| 17,578 | 25.554381 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container_seglists.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container_seglists.h -- internal definitions for
* segregated lists block container
*/
#ifndef LIBPMEMOBJ_CONTAINER_SEGLISTS_H
#define LIBPMEMOBJ_CONTAINER_SEGLISTS_H 1
#include "container.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container *container_new_seglists(struct palloc_heap *heap);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_SEGLISTS_H */
| 1,994 | 35.944444 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/obj.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj.h -- internal definitions for obj module
*/
#ifndef LIBPMEMOBJ_OBJ_H
#define LIBPMEMOBJ_OBJ_H 1
#include <stddef.h>
#include <stdint.h>
#include "lane.h"
#include "pool_hdr.h"
#include "pmalloc.h"
#include "ctl.h"
#include "sync.h"
#include "stats.h"
#include "ctl_debug.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEMOBJ_LOG_PREFIX "libpmemobj"
#define PMEMOBJ_LOG_LEVEL_VAR "PMEMOBJ_LOG_LEVEL"
#define PMEMOBJ_LOG_FILE_VAR "PMEMOBJ_LOG_FILE"
/* attributes of the obj memory pool format for the pool header */
#define OBJ_HDR_SIG "PMEMOBJ" /* must be 8 bytes including '\0' */
#define OBJ_FORMAT_MAJOR 5
#define OBJ_FORMAT_FEAT_DEFAULT \
{0x0000, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define OBJ_FORMAT_FEAT_CHECK \
{0x0000, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t obj_format_feat_default = OBJ_FORMAT_FEAT_CHECK;
/* size of the persistent part of PMEMOBJ pool descriptor (2kB) */
#define OBJ_DSC_P_SIZE 2048
/* size of unused part of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_UNUSED (OBJ_DSC_P_SIZE - PMEMOBJ_MAX_LAYOUT - 40)
#define OBJ_LANES_OFFSET 8192 /* lanes offset (8kB) */
#define OBJ_NLANES 1024 /* number of lanes */
#define OBJ_OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off)))
#define OBJ_PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop))
#define OBJ_OID_IS_NULL(oid) ((oid).off == 0)
#define OBJ_LIST_EMPTY(head) OBJ_OID_IS_NULL((head)->pe_first)
#define OBJ_OFF_FROM_HEAP(pop, off)\
((off) >= (pop)->heap_offset &&\
(off) < (pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_FROM_LANES(pop, off)\
((off) >= (pop)->lanes_offset &&\
(off) < (pop)->lanes_offset +\
(pop)->nlanes * sizeof(struct lane_layout))
#define OBJ_PTR_FROM_POOL(pop, ptr)\
((uintptr_t)(ptr) >= (uintptr_t)(pop) &&\
(uintptr_t)(ptr) < (uintptr_t)(pop) +\
(pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_IS_VALID(pop, off)\
(OBJ_OFF_FROM_HEAP(pop, off) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_offset) == (off)) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_size) == (off)) ||\
(OBJ_OFF_FROM_LANES(pop, off)))
#define OBJ_PTR_IS_VALID(pop, ptr)\
OBJ_OFF_IS_VALID(pop, OBJ_PTR_TO_OFF(pop, ptr))
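/*
 * Example of the offset <-> pointer mapping used above (explanatory
 * sketch): offsets stored on pmem are relative to the pool base, so both
 * conversions are plain pointer arithmetic:
 *
 *	void *p = OBJ_OFF_TO_PTR(pop, oid.off);
 *	uint64_t off = OBJ_PTR_TO_OFF(pop, p); // == oid.off
 */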
typedef void (*persist_local_fn)(const void *, size_t);
typedef void (*flush_local_fn)(const void *, size_t);
typedef void (*drain_local_fn)(void);
typedef void *(*memcpy_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_local_fn)(void *dest, int c, size_t len, unsigned flags);
typedef int (*persist_remote_fn)(PMEMobjpool *pop, const void *addr,
size_t len, unsigned lane, unsigned flags);
typedef uint64_t type_num_t;
#define CONVERSION_FLAG_OLD_SET_CACHE ((1ULL) << 0)
struct pmemobjpool {
struct pool_hdr hdr; /* memory pool header */
/* persistent part of PMEMOBJ pool descriptor (2kB) */
char layout[PMEMOBJ_MAX_LAYOUT];
uint64_t lanes_offset;
uint64_t nlanes;
uint64_t heap_offset;
uint64_t unused3;
unsigned char unused[OBJ_DSC_P_UNUSED]; /* must be zero */
uint64_t checksum; /* checksum of above fields */
uint64_t root_offset;
/* unique runID for this program run - persistent but not checksummed */
uint64_t run_id;
uint64_t root_size;
/*
* These flags can be set from a conversion tool and are set only for
* the first recovery of the pool.
*/
uint64_t conversion_flags;
uint64_t heap_size;
struct stats_persistent stats_persistent;
char pmem_reserved[496]; /* must be zeroed */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
struct palloc_heap heap;
struct lane_descriptor lanes_desc;
uint64_t uuid_lo;
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct stats *stats;
struct pool_set *set; /* pool set info */
struct pmemobjpool *replica; /* next replica */
/* per-replica functions: pmem or non-pmem */
persist_local_fn persist_local; /* persist function */
flush_local_fn flush_local; /* flush function */
drain_local_fn drain_local; /* drain function */
memcpy_local_fn memcpy_local; /* persistent memcpy function */
memmove_local_fn memmove_local; /* persistent memmove function */
memset_local_fn memset_local; /* persistent memset function */
/* for 'master' replica: with or without data replication */
struct pmem_ops p_ops;
PMEMmutex rootlock; /* root object lock */
int is_master_replica;
int has_remote_replicas;
/* remote replica section */
void *rpp; /* RPMEMpool opaque handle if it is a remote replica */
uintptr_t remote_base; /* beginning of the remote pool */
char *node_addr; /* address of a remote node */
char *pool_desc; /* descriptor of a poolset */
persist_remote_fn persist_remote; /* remote persist function */
int vg_boot;
int tx_debug_skip_expensive_checks;
struct tx_parameters *tx_params;
/*
* Locks are dynamically allocated on FreeBSD. Keep track so
* we can free them on pmemobj_close.
*/
PMEMmutex_internal *mutex_head;
PMEMrwlock_internal *rwlock_head;
PMEMcond_internal *cond_head;
/* padding to align size of this structure to page boundary */
/* sizeof(unused2) == 8192 - offsetof(struct pmemobjpool, unused2) */
char unused2[992];
};
/*
* Stored in the 'size' field of oobh header, determines whether the object
* is internal or not. Internal objects are skipped in pmemobj iteration
* functions.
*/
#define OBJ_INTERNAL_OBJECT_MASK ((1ULL) << 15)
#define CLASS_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 48))
/*
* pmemobj_get_uuid_lo -- (internal) evaluates XOR sum of least significant
* 8 bytes with most significant 8 bytes.
*/
static inline uint64_t
pmemobj_get_uuid_lo(PMEMobjpool *pop)
{
uint64_t uuid_lo = 0;
for (int i = 0; i < 8; i++) {
uuid_lo = (uuid_lo << 8) |
(pop->hdr.poolset_uuid[i] ^
pop->hdr.poolset_uuid[8 + i]);
}
return uuid_lo;
}
/*
* OBJ_OID_IS_VALID -- (internal) checks if 'oid' is valid
*/
static inline int
OBJ_OID_IS_VALID(PMEMobjpool *pop, PMEMoid oid)
{
return OBJ_OID_IS_NULL(oid) ||
(oid.pool_uuid_lo == pop->uuid_lo &&
oid.off >= pop->heap_offset &&
oid.off < pop->heap_offset + pop->heap_size);
}
static inline int
OBJ_OFF_IS_VALID_FROM_CTX(void *ctx, uint64_t offset)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
return OBJ_OFF_IS_VALID(pop, offset);
}
void obj_init(void);
void obj_fini(void);
int obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
/*
* (debug helper macro) logs notice message if used inside a transaction
*/
#ifdef DEBUG
#define _POBJ_DEBUG_NOTICE_IN_TX()\
_pobj_debug_notice(__func__, NULL, 0)
#else
#define _POBJ_DEBUG_NOTICE_IN_TX() do {} while (0)
#endif
#ifdef __cplusplus
}
#endif
#endif
| 8,524 | 29.887681 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/list.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* list.h -- internal definitions for persistent atomic lists module
*/
#ifndef LIBPMEMOBJ_LIST_H
#define LIBPMEMOBJ_LIST_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "lane.h"
#include "pmalloc.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
struct list_entry {
PMEMoid pe_next;
PMEMoid pe_prev;
};
struct list_head {
PMEMoid pe_first;
PMEMmutex lock;
};
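/*
 * Illustrative sketch (not part of the module): callers embed a
 * struct list_entry in their persistent object and pass its offset as
 * 'pe_offset' to the functions below. The structure name here is
 * hypothetical:
 *
 *	struct foo {
 *		struct list_entry entry;
 *		uint64_t payload;
 *	};
 *	size_t pe_offset = offsetof(struct foo, entry);
 */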
int list_insert_new_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, palloc_constr constructor, void *arg,
PMEMoid *oidp);
int list_insert(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head, PMEMoid dest, int before,
PMEMoid oid);
int list_remove_free_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head,
PMEMoid *oidp);
int list_remove(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid oid);
int list_move(PMEMobjpool *pop,
size_t pe_offset_old, struct list_head *head_old,
size_t pe_offset_new, struct list_head *head_new,
PMEMoid dest, int before, PMEMoid oid);
void list_move_oob(PMEMobjpool *pop,
struct list_head *head_old, struct list_head *head_new,
PMEMoid oid);
#ifdef __cplusplus
}
#endif
#endif
| 2,891 | 29.765957 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/memops.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* memops.c -- aggregated memory operations helper implementation
*
* The operation collects all of the required memory modifications that
* need to happen in an atomic way (all of them or none), and abstracts
* away the storage type (transient/persistent) and the underlying
* implementation of how it's actually performed - in some cases using
* the redo log is unnecessary and the allocation process can be sped up
* a bit by completely omitting that whole machinery.
*
* The modifications are not visible until the context is processed.
*/
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "valgrind_internal.h"
#include "vecq.h"
#define ULOG_BASE_SIZE 1024
#define OP_MERGE_SEARCH 64
struct operation_log {
size_t capacity; /* capacity of the ulog log */
size_t offset; /* data offset inside of the log */
struct ulog *ulog; /* DRAM allocated log of modifications */
};
/*
* operation_context -- context of an ongoing palloc operation
*/
struct operation_context {
enum log_type type;
ulog_extend_fn extend; /* function to allocate next ulog */
ulog_free_fn ulog_free; /* function to free next ulogs */
const struct pmem_ops *p_ops;
struct pmem_ops t_ops; /* used for transient data processing */
struct pmem_ops s_ops; /* used for shadow copy data processing */
size_t ulog_curr_offset; /* offset in the log for buffer stores */
size_t ulog_curr_capacity; /* capacity of the current log */
struct ulog *ulog_curr; /* current persistent log */
size_t total_logged; /* total amount of buffer stores in the logs */
struct ulog *ulog; /* pointer to the persistent ulog log */
size_t ulog_base_nbytes; /* available bytes in initial ulog log */
size_t ulog_capacity; /* sum of capacity, incl all next ulog logs */
struct ulog_next next; /* vector of 'next' fields of persistent ulog */
int in_progress; /* operation sanity check */
struct operation_log pshadow_ops; /* shadow copy of persistent ulog */
struct operation_log transient_ops; /* log of transient changes */
/* collection used to look for potential merge candidates */
VECQ(, struct ulog_entry_val *) merge_entries;
};
/*
* operation_log_transient_init -- (internal) initialize operation log
* containing transient memory resident changes
*/
static int
operation_log_transient_init(struct operation_log *log)
{
log->capacity = ULOG_BASE_SIZE;
log->offset = 0;
struct ulog *src = Zalloc(sizeof(struct ulog) +
ULOG_BASE_SIZE);
if (src == NULL) {
ERR("!Zalloc");
return -1;
}
/* initialize underlying redo log structure */
src->capacity = ULOG_BASE_SIZE;
log->ulog = src;
return 0;
}
/*
* operation_log_persistent_init -- (internal) initialize operation log
* containing persistent memory resident changes
*/
static int
operation_log_persistent_init(struct operation_log *log,
size_t ulog_base_nbytes)
{
log->capacity = ULOG_BASE_SIZE;
log->offset = 0;
struct ulog *src = Zalloc(sizeof(struct ulog) +
ULOG_BASE_SIZE);
if (src == NULL) {
ERR("!Zalloc");
return -1;
}
/* initialize underlying redo log structure */
src->capacity = ulog_base_nbytes;
memset(src->unused, 0, sizeof(src->unused));
log->ulog = src;
return 0;
}
/*
* operation_transient_clean -- cleans pmemcheck address state
*/
static int
operation_transient_clean(void *base, const void *addr, size_t len,
unsigned flags)
{
VALGRIND_SET_CLEAN(addr, len);
return 0;
}
/*
* operation_transient_memcpy -- transient memcpy wrapper
*/
static void *
operation_transient_memcpy(void *base, void *dest, const void *src, size_t len,
unsigned flags)
{
return memcpy(dest, src, len);
}
/*
* operation_new -- creates new operation context
*/
struct operation_context *
operation_new(struct ulog *ulog, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type)
{
struct operation_context *ctx = Zalloc(sizeof(*ctx));
if (ctx == NULL) {
ERR("!Zalloc");
goto error_ctx_alloc;
}
ctx->ulog = ulog;
ctx->ulog_base_nbytes = ulog_base_nbytes;
ctx->ulog_capacity = ulog_capacity(ulog,
ulog_base_nbytes, p_ops);
ctx->extend = extend;
ctx->ulog_free = ulog_free;
ctx->in_progress = 0;
VEC_INIT(&ctx->next);
ulog_rebuild_next_vec(ulog, &ctx->next, p_ops);
ctx->p_ops = p_ops;
ctx->type = type;
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = 0;
ctx->ulog_curr = NULL;
ctx->t_ops.base = NULL;
ctx->t_ops.flush = operation_transient_clean;
ctx->t_ops.memcpy = operation_transient_memcpy;
ctx->s_ops.base = p_ops->base;
ctx->s_ops.flush = operation_transient_clean;
ctx->s_ops.memcpy = operation_transient_memcpy;
VECQ_INIT(&ctx->merge_entries);
if (operation_log_transient_init(&ctx->transient_ops) != 0)
goto error_ulog_alloc;
if (operation_log_persistent_init(&ctx->pshadow_ops,
ulog_base_nbytes) != 0)
goto error_ulog_alloc;
return ctx;
error_ulog_alloc:
operation_delete(ctx);
error_ctx_alloc:
return NULL;
}
/*
* operation_delete -- deletes operation context
*/
void
operation_delete(struct operation_context *ctx)
{
VECQ_DELETE(&ctx->merge_entries);
VEC_DELETE(&ctx->next);
Free(ctx->pshadow_ops.ulog);
Free(ctx->transient_ops.ulog);
Free(ctx);
}
/*
 * operation_merge -- (internal) merges a value into an existing log entry
 * according to the given operation type
*/
static inline void
operation_merge(struct ulog_entry_base *entry, uint64_t value,
ulog_operation_type type)
{
struct ulog_entry_val *e = (struct ulog_entry_val *)entry;
switch (type) {
case ULOG_OPERATION_AND:
e->value &= value;
break;
case ULOG_OPERATION_OR:
e->value |= value;
break;
case ULOG_OPERATION_SET:
e->value = value;
break;
default:
ASSERT(0); /* unreachable */
}
}
/*
* operation_try_merge_entry -- tries to merge the incoming log entry with
* existing entries
*
* Because this requires a reverse foreach, it cannot be implemented using
* the on-media ulog log structure since there's no way to find what's
* the previous entry in the log. Instead, the last N entries are stored
* in a collection and traversed backwards.
*/
static int
operation_try_merge_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type)
{
int ret = 0;
uint64_t offset = OBJ_PTR_TO_OFF(ctx->p_ops->base, ptr);
struct ulog_entry_val *e;
VECQ_FOREACH_REVERSE(e, &ctx->merge_entries) {
if (ulog_entry_offset(&e->base) == offset) {
if (ulog_entry_type(&e->base) == type) {
operation_merge(&e->base, value, type);
return 1;
} else {
break;
}
}
}
return ret;
}
/*
* operation_merge_entry_add -- adds a new entry to the merge collection,
* keeps capacity at OP_MERGE_SEARCH. Removes old entries in FIFO fashion.
*/
static void
operation_merge_entry_add(struct operation_context *ctx,
struct ulog_entry_val *entry)
{
if (VECQ_SIZE(&ctx->merge_entries) == OP_MERGE_SEARCH)
(void) VECQ_DEQUEUE(&ctx->merge_entries);
if (VECQ_ENQUEUE(&ctx->merge_entries, entry) != 0) {
/* this is fine, only runtime perf will get slower */
LOG(2, "out of memory - unable to track entries");
}
}
/*
 * operation_add_typed_entry -- adds a new entry to the current operation;
 * for persistent entries, if a recently logged entry modifies the same
 * address with the same operation type, the value is merged into the
 * existing entry instead of appending a new one.
*/
int
operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type)
{
struct operation_log *oplog = log_type == LOG_PERSISTENT ?
&ctx->pshadow_ops : &ctx->transient_ops;
/*
* Always make sure to have one extra spare cacheline so that the
* ulog log entry creation has enough room for zeroing.
*/
if (oplog->offset + CACHELINE_SIZE == oplog->capacity) {
size_t ncapacity = oplog->capacity + ULOG_BASE_SIZE;
struct ulog *ulog = Realloc(oplog->ulog,
SIZEOF_ULOG(ncapacity));
if (ulog == NULL)
return -1;
oplog->capacity += ULOG_BASE_SIZE;
oplog->ulog = ulog;
/*
* Realloc invalidated the ulog entries that are inside of this
* vector, need to clear it to avoid use after free.
*/
VECQ_CLEAR(&ctx->merge_entries);
}
if (log_type == LOG_PERSISTENT &&
operation_try_merge_entry(ctx, ptr, value, type) != 0)
return 0;
struct ulog_entry_val *entry = ulog_entry_val_create(
oplog->ulog, oplog->offset, ptr, value, type,
log_type == LOG_TRANSIENT ? &ctx->t_ops : &ctx->s_ops);
if (log_type == LOG_PERSISTENT)
operation_merge_entry_add(ctx, entry);
oplog->offset += ulog_entry_size(&entry->base);
return 0;
}
/*
 * operation_add_entry -- adds a new entry to the current operation, with
 * the log type (persistent or transient) autodetected based on the memory
 * location
*/
int
operation_add_entry(struct operation_context *ctx, void *ptr, uint64_t value,
ulog_operation_type type)
{
const struct pmem_ops *p_ops = ctx->p_ops;
PMEMobjpool *pop = (PMEMobjpool *)p_ops->base;
int from_pool = OBJ_OFF_IS_VALID(pop,
(uintptr_t)ptr - (uintptr_t)p_ops->base);
return operation_add_typed_entry(ctx, ptr, value, type,
from_pool ? LOG_PERSISTENT : LOG_TRANSIENT);
}
/*
* operation_add_buffer -- adds a buffer operation to the log
*/
int
operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type)
{
size_t real_size = size + sizeof(struct ulog_entry_buf);
/* if there's no space left in the log, reserve some more */
if (ctx->ulog_curr_capacity == 0) {
if (operation_reserve(ctx, ctx->total_logged + real_size) != 0)
return -1;
ctx->ulog_curr = ctx->ulog_curr == NULL ? ctx->ulog :
ulog_next(ctx->ulog_curr, ctx->p_ops);
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = ctx->ulog_curr->capacity;
}
size_t curr_size = MIN(real_size, ctx->ulog_curr_capacity);
size_t data_size = curr_size - sizeof(struct ulog_entry_buf);
/* create a persistent log entry */
struct ulog_entry_buf *e = ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
dest, src, data_size,
type, ctx->p_ops);
size_t entry_size = ALIGN_UP(curr_size, CACHELINE_SIZE);
ASSERT(entry_size == ulog_entry_size(&e->base));
ASSERT(entry_size <= ctx->ulog_curr_capacity);
ctx->total_logged += entry_size;
ctx->ulog_curr_offset += entry_size;
ctx->ulog_curr_capacity -= entry_size;
/*
* Recursively add the data to the log until the entire buffer is
* processed.
*/
return size - data_size == 0 ? 0 : operation_add_buffer(ctx,
(char *)dest + data_size,
(char *)src + data_size,
size - data_size, type);
}
/*
* operation_process_persistent_redo -- (internal) process using ulog
*/
static void
operation_process_persistent_redo(struct operation_context *ctx)
{
ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);
ulog_store(ctx->ulog, ctx->pshadow_ops.ulog,
ctx->pshadow_ops.offset, ctx->ulog_base_nbytes,
&ctx->next, ctx->p_ops);
ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
ctx->p_ops);
ulog_clobber(ctx->ulog, &ctx->next, ctx->p_ops);
}
/*
* operation_process_persistent_undo -- (internal) process using ulog
*/
static void
operation_process_persistent_undo(struct operation_context *ctx)
{
ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);
ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX, ctx->p_ops);
}
/*
 * operation_reserve -- reserves new capacity in the persistent ulog log
*/
int
operation_reserve(struct operation_context *ctx, size_t new_capacity)
{
if (new_capacity > ctx->ulog_capacity) {
if (ctx->extend == NULL) {
ERR("no extend function present");
return -1;
}
if (ulog_reserve(ctx->ulog,
ctx->ulog_base_nbytes, &new_capacity, ctx->extend,
&ctx->next, ctx->p_ops) != 0)
return -1;
ctx->ulog_capacity = new_capacity;
}
return 0;
}
/*
* operation_init -- initializes runtime state of an operation
*/
void
operation_init(struct operation_context *ctx)
{
struct operation_log *plog = &ctx->pshadow_ops;
struct operation_log *tlog = &ctx->transient_ops;
VALGRIND_ANNOTATE_NEW_MEMORY(ctx, sizeof(*ctx));
VALGRIND_ANNOTATE_NEW_MEMORY(tlog->ulog, sizeof(struct ulog) +
tlog->capacity);
VALGRIND_ANNOTATE_NEW_MEMORY(plog->ulog, sizeof(struct ulog) +
plog->capacity);
tlog->offset = 0;
plog->offset = 0;
VECQ_REINIT(&ctx->merge_entries);
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = 0;
ctx->ulog_curr = NULL;
ctx->total_logged = 0;
}
/*
* operation_start -- initializes and starts a new operation
*/
void
operation_start(struct operation_context *ctx)
{
operation_init(ctx);
ASSERTeq(ctx->in_progress, 0);
ctx->in_progress = 1;
}
void
operation_resume(struct operation_context *ctx)
{
operation_init(ctx);
ASSERTeq(ctx->in_progress, 0);
ctx->in_progress = 1;
ctx->total_logged = ulog_base_nbytes(ctx->ulog);
}
/*
* operation_cancel -- cancels a running operation
*/
void
operation_cancel(struct operation_context *ctx)
{
ASSERTeq(ctx->in_progress, 1);
ctx->in_progress = 0;
}
/*
* operation_process -- processes registered operations
*
* The order of processing is important: persistent, transient.
* This is because the transient entries that reside on persistent memory might
 * require a write to a location that is currently occupied by a valid persistent
* state but becomes a transient state after operation is processed.
*/
void
operation_process(struct operation_context *ctx)
{
/*
* If there's exactly one persistent entry there's no need to involve
 * the redo log. We can simply assign the value; the operation will be
 * atomic.
*/
int redo_process = ctx->type == LOG_TYPE_REDO &&
ctx->pshadow_ops.offset != 0;
if (redo_process &&
ctx->pshadow_ops.offset == sizeof(struct ulog_entry_val)) {
struct ulog_entry_base *e = (struct ulog_entry_base *)
ctx->pshadow_ops.ulog->data;
ulog_operation_type t = ulog_entry_type(e);
if (t == ULOG_OPERATION_SET || t == ULOG_OPERATION_AND ||
t == ULOG_OPERATION_OR) {
ulog_entry_apply(e, 1, ctx->p_ops);
redo_process = 0;
}
}
if (redo_process)
operation_process_persistent_redo(ctx);
else if (ctx->type == LOG_TYPE_UNDO)
operation_process_persistent_undo(ctx);
/* process transient entries with transient memory ops */
if (ctx->transient_ops.offset != 0)
ulog_process(ctx->transient_ops.ulog, NULL, &ctx->t_ops);
}
/*
* operation_finish -- finalizes the operation
*/
void
operation_finish(struct operation_context *ctx)
{
ASSERTeq(ctx->in_progress, 1);
ctx->in_progress = 0;
if (ctx->type == LOG_TYPE_REDO && ctx->pshadow_ops.offset != 0) {
operation_process(ctx);
} else if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0) {
ulog_clobber_data(ctx->ulog,
ctx->total_logged, ctx->ulog_base_nbytes,
&ctx->next, ctx->ulog_free, ctx->p_ops);
/* clobbering might have shrunk the ulog */
ctx->ulog_capacity = ulog_capacity(ctx->ulog,
ctx->ulog_base_nbytes, ctx->p_ops);
VEC_CLEAR(&ctx->next);
ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops);
}
}
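/*
 * example_operation_usage -- (illustrative sketch, unused by the library)
 * shows the intended life cycle of an operation context. All arguments are
 * assumed to be supplied by the caller; the staged stores become visible
 * only once the context is processed.
 */
static void
example_operation_usage(struct ulog *ulog, size_t ulog_base_nbytes,
	ulog_extend_fn extend, ulog_free_fn ulog_free,
	const struct pmem_ops *p_ops, uint64_t *dst)
{
	struct operation_context *ctx = operation_new(ulog, ulog_base_nbytes,
		extend, ulog_free, p_ops, LOG_TYPE_REDO);
	if (ctx == NULL)
		return;
	operation_start(ctx);
	/* stage a single 8-byte store; nothing is modified yet */
	operation_add_entry(ctx, dst, 42, ULOG_OPERATION_SET);
	/* for a redo context, finishing processes the staged entries */
	operation_finish(ctx);
	operation_delete(ctx);
}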
| 16,501 | 27.064626 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/stats.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* stats.c -- implementation of statistics
*/
#include "obj.h"
#include "stats.h"
STATS_CTL_HANDLER(persistent, curr_allocated, heap_curr_allocated);
static const struct ctl_node CTL_NODE(heap)[] = {
STATS_CTL_LEAF(persistent, curr_allocated),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(enabled) -- returns whether or not statistics are enabled
*/
static int
CTL_READ_HANDLER(enabled)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
*arg_out = pop->stats->enabled > 0;
return 0;
}
/*
* CTL_WRITE_HANDLER(enabled) -- enables or disables statistics counting
*/
static int
CTL_WRITE_HANDLER(enabled)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
pop->stats->enabled = arg_in > 0;
return 0;
}
static struct ctl_argument CTL_ARG(enabled) = CTL_ARG_BOOLEAN;
static const struct ctl_node CTL_NODE(stats)[] = {
CTL_CHILD(heap),
CTL_LEAF_RW(enabled),
CTL_NODE_END
};
/*
* stats_new -- allocates and initializes statistics instance
*/
struct stats *
stats_new(PMEMobjpool *pop)
{
	struct stats *s = Malloc(sizeof(*s));
	if (s == NULL)
		return NULL;
	s->enabled = 0;
s->persistent = &pop->stats_persistent;
s->transient = Zalloc(sizeof(struct stats_transient));
if (s->transient == NULL)
goto error_transient_alloc;
return s;
error_transient_alloc:
Free(s);
return NULL;
}
/*
* stats_delete -- deletes statistics instance
*/
void
stats_delete(PMEMobjpool *pop, struct stats *s)
{
pmemops_persist(&pop->p_ops, s->persistent,
sizeof(struct stats_persistent));
Free(s->transient);
Free(s);
}
/*
* stats_ctl_register -- registers ctl nodes for statistics
*/
void
stats_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, stats);
}
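/*
 * Example (illustrative): once registered, the statistics are reachable
 * through the public CTL interface, e.g.:
 *
 *	int enabled = 1;
 *	pmemobj_ctl_set(pop, "stats.enabled", &enabled);
 *	uint64_t allocated;
 *	pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
 */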
| 3,424 | 25.145038 | 77 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ctl_debug.h | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctl_debug.h -- definitions for the debug CTL namespace
*/
#ifndef LIBPMEMOBJ_CTL_DEBUG_H
#define LIBPMEMOBJ_CTL_DEBUG_H 1
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
void debug_ctl_register(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CTL_DEBUG_H */
| 1,901 | 35.576923 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/heap.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* heap.h -- internal definitions for heap
*/
#ifndef LIBPMEMOBJ_HEAP_H
#define LIBPMEMOBJ_HEAP_H 1
#include <stddef.h>
#include <stdint.h>
#include "bucket.h"
#include "memblock.h"
#include "memops.h"
#include "palloc.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_OFF_TO_PTR(heap, off) ((void *)((char *)((heap)->base) + (off)))
#define HEAP_PTR_TO_OFF(heap, ptr)\
((uintptr_t)(ptr) - (uintptr_t)((heap)->base))
#define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i))))
int heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void heap_cleanup(struct palloc_heap *heap);
int heap_check(void *heap_start, uint64_t heap_size);
int heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
int heap_buckets_init(struct palloc_heap *heap);
int heap_create_alloc_class_buckets(struct palloc_heap *heap,
struct alloc_class *c);
int heap_extend(struct palloc_heap *heap, struct bucket *defb, size_t size);
struct alloc_class *
heap_get_best_class(struct palloc_heap *heap, size_t size);
struct bucket *
heap_bucket_acquire(struct palloc_heap *heap, struct alloc_class *c);
struct bucket *
heap_bucket_acquire_by_id(struct palloc_heap *heap, uint8_t class_id);
void
heap_bucket_release(struct palloc_heap *heap, struct bucket *b);
int heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m);
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m);
os_mutex_t *heap_get_run_lock(struct palloc_heap *heap,
uint32_t chunk_id);
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m);
int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket, struct memory_block *m);
void heap_foreach_object(struct palloc_heap *heap, object_callback cb,
void *arg, struct memory_block start);
struct alloc_class_collection *heap_alloc_classes(struct palloc_heap *heap);
void *heap_end(struct palloc_heap *heap);
void heap_vg_open(struct palloc_heap *heap, object_callback cb,
void *arg, int objects);
static inline struct chunk_header *
heap_get_chunk_hdr(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_HDR(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk *
heap_get_chunk(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk_run *
heap_get_chunk_run(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_RUN(heap->layout, m->zone_id, m->chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
| 4,468 | 32.350746 | 78 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/cuckoo.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cuckoo.h -- internal definitions for cuckoo hash table
*/
#ifndef LIBPMEMOBJ_CUCKOO_H
#define LIBPMEMOBJ_CUCKOO_H 1
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
struct cuckoo;
struct cuckoo *cuckoo_new(void);
void cuckoo_delete(struct cuckoo *c);
int cuckoo_insert(struct cuckoo *c, uint64_t key, void *value);
void *cuckoo_remove(struct cuckoo *c, uint64_t key);
void *cuckoo_get(struct cuckoo *c, uint64_t key);
size_t cuckoo_get_size(struct cuckoo *c);
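/*
 * Example usage (illustrative only):
 *
 *	struct cuckoo *c = cuckoo_new();
 *	if (c != NULL) {
 *		cuckoo_insert(c, 128, ptr);
 *		void *v = cuckoo_get(c, 128); // == ptr
 *		cuckoo_remove(c, 128); // returns ptr, or NULL if absent
 *		cuckoo_delete(c);
 *	}
 */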
#ifdef __cplusplus
}
#endif
#endif
| 2,122 | 33.803279 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/memops.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* memops.h -- aggregated memory operations helper definitions
*/
#ifndef LIBPMEMOBJ_MEMOPS_H
#define LIBPMEMOBJ_MEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "vec.h"
#include "pmemops.h"
#include "ulog.h"
#include "lane.h"
#ifdef __cplusplus
extern "C" {
#endif
enum operation_log_type {
LOG_PERSISTENT, /* log of persistent modifications */
LOG_TRANSIENT, /* log of transient memory modifications */
MAX_OPERATION_LOG_TYPE
};
enum log_type {
LOG_TYPE_UNDO,
LOG_TYPE_REDO,
MAX_LOG_TYPE,
};
struct operation_context;
struct operation_context *
operation_new(struct ulog *redo, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type);
void operation_init(struct operation_context *ctx);
void operation_start(struct operation_context *ctx);
void operation_resume(struct operation_context *ctx);
void operation_delete(struct operation_context *ctx);
int operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type);
int operation_add_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type);
int operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type);
int operation_reserve(struct operation_context *ctx, size_t new_capacity);
void operation_process(struct operation_context *ctx);
void operation_finish(struct operation_context *ctx);
void operation_cancel(struct operation_context *ctx);
#ifdef __cplusplus
}
#endif
#endif
| 3,212 | 31.785714 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/libpmemobj_main.c | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemobj_main.c -- entry point for libpmemobj.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include <windows.h>
void libpmemobj_init(void);
void libpmemobj_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmemobj_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmemobj_fini();
break;
}
return TRUE;
}
| 2,184 | 34.241935 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/pmalloc.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmalloc.h -- internal definitions for persistent malloc
*/
#ifndef LIBPMEMOBJ_PMALLOC_H
#define LIBPMEMOBJ_PMALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
/* single operations done in the internal context of the lane */
int pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
int pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id);
int prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
void pfree(PMEMobjpool *pop, uint64_t *off);
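/*
 * Example (illustrative only): 'offp' would typically point into a
 * persistent structure so the allocation is linked to the pool atomically;
 * the names below are hypothetical:
 *
 *	uint64_t *offp = &root->data_off;
 *	if (pmalloc(pop, offp, 64, 0, 0) == 0)
 *		pfree(pop, offp);
 */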
/* external operation to be used together with context-aware palloc funcs */
struct operation_context *pmalloc_operation_hold(PMEMobjpool *pop);
struct operation_context *pmalloc_operation_hold_no_start(PMEMobjpool *pop);
void pmalloc_operation_release(PMEMobjpool *pop);
void pmalloc_ctl_register(PMEMobjpool *pop);
int pmalloc_cleanup(PMEMobjpool *pop);
int pmalloc_boot(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif
| 2,806 | 34.0875 | 76 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/recycler.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* recycler.h -- internal definitions of run recycler
*
* This is a container that stores runs that are currently not used by any of
* the buckets.
*/
#ifndef LIBPMEMOBJ_RECYCLER_H
#define LIBPMEMOBJ_RECYCLER_H 1
#include "memblock.h"
#include "vec.h"
#ifdef __cplusplus
extern "C" {
#endif
struct recycler;
VEC(empty_runs, struct memory_block);
struct recycler_element {
uint32_t max_free_block;
uint32_t free_space;
uint32_t chunk_id;
uint32_t zone_id;
};
struct recycler *recycler_new(struct palloc_heap *layout,
size_t nallocs);
void recycler_delete(struct recycler *r);
struct recycler_element recycler_element_new(struct palloc_heap *heap,
const struct memory_block *m);
int recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element);
int recycler_get(struct recycler *r, struct memory_block *m);
void
recycler_pending_put(struct recycler *r,
struct memory_block_reserved *m);
struct empty_runs recycler_recalc(struct recycler *r, int force);
void recycler_inc_unaccounted(struct recycler *r,
const struct memory_block *m);
#ifdef __cplusplus
}
#endif
#endif
| 2,734 | 30.802326 | 77 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/palloc.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* palloc.h -- internal definitions for persistent allocator
*/
#ifndef LIBPMEMOBJ_PALLOC_H
#define LIBPMEMOBJ_PALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "ulog.h"
#include "valgrind_internal.h"
#include "stats.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PALLOC_CTL_DEBUG_NO_PATTERN (-1)
struct palloc_heap {
struct pmem_ops p_ops;
struct heap_layout *layout;
struct heap_rt *rt;
uint64_t *sizep;
uint64_t growsize;
struct stats *stats;
struct pool_set *set;
void *base;
int alloc_pattern;
};
struct memory_block;
typedef int (*palloc_constr)(void *base, void *ptr,
size_t usable_size, void *arg);
int palloc_operation(struct palloc_heap *heap, uint64_t off, uint64_t *dest_off,
size_t size, palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
struct operation_context *ctx);
int
palloc_reserve(struct palloc_heap *heap, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
struct pobj_action *act);
void
palloc_defer_free(struct palloc_heap *heap, uint64_t off,
struct pobj_action *act);
void
palloc_cancel(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt);
void
palloc_publish(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt,
struct operation_context *ctx);
void
palloc_set_value(struct palloc_heap *heap, struct pobj_action *act,
uint64_t *ptr, uint64_t value);
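/*
 * Example (illustrative only): the reserve/publish pattern. A reservation
 * stays volatile until it is published under an operation context:
 *
 *	struct pobj_action act;
 *	if (palloc_reserve(heap, size, NULL, NULL, 0, 0, 0, &act) == 0)
 *		palloc_publish(heap, &act, 1, ctx);
 */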
uint64_t palloc_first(struct palloc_heap *heap);
uint64_t palloc_next(struct palloc_heap *heap, uint64_t off);
size_t palloc_usable_size(struct palloc_heap *heap, uint64_t off);
uint64_t palloc_extra(struct palloc_heap *heap, uint64_t off);
uint16_t palloc_flags(struct palloc_heap *heap, uint64_t off);
int palloc_boot(struct palloc_heap *heap, void *heap_start,
uint64_t heap_size, uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int palloc_buckets_init(struct palloc_heap *heap);
int palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void *palloc_heap_end(struct palloc_heap *h);
int palloc_heap_check(void *heap_start, uint64_t heap_size);
int palloc_heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
void palloc_heap_cleanup(struct palloc_heap *heap);
size_t palloc_heap(void *heap_start);
/* foreach callback, terminates iteration if return value is non-zero */
typedef int (*object_callback)(const struct memory_block *m, void *arg);
#if VG_MEMCHECK_ENABLED
void palloc_heap_vg_open(struct palloc_heap *heap, int objects);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 4,336 | 30.427536 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container.h -- internal definitions for block containers
*/
#ifndef LIBPMEMOBJ_CONTAINER_H
#define LIBPMEMOBJ_CONTAINER_H 1
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container {
struct block_container_ops *c_ops;
struct palloc_heap *heap;
};
struct block_container_ops {
/* inserts a new memory block into the container */
int (*insert)(struct block_container *c, const struct memory_block *m);
/* removes exact match memory block */
int (*get_rm_exact)(struct block_container *c,
const struct memory_block *m);
/* removes and returns the best-fit memory block for size */
int (*get_rm_bestfit)(struct block_container *c,
struct memory_block *m);
/* finds exact match memory block */
int (*get_exact)(struct block_container *c,
const struct memory_block *m);
/* checks whether the container is empty */
int (*is_empty)(struct block_container *c);
/* removes all elements from the container */
void (*rm_all)(struct block_container *c);
/* deletes the container */
void (*destroy)(struct block_container *c);
};
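/*
 * A concrete container fills the vtable with its implementation and hands
 * out the embedded base structure; sketch with hypothetical names:
 *
 *	static struct block_container_ops example_ops = {
 *		.insert = example_insert,
 *		.get_rm_exact = example_get_rm_exact,
 *		...
 *	};
 *	container->c_ops = &example_ops;
 *	container->heap = heap;
 */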
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_H */
| 2,751 | 32.560976 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ravl.c | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ravl.c -- implementation of a RAVL tree
* http://sidsen.azurewebsites.net//papers/ravl-trees-journal.pdf
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "out.h"
#include "ravl.h"
#define RAVL_DEFAULT_DATA_SIZE (sizeof(void *))
enum ravl_slot_type {
RAVL_LEFT,
RAVL_RIGHT,
MAX_SLOTS,
RAVL_ROOT
};
struct ravl_node {
struct ravl_node *parent;
struct ravl_node *slots[MAX_SLOTS];
int32_t rank; /* cannot be greater than height of the subtree */
int32_t pointer_based;
char data[];
};
struct ravl {
struct ravl_node *root;
ravl_compare *compare;
size_t data_size;
};
/*
 * ravl_new_sized -- creates a new ravl tree instance with a custom node
 * data size
*/
struct ravl *
ravl_new_sized(ravl_compare *compare, size_t data_size)
{
struct ravl *r = Malloc(sizeof(*r));
if (r == NULL)
return NULL;
r->compare = compare;
r->root = NULL;
r->data_size = data_size;
return r;
}
/*
* ravl_new -- creates a new tree that stores data pointers
*/
struct ravl *
ravl_new(ravl_compare *compare)
{
return ravl_new_sized(compare, RAVL_DEFAULT_DATA_SIZE);
}
/*
* ravl_clear_node -- (internal) recursively clears the given subtree,
* calls callback in an in-order fashion. Frees the given node.
*/
static void
ravl_clear_node(struct ravl_node *n, ravl_cb cb, void *arg)
{
if (n == NULL)
return;
ravl_clear_node(n->slots[RAVL_LEFT], cb, arg);
if (cb)
cb((void *)n->data, arg);
ravl_clear_node(n->slots[RAVL_RIGHT], cb, arg);
Free(n);
}
/*
* ravl_clear -- clears the entire tree, starting from the root
*/
void
ravl_clear(struct ravl *ravl)
{
ravl_clear_node(ravl->root, NULL, NULL);
ravl->root = NULL;
}
/*
* ravl_delete_cb -- clears and deletes the given ravl instance, calls callback
*/
void
ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg)
{
ravl_clear_node(ravl->root, cb, arg);
Free(ravl);
}
/*
* ravl_delete -- clears and deletes the given ravl instance
*/
void
ravl_delete(struct ravl *ravl)
{
ravl_delete_cb(ravl, NULL, NULL);
}
/*
* ravl_empty -- checks whether the given tree is empty
*/
int
ravl_empty(struct ravl *ravl)
{
return ravl->root == NULL;
}
/*
* ravl_node_insert_constructor -- node data constructor for ravl_insert
*/
static void
ravl_node_insert_constructor(void *data, size_t data_size, const void *arg)
{
/* copy only the 'arg' pointer */
memcpy(data, &arg, sizeof(arg));
}
/*
* ravl_node_copy_constructor -- node data constructor for ravl_emplace_copy
*/
static void
ravl_node_copy_constructor(void *data, size_t data_size, const void *arg)
{
memcpy(data, arg, data_size);
}
/*
* ravl_new_node -- (internal) allocates and initializes a new node
*/
static struct ravl_node *
ravl_new_node(struct ravl *ravl, ravl_constr constr, const void *arg)
{
struct ravl_node *n = Malloc(sizeof(*n) + ravl->data_size);
if (n == NULL)
return NULL;
n->parent = NULL;
n->slots[RAVL_LEFT] = NULL;
n->slots[RAVL_RIGHT] = NULL;
n->rank = 0;
n->pointer_based = constr == ravl_node_insert_constructor;
constr(n->data, ravl->data_size, arg);
return n;
}
/*
* ravl_slot_opposite -- (internal) returns the opposite slot type, cannot be
* called for root type
*/
static enum ravl_slot_type
ravl_slot_opposite(enum ravl_slot_type t)
{
ASSERTne(t, RAVL_ROOT);
return t == RAVL_LEFT ? RAVL_RIGHT : RAVL_LEFT;
}
/*
* ravl_node_slot_type -- (internal) returns the type of the given node:
* left child, right child or root
*/
static enum ravl_slot_type
ravl_node_slot_type(struct ravl_node *n)
{
if (n->parent == NULL)
return RAVL_ROOT;
return n->parent->slots[RAVL_LEFT] == n ? RAVL_LEFT : RAVL_RIGHT;
}
/*
* ravl_node_sibling -- (internal) returns the sibling of the given node,
* NULL if the node is root (has no parent)
*/
static struct ravl_node *
ravl_node_sibling(struct ravl_node *n)
{
enum ravl_slot_type t = ravl_node_slot_type(n);
if (t == RAVL_ROOT)
return NULL;
return n->parent->slots[t == RAVL_LEFT ? RAVL_RIGHT : RAVL_LEFT];
}
/*
 * ravl_node_ref -- (internal) returns a pointer to the slot that references
 * the given node -- either the parent's child slot or the tree root
*/
static struct ravl_node **
ravl_node_ref(struct ravl *ravl, struct ravl_node *n)
{
enum ravl_slot_type t = ravl_node_slot_type(n);
return t == RAVL_ROOT ? &ravl->root : &n->parent->slots[t];
}
/*
* ravl_rotate -- (internal) performs a rotation around a given node
*
* The node n swaps place with its parent. If n is right child, parent becomes
* the left child of n, otherwise parent becomes right child of n.
*/
static void
ravl_rotate(struct ravl *ravl, struct ravl_node *n)
{
ASSERTne(n->parent, NULL);
struct ravl_node *p = n->parent;
struct ravl_node **pref = ravl_node_ref(ravl, p);
enum ravl_slot_type t = ravl_node_slot_type(n);
enum ravl_slot_type t_opposite = ravl_slot_opposite(t);
n->parent = p->parent;
p->parent = n;
*pref = n;
if ((p->slots[t] = n->slots[t_opposite]) != NULL)
p->slots[t]->parent = p;
n->slots[t_opposite] = p;
}
/*
* ravl_node_rank -- (internal) returns the rank of the node
*
* For the purpose of balancing, NULL nodes have rank -1.
*/
static int
ravl_node_rank(struct ravl_node *n)
{
return n == NULL ? -1 : n->rank;
}
/*
 * ravl_node_rank_difference_parent -- (internal) returns the rank difference
* between parent node p and its child n
*
* Every rank difference must be positive.
*
* Either of these can be NULL.
*/
static int
ravl_node_rank_difference_parent(struct ravl_node *p, struct ravl_node *n)
{
return ravl_node_rank(p) - ravl_node_rank(n);
}
/*
 * ravl_node_rank_difference -- (internal) returns the rank difference between
* parent and its child
*
* Can be used to check if a given node is an i-child.
*/
static int
ravl_node_rank_difference(struct ravl_node *n)
{
return ravl_node_rank_difference_parent(n->parent, n);
}
/*
* ravl_node_is_i_j -- (internal) checks if a given node is strictly i,j-node
*/
static int
ravl_node_is_i_j(struct ravl_node *n, int i, int j)
{
return (ravl_node_rank_difference_parent(n, n->slots[RAVL_LEFT]) == i &&
ravl_node_rank_difference_parent(n, n->slots[RAVL_RIGHT]) == j);
}
/*
* ravl_node_is -- (internal) checks if a given node is i,j-node or j,i-node
*/
static int
ravl_node_is(struct ravl_node *n, int i, int j)
{
return ravl_node_is_i_j(n, i, j) || ravl_node_is_i_j(n, j, i);
}
/*
* ravl_node_promote -- promotes a given node by increasing its rank
*/
static void
ravl_node_promote(struct ravl_node *n)
{
n->rank += 1;
}
/*
 * ravl_node_demote -- demotes a given node by decreasing its rank
*/
static void
ravl_node_demote(struct ravl_node *n)
{
ASSERT(n->rank > 0);
n->rank -= 1;
}
/*
* ravl_balance -- balances the tree after insert
*
* This function must restore the invariant that every rank
* difference is positive.
*/
static void
ravl_balance(struct ravl *ravl, struct ravl_node *n)
{
/* walk up the tree, promoting nodes */
while (n->parent && ravl_node_is(n->parent, 0, 1)) {
ravl_node_promote(n->parent);
n = n->parent;
}
/*
* Either the rank rule holds or n is a 0-child whose sibling is an
* i-child with i > 1.
*/
struct ravl_node *s = ravl_node_sibling(n);
if (!(ravl_node_rank_difference(n) == 0 &&
ravl_node_rank_difference_parent(n->parent, s) > 1))
return;
struct ravl_node *y = n->parent;
/* if n is a left child, let z be n's right child and vice versa */
enum ravl_slot_type t = ravl_slot_opposite(ravl_node_slot_type(n));
struct ravl_node *z = n->slots[t];
if (z == NULL || ravl_node_rank_difference(z) == 2) {
ravl_rotate(ravl, n);
ravl_node_demote(y);
} else if (ravl_node_rank_difference(z) == 1) {
ravl_rotate(ravl, z);
ravl_rotate(ravl, z);
ravl_node_promote(z);
ravl_node_demote(n);
ravl_node_demote(y);
}
}
/*
* ravl_insert -- insert data into the tree
*/
int
ravl_insert(struct ravl *ravl, const void *data)
{
return ravl_emplace(ravl, ravl_node_insert_constructor, data);
}
/*
 * ravl_emplace_copy -- copy-constructs data inside of a new tree node
*/
int
ravl_emplace_copy(struct ravl *ravl, const void *data)
{
return ravl_emplace(ravl, ravl_node_copy_constructor, data);
}
/*
* ravl_emplace -- construct data inside of a new tree node
*/
int
ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg)
{
LOG(6, NULL);
struct ravl_node *n = ravl_new_node(ravl, constr, arg);
if (n == NULL)
return -1;
/* walk down the tree and insert the new node into a missing slot */
struct ravl_node **dstp = &ravl->root;
struct ravl_node *dst = NULL;
while (*dstp != NULL) {
dst = (*dstp);
int cmp_result = ravl->compare(ravl_data(n), ravl_data(dst));
if (cmp_result == 0)
goto error_duplicate;
dstp = &dst->slots[cmp_result > 0];
}
n->parent = dst;
*dstp = n;
ravl_balance(ravl, n);
return 0;
error_duplicate:
errno = EEXIST;
Free(n);
return -1;
}
/*
* ravl_node_type_most -- (internal) returns left-most or right-most node in
* the subtree
*/
static struct ravl_node *
ravl_node_type_most(struct ravl_node *n, enum ravl_slot_type t)
{
while (n->slots[t] != NULL)
n = n->slots[t];
return n;
}
/*
* ravl_node_cessor -- (internal) returns the successor or predecessor of the
* node
*/
static struct ravl_node *
ravl_node_cessor(struct ravl_node *n, enum ravl_slot_type t)
{
/*
* If t child is present, we are looking for t-opposite-most node
* in t child subtree
*/
if (n->slots[t])
return ravl_node_type_most(n->slots[t], ravl_slot_opposite(t));
/* otherwise get the first parent on the t path */
while (n->parent != NULL && n == n->parent->slots[t])
n = n->parent;
return n->parent;
}
/*
* ravl_node_successor -- (internal) returns node's successor
*
* It's the first node larger than n.
*/
static struct ravl_node *
ravl_node_successor(struct ravl_node *n)
{
return ravl_node_cessor(n, RAVL_RIGHT);
}
/*
 * ravl_node_predecessor -- (internal) returns node's predecessor
*
* It's the first node smaller than n.
*/
static struct ravl_node *
ravl_node_predecessor(struct ravl_node *n)
{
return ravl_node_cessor(n, RAVL_LEFT);
}
/*
* ravl_predicate_holds -- (internal) verifies the given predicate for
* the current node in the search path
*
* If the predicate holds for the given node or a node that can be directly
* derived from it, returns 1. Otherwise returns 0.
*/
static int
ravl_predicate_holds(struct ravl *ravl, int result, struct ravl_node **ret,
struct ravl_node *n, const void *data, enum ravl_predicate flags)
{
if (flags & RAVL_PREDICATE_EQUAL) {
if (result == 0) {
*ret = n;
return 1;
}
}
if (flags & RAVL_PREDICATE_GREATER) {
if (result < 0) { /* data < n->data */
*ret = n;
return 0;
} else if (result == 0) {
*ret = ravl_node_successor(n);
return 1;
}
}
if (flags & RAVL_PREDICATE_LESS) {
if (result > 0) { /* data > n->data */
*ret = n;
return 0;
} else if (result == 0) {
*ret = ravl_node_predecessor(n);
return 1;
}
}
return 0;
}
/*
 * ravl_find -- searches for the node in the tree
*/
struct ravl_node *
ravl_find(struct ravl *ravl, const void *data, enum ravl_predicate flags)
{
LOG(6, NULL);
struct ravl_node *r = NULL;
struct ravl_node *n = ravl->root;
while (n) {
int result = ravl->compare(data, ravl_data(n));
if (ravl_predicate_holds(ravl, result, &r, n, data, flags))
return r;
n = n->slots[result > 0];
}
return r;
}
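/*
 * For instance -- an illustrative sketch, not part of the original source --
 * combining the predicate flags gives a lower-bound style lookup:
 *
 *	int key = 7;
 *	struct ravl_node *n = ravl_find(t, &key,
 *		RAVL_PREDICATE_GREATER | RAVL_PREDICATE_EQUAL);
 *
 * Here n points at the smallest element >= 7, or is NULL if no such element
 * exists; the stored data is retrieved with ravl_data(n).
 */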
/*
* ravl_remove -- removes the given node from the tree
*/
void
ravl_remove(struct ravl *ravl, struct ravl_node *n)
{
LOG(6, NULL);
if (n->slots[RAVL_LEFT] != NULL && n->slots[RAVL_RIGHT] != NULL) {
/* if both children are present, remove the successor instead */
struct ravl_node *s = ravl_node_successor(n);
memcpy(n->data, s->data, ravl->data_size);
ravl_remove(ravl, s);
} else {
/* swap n with the child that may exist */
struct ravl_node *r = n->slots[RAVL_LEFT] ?
n->slots[RAVL_LEFT] : n->slots[RAVL_RIGHT];
if (r != NULL)
r->parent = n->parent;
*ravl_node_ref(ravl, n) = r;
Free(n);
}
}
/*
* ravl_data -- returns the data contained within the node
*/
void *
ravl_data(struct ravl_node *node)
{
if (node->pointer_based) {
void *data;
memcpy(&data, node->data, sizeof(void *));
return data;
} else {
return (void *)node->data;
}
}
| 13,775 | 22.27027 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/stats.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* stats.h -- definitions of statistics
*/
#ifndef LIBPMEMOBJ_STATS_H
#define LIBPMEMOBJ_STATS_H 1
#include "ctl.h"
#ifdef __cplusplus
extern "C" {
#endif
struct stats_transient {
int unused;
};
struct stats_persistent {
uint64_t heap_curr_allocated;
};
struct stats {
int enabled;
struct stats_transient *transient;
struct stats_persistent *persistent;
};
#define STATS_INC(stats, type, name, value) do {\
if ((stats)->enabled)\
util_fetch_and_add64((&(stats)->type->name), (value));\
} while (0)
#define STATS_SUB(stats, type, name, value) do {\
if ((stats)->enabled)\
util_fetch_and_sub64((&(stats)->type->name), (value));\
} while (0)
#define STATS_SET(stats, type, name, value) do {\
if ((stats)->enabled)\
util_atomic_store_explicit64((&(stats)->type->name), (value),\
memory_order_release);\
} while (0)
#define STATS_CTL_LEAF(type, name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(type##_##name), NULL, NULL},\
NULL, NULL}
#define STATS_CTL_HANDLER(type, name, varname)\
static int CTL_READ_HANDLER(type##_##name)(void *ctx,\
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)\
{\
PMEMobjpool *pop = ctx;\
uint64_t *argv = arg;\
util_atomic_load_explicit64(&pop->stats->type->varname,\
argv, memory_order_acquire);\
return 0;\
}
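/*
 * A minimal sketch of how the two macros above are meant to be paired,
 * assuming the persistent heap_curr_allocated counter declared above:
 *
 *	STATS_CTL_HANDLER(persistent, curr_allocated, heap_curr_allocated);
 *
 *	static const struct ctl_node CTL_NODE(stats_example)[] = {
 *		STATS_CTL_LEAF(persistent, curr_allocated),
 *		CTL_NODE_END
 *	};
 *
 * The handler reads the counter with acquire semantics and the leaf exposes
 * it as a read-only ctl entry point; stats_example is a hypothetical node
 * name used only for this sketch.
 */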
void stats_ctl_register(PMEMobjpool *pop);
struct stats *stats_new(PMEMobjpool *pop);
void stats_delete(PMEMobjpool *pop, struct stats *stats);
#ifdef __cplusplus
}
#endif
#endif
| 3,087 | 29.27451 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/bucket.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* bucket.c -- bucket implementation
*
* Buckets manage volatile state of the heap. They are the abstraction layer
* between the heap-managed chunks/runs and memory allocations.
*
* Each bucket instance can have a different underlying container that is
* responsible for selecting blocks - which means that whether the allocator
 * serves memory blocks in best/first/next-fit manner is decided during bucket
* creation.
*/
#include "alloc_class.h"
#include "bucket.h"
#include "heap.h"
#include "out.h"
#include "sys_util.h"
#include "valgrind_internal.h"
/*
* bucket_new -- creates a new bucket instance
*/
struct bucket *
bucket_new(struct block_container *c, struct alloc_class *aclass)
{
if (c == NULL)
return NULL;
struct bucket *b = Malloc(sizeof(*b));
if (b == NULL)
return NULL;
b->container = c;
b->c_ops = c->c_ops;
util_mutex_init(&b->lock);
b->is_active = 0;
b->active_memory_block = NULL;
if (aclass && aclass->type == CLASS_RUN) {
b->active_memory_block =
Zalloc(sizeof(struct memory_block_reserved));
if (b->active_memory_block == NULL)
goto error_active_alloc;
}
b->aclass = aclass;
return b;
error_active_alloc:
util_mutex_destroy(&b->lock);
Free(b);
return NULL;
}
/*
* bucket_insert_block -- inserts a block into the bucket
*/
int
bucket_insert_block(struct bucket *b, const struct memory_block *m)
{
#if VG_MEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_DRD_ENABLED
if (On_valgrind) {
size_t size = m->m_ops->get_real_size(m);
void *data = m->m_ops->get_real_data(m);
VALGRIND_DO_MAKE_MEM_NOACCESS(data, size);
VALGRIND_ANNOTATE_NEW_MEMORY(data, size);
}
#endif
return b->c_ops->insert(b->container, m);
}
/*
 * bucket_delete -- cleans up and deallocates the bucket instance
*/
void
bucket_delete(struct bucket *b)
{
if (b->active_memory_block)
Free(b->active_memory_block);
util_mutex_destroy(&b->lock);
b->c_ops->destroy(b->container);
Free(b);
}
/*
* bucket_current_resvp -- returns the pointer to the current reservation count
*/
int *
bucket_current_resvp(struct bucket *b)
{
return b->active_memory_block ? &b->active_memory_block->nresv : NULL;
}
| 3,750 | 28.077519 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container_seglists.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container_seglists.c -- implementation of segregated lists block container
*
* This container is constructed from N (up to 64) intrusive lists and a
* single 8 byte bitmap that stores the information whether a given list is
* empty or not.
*/
#include "container_seglists.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#include "vecq.h"
#define SEGLIST_BLOCK_LISTS 64U
struct block_container_seglists {
struct block_container super;
struct memory_block m;
VECQ(, uint32_t) blocks[SEGLIST_BLOCK_LISTS];
uint64_t nonempty_lists;
};
/*
* container_seglists_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_seglists_insert_block(struct block_container *bc,
const struct memory_block *m)
{
ASSERT(m->chunk_id < MAX_CHUNK);
ASSERT(m->zone_id < UINT16_MAX);
ASSERTne(m->size_idx, 0);
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
if (c->nonempty_lists == 0)
c->m = *m;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
ASSERT(m->chunk_id == c->m.chunk_id);
ASSERT(m->zone_id == c->m.zone_id);
if (VECQ_ENQUEUE(&c->blocks[m->size_idx - 1], m->block_off) != 0)
return -1;
/* marks the list as nonempty */
c->nonempty_lists |= 1ULL << (m->size_idx - 1);
return 0;
}
/*
* container_seglists_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_seglists_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
uint32_t i = 0;
/* applicable lists */
uint64_t size_mask = (1ULL << (m->size_idx - 1)) - 1;
uint64_t v = c->nonempty_lists & ~size_mask;
if (v == 0)
return ENOMEM;
/* finds the list that serves the smallest applicable size */
i = util_lssb_index64(v);
uint32_t block_offset = VECQ_DEQUEUE(&c->blocks[i]);
if (VECQ_SIZE(&c->blocks[i]) == 0) /* marks the list as empty */
c->nonempty_lists &= ~(1ULL << (i));
*m = c->m;
m->block_off = block_offset;
m->size_idx = i + 1;
return 0;
}
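/*
 * Worked example of the lookup above (illustrative values only): for a
 * request with size_idx == 3, size_mask == 0b011, which masks the lists
 * serving 1 and 2 units out of nonempty_lists. If only the lists for 2 and
 * 5 units are nonempty, then v == 0b10000 and util_lssb_index64(v) yields 4,
 * i.e. the 5-unit list -- the smallest one able to satisfy the request.
 */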
/*
* container_seglists_is_empty -- (internal) checks whether the container is
* empty
*/
static int
container_seglists_is_empty(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
return c->nonempty_lists == 0;
}
/*
 * container_seglists_rm_all -- (internal) removes all elements from the
 * container
*/
static void
container_seglists_rm_all(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_CLEAR(&c->blocks[i]);
c->nonempty_lists = 0;
}
/*
 * container_seglists_destroy -- (internal) destroys the container
*/
static void
container_seglists_destroy(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_DELETE(&c->blocks[i]);
Free(c);
}
/*
 * This container does not support retrieval of exact memory blocks, but it
 * provides best-fit in O(1) time for unit sizes that do not exceed 64.
*/
static struct block_container_ops container_seglists_ops = {
.insert = container_seglists_insert_block,
.get_rm_exact = NULL,
.get_rm_bestfit = container_seglists_get_rm_block_bestfit,
.get_exact = NULL,
.is_empty = container_seglists_is_empty,
.rm_all = container_seglists_rm_all,
.destroy = container_seglists_destroy,
};
/*
* container_new_seglists -- allocates and initializes a seglists container
*/
struct block_container *
container_new_seglists(struct palloc_heap *heap)
{
struct block_container_seglists *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_seglists_ops;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_INIT(&bc->blocks[i]);
bc->nonempty_lists = 0;
return (struct block_container *)&bc->super;
error_container_malloc:
return NULL;
}
| 5,744 | 27.440594 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container_ravl.h | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container_ravl.h -- internal definitions for ravl-based block container
*/
#ifndef LIBPMEMOBJ_CONTAINER_RAVL_H
#define LIBPMEMOBJ_CONTAINER_RAVL_H 1
#include "container.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container *container_new_ravl(struct palloc_heap *heap);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_RAVL_H */
| 1,960 | 36 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/tx.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* tx.h -- internal definitions for transactions
*/
#ifndef LIBPMEMOBJ_INTERNAL_TX_H
#define LIBPMEMOBJ_INTERNAL_TX_H 1
#include <stdint.h>
#include "obj.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
#define TX_DEFAULT_RANGE_CACHE_SIZE (1 << 15)
#define TX_DEFAULT_RANGE_CACHE_THRESHOLD (1 << 12)
#define TX_RANGE_MASK (8ULL - 1)
#define TX_RANGE_MASK_LEGACY (32ULL - 1)
#define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask))
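/*
 * For example (illustrative): TX_ALIGN_SIZE(13, TX_RANGE_MASK) evaluates to
 * (13 + 7) & ~7 == 16, i.e. snapshot ranges are rounded up to 8 bytes,
 * whereas TX_RANGE_MASK_LEGACY rounds them up to 32 bytes.
 */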
struct tx_parameters {
size_t cache_size;
};
/*
* Returns the current transaction's pool handle, NULL if not within
* a transaction.
*/
PMEMobjpool *tx_get_pop(void);
void tx_ctl_register(PMEMobjpool *pop);
struct tx_parameters *tx_params_new(void);
void tx_params_delete(struct tx_parameters *tx_params);
#ifdef __cplusplus
}
#endif
#endif
| 2,409 | 30.710526 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/memblock.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* memblock.h -- internal definitions for memory block
*/
#ifndef LIBPMEMOBJ_MEMBLOCK_H
#define LIBPMEMOBJ_MEMBLOCK_H 1
#include <stddef.h>
#include <stdint.h>
#include "os_thread.h"
#include "heap_layout.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MEMORY_BLOCK_NONE \
(struct memory_block)\
{0, 0, 0, 0, NULL, NULL, MAX_HEADER_TYPES, MAX_MEMORY_BLOCK}
#define MEMORY_BLOCK_IS_NONE(_m)\
((_m).heap == NULL)
#define MEMORY_BLOCK_EQUALS(lhs, rhs)\
((lhs).zone_id == (rhs).zone_id && (lhs).chunk_id == (rhs).chunk_id &&\
(lhs).block_off == (rhs).block_off && (lhs).heap == (rhs).heap)
enum memory_block_type {
/*
* Huge memory blocks are directly backed by memory chunks. A single
* huge block can consist of several chunks.
* The persistent representation of huge memory blocks can be thought
* of as a doubly linked list with variable length elements.
* That list is stored in the chunk headers array where one element
* directly corresponds to one chunk.
*
* U - used, F - free, R - footer, . - empty
* |U| represents a used chunk with a size index of 1, with type
* information (CHUNK_TYPE_USED) stored in the corresponding header
* array element - chunk_headers[chunk_id].
*
* |F...R| represents a free chunk with size index of 5. The empty
* chunk headers have undefined values and shouldn't be used. All
* chunks with size larger than 1 must have a footer in the last
 * corresponding header array element - chunk_headers[chunk_id + size_idx - 1].
*
* The above representation of chunks will be used to describe the
* way fail-safety is achieved during heap operations.
*
* Allocation of huge memory block with size index 5:
* Initial heap state: |U| <> |F..R| <> |U| <> |F......R|
*
 * The only block that matches that size is at the very end of the chunks
* list: |F......R|
*
 * As the request was for a memory block of size 5, and this one's size is
 * 7, there's a need to first split the chunk in two.
* 1) The last chunk header of the new allocation is marked as footer
* and the block after that one is marked as free: |F...RF.R|
* This is allowed and has no impact on the heap because this
* modification is into chunk header that is otherwise unused, in
* other words the linked list didn't change.
*
* 2) The size index of the first header is changed from previous value
* of 7 to 5: |F...R||F.R|
* This is a single fail-safe atomic operation and this is the
* first change that is noticeable by the heap operations.
* A single linked list element is split into two new ones.
*
* 3) The allocation process either uses redo log or changes directly
* the chunk header type from free to used: |U...R| <> |F.R|
*
* In a similar fashion the reverse operation, free, is performed:
* Initial heap state: |U| <> |F..R| <> |F| <> |U...R| <> |F.R|
*
* This is the heap after the previous example with the single chunk
* in between changed from used to free.
*
* 1) Determine the neighbors of the memory block which is being
* freed.
*
* 2) Update the footer (if needed) information of the last chunk which
 * is the memory block being freed or its neighbor to the right.
* |F| <> |U...R| <> |F.R << this one|
*
* 3) Update the size index and type of the left-most chunk header.
* And so this: |F << this one| <> |U...R| <> |F.R|
* becomes this: |F.......R|
* The entire chunk header can be updated in a single fail-safe
 * atomic operation because its size is only 8 bytes (a single 64-bit word).
*/
MEMORY_BLOCK_HUGE,
/*
* Run memory blocks are chunks with CHUNK_TYPE_RUN and size index of 1.
* The entire chunk is subdivided into smaller blocks and has an
* additional metadata attached in the form of a bitmap - each bit
* corresponds to a single block.
* In this case there's no need to perform any coalescing or splitting
* on the persistent metadata.
* The bitmap is stored on a variable number of 64 bit values and
* because of the requirement of allocation fail-safe atomicity the
* maximum size index of a memory block from a run is 64 - since that's
 * the limit of the atomic write guarantee.
*
* The allocation/deallocation process is a single 8 byte write that
* sets/clears the corresponding bits. Depending on the user choice
* it can either be made atomically or using redo-log when grouped with
* other operations.
* It's also important to note that in a case of realloc it might so
* happen that a single 8 byte bitmap value has its bits both set and
* cleared - that's why the run memory block metadata changes operate
* on AND'ing or OR'ing a bitmask instead of directly setting the value.
*/
MEMORY_BLOCK_RUN,
MAX_MEMORY_BLOCK
};
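/*
 * A sketch of the run bitmap manipulation described above (illustrative
 * values only): allocating 3 blocks starting at block offset 5 within a
 * single 64-bit bitmap value is one OR, and freeing them is one AND with
 * the negated mask -- each a fail-safe atomic 8-byte store:
 *
 *	uint64_t bmask = ((1ULL << 3) - 1ULL) << 5;
 *	bitmap_value |= bmask;
 *	bitmap_value &= ~bmask;
 */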
enum memblock_state {
MEMBLOCK_STATE_UNKNOWN,
MEMBLOCK_ALLOCATED,
MEMBLOCK_FREE,
MAX_MEMBLOCK_STATE,
};
/* runtime bitmap information for a run */
struct run_bitmap {
unsigned nvalues; /* number of 8 byte values - size of values array */
unsigned nbits; /* number of valid bits */
size_t size; /* total size of the bitmap in bytes */
uint64_t *values; /* pointer to the bitmap's values array */
};
struct memory_block_ops {
/* returns memory block size */
size_t (*block_size)(const struct memory_block *m);
/* prepares header modification operation */
void (*prep_hdr)(const struct memory_block *m,
enum memblock_state dest_state, struct operation_context *ctx);
/* returns lock associated with memory block */
os_mutex_t *(*get_lock)(const struct memory_block *m);
/* returns whether a block is allocated or not */
enum memblock_state (*get_state)(const struct memory_block *m);
/* returns pointer to the data of a block */
void *(*get_user_data)(const struct memory_block *m);
/*
* Returns the size of a memory block without overhead.
* This is the size of a data block that can be used.
*/
size_t (*get_user_size)(const struct memory_block *m);
/* returns pointer to the beginning of data of a run block */
void *(*get_real_data)(const struct memory_block *m);
/* returns the size of a memory block, including headers */
size_t (*get_real_size)(const struct memory_block *m);
/* writes a header of an allocation */
void (*write_header)(const struct memory_block *m,
uint64_t extra_field, uint16_t flags);
void (*invalidate)(const struct memory_block *m);
/*
* Checks the header type of a chunk matches the expected type and
* modifies it if necessary. This is fail-safe atomic.
*/
void (*ensure_header_type)(const struct memory_block *m,
enum header_type t);
/*
* Reinitializes a block after a heap restart.
* This is called for EVERY allocation, but *only* under Valgrind.
*/
void (*reinit_header)(const struct memory_block *m);
/* returns the extra field of an allocation */
uint64_t (*get_extra)(const struct memory_block *m);
/* returns the flags of an allocation */
uint16_t (*get_flags)(const struct memory_block *m);
/* initializes memblock in valgrind */
void (*vg_init)(const struct memory_block *m, int objects,
object_callback cb, void *arg);
/* iterates over every free block */
int (*iterate_free)(const struct memory_block *m,
object_callback cb, void *arg);
/* iterates over every used block */
int (*iterate_used)(const struct memory_block *m,
object_callback cb, void *arg);
/* calculates number of free units, valid only for runs */
void (*calc_free)(const struct memory_block *m,
uint32_t *free_space, uint32_t *max_free_block);
/* this is called exactly once for every existing chunk */
void (*reinit_chunk)(const struct memory_block *m);
/*
* Initializes bitmap data for a run.
* Do *not* use this function unless absolutely necessery, it breaks
* the abstraction layer by exposing implementation details.
*/
void (*get_bitmap)(const struct memory_block *m, struct run_bitmap *b);
};
struct memory_block {
uint32_t chunk_id; /* index of the memory block in its zone */
uint32_t zone_id; /* index of this block zone in the heap */
/*
* Size index of the memory block represented in either multiple of
* CHUNKSIZE in the case of a huge chunk or in multiple of a run
* block size.
*/
uint32_t size_idx;
/*
* Used only for run chunks, must be zeroed for huge.
* Number of preceding blocks in the chunk. In other words, the
* position of this memory block in run bitmap.
*/
uint32_t block_off;
/*
* The variables below are associated with the memory block and are
* stored here for convenience. Those fields are filled by either the
* memblock_from_offset or memblock_rebuild_state, and they should not
* be modified manually.
*/
const struct memory_block_ops *m_ops;
struct palloc_heap *heap;
enum header_type header_type;
enum memory_block_type type;
};
/*
* This is a representation of a run memory block that is active in a bucket or
* is on a pending list in the recycler.
* This structure should never be passed around by value because the address of
 * the nresv variable can be referenced by reservations made through
 * palloc_reserve(). Only if the number of reservations equals 0 can the
 * structure be moved/freed.
*/
struct memory_block_reserved {
struct memory_block m;
/*
* Number of reservations made from this run, the pointer to this value
* is stored in a user facing pobj_action structure. Decremented once
* the reservation is published or canceled.
*/
int nresv;
};
struct memory_block memblock_from_offset(struct palloc_heap *heap,
uint64_t off);
struct memory_block memblock_from_offset_opt(struct palloc_heap *heap,
uint64_t off, int size);
void memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m);
struct memory_block memblock_huge_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx);
struct memory_block memblock_run_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment);
void memblock_run_bitmap(uint32_t *size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment, void *content,
struct run_bitmap *b);
#ifdef __cplusplus
}
#endif
#endif
| 11,746 | 35.481366 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/pmalloc.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmalloc.c -- implementation of pmalloc POSIX-like API
*
* This is the front-end part of the persistent memory allocator. It uses both
 * transient and persistent representations of the heap to provide memory blocks
* in a reasonable time and with an acceptable common-case fragmentation.
*/
#include <inttypes.h>
#include "valgrind_internal.h"
#include "heap.h"
#include "lane.h"
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "palloc.h"
#include "pmalloc.h"
#include "alloc_class.h"
#include "set.h"
#include "mmap.h"
enum pmalloc_operation_type {
OPERATION_INTERNAL, /* used only for single, one-off operations */
OPERATION_EXTERNAL, /* used for everything else, incl. large redos */
MAX_OPERATION_TYPE,
};
struct lane_alloc_runtime {
struct operation_context *ctx[MAX_OPERATION_TYPE];
};
/*
* pmalloc_operation_hold_type -- acquires allocator lane section and returns a
* pointer to its operation context
*/
static struct operation_context *
pmalloc_operation_hold_type(PMEMobjpool *pop, enum pmalloc_operation_type type,
int start)
{
struct lane *lane;
lane_hold(pop, &lane);
struct operation_context *ctx = type == OPERATION_INTERNAL ?
lane->internal : lane->external;
if (start)
operation_start(ctx);
return ctx;
}
/*
 * pmalloc_operation_hold_no_start -- acquires allocator lane section and
 * returns a pointer to its operation context without starting it
*/
struct operation_context *
pmalloc_operation_hold_no_start(PMEMobjpool *pop)
{
return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 0);
}
/*
* pmalloc_operation_hold -- acquires allocator lane section and returns a
 * pointer to its started operation context
*/
struct operation_context *
pmalloc_operation_hold(PMEMobjpool *pop)
{
return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 1);
}
/*
* pmalloc_operation_release -- releases allocator lane section
*/
void
pmalloc_operation_release(PMEMobjpool *pop)
{
lane_release(pop);
}
/*
* pmalloc -- allocates a new block of memory
*
* The pool offset is written persistently into the off variable.
*
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
*/
int
pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, 0, off, size, NULL, NULL,
extra_field, object_flags, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pmalloc_construct -- allocates a new block of memory with a constructor
*
* The block offset is written persistently into the off variable, but only
* after the constructor function has been called.
*
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
*/
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, 0, off, size, constructor, arg,
extra_field, object_flags, class_id, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* prealloc -- resizes in-place a previously allocated memory block
*
* The block offset is written persistently into the off variable.
*
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
*/
int
prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, *off, off, size, NULL, NULL,
extra_field, object_flags, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pfree -- deallocates a memory block previously allocated by pmalloc
*
* A zero value is written persistently into the off variable.
*
 * This operation cannot fail; the deallocation is guaranteed to succeed.
*/
void
pfree(PMEMobjpool *pop, uint64_t *off)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, *off, off, 0, NULL, NULL,
0, 0, 0, ctx);
ASSERTeq(ret, 0);
pmalloc_operation_release(pop);
}
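/*
 * A minimal usage sketch of the pair above (illustrative only; pop is an
 * open pool and offp points to a persistent 8-byte offset location):
 *
 *	if (pmalloc(pop, offp, 128, 0, 0) != 0)
 *		return;
 *	pfree(pop, offp);
 *
 * On failure errno describes the error and *offp is untouched; after
 * pfree() returns, *offp is persistently zeroed.
 */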
/*
* pmalloc_boot -- global runtime init routine of allocator section
*/
int
pmalloc_boot(PMEMobjpool *pop)
{
int ret = palloc_boot(&pop->heap, (char *)pop + pop->heap_offset,
pop->set->poolsize - pop->heap_offset, &pop->heap_size,
pop, &pop->p_ops,
pop->stats, pop->set);
if (ret)
return ret;
#if VG_MEMCHECK_ENABLED
if (On_valgrind)
palloc_heap_vg_open(&pop->heap, pop->vg_boot);
#endif
ret = palloc_buckets_init(&pop->heap);
if (ret)
palloc_heap_cleanup(&pop->heap);
return ret;
}
/*
* pmalloc_cleanup -- global cleanup routine of allocator section
*/
int
pmalloc_cleanup(PMEMobjpool *pop)
{
palloc_heap_cleanup(&pop->heap);
return 0;
}
/*
 * CTL_WRITE_HANDLER(desc) -- creates a new allocation class
*/
static int
CTL_WRITE_HANDLER(desc)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
uint8_t id;
struct alloc_class_collection *ac = heap_alloc_classes(&pop->heap);
struct pobj_alloc_class_desc *p = arg;
if (p->unit_size <= 0 || p->unit_size > PMEMOBJ_MAX_ALLOC_SIZE ||
p->units_per_block <= 0) {
errno = EINVAL;
return -1;
}
if (p->alignment != 0 && p->unit_size % p->alignment != 0) {
ERR("unit size must be evenly divisible by alignment");
errno = EINVAL;
return -1;
}
if (p->alignment > (MEGABYTE * 2)) {
ERR("alignment cannot be larger than 2 megabytes");
errno = EINVAL;
return -1;
}
enum header_type lib_htype = MAX_HEADER_TYPES;
switch (p->header_type) {
case POBJ_HEADER_LEGACY:
lib_htype = HEADER_LEGACY;
break;
case POBJ_HEADER_COMPACT:
lib_htype = HEADER_COMPACT;
break;
case POBJ_HEADER_NONE:
lib_htype = HEADER_NONE;
break;
case MAX_POBJ_HEADER_TYPES:
default:
ERR("invalid header type");
errno = EINVAL;
return -1;
}
if (SLIST_EMPTY(indexes)) {
if (alloc_class_find_first_free_slot(ac, &id) != 0) {
ERR("no available free allocation class identifier");
errno = EINVAL;
return -1;
}
} else {
struct ctl_index *idx = SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "class_id"), 0);
if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
ERR("class id outside of the allowed range");
errno = ERANGE;
return -1;
}
id = (uint8_t)idx->value;
if (alloc_class_reserve(ac, id) != 0) {
ERR("attempted to overwrite an allocation class");
errno = EEXIST;
return -1;
}
}
size_t runsize_bytes =
CHUNK_ALIGN_UP((p->units_per_block * p->unit_size) +
RUN_BASE_METADATA_SIZE);
	/* aligning the buffer might require up to 'alignment' bytes */
if (p->alignment != 0)
runsize_bytes += p->alignment;
uint32_t size_idx = (uint32_t)(runsize_bytes / CHUNKSIZE);
if (size_idx > UINT16_MAX)
size_idx = UINT16_MAX;
struct alloc_class *c = alloc_class_new(id,
heap_alloc_classes(&pop->heap), CLASS_RUN,
lib_htype, p->unit_size, p->alignment, size_idx);
if (c == NULL) {
errno = EINVAL;
return -1;
}
if (heap_create_alloc_class_buckets(&pop->heap, c) != 0) {
alloc_class_delete(ac, c);
return -1;
}
p->class_id = c->id;
return 0;
}
/*
* pmalloc_header_type_parser -- parses the alloc header type argument
*/
static int
pmalloc_header_type_parser(const void *arg, void *dest, size_t dest_size)
{
const char *vstr = arg;
enum pobj_header_type *htype = dest;
ASSERTeq(dest_size, sizeof(enum pobj_header_type));
if (strcmp(vstr, "none") == 0) {
*htype = POBJ_HEADER_NONE;
} else if (strcmp(vstr, "compact") == 0) {
*htype = POBJ_HEADER_COMPACT;
} else if (strcmp(vstr, "legacy") == 0) {
*htype = POBJ_HEADER_LEGACY;
} else {
ERR("invalid header type");
errno = EINVAL;
return -1;
}
return 0;
}
/*
* CTL_READ_HANDLER(desc) -- reads the information about allocation class
*/
static int
CTL_READ_HANDLER(desc)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
uint8_t id;
struct ctl_index *idx = SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "class_id"), 0);
if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
ERR("class id outside of the allowed range");
errno = ERANGE;
return -1;
}
id = (uint8_t)idx->value;
struct alloc_class *c = alloc_class_by_id(
heap_alloc_classes(&pop->heap), id);
if (c == NULL) {
ERR("class with the given id does not exist");
errno = ENOENT;
return -1;
}
enum pobj_header_type user_htype = MAX_POBJ_HEADER_TYPES;
switch (c->header_type) {
case HEADER_LEGACY:
user_htype = POBJ_HEADER_LEGACY;
break;
case HEADER_COMPACT:
user_htype = POBJ_HEADER_COMPACT;
break;
case HEADER_NONE:
user_htype = POBJ_HEADER_NONE;
break;
default:
ASSERT(0); /* unreachable */
break;
}
struct pobj_alloc_class_desc *p = arg;
p->units_per_block = c->type == CLASS_HUGE ? 0 : c->run.nallocs;
p->header_type = user_htype;
p->unit_size = c->unit_size;
p->class_id = c->id;
p->alignment = c->flags & CHUNK_FLAG_ALIGNED ? c->run.alignment : 0;
return 0;
}
static struct ctl_argument CTL_ARG(desc) = {
.dest_size = sizeof(struct pobj_alloc_class_desc),
.parsers = {
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
unit_size, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
alignment, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
units_per_block, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
header_type, pmalloc_header_type_parser),
CTL_ARG_PARSER_END
}
};
static const struct ctl_node CTL_NODE(class_id)[] = {
CTL_LEAF_RW(desc),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(new)[] = {
CTL_LEAF_WO(desc),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(alloc_class)[] = {
CTL_INDEXED(class_id),
CTL_INDEXED(new),
CTL_NODE_END
};
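/*
 * The nodes above surface through the public ctl namespace. A sketch of
 * registering a custom allocation class from application code, assuming
 * the documented pmemobj_ctl_set() entry point:
 *
 *	struct pobj_alloc_class_desc d;
 *	d.unit_size = 256;
 *	d.alignment = 0;
 *	d.units_per_block = 1024;
 *	d.header_type = POBJ_HEADER_COMPACT;
 *	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &d) == 0)
 *		use_class(d.class_id);
 *
 * use_class() is a hypothetical consumer; on success the write handler
 * above fills in d.class_id with the assigned identifier.
 */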
/*
* CTL_RUNNABLE_HANDLER(extend) -- extends the pool by the given size
*/
static int
CTL_RUNNABLE_HANDLER(extend)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(ssize_t *)arg;
if (arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
ERR("incorrect size for extend, must be larger than %" PRIu64,
PMEMOBJ_MIN_PART);
return -1;
}
struct palloc_heap *heap = &pop->heap;
struct bucket *defb = heap_bucket_acquire_by_id(heap,
DEFAULT_ALLOC_CLASS_ID);
int ret = heap_extend(heap, defb, (size_t)arg_in) < 0 ? -1 : 0;
heap_bucket_release(heap, defb);
return ret;
}
/*
* CTL_READ_HANDLER(granularity) -- reads the current heap grow size
*/
static int
CTL_READ_HANDLER(granularity)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t *arg_out = arg;
*arg_out = (ssize_t)pop->heap.growsize;
return 0;
}
/*
* CTL_WRITE_HANDLER(granularity) -- changes the heap grow size
*/
static int
CTL_WRITE_HANDLER(granularity)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(int *)arg;
if (arg_in != 0 && arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
ERR("incorrect grow size, must be 0 or larger than %" PRIu64,
PMEMOBJ_MIN_PART);
return -1;
}
pop->heap.growsize = (size_t)arg_in;
return 0;
}
static struct ctl_argument CTL_ARG(granularity) = CTL_ARG_LONG_LONG;
static const struct ctl_node CTL_NODE(size)[] = {
CTL_LEAF_RW(granularity),
CTL_LEAF_RUNNABLE(extend),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(heap)[] = {
CTL_CHILD(alloc_class),
CTL_CHILD(size),
CTL_NODE_END
};
/*
* pmalloc_ctl_register -- registers ctl nodes for "heap" module
*/
void
pmalloc_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, heap);
}
| 13,605 | 24.05709 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/pmemops.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBPMEMOBJ_PMEMOPS_H
#define LIBPMEMOBJ_PMEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*persist_fn)(void *base, const void *, size_t, unsigned);
typedef int (*flush_fn)(void *base, const void *, size_t, unsigned);
typedef void (*drain_fn)(void *base);
typedef void *(*memcpy_fn)(void *base, void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_fn)(void *base, void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_fn)(void *base, void *dest, int c, size_t len,
unsigned flags);
typedef int (*remote_read_fn)(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
struct pmem_ops {
/* for 'master' replica: with or without data replication */
persist_fn persist; /* persist function */
flush_fn flush; /* flush function */
drain_fn drain; /* drain function */
memcpy_fn memcpy; /* persistent memcpy function */
memmove_fn memmove; /* persistent memmove function */
memset_fn memset; /* persistent memset function */
void *base;
struct remote_ops {
remote_read_fn read;
void *ctx;
uintptr_t base;
} remote;
};
static force_inline int
pmemops_xpersist(const struct pmem_ops *p_ops, const void *d, size_t s,
unsigned flags)
{
return p_ops->persist(p_ops->base, d, s, flags);
}
static force_inline void
pmemops_persist(const struct pmem_ops *p_ops, const void *d, size_t s)
{
(void) pmemops_xpersist(p_ops, d, s, 0);
}
static force_inline int
pmemops_xflush(const struct pmem_ops *p_ops, const void *d, size_t s,
unsigned flags)
{
return p_ops->flush(p_ops->base, d, s, flags);
}
static force_inline void
pmemops_flush(const struct pmem_ops *p_ops, const void *d, size_t s)
{
(void) pmemops_xflush(p_ops, d, s, 0);
}
static force_inline void
pmemops_drain(const struct pmem_ops *p_ops)
{
p_ops->drain(p_ops->base);
}
static force_inline void *
pmemops_memcpy(const struct pmem_ops *p_ops, void *dest,
const void *src, size_t len, unsigned flags)
{
return p_ops->memcpy(p_ops->base, dest, src, len, flags);
}
static force_inline void *
pmemops_memmove(const struct pmem_ops *p_ops, void *dest,
const void *src, size_t len, unsigned flags)
{
return p_ops->memmove(p_ops->base, dest, src, len, flags);
}
static force_inline void *
pmemops_memset(const struct pmem_ops *p_ops, void *dest, int c,
size_t len, unsigned flags)
{
return p_ops->memset(p_ops->base, dest, c, len, flags);
}
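/*
 * A short sketch of the intended call pattern (illustrative only): batching
 * several ranges with PMEMOBJ_F_MEM_NODRAIN and issuing a single drain at
 * the end persists both ranges with one barrier:
 *
 *	pmemops_memcpy(p_ops, dst1, src1, len1, PMEMOBJ_F_MEM_NODRAIN);
 *	pmemops_memcpy(p_ops, dst2, src2, len2, PMEMOBJ_F_MEM_NODRAIN);
 *	pmemops_drain(p_ops);
 */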
#ifdef __cplusplus
}
#endif
#endif
| 4,125 | 29.791045 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ulog.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ulog.c -- unified log implementation
*/
#include <inttypes.h>
#include <string.h>
#include "libpmemobj.h"
#include "ulog.h"
#include "out.h"
#include "util.h"
#include "valgrind_internal.h"
/*
 * Operation flag stored in the three most significant bits
*/
#define ULOG_OPERATION(op) ((uint64_t)(op))
#define ULOG_OPERATION_MASK ((uint64_t)(0b111ULL << 61ULL))
#define ULOG_OPERATION_FROM_OFFSET(off) (ulog_operation_type)\
((off) & ULOG_OPERATION_MASK)
#define ULOG_OFFSET_MASK (~(ULOG_OPERATION_MASK))
#define CACHELINE_ALIGN(size) ALIGN_UP(size, CACHELINE_SIZE)
#define IS_CACHELINE_ALIGNED(ptr)\
(((uintptr_t)(ptr) & (CACHELINE_SIZE - 1)) == 0)
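/*
 * A sketch of the offset encoding these macros implement (illustrative
 * values): a 61-bit pool offset and a 3-bit operation code share a single
 * 64-bit field,
 *
 *	uint64_t packed = 0x1000 | ULOG_OPERATION(ULOG_OPERATION_SET);
 *
 * so that (packed & ULOG_OFFSET_MASK) recovers 0x1000 and
 * ULOG_OPERATION_FROM_OFFSET(packed) recovers ULOG_OPERATION_SET.
 */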
/*
* ulog_by_offset -- (internal) calculates the ulog pointer
*/
static struct ulog *
ulog_by_offset(size_t offset, const struct pmem_ops *p_ops)
{
if (offset == 0)
return NULL;
size_t aligned_offset = CACHELINE_ALIGN(offset);
return (struct ulog *)((char *)p_ops->base + aligned_offset);
}
/*
* ulog_next -- retrieves the pointer to the next ulog
*/
struct ulog *
ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops)
{
return ulog_by_offset(ulog->next, p_ops);
}
/*
 * ulog_entry_type -- returns the type of a ulog entry operation
*/
ulog_operation_type
ulog_entry_type(const struct ulog_entry_base *entry)
{
return ULOG_OPERATION_FROM_OFFSET(entry->offset);
}
/*
 * ulog_entry_offset -- returns the offset of a ulog entry
*/
uint64_t
ulog_entry_offset(const struct ulog_entry_base *entry)
{
return entry->offset & ULOG_OFFSET_MASK;
}
/*
* ulog_entry_size -- returns the size of a ulog entry
*/
size_t
ulog_entry_size(const struct ulog_entry_base *entry)
{
struct ulog_entry_buf *eb;
switch (ulog_entry_type(entry)) {
case ULOG_OPERATION_AND:
case ULOG_OPERATION_OR:
case ULOG_OPERATION_SET:
return sizeof(struct ulog_entry_val);
case ULOG_OPERATION_BUF_SET:
case ULOG_OPERATION_BUF_CPY:
eb = (struct ulog_entry_buf *)entry;
return CACHELINE_ALIGN(
sizeof(struct ulog_entry_buf) + eb->size);
default:
ASSERT(0);
}
return 0;
}
/*
* ulog_entry_valid -- (internal) checks if a ulog entry is valid
 * Returns 1 if the entry is valid, otherwise 0 is returned.
*/
static int
ulog_entry_valid(const struct ulog_entry_base *entry)
{
if (entry->offset == 0)
return 0;
size_t size;
struct ulog_entry_buf *b;
switch (ulog_entry_type(entry)) {
case ULOG_OPERATION_BUF_CPY:
case ULOG_OPERATION_BUF_SET:
size = ulog_entry_size(entry);
b = (struct ulog_entry_buf *)entry;
if (!util_checksum(b, size, &b->checksum, 0, 0))
return 0;
break;
default:
break;
}
return 1;
}
/*
* ulog_construct -- initializes the ulog structure
*/
void
ulog_construct(uint64_t offset, size_t capacity, int flush,
const struct pmem_ops *p_ops)
{
struct ulog *ulog = ulog_by_offset(offset, p_ops);
VALGRIND_ADD_TO_TX(ulog, SIZEOF_ULOG(capacity));
ulog->capacity = capacity;
ulog->checksum = 0;
ulog->next = 0;
memset(ulog->unused, 0, sizeof(ulog->unused));
if (flush) {
pmemops_xflush(p_ops, ulog, sizeof(*ulog),
PMEMOBJ_F_RELAXED);
pmemops_memset(p_ops, ulog->data, 0, capacity,
PMEMOBJ_F_MEM_NONTEMPORAL |
PMEMOBJ_F_MEM_NODRAIN |
PMEMOBJ_F_RELAXED);
} else {
/*
* We want to avoid replicating zeroes for every ulog of every
 * lane. To do that, we need to use plain old memset.
*/
memset(ulog->data, 0, capacity);
}
VALGRIND_REMOVE_FROM_TX(ulog, SIZEOF_ULOG(capacity));
}
/*
* ulog_foreach_entry -- iterates over every existing entry in the ulog
*/
int
ulog_foreach_entry(struct ulog *ulog,
ulog_entry_cb cb, void *arg, const struct pmem_ops *ops)
{
struct ulog_entry_base *e;
int ret = 0;
for (struct ulog *r = ulog; r != NULL; r = ulog_next(r, ops)) {
for (size_t offset = 0; offset < r->capacity; ) {
e = (struct ulog_entry_base *)(r->data + offset);
if (!ulog_entry_valid(e))
return ret;
if ((ret = cb(e, arg, ops)) != 0)
return ret;
offset += ulog_entry_size(e);
}
}
return ret;
}
/*
* ulog_capacity -- (internal) returns the total capacity of the ulog
*/
size_t
ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes,
const struct pmem_ops *p_ops)
{
size_t capacity = ulog_base_bytes;
/* skip the first one, we count it in 'ulog_base_bytes' */
while ((ulog = ulog_next(ulog, p_ops)) != NULL) {
capacity += ulog->capacity;
}
return capacity;
}
/*
* ulog_rebuild_next_vec -- rebuilds the vector of next entries
*/
void
ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next,
const struct pmem_ops *p_ops)
{
do {
if (ulog->next != 0)
VEC_PUSH_BACK(next, ulog->next);
} while ((ulog = ulog_next(ulog, p_ops)) != NULL);
}
/*
* ulog_reserve -- reserves new capacity in the ulog
*/
int
ulog_reserve(struct ulog *ulog,
size_t ulog_base_nbytes, size_t *new_capacity, ulog_extend_fn extend,
struct ulog_next *next,
const struct pmem_ops *p_ops)
{
size_t capacity = ulog_base_nbytes;
uint64_t offset;
VEC_FOREACH(offset, next) {
ulog = ulog_by_offset(offset, p_ops);
capacity += ulog->capacity;
}
while (capacity < *new_capacity) {
if (extend(p_ops->base, &ulog->next) != 0)
return -1;
VEC_PUSH_BACK(next, ulog->next);
ulog = ulog_next(ulog, p_ops);
capacity += ulog->capacity;
}
*new_capacity = capacity;
return 0;
}
/*
* ulog_checksum -- (internal) calculates ulog checksum
*/
static int
ulog_checksum(struct ulog *ulog, size_t ulog_base_bytes, int insert)
{
return util_checksum(ulog, SIZEOF_ULOG(ulog_base_bytes),
&ulog->checksum, insert, 0);
}
/*
* ulog_store -- stores the transient src ulog in the
* persistent dest ulog
*
* The source and destination ulogs must be cacheline aligned.
*/
void
ulog_store(struct ulog *dest, struct ulog *src, size_t nbytes,
size_t ulog_base_nbytes, struct ulog_next *next,
const struct pmem_ops *p_ops)
{
/*
* First, store all entries over the base capacity of the ulog in
* the next logs.
* Because the checksum is only in the first part, we don't have to
 * worry about fail-safety here.
*/
struct ulog *ulog = dest;
size_t offset = ulog_base_nbytes;
/*
* Copy at least 8 bytes more than needed. If the user always
* properly uses entry creation functions, this will zero-out the
* potential leftovers of the previous log. Since all we really need
 * to zero is the offset, sizeof(struct ulog_entry_base) is enough.
 * If nbytes is aligned, an entire cacheline needs to be additionally
* zeroed.
* But the checksum must be calculated based solely on actual data.
*/
size_t checksum_nbytes = MIN(ulog_base_nbytes, nbytes);
nbytes = CACHELINE_ALIGN(nbytes + sizeof(struct ulog_entry_base));
size_t base_nbytes = MIN(ulog_base_nbytes, nbytes);
size_t next_nbytes = nbytes - base_nbytes;
size_t nlog = 0;
while (next_nbytes > 0) {
ulog = ulog_by_offset(VEC_ARR(next)[nlog++], p_ops);
ASSERTne(ulog, NULL);
size_t copy_nbytes = MIN(next_nbytes, ulog->capacity);
next_nbytes -= copy_nbytes;
ASSERT(IS_CACHELINE_ALIGNED(ulog->data));
VALGRIND_ADD_TO_TX(ulog->data, copy_nbytes);
pmemops_memcpy(p_ops,
ulog->data,
src->data + offset,
copy_nbytes,
PMEMOBJ_F_MEM_WC |
PMEMOBJ_F_MEM_NODRAIN |
PMEMOBJ_F_RELAXED);
VALGRIND_REMOVE_FROM_TX(ulog->data, copy_nbytes);
offset += copy_nbytes;
}
if (nlog != 0)
pmemops_drain(p_ops);
/*
* Then, calculate the checksum and store the first part of the
* ulog.
*/
src->next = VEC_SIZE(next) == 0 ? 0 : VEC_FRONT(next);
ulog_checksum(src, checksum_nbytes, 1);
pmemops_memcpy(p_ops, dest, src,
SIZEOF_ULOG(base_nbytes),
PMEMOBJ_F_MEM_WC);
}
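/*
 * Worked example (added for illustration; not in the original source),
 * assuming a 64-byte cacheline: for ulog_base_nbytes == 640 and
 * nbytes == 700, the computation above yields
 *	checksum_nbytes = MIN(640, 700) = 640
 *	nbytes = CACHELINE_ALIGN(700 + 8) = 768
 *	base_nbytes = MIN(640, 768) = 640
 *	next_nbytes = 768 - 640 = 128
 * so 128 bytes spill into the first log from 'next', and only the
 * checksummed base part is what makes the store fail-safe.
 */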
/*
* ulog_entry_val_create -- creates a new log value entry in the ulog
*
* This function requires at least a cacheline of space to be available in the
* ulog.
*/
struct ulog_entry_val *
ulog_entry_val_create(struct ulog *ulog, size_t offset, uint64_t *dest,
uint64_t value, ulog_operation_type type,
const struct pmem_ops *p_ops)
{
struct ulog_entry_val *e =
(struct ulog_entry_val *)(ulog->data + offset);
struct {
struct ulog_entry_val v;
struct ulog_entry_base zeroes;
} data;
COMPILE_ERROR_ON(sizeof(data) != sizeof(data.v) + sizeof(data.zeroes));
/*
* Write a little bit more to the buffer so that the next entry that
* resides in the log is erased. This will prevent leftovers from
* a previous, clobbered log from being incorrectly applied.
*/
data.zeroes.offset = 0;
data.v.base.offset = (uint64_t)(dest) - (uint64_t)p_ops->base;
data.v.base.offset |= ULOG_OPERATION(type);
data.v.value = value;
pmemops_memcpy(p_ops, e, &data, sizeof(data),
PMEMOBJ_F_MEM_NOFLUSH | PMEMOBJ_F_RELAXED);
return e;
}
/*
* ulog_entry_buf_create -- atomically creates a buffer entry in the log
*/
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset, uint64_t *dest,
const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops)
{
struct ulog_entry_buf *e =
(struct ulog_entry_buf *)(ulog->data + offset);
/*
* Depending on the size of the source buffer, we might need to perform
* up to three separate copies:
* 1. The first cacheline, 24b of metadata and 40b of data
* If there's still data to be logged:
* 2. The entire remainder of the data, aligned down to cacheline,
* for example, if there's 150b left, this step will copy only
* 128b.
* Now, we are left with between 0 to 63 bytes. If nonzero:
* 3. Create a stack allocated cacheline-sized buffer, fill in the
* remainder of the data, and copy the entire cacheline.
*
* This is done so that we avoid a cache-miss on misaligned writes.
*/
struct ulog_entry_buf *b = alloca(CACHELINE_SIZE);
b->base.offset = (uint64_t)(dest) - (uint64_t)p_ops->base;
b->base.offset |= ULOG_OPERATION(type);
b->size = size;
b->checksum = 0;
size_t bdatasize = CACHELINE_SIZE - sizeof(struct ulog_entry_buf);
size_t ncopy = MIN(size, bdatasize);
memcpy(b->data, src, ncopy);
memset(b->data + ncopy, 0, bdatasize - ncopy);
size_t remaining_size = ncopy > size ? 0 : size - ncopy;
char *srcof = (char *)src + ncopy;
size_t rcopy = ALIGN_DOWN(remaining_size, CACHELINE_SIZE);
size_t lcopy = remaining_size - rcopy;
uint8_t last_cacheline[CACHELINE_SIZE];
if (lcopy != 0) {
memcpy(last_cacheline, srcof + rcopy, lcopy);
memset(last_cacheline + lcopy, 0, CACHELINE_SIZE - lcopy);
}
if (rcopy != 0) {
void *dest = e->data + ncopy;
ASSERT(IS_CACHELINE_ALIGNED(dest));
VALGRIND_ADD_TO_TX(dest, rcopy);
pmemops_memcpy(p_ops, dest, srcof, rcopy,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(dest, rcopy);
}
if (lcopy != 0) {
void *dest = e->data + ncopy + rcopy;
ASSERT(IS_CACHELINE_ALIGNED(dest));
VALGRIND_ADD_TO_TX(dest, CACHELINE_SIZE);
pmemops_memcpy(p_ops, dest, last_cacheline, CACHELINE_SIZE,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(dest, CACHELINE_SIZE);
}
b->checksum = util_checksum_seq(b, CACHELINE_SIZE, 0);
if (rcopy != 0)
b->checksum = util_checksum_seq(srcof, rcopy, b->checksum);
if (lcopy != 0)
b->checksum = util_checksum_seq(last_cacheline,
CACHELINE_SIZE, b->checksum);
ASSERT(IS_CACHELINE_ALIGNED(e));
VALGRIND_ADD_TO_TX(e, CACHELINE_SIZE);
pmemops_memcpy(p_ops, e, b, CACHELINE_SIZE,
PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NONTEMPORAL);
VALGRIND_REMOVE_FROM_TX(e, CACHELINE_SIZE);
pmemops_drain(p_ops);
ASSERT(ulog_entry_valid(&e->base));
return e;
}
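/*
 * Worked example for the three-step copy above (added for illustration;
 * assumes CACHELINE_SIZE == 64 and 24 bytes of entry metadata):
 * for size == 150,
 *	ncopy = MIN(150, 64 - 24) = 40	(step 1, header cacheline)
 *	remaining_size = 150 - 40 = 110
 *	rcopy = ALIGN_DOWN(110, 64) = 64	(step 2, aligned bulk copy)
 *	lcopy = 110 - 64 = 46	(step 3, zero-padded last cacheline)
 * which accounts for all 40 + 64 + 46 == 150 bytes of the source buffer.
 */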
/*
* ulog_entry_apply -- applies modifications of a single ulog entry
*/
void
ulog_entry_apply(const struct ulog_entry_base *e, int persist,
const struct pmem_ops *p_ops)
{
ulog_operation_type t = ulog_entry_type(e);
uint64_t offset = ulog_entry_offset(e);
size_t dst_size = sizeof(uint64_t);
uint64_t *dst = (uint64_t *)((uintptr_t)p_ops->base + offset);
struct ulog_entry_val *ev;
struct ulog_entry_buf *eb;
flush_fn f = persist ? p_ops->persist : p_ops->flush;
switch (t) {
case ULOG_OPERATION_AND:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst &= ev->value;
f(p_ops->base, dst, sizeof(uint64_t),
PMEMOBJ_F_RELAXED);
break;
case ULOG_OPERATION_OR:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst |= ev->value;
f(p_ops->base, dst, sizeof(uint64_t),
PMEMOBJ_F_RELAXED);
break;
case ULOG_OPERATION_SET:
ev = (struct ulog_entry_val *)e;
VALGRIND_ADD_TO_TX(dst, dst_size);
*dst = ev->value;
f(p_ops->base, dst, sizeof(uint64_t),
PMEMOBJ_F_RELAXED);
break;
case ULOG_OPERATION_BUF_SET:
eb = (struct ulog_entry_buf *)e;
dst_size = eb->size;
VALGRIND_ADD_TO_TX(dst, dst_size);
pmemops_memset(p_ops, dst, *eb->data, eb->size,
PMEMOBJ_F_RELAXED | PMEMOBJ_F_MEM_NODRAIN);
break;
case ULOG_OPERATION_BUF_CPY:
eb = (struct ulog_entry_buf *)e;
dst_size = eb->size;
VALGRIND_ADD_TO_TX(dst, dst_size);
pmemops_memcpy(p_ops, dst, eb->data, eb->size,
PMEMOBJ_F_RELAXED | PMEMOBJ_F_MEM_NODRAIN);
break;
default:
ASSERT(0);
}
VALGRIND_REMOVE_FROM_TX(dst, dst_size);
}
/*
* ulog_process_entry -- (internal) processes a single ulog entry
*/
static int
ulog_process_entry(struct ulog_entry_base *e, void *arg,
const struct pmem_ops *p_ops)
{
ulog_entry_apply(e, 0, p_ops);
return 0;
}
/*
* ulog_clobber -- zeroes the metadata of the ulog
*/
void
ulog_clobber(struct ulog *dest, struct ulog_next *next,
const struct pmem_ops *p_ops)
{
struct ulog empty;
memset(&empty, 0, sizeof(empty));
if (next != NULL)
empty.next = VEC_SIZE(next) == 0 ? 0 : VEC_FRONT(next);
else
empty.next = dest->next;
pmemops_memcpy(p_ops, dest, &empty, sizeof(empty),
PMEMOBJ_F_MEM_WC);
}
/*
* ulog_clobber_data -- zeroes out 'nbytes' of data in the logs
*/
void
ulog_clobber_data(struct ulog *dest,
size_t nbytes, size_t ulog_base_nbytes,
struct ulog_next *next, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops)
{
size_t rcapacity = ulog_base_nbytes;
size_t nlog = 0;
ASSERTne(dest, NULL);
for (struct ulog *r = dest; r != NULL; ) {
size_t nzero = MIN(nbytes, rcapacity);
VALGRIND_ADD_TO_TX(r->data, nzero);
pmemops_memset(p_ops, r->data, 0, nzero, PMEMOBJ_F_MEM_WC);
VALGRIND_REMOVE_FROM_TX(r->data, nzero);
nbytes -= nzero;
if (nbytes == 0)
break;
r = ulog_by_offset(VEC_ARR(next)[nlog++], p_ops);
if (nlog > 1)
break;
ASSERTne(r, NULL);
rcapacity = r->capacity;
}
/*
* To make sure that transaction logs do not occupy too much of space,
* all of them, except for the first one, are freed at the end of
* the operation. The reasoning for this is that pmalloc() is
* a relatively cheap operation for transactions where many hundreds of
* kilobytes are being snapshot, and so, allocating and freeing the
* buffer for each transaction is an acceptable overhead for the average
* case.
*/
struct ulog *u = ulog_by_offset(dest->next, p_ops);
if (u == NULL)
return;
VEC(, uint64_t *) logs_past_first;
VEC_INIT(&logs_past_first);
size_t next_offset;
while (u != NULL && ((next_offset = u->next) != 0)) {
if (VEC_PUSH_BACK(&logs_past_first, &u->next) != 0) {
/* this is fine, it will just use more pmem */
LOG(1, "unable to free transaction logs memory");
goto out;
}
u = ulog_by_offset(u->next, p_ops);
}
uint64_t *ulog_ptr;
VEC_FOREACH_REVERSE(ulog_ptr, &logs_past_first) {
ulog_free(p_ops->base, ulog_ptr);
}
out:
VEC_DELETE(&logs_past_first);
}
/*
* ulog_process -- process ulog entries
*/
void
ulog_process(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops)
{
LOG(15, "ulog %p", ulog);
#ifdef DEBUG
if (check)
ulog_check(ulog, check, p_ops);
#endif
ulog_foreach_entry(ulog, ulog_process_entry, NULL, p_ops);
}
/*
* ulog_base_nbytes -- (internal) counts the actual number of bytes
* occupied by the ulog
*/
size_t
ulog_base_nbytes(struct ulog *ulog)
{
size_t offset = 0;
struct ulog_entry_base *e;
for (offset = 0; offset < ulog->capacity; ) {
e = (struct ulog_entry_base *)(ulog->data + offset);
if (!ulog_entry_valid(e))
break;
offset += ulog_entry_size(e);
}
return offset;
}
/*
* ulog_recovery_needed -- checks if the log needs recovery
*/
int
ulog_recovery_needed(struct ulog *ulog, int verify_checksum)
{
size_t nbytes = MIN(ulog_base_nbytes(ulog), ulog->capacity);
if (nbytes == 0)
return 0;
if (verify_checksum && !ulog_checksum(ulog, nbytes, 0))
return 0;
return 1;
}
/*
* ulog_recover -- recovery of ulog
*
* A call to ulog_recover shall be preceded by a call to ulog_check.
*/
void
ulog_recover(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops)
{
LOG(15, "ulog %p", ulog);
if (ulog_recovery_needed(ulog, 1)) {
ulog_process(ulog, check, p_ops);
ulog_clobber(ulog, NULL, p_ops);
}
}
/*
* ulog_check_entry --
* (internal) checks consistency of a single ulog entry
*/
static int
ulog_check_entry(struct ulog_entry_base *e,
void *arg, const struct pmem_ops *p_ops)
{
uint64_t offset = ulog_entry_offset(e);
ulog_check_offset_fn check = arg;
if (!check(p_ops->base, offset)) {
LOG(15, "ulog %p invalid offset %" PRIu64,
e, e->offset);
return -1;
}
return offset == 0 ? -1 : 0;
}
/*
* ulog_check -- (internal) check consistency of ulog entries
*/
int
ulog_check(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops)
{
LOG(15, "ulog %p", ulog);
return ulog_foreach_entry(ulog,
ulog_check_entry, check, p_ops);
}
| 19,076 | 24.538153 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/sync.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sync.h -- internal libpmemobj synchronization API
*/
#ifndef LIBPMEMOBJ_SYNC_H
#define LIBPMEMOBJ_SYNC_H 1
#include <errno.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "out.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* internal definitions of PMEM-locks
*/
typedef union padded_pmemmutex {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_mutex_t mutex;
struct {
void *bsd_mutex_p;
union padded_pmemmutex *next;
} bsd_u;
} mutex_u;
} pmemmutex;
} PMEMmutex_internal;
#define PMEMmutex_lock pmemmutex.mutex_u.mutex
#define PMEMmutex_bsd_mutex_p pmemmutex.mutex_u.bsd_u.bsd_mutex_p
#define PMEMmutex_next pmemmutex.mutex_u.bsd_u.next
typedef union padded_pmemrwlock {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_rwlock_t rwlock;
struct {
void *bsd_rwlock_p;
union padded_pmemrwlock *next;
} bsd_u;
} rwlock_u;
} pmemrwlock;
} PMEMrwlock_internal;
#define PMEMrwlock_lock pmemrwlock.rwlock_u.rwlock
#define PMEMrwlock_bsd_rwlock_p pmemrwlock.rwlock_u.bsd_u.bsd_rwlock_p
#define PMEMrwlock_next pmemrwlock.rwlock_u.bsd_u.next
typedef union padded_pmemcond {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_cond_t cond;
struct {
void *bsd_cond_p;
union padded_pmemcond *next;
} bsd_u;
} cond_u;
} pmemcond;
} PMEMcond_internal;
#define PMEMcond_cond pmemcond.cond_u.cond
#define PMEMcond_bsd_cond_p pmemcond.cond_u.bsd_u.bsd_cond_p
#define PMEMcond_next pmemcond.cond_u.bsd_u.next
/*
* pmemobj_mutex_lock_nofail -- pmemobj_mutex_lock variant that never
* fails from the caller's perspective. If pmemobj_mutex_lock fails, this function
* aborts the program.
*/
static inline void
pmemobj_mutex_lock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
int ret = pmemobj_mutex_lock(pop, mutexp);
if (ret) {
errno = ret;
FATAL("!pmemobj_mutex_lock");
}
}
/*
* pmemobj_mutex_unlock_nofail -- pmemobj_mutex_unlock variant that never
* fails from the caller's perspective. If pmemobj_mutex_unlock fails, this function
* aborts the program.
*/
static inline void
pmemobj_mutex_unlock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
int ret = pmemobj_mutex_unlock(pop, mutexp);
if (ret) {
errno = ret;
FATAL("!pmemobj_mutex_unlock");
}
}
int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp);
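/*
 * Example usage (an illustrative sketch, not part of the original header;
 * 'pop' and 'lockp' are hypothetical and must belong to the same pool):
 *
 *	pmemobj_mutex_lock_nofail(pop, lockp);
 *	... critical section operating on pmem-resident data ...
 *	pmemobj_mutex_unlock_nofail(pop, lockp);
 *
 * Unlike a raw os_mutex_t, a PMEMmutex needs no explicit init call -- it
 * is lazily (re)initialized on first use after each pool open.
 */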
#ifdef __cplusplus
}
#endif
#endif
| 4,019 | 27.309859 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/sync.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sync.c -- persistent memory resident synchronization primitives
*/
#include <inttypes.h>
#include "obj.h"
#include "out.h"
#include "util.h"
#include "sync.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#ifdef __FreeBSD__
#define RECORD_LOCK(init, type, p) \
if (init) {\
PMEM##type##_internal *head = pop->type##_head;\
while (!util_bool_compare_and_swap64(&pop->type##_head, head,\
p)) {\
head = pop->type##_head;\
}\
p->PMEM##type##_next = head;\
}
#else
#define RECORD_LOCK(init, type, p)
#endif
/*
* _get_value -- (internal) atomically initialize and return a value.
* Returns -1 on error, 0 if the caller is not the value
* initializer, 1 if the caller is the value initializer.
*/
static int
_get_value(uint64_t pop_runid, volatile uint64_t *runid, void *value, void *arg,
int (*init_value)(void *value, void *arg))
{
uint64_t tmp_runid;
int initializer = 0;
while ((tmp_runid = *runid) != pop_runid) {
if (tmp_runid == pop_runid - 1)
continue;
if (!util_bool_compare_and_swap64(runid, tmp_runid,
pop_runid - 1))
continue;
initializer = 1;
if (init_value(value, arg)) {
ERR("error initializing lock");
util_fetch_and_and64(runid, 0);
return -1;
}
if (util_bool_compare_and_swap64(runid, pop_runid - 1,
pop_runid) == 0) {
ERR("error setting lock runid");
return -1;
}
}
return initializer;
}
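/*
 * Illustration of the protocol above (added; not in the original source):
 * with pop_runid == N, the value is usable only when *runid == N, and
 * N - 1 marks "initialization in progress". A hypothetical initializer
 * passed as 'init_value' merely has to match its signature, e.g.:
 *
 *	static int
 *	example_init(void *value, void *arg)
 *	{
 *		memset(value, 0, sizeof(uint64_t));
 *		return 0;	(nonzero would abort the initialization)
 *	}
 */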
/*
* get_mutex -- (internal) atomically initialize, record and return a mutex
*/
static inline os_mutex_t *
get_mutex(PMEMobjpool *pop, PMEMmutex_internal *imp)
{
if (likely(imp->pmemmutex.runid == pop->run_id))
return &imp->PMEMmutex_lock;
volatile uint64_t *runid = &imp->pmemmutex.runid;
LOG(5, "PMEMmutex %p pop->run_id %" PRIu64 " pmemmutex.runid %" PRIu64,
imp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal));
COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(os_mutex_t));
VALGRIND_REMOVE_PMEM_MAPPING(imp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &imp->PMEMmutex_lock,
NULL, (void *)os_mutex_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, mutex, imp);
return &imp->PMEMmutex_lock;
}
/*
* get_rwlock -- (internal) atomically initialize, record and return a rwlock
*/
static inline os_rwlock_t *
get_rwlock(PMEMobjpool *pop, PMEMrwlock_internal *irp)
{
if (likely(irp->pmemrwlock.runid == pop->run_id))
return &irp->PMEMrwlock_lock;
volatile uint64_t *runid = &irp->pmemrwlock.runid;
LOG(5, "PMEMrwlock %p pop->run_id %"\
PRIu64 " pmemrwlock.runid %" PRIu64,
irp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMrwlock) != sizeof(PMEMrwlock_internal));
COMPILE_ERROR_ON(util_alignof(PMEMrwlock)
!= util_alignof(os_rwlock_t));
VALGRIND_REMOVE_PMEM_MAPPING(irp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &irp->PMEMrwlock_lock,
NULL, (void *)os_rwlock_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, rwlock, irp);
return &irp->PMEMrwlock_lock;
}
/*
* get_cond -- (internal) atomically initialize, record and return a
* condition variable
*/
static inline os_cond_t *
get_cond(PMEMobjpool *pop, PMEMcond_internal *icp)
{
if (likely(icp->pmemcond.runid == pop->run_id))
return &icp->PMEMcond_cond;
volatile uint64_t *runid = &icp->pmemcond.runid;
LOG(5, "PMEMcond %p pop->run_id %" PRIu64 " pmemcond.runid %" PRIu64,
icp, pop->run_id, *runid);
ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);
COMPILE_ERROR_ON(sizeof(PMEMcond) != sizeof(PMEMcond_internal));
COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t));
VALGRIND_REMOVE_PMEM_MAPPING(icp, _POBJ_CL_SIZE);
int initializer = _get_value(pop->run_id, runid, &icp->PMEMcond_cond,
NULL, (void *)os_cond_init);
if (initializer == -1) {
return NULL;
}
RECORD_LOCK(initializer, cond, icp);
return &icp->PMEMcond_cond;
}
/*
* pmemobj_mutex_zero -- zero-initialize a pmem resident mutex
*
* This function is not MT safe.
*/
void
pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
mutexip->pmemmutex.runid = 0;
pmemops_persist(&pop->p_ops, &mutexip->pmemmutex.runid,
sizeof(mutexip->pmemmutex.runid));
}
/*
* pmemobj_mutex_lock -- lock a pmem resident mutex
*
* Atomically initializes and locks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_lock(mutex);
}
/*
* pmemobj_mutex_assert_locked -- checks whether mutex is locked.
*
* Returns 0 when mutex is locked.
*/
int
pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
int ret = os_mutex_trylock(mutex);
if (ret == EBUSY)
return 0;
if (ret == 0) {
util_mutex_unlock(mutex);
/*
* There's no good error code for this case. EINVAL is used for
* something else here.
*/
return ENODEV;
}
return ret;
}
/*
* pmemobj_mutex_timedlock -- lock a pmem resident mutex
*
* Atomically initializes and locks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_timedlock(mutex, abs_timeout);
}
/*
* pmemobj_mutex_trylock -- trylock a pmem resident mutex
*
* Atomically initializes and trylocks a PMEMmutex, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_trylock(mutex);
}
/*
* pmemobj_mutex_unlock -- unlock a pmem resident mutex
*/
int
pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
LOG(3, "pop %p mutex %p", pop, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
/* XXX potential performance improvement - move GET to debug version */
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_mutex_t *mutex = get_mutex(pop, mutexip);
if (mutex == NULL)
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
return os_mutex_unlock(mutex);
}
/*
* pmemobj_rwlock_zero -- zero-initialize a pmem resident rwlock
*
* This function is not MT safe.
*/
void
pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
rwlockip->pmemrwlock.runid = 0;
pmemops_persist(&pop->p_ops, &rwlockip->pmemrwlock.runid,
sizeof(rwlockip->pmemrwlock.runid));
}
/*
* pmemobj_rwlock_rdlock -- rdlock a pmem resident rwlock
*
* Atomically initializes and rdlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_rdlock(rwlock);
}
/*
* pmemobj_rwlock_wrlock -- wrlock a pmem resident rwlock
*
* Atomically initializes and wrlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_wrlock(rwlock);
}
/*
* pmemobj_rwlock_timedrdlock -- timedrdlock a pmem resident rwlock
*
* Atomically initializes and timedrdlocks a PMEMrwlock, otherwise behaves as
* its POSIX counterpart.
*/
int
pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_timedrdlock(rwlock, abs_timeout);
}
/*
* pmemobj_rwlock_timedwrlock -- timedwrlock a pmem resident rwlock
*
* Atomically initializes and timedwrlocks a PMEMrwlock, otherwise behaves as
* its POSIX counterpart.
*/
int
pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_timedwrlock(rwlock, abs_timeout);
}
/*
* pmemobj_rwlock_tryrdlock -- tryrdlock a pmem resident rwlock
*
* Atomically initializes and tryrdlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_tryrdlock(rwlock);
}
/*
* pmemobj_rwlock_trywrlock -- trywrlock a pmem resident rwlock
*
* Atomically initializes and trywrlocks a PMEMrwlock, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_trywrlock(rwlock);
}
/*
* pmemobj_rwlock_unlock -- unlock a pmem resident rwlock
*/
int
pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
LOG(3, "pop %p rwlock %p", pop, rwlockp);
ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));
/* XXX potential performance improvement - move GET to debug version */
PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
if (rwlock == NULL)
return EINVAL;
ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);
return os_rwlock_unlock(rwlock);
}
/*
* pmemobj_cond_zero -- zero-initialize a pmem resident condition variable
*
* This function is not MT safe.
*/
void
pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
condip->pmemcond.runid = 0;
pmemops_persist(&pop->p_ops, &condip->pmemcond.runid,
sizeof(condip->pmemcond.runid));
}
/*
* pmemobj_cond_broadcast -- broadcast a pmem resident condition variable
*
* Atomically initializes and broadcasts a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
os_cond_t *cond = get_cond(pop, condip);
if (cond == NULL)
return EINVAL;
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_broadcast(cond);
}
/*
* pmemobj_cond_signal -- signal a pmem resident condition variable
*
* Atomically initializes and signals a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp)
{
LOG(3, "pop %p cond %p", pop, condp);
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
os_cond_t *cond = get_cond(pop, condip);
if (cond == NULL)
return EINVAL;
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_signal(cond);
}
/*
* pmemobj_cond_timedwait -- timedwait on a pmem resident condition variable
*
* Atomically initializes and timedwaits on a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp,
PMEMmutex *__restrict mutexp,
const struct timespec *__restrict abs_timeout)
{
LOG(3, "pop %p cond %p mutex %p abstime sec %ld nsec %ld", pop, condp,
mutexp, abs_timeout->tv_sec, abs_timeout->tv_nsec);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_cond_t *cond = get_cond(pop, condip);
os_mutex_t *mutex = get_mutex(pop, mutexip);
if ((cond == NULL) || (mutex == NULL))
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_timedwait(cond, mutex, abs_timeout);
}
/*
* pmemobj_cond_wait -- wait on a pmem resident condition variable
*
* Atomically initializes and waits on a PMEMcond, otherwise behaves as its
* POSIX counterpart.
*/
int
pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp,
PMEMmutex *__restrict mutexp)
{
LOG(3, "pop %p cond %p mutex %p", pop, condp, mutexp);
ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
ASSERTeq(pop, pmemobj_pool_by_ptr(condp));
PMEMcond_internal *condip = (PMEMcond_internal *)condp;
PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
os_cond_t *cond = get_cond(pop, condip);
os_mutex_t *mutex = get_mutex(pop, mutexip);
if ((cond == NULL) || (mutex == NULL))
return EINVAL;
ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);
return os_cond_wait(cond, mutex);
}
/*
* pmemobj_volatile -- atomically initialize, record and return a
* generic value
*/
void *
pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt,
void *ptr, size_t size,
int (*constr)(void *ptr, void *arg), void *arg)
{
LOG(3, "pop %p vlt %p ptr %p constr %p arg %p", pop, vlt, ptr,
constr, arg);
if (likely(vlt->runid == pop->run_id))
return ptr;
VALGRIND_REMOVE_PMEM_MAPPING(ptr, size);
VALGRIND_ADD_TO_TX(vlt, sizeof(*vlt));
if (_get_value(pop->run_id, &vlt->runid, ptr, arg, constr) < 0) {
VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
return NULL;
}
VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
VALGRIND_SET_CLEAN(vlt, sizeof(*vlt));
return ptr;
}
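/*
 * Illustrative use of pmemobj_volatile() (added; names are hypothetical):
 * lazily (re)construct per-open volatile state embedded in a pmem object:
 *
 *	struct my_obj {
 *		struct pmemvlt vlt;
 *		char vstate[64];
 *	};
 *
 *	static int
 *	my_constr(void *ptr, void *arg)
 *	{
 *		memset(ptr, 0, 64);
 *		return 0;
 *	}
 *
 *	char *state = pmemobj_volatile(pop, &obj->vlt, obj->vstate,
 *		sizeof(obj->vstate), my_constr, NULL);
 */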
| 18,016 | 25.811012 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/lane.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* lane.h -- internal definitions for lanes
*/
#ifndef LIBPMEMOBJ_LANE_H
#define LIBPMEMOBJ_LANE_H 1
#include <stdint.h>
#include "ulog.h"
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Distance between lanes used by threads, required to prevent threads
* from false-sharing parts of the lanes array. Used if properly spread
* lanes are available; otherwise, less spread-out lanes are used.
*/
#define LANE_JUMP (64 / sizeof(uint64_t))
/*
* Number of times the algorithm will try to reacquire the primary lane for the
* thread. If this threshold is exceeded, a new primary lane is selected for the
* thread.
*/
#define LANE_PRIMARY_ATTEMPTS 128
#define RLANE_DEFAULT 0
#define LANE_TOTAL_SIZE 3072 /* 3 * 1024 (sum of 3 old lane sections) */
/*
* We have 3 kilobytes to distribute.
* The smallest capacity is needed for the internal redo log for which we can
* accurately calculate the maximum number of occupied space: 48 bytes,
* 3 times sizeof(struct ulog_entry_val). One for bitmap OR, second for bitmap
* AND, third for modification of the destination pointer. For future needs,
* this has been bumped up to 12 ulog entries.
*
* The remaining part has to be split between transactional redo and undo logs,
* and since by far the most space consuming operations are transactional
* snapshots, most of the space, 2 kilobytes, is assigned to the undo log.
* After that, the remainder, 640 bytes, or 40 ulog entries, is left for the
* transactional redo logs.
* Thanks to this distribution, all small and medium transactions should be
* entirely performed without allocating any additional metadata.
*/
#define LANE_UNDO_SIZE 2048
#define LANE_REDO_EXTERNAL_SIZE 640
#define LANE_REDO_INTERNAL_SIZE 192
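/*
 * Size check (added for clarity): each of the three ulogs below carries
 * a 64-byte header (see SIZEOF_ULOG), so the persistent layout adds up:
 *	3 * 64 + 2048 + 640 + 192 == 3072 == LANE_TOTAL_SIZE
 */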
struct lane_layout {
/*
* Redo log for self-contained and 'one-shot' allocator operations.
* Cannot be extended.
*/
struct ULOG(LANE_REDO_INTERNAL_SIZE) internal;
/*
* Redo log for large operations/transactions.
* Can be extended by the use of internal ulog.
*/
struct ULOG(LANE_REDO_EXTERNAL_SIZE) external;
/*
* Undo log for snapshots done in a transaction.
* Can be extended/shrunk by the use of internal ulog.
*/
struct ULOG(LANE_UNDO_SIZE) undo;
};
struct lane {
struct lane_layout *layout; /* pointer to persistent layout */
struct operation_context *internal; /* context for internal ulog */
struct operation_context *external; /* context for external ulog */
struct operation_context *undo; /* context for undo ulog */
};
struct lane_descriptor {
/*
* Number of lanes available at runtime must be <= total number of lanes
* available in the pool. Number of lanes can be limited by shortage of
* other resources, e.g. the available RNIC submission queue sizes.
*/
unsigned runtime_nlanes;
unsigned next_lane_idx;
uint64_t *lane_locks;
struct lane *lane;
};
typedef int (*section_layout_op)(PMEMobjpool *pop, void *data, unsigned length);
typedef void *(*section_constr)(PMEMobjpool *pop, void *data);
typedef void (*section_destr)(PMEMobjpool *pop, void *rt);
typedef int (*section_global_op)(PMEMobjpool *pop);
struct section_operations {
section_constr construct_rt;
section_destr destroy_rt;
section_layout_op check;
section_layout_op recover;
section_global_op boot;
section_global_op cleanup;
};
struct lane_info {
uint64_t pop_uuid_lo;
uint64_t lane_idx;
unsigned long nest_count;
/*
* The index of the primary lane for the thread. A thread will always
* try to acquire the primary lane first, and only if that fails it will
* look for a different available lane.
*/
uint64_t primary;
int primary_attempts;
struct lane_info *prev, *next;
};
void lane_info_boot(void);
void lane_info_destroy(void);
void lane_init_data(PMEMobjpool *pop);
int lane_boot(PMEMobjpool *pop);
void lane_cleanup(PMEMobjpool *pop);
int lane_recover_and_section_boot(PMEMobjpool *pop);
int lane_section_cleanup(PMEMobjpool *pop);
int lane_check(PMEMobjpool *pop);
unsigned lane_hold(PMEMobjpool *pop, struct lane **lane);
void lane_release(PMEMobjpool *pop);
void lane_attach(PMEMobjpool *pop, unsigned lane);
unsigned lane_detach(PMEMobjpool *pop);
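/*
 * Typical pairing (added for illustration): code that needs a lane
 * brackets its work with
 *	struct lane *lane;
 *	unsigned idx = lane_hold(pop, &lane);
 *	... use lane->internal / lane->external / lane->undo ...
 *	lane_release(pop);
 * Nested holds on the same thread reuse the already-acquired lane
 * (see nest_count above).
 */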
#ifdef __cplusplus
}
#endif
#endif
| 5,804 | 32.554913 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/bucket.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* bucket.h -- internal definitions for bucket
*/
#ifndef LIBPMEMOBJ_BUCKET_H
#define LIBPMEMOBJ_BUCKET_H 1
#include <stddef.h>
#include <stdint.h>
#include "container.h"
#include "memblock.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define CALC_SIZE_IDX(_unit_size, _size)\
((_size) == 0 ? 0 : (uint32_t)((((_size) - 1) / (_unit_size)) + 1))
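/*
 * Example (added for illustration): with 128-byte units,
 *	CALC_SIZE_IDX(128, 300) == ((300 - 1) / 128) + 1 == 3
 * i.e. a 300-byte allocation occupies three units, while
 * CALC_SIZE_IDX(128, 0) == 0.
 */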
struct bucket {
os_mutex_t lock;
struct alloc_class *aclass;
struct block_container *container;
struct block_container_ops *c_ops;
struct memory_block_reserved *active_memory_block;
int is_active;
};
struct bucket *bucket_new(struct block_container *c,
struct alloc_class *aclass);
int *bucket_current_resvp(struct bucket *b);
int bucket_insert_block(struct bucket *b, const struct memory_block *m);
void bucket_delete(struct bucket *b);
#ifdef __cplusplus
}
#endif
#endif
| 2,466 | 29.8375 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ulog.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ulog.h -- unified log public interface
*/
#ifndef LIBPMEMOBJ_ULOG_H
#define LIBPMEMOBJ_ULOG_H 1
#include <stddef.h>
#include <stdint.h>
#include "vec.h"
#include "pmemops.h"
struct ulog_entry_base {
uint64_t offset; /* offset with operation type flag */
};
/*
* ulog_entry_val -- log entry
*/
struct ulog_entry_val {
struct ulog_entry_base base;
uint64_t value; /* value to be applied */
};
/*
* ulog_entry_buf - ulog buffer entry
*/
struct ulog_entry_buf {
struct ulog_entry_base base; /* offset with operation type flag */
uint64_t checksum; /* checksum of the entire log entry */
uint64_t size; /* size of the buffer to be modified */
uint8_t data[]; /* content to fill in */
};
/*
* This structure *must* be located at a cacheline boundary. To achieve this,
* the next field is always allocated with extra padding, and then the offset
* is additionally aligned.
*/
#define ULOG(capacity_bytes) {\
/* 64 bytes of metadata */\
uint64_t checksum; /* checksum of ulog header and its entries */\
uint64_t next; /* offset of ulog extension */\
uint64_t capacity; /* capacity of this ulog in bytes */\
uint64_t unused[5]; /* must be 0 */\
uint8_t data[capacity_bytes]; /* N bytes of data */\
}\
#define SIZEOF_ULOG(base_capacity)\
(sizeof(struct ulog) + base_capacity)
/* use this for allocations of aligned ulog extensions */
#define SIZEOF_ALIGNED_ULOG(base_capacity)\
(SIZEOF_ULOG(base_capacity) + CACHELINE_SIZE)
struct ulog ULOG(0);
VEC(ulog_next, uint64_t);
typedef uint64_t ulog_operation_type;
#define ULOG_OPERATION_SET (0b000ULL << 61ULL)
#define ULOG_OPERATION_AND (0b001ULL << 61ULL)
#define ULOG_OPERATION_OR (0b010ULL << 61ULL)
#define ULOG_OPERATION_BUF_SET (0b101ULL << 61ULL)
#define ULOG_OPERATION_BUF_CPY (0b110ULL << 61ULL)
#define ULOG_BIT_OPERATIONS (ULOG_OPERATION_AND | ULOG_OPERATION_OR)
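/*
 * Encoding example (added for illustration): the operation type occupies
 * the top three bits of ulog_entry_base.offset, so an entry copying a
 * buffer to pool offset 0x1000 stores
 *	e->offset == 0x1000 | ULOG_OPERATION_BUF_CPY
 * and ulog_entry_offset()/ulog_entry_type() below recover the two fields
 * by masking those bits apart.
 */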
typedef int (*ulog_check_offset_fn)(void *ctx, uint64_t offset);
typedef int (*ulog_extend_fn)(void *, uint64_t *);
typedef int (*ulog_entry_cb)(struct ulog_entry_base *e, void *arg,
const struct pmem_ops *p_ops);
typedef void (*ulog_free_fn)(void *base, uint64_t *next);
struct ulog *ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops);
void ulog_construct(uint64_t offset, size_t capacity, int flush,
const struct pmem_ops *p_ops);
size_t ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes,
const struct pmem_ops *p_ops);
void ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next,
const struct pmem_ops *p_ops);
int ulog_foreach_entry(struct ulog *ulog,
ulog_entry_cb cb, void *arg, const struct pmem_ops *ops);
int ulog_reserve(struct ulog *ulog,
size_t ulog_base_nbytes, size_t *new_capacity_bytes,
ulog_extend_fn extend, struct ulog_next *next,
const struct pmem_ops *p_ops);
void ulog_store(struct ulog *dest,
struct ulog *src, size_t nbytes, size_t ulog_base_nbytes,
struct ulog_next *next, const struct pmem_ops *p_ops);
void ulog_clobber(struct ulog *dest, struct ulog_next *next,
const struct pmem_ops *p_ops);
void ulog_clobber_data(struct ulog *dest,
size_t nbytes, size_t ulog_base_nbytes,
struct ulog_next *next, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops);
void ulog_process(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
size_t ulog_base_nbytes(struct ulog *ulog);
int ulog_recovery_needed(struct ulog *ulog, int verify_checksum);
uint64_t ulog_entry_offset(const struct ulog_entry_base *entry);
ulog_operation_type ulog_entry_type(
const struct ulog_entry_base *entry);
struct ulog_entry_val *ulog_entry_val_create(struct ulog *ulog,
size_t offset, uint64_t *dest, uint64_t value,
ulog_operation_type type,
const struct pmem_ops *p_ops);
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset,
uint64_t *dest, const void *src, uint64_t size,
ulog_operation_type type, const struct pmem_ops *p_ops);
void ulog_entry_apply(const struct ulog_entry_base *e, int persist,
const struct pmem_ops *p_ops);
size_t ulog_entry_size(const struct ulog_entry_base *entry);
void ulog_recover(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
int ulog_check(struct ulog *ulog, ulog_check_offset_fn check,
const struct pmem_ops *p_ops);
#endif
| 5,859 | 33.674556 | 77 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/libpmemobj.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemobj.c -- pmem entry points for libpmemobj
*/
#include "pmemcommon.h"
#include "obj.h"
/*
* libpmemobj_init -- load-time initialization for obj
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemobj_init(void)
{
common_init(PMEMOBJ_LOG_PREFIX, PMEMOBJ_LOG_LEVEL_VAR,
PMEMOBJ_LOG_FILE_VAR, PMEMOBJ_MAJOR_VERSION,
PMEMOBJ_MINOR_VERSION);
LOG(3, NULL);
obj_init();
}
/*
* libpmemobj_fini -- libpmemobj cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemobj_fini(void)
{
LOG(3, NULL);
obj_fini();
common_fini();
}
/*
* pmemobj_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemobj_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMOBJ_MAJOR_VERSION) {
ERR("libpmemobj major version mismatch (need %u, found %u)",
major_required, PMEMOBJ_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMOBJ_MINOR_VERSION) {
ERR("libpmemobj minor version mismatch (need %u, found %u)",
minor_required, PMEMOBJ_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemobj_check_version -- see if lib meets application version requirements
*/
const char *
pmemobj_check_version(unsigned major_required, unsigned minor_required)
{
return pmemobj_check_versionU(major_required, minor_required);
}
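/*
 * Typical use (an illustrative sketch): an application verifies at
 * startup that the loaded library is compatible with the headers it was
 * compiled against:
 *
 *	if (pmemobj_check_version(PMEMOBJ_MAJOR_VERSION,
 *			PMEMOBJ_MINOR_VERSION) != NULL) {
 *		fprintf(stderr, "%s\n", pmemobj_errormsg());
 *		exit(1);
 *	}
 */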
#else
/*
* pmemobj_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemobj_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemobj_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmemobj_set_funcs -- allow overriding libpmemobj's call to malloc, etc.
*/
void
pmemobj_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
/*
* pmemobj_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemobj_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemobj_errormsg -- return last error message
*/
const char *
pmemobj_errormsg(void)
{
return pmemobj_errormsgU();
}
#else
/*
* pmemobj_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemobj_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 4,294 | 24.873494 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/cuckoo.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cuckoo.c -- implementation of cuckoo hash table
*/
#include <stdint.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include "cuckoo.h"
#include "out.h"
#define MAX_HASH_FUNCS 2
#define GROWTH_FACTOR 1.2f
#define INITIAL_SIZE 8
#define MAX_INSERTS 8
#define MAX_GROWS 32
struct cuckoo_slot {
uint64_t key;
void *value;
};
struct cuckoo {
size_t size; /* number of hash table slots */
struct cuckoo_slot *tab;
};
static const struct cuckoo_slot null_slot = {0, NULL};
/*
* hash_mod -- (internal) first hash function
*/
static size_t
hash_mod(struct cuckoo *c, uint64_t key)
{
return key % c->size;
}
/*
* hash_mixer -- (internal) second hash function
*
* Based on Austin Appleby's MurmurHash3 64-bit finalizer.
*/
static size_t
hash_mixer(struct cuckoo *c, uint64_t key)
{
key ^= key >> 33;
key *= 0xff51afd7ed558ccd;
key ^= key >> 33;
key *= 0xc4ceb9fe1a85ec53;
key ^= key >> 33;
return key % c->size;
}
static size_t
(*hash_funcs[MAX_HASH_FUNCS])(struct cuckoo *c, uint64_t key) = {
hash_mod,
hash_mixer
};
/*
* cuckoo_new -- allocates and initializes cuckoo hash table
*/
struct cuckoo *
cuckoo_new(void)
{
COMPILE_ERROR_ON((size_t)(INITIAL_SIZE * GROWTH_FACTOR)
== INITIAL_SIZE);
struct cuckoo *c = Malloc(sizeof(struct cuckoo));
if (c == NULL) {
ERR("!Malloc");
goto error_cuckoo_malloc;
}
c->size = INITIAL_SIZE;
size_t tab_rawsize = c->size * sizeof(struct cuckoo_slot);
c->tab = Zalloc(tab_rawsize);
if (c->tab == NULL)
goto error_tab_malloc;
return c;
error_tab_malloc:
Free(c);
error_cuckoo_malloc:
return NULL;
}
/*
* cuckoo_delete -- cleanups and deallocates cuckoo hash table
*/
void
cuckoo_delete(struct cuckoo *c)
{
ASSERTne(c, NULL);
Free(c->tab);
Free(c);
}
/*
* cuckoo_insert_try -- (internal) try inserting into the existing hash table
*/
static int
cuckoo_insert_try(struct cuckoo *c, struct cuckoo_slot *src)
{
struct cuckoo_slot srct;
size_t h[MAX_HASH_FUNCS] = {0};
for (int n = 0; n < MAX_INSERTS; ++n) {
for (int i = 0; i < MAX_HASH_FUNCS; ++i) {
h[i] = hash_funcs[i](c, src->key);
if (c->tab[h[i]].value == NULL) {
c->tab[h[i]] = *src;
return 0;
} else if (c->tab[h[i]].key == src->key) {
return EINVAL;
}
}
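/*
 * Both candidate slots are taken by other keys: evict the occupant
 * of the first slot, store the new pair there and retry the loop
 * with the evicted pair (the cuckoo displacement step).
 */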
srct = c->tab[h[0]];
c->tab[h[0]] = *src;
src->key = srct.key;
src->value = srct.value;
}
return EAGAIN;
}
/*
* cuckoo_grow -- (internal) rehashes the table with GROWTH_FACTOR * size
*/
static int
cuckoo_grow(struct cuckoo *c)
{
size_t oldsize = c->size;
struct cuckoo_slot *oldtab = c->tab;
int n;
for (n = 0; n < MAX_GROWS; ++n) {
size_t nsize = (size_t)((float)c->size * GROWTH_FACTOR);
size_t tab_rawsize = nsize * sizeof(struct cuckoo_slot);
c->tab = Zalloc(tab_rawsize);
if (c->tab == NULL) {
c->tab = oldtab;
return ENOMEM;
}
c->size = nsize;
unsigned i;
for (i = 0; i < oldsize; ++i) {
struct cuckoo_slot s = oldtab[i];
if (s.value != NULL && (cuckoo_insert_try(c, &s) != 0))
break;
}
if (i == oldsize)
break;
else
Free(c->tab);
}
if (n == MAX_GROWS) {
c->tab = oldtab;
c->size = oldsize;
return EINVAL;
}
Free(oldtab);
return 0;
}
/*
* cuckoo_insert -- inserts key-value pair into the hash table
*/
int
cuckoo_insert(struct cuckoo *c, uint64_t key, void *value)
{
ASSERTne(c, NULL);
int err;
struct cuckoo_slot src = {key, value};
for (int n = 0; n < MAX_GROWS; ++n) {
if ((err = cuckoo_insert_try(c, &src)) != EAGAIN)
return err;
if ((err = cuckoo_grow(c)) != 0)
return err;
}
return EINVAL;
}
/*
* cuckoo_find_slot -- (internal) finds the hash table slot of key
*/
static struct cuckoo_slot *
cuckoo_find_slot(struct cuckoo *c, uint64_t key)
{
for (int i = 0; i < MAX_HASH_FUNCS; ++i) {
size_t h = hash_funcs[i](c, key);
if (c->tab[h].key == key)
return &c->tab[h];
}
return NULL;
}
/*
* cuckoo_remove -- removes key-value pair from the hash table
*/
void *
cuckoo_remove(struct cuckoo *c, uint64_t key)
{
ASSERTne(c, NULL);
void *ret = NULL;
struct cuckoo_slot *s = cuckoo_find_slot(c, key);
if (s) {
ret = s->value;
*s = null_slot;
}
return ret;
}
/*
* cuckoo_get -- returns the value of a key
*/
void *
cuckoo_get(struct cuckoo *c, uint64_t key)
{
ASSERTne(c, NULL);
struct cuckoo_slot *s = cuckoo_find_slot(c, key);
return s ? s->value : NULL;
}
/*
* cuckoo_get_size -- returns the size of the underlying table, useful for
* calculating load factor and predicting possible rehashes
*/
size_t
cuckoo_get_size(struct cuckoo *c)
{
ASSERTne(c, NULL);
return c->size;
}
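/*
 * Example usage of the table (an illustrative sketch, not part of the
 * original source; 'some_ptr' is a hypothetical non-NULL pointer):
 *
 *	struct cuckoo *c = cuckoo_new();
 *	if (c == NULL || cuckoo_insert(c, 17, some_ptr) != 0)
 *		abort();
 *	void *v = cuckoo_get(c, 17);	(v == some_ptr)
 *	void *r = cuckoo_remove(c, 17);	(r == some_ptr)
 *	cuckoo_delete(c);
 *
 * Note that NULL values cannot be stored, since an empty slot is
 * encoded as value == NULL.
 */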
| 6,169 | 21.114695 | 77 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ravl.h | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ravl.h -- internal definitions for ravl tree
*/
#ifndef LIBPMEMOBJ_RAVL_H
#define LIBPMEMOBJ_RAVL_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct ravl;
struct ravl_node;
enum ravl_predicate {
RAVL_PREDICATE_EQUAL = 1 << 0,
RAVL_PREDICATE_GREATER = 1 << 1,
RAVL_PREDICATE_LESS = 1 << 2,
RAVL_PREDICATE_LESS_EQUAL =
RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_LESS,
RAVL_PREDICATE_GREATER_EQUAL =
RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_GREATER,
};
typedef int ravl_compare(const void *lhs, const void *rhs);
typedef void ravl_cb(void *data, void *arg);
typedef void ravl_constr(void *data, size_t data_size, const void *arg);
struct ravl *ravl_new(ravl_compare *compare);
struct ravl *ravl_new_sized(ravl_compare *compare, size_t data_size);
void ravl_delete(struct ravl *ravl);
void ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg);
int ravl_empty(struct ravl *ravl);
void ravl_clear(struct ravl *ravl);
int ravl_insert(struct ravl *ravl, const void *data);
int ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg);
int ravl_emplace_copy(struct ravl *ravl, const void *data);
struct ravl_node *ravl_find(struct ravl *ravl, const void *data,
enum ravl_predicate predicate_flags);
void *ravl_data(struct ravl_node *node);
void ravl_remove(struct ravl *ravl, struct ravl_node *node);
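/*
 * Illustrative usage sketch (not part of the original header; error
 * handling elided, cmp_u64 is a hypothetical comparator following the
 * usual negative/zero/positive convention):
 *
 *	static int
 *	cmp_u64(const void *lhs, const void *rhs)
 *	{
 *		uint64_t l = *(const uint64_t *)lhs;
 *		uint64_t r = *(const uint64_t *)rhs;
 *		return l < r ? -1 : (l > r ? 1 : 0);
 *	}
 *
 *	struct ravl *r = ravl_new_sized(cmp_u64, sizeof(uint64_t));
 *	uint64_t key = 7;
 *	ravl_emplace_copy(r, &key);
 *	struct ravl_node *n = ravl_find(r, &key, RAVL_PREDICATE_EQUAL);
 *	if (n != NULL)
 *		ravl_remove(r, n);
 *	ravl_delete(r);
 */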
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_RAVL_H */
| 3,005 | 35.216867 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/lane.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* lane.c -- lane implementation
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <inttypes.h>
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include "libpmemobj.h"
#include "cuckoo.h"
#include "lane.h"
#include "out.h"
#include "util.h"
#include "obj.h"
#include "os_thread.h"
#include "valgrind_internal.h"
#include "memops.h"
#include "palloc.h"
#include "tx.h"
static os_tls_key_t Lane_info_key;
static __thread struct cuckoo *Lane_info_ht;
static __thread struct lane_info *Lane_info_records;
static __thread struct lane_info *Lane_info_cache;
/*
* lane_info_create -- (internal) constructor for thread shared data
*/
static inline void
lane_info_create(void)
{
Lane_info_ht = cuckoo_new();
if (Lane_info_ht == NULL)
FATAL("cuckoo_new");
}
/*
* lane_info_delete -- (internal) deletes lane info hash table
*/
static inline void
lane_info_delete(void)
{
if (unlikely(Lane_info_ht == NULL))
return;
cuckoo_delete(Lane_info_ht);
struct lane_info *record;
struct lane_info *head = Lane_info_records;
while (head != NULL) {
record = head;
head = head->next;
Free(record);
}
Lane_info_ht = NULL;
Lane_info_records = NULL;
Lane_info_cache = NULL;
}
/*
* lane_info_ht_boot -- (internal) boot lane info and add it to thread shared
* data
*/
static inline void
lane_info_ht_boot(void)
{
lane_info_create();
int result = os_tls_set(Lane_info_key, Lane_info_ht);
if (result != 0) {
errno = result;
FATAL("!os_tls_set");
}
}
/*
* lane_info_ht_destroy -- (internal) destructor for thread shared data
*/
static inline void
lane_info_ht_destroy(void *ht)
{
lane_info_delete();
}
/*
* lane_info_boot -- initialize lane info hash table and lane info key
*/
void
lane_info_boot(void)
{
int result = os_tls_key_create(&Lane_info_key, lane_info_ht_destroy);
if (result != 0) {
errno = result;
FATAL("!os_tls_key_create");
}
}
/*
* lane_info_destroy -- destroy lane info hash table
*/
void
lane_info_destroy(void)
{
lane_info_delete();
(void) os_tls_key_delete(Lane_info_key);
}
/*
 * lane_info_cleanup -- removes the lane info record for the pool being deleted
*/
static inline void
lane_info_cleanup(PMEMobjpool *pop)
{
if (unlikely(Lane_info_ht == NULL))
return;
struct lane_info *info = cuckoo_remove(Lane_info_ht, pop->uuid_lo);
if (likely(info != NULL)) {
if (info->prev)
info->prev->next = info->next;
if (info->next)
info->next->prev = info->prev;
if (Lane_info_cache == info)
Lane_info_cache = NULL;
if (Lane_info_records == info)
Lane_info_records = info->next;
Free(info);
}
}
/*
 * lane_get_layout -- (internal) computes the real pointer to the lane layout
*/
static struct lane_layout *
lane_get_layout(PMEMobjpool *pop, uint64_t lane_idx)
{
return (void *)((char *)pop + pop->lanes_offset +
sizeof(struct lane_layout) * lane_idx);
}
/*
* lane_ulog_constructor -- (internal) constructor of a ulog extension
*/
static int
lane_ulog_constructor(void *base, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = base;
const struct pmem_ops *p_ops = &pop->p_ops;
size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog),
CACHELINE_SIZE);
ulog_construct(OBJ_PTR_TO_OFF(base, ptr), capacity, 1, p_ops);
return 0;
}
/*
* lane_undo_extend -- allocates a new undo log
*/
static int
lane_undo_extend(void *base, uint64_t *redo)
{
PMEMobjpool *pop = base;
struct tx_parameters *params = pop->tx_params;
size_t s = SIZEOF_ALIGNED_ULOG(params->cache_size);
return pmalloc_construct(base, redo, s, lane_ulog_constructor, NULL,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
* lane_redo_extend -- allocates a new redo log
*/
static int
lane_redo_extend(void *base, uint64_t *redo)
{
size_t s = SIZEOF_ALIGNED_ULOG(LANE_REDO_EXTERNAL_SIZE);
return pmalloc_construct(base, redo, s, lane_ulog_constructor, NULL,
0, OBJ_INTERNAL_OBJECT_MASK, 0);
}
/*
 * lane_init -- (internal) initializes a single lane's runtime variables
*/
static int
lane_init(PMEMobjpool *pop, struct lane *lane, struct lane_layout *layout)
{
ASSERTne(lane, NULL);
lane->layout = layout;
lane->internal = operation_new((struct ulog *)&layout->internal,
LANE_REDO_INTERNAL_SIZE,
NULL, NULL, &pop->p_ops,
LOG_TYPE_REDO);
if (lane->internal == NULL)
goto error_internal_new;
lane->external = operation_new((struct ulog *)&layout->external,
LANE_REDO_EXTERNAL_SIZE,
lane_redo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_REDO);
if (lane->external == NULL)
goto error_external_new;
lane->undo = operation_new((struct ulog *)&layout->undo,
LANE_UNDO_SIZE,
lane_undo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_UNDO);
if (lane->undo == NULL)
goto error_undo_new;
return 0;
error_undo_new:
operation_delete(lane->external);
error_external_new:
operation_delete(lane->internal);
error_internal_new:
return -1;
}
/*
 * lane_destroy -- cleans up a single lane's runtime variables
*/
static void
lane_destroy(PMEMobjpool *pop, struct lane *lane)
{
operation_delete(lane->undo);
operation_delete(lane->internal);
operation_delete(lane->external);
}
/*
* lane_boot -- initializes all lanes
*/
int
lane_boot(PMEMobjpool *pop)
{
int err = 0;
pop->lanes_desc.lane = Malloc(sizeof(struct lane) * pop->nlanes);
if (pop->lanes_desc.lane == NULL) {
err = ENOMEM;
ERR("!Malloc of volatile lanes");
goto error_lanes_malloc;
}
pop->lanes_desc.next_lane_idx = 0;
pop->lanes_desc.lane_locks =
Zalloc(sizeof(*pop->lanes_desc.lane_locks) * pop->nlanes);
if (pop->lanes_desc.lane_locks == NULL) {
		ERR("!Zalloc for lane locks");
goto error_locks_malloc;
}
/* add lanes to pmemcheck ignored list */
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset,
(sizeof(struct lane_layout) * pop->nlanes));
uint64_t i;
for (i = 0; i < pop->nlanes; ++i) {
struct lane_layout *layout = lane_get_layout(pop, i);
if ((err = lane_init(pop, &pop->lanes_desc.lane[i], layout))) {
ERR("!lane_init");
goto error_lane_init;
}
}
return 0;
error_lane_init:
for (; i >= 1; --i)
lane_destroy(pop, &pop->lanes_desc.lane[i - 1]);
Free(pop->lanes_desc.lane_locks);
pop->lanes_desc.lane_locks = NULL;
error_locks_malloc:
Free(pop->lanes_desc.lane);
pop->lanes_desc.lane = NULL;
error_lanes_malloc:
return err;
}
/*
 * lane_init_data -- initializes ulogs for all the lanes
*/
void
lane_init_data(PMEMobjpool *pop)
{
struct lane_layout *layout;
for (uint64_t i = 0; i < pop->nlanes; ++i) {
layout = lane_get_layout(pop, i);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->internal),
LANE_REDO_INTERNAL_SIZE, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->external),
LANE_REDO_EXTERNAL_SIZE, 0, &pop->p_ops);
ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->undo),
LANE_UNDO_SIZE, 0, &pop->p_ops);
}
layout = lane_get_layout(pop, 0);
pmemops_xpersist(&pop->p_ops, layout,
pop->nlanes * sizeof(struct lane_layout),
PMEMOBJ_F_RELAXED);
}
/*
* lane_cleanup -- destroys all lanes
*/
void
lane_cleanup(PMEMobjpool *pop)
{
for (uint64_t i = 0; i < pop->nlanes; ++i)
lane_destroy(pop, &pop->lanes_desc.lane[i]);
Free(pop->lanes_desc.lane);
pop->lanes_desc.lane = NULL;
Free(pop->lanes_desc.lane_locks);
pop->lanes_desc.lane_locks = NULL;
lane_info_cleanup(pop);
}
/*
* lane_recover_and_section_boot -- performs initialization and recovery of all
* lanes
*/
int
lane_recover_and_section_boot(PMEMobjpool *pop)
{
COMPILE_ERROR_ON(SIZEOF_ULOG(LANE_UNDO_SIZE) +
SIZEOF_ULOG(LANE_REDO_EXTERNAL_SIZE) +
SIZEOF_ULOG(LANE_REDO_INTERNAL_SIZE) != LANE_TOTAL_SIZE);
int err = 0;
uint64_t i; /* lane index */
struct lane_layout *layout;
/*
* First we need to recover the internal/external redo logs so that the
* allocator state is consistent before we boot it.
*/
for (i = 0; i < pop->nlanes; ++i) {
layout = lane_get_layout(pop, i);
ulog_recover((struct ulog *)&layout->internal,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
ulog_recover((struct ulog *)&layout->external,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
}
if ((err = pmalloc_boot(pop)) != 0)
return err;
/*
* Undo logs must be processed after the heap is initialized since
 * an undo recovery might require deallocation of the next ulogs.
*/
for (i = 0; i < pop->nlanes; ++i) {
layout = lane_get_layout(pop, i);
struct ulog *undo = (struct ulog *)&layout->undo;
struct operation_context *ctx = operation_new(
undo,
LANE_UNDO_SIZE,
lane_undo_extend, (ulog_free_fn)pfree, &pop->p_ops,
LOG_TYPE_UNDO);
if (ctx == NULL) {
LOG(2, "undo recovery failed %" PRIu64 " %d",
i, err);
return err;
}
operation_resume(ctx);
operation_process(ctx);
operation_finish(ctx);
operation_delete(ctx);
}
return 0;
}
/*
* lane_section_cleanup -- performs runtime cleanup of all lanes
*/
int
lane_section_cleanup(PMEMobjpool *pop)
{
return pmalloc_cleanup(pop);
}
/*
* lane_check -- performs check of all lanes
*/
int
lane_check(PMEMobjpool *pop)
{
int err = 0;
uint64_t j; /* lane index */
struct lane_layout *layout;
for (j = 0; j < pop->nlanes; ++j) {
layout = lane_get_layout(pop, j);
if (ulog_check((struct ulog *)&layout->internal,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops) != 0) {
LOG(2, "lane %" PRIu64 " internal redo failed: %d",
j, err);
return err;
}
}
return 0;
}
/*
* get_lane -- (internal) get free lane index
*/
static inline void
get_lane(uint64_t *locks, struct lane_info *info, uint64_t nlocks)
{
info->lane_idx = info->primary;
while (1) {
do {
info->lane_idx %= nlocks;
if (likely(util_bool_compare_and_swap64(
&locks[info->lane_idx], 0, 1))) {
if (info->lane_idx == info->primary) {
info->primary_attempts =
LANE_PRIMARY_ATTEMPTS;
} else if (info->primary_attempts == 0) {
info->primary = info->lane_idx;
info->primary_attempts =
LANE_PRIMARY_ATTEMPTS;
}
return;
}
if (info->lane_idx == info->primary &&
info->primary_attempts > 0) {
info->primary_attempts--;
}
++info->lane_idx;
} while (info->lane_idx < nlocks);
sched_yield();
}
}
/*
 * get_lane_info_record -- (internal) returns the lane info record attached to
 * the memory pool, creating a new one if it does not exist yet
*/
static inline struct lane_info *
get_lane_info_record(PMEMobjpool *pop)
{
if (likely(Lane_info_cache != NULL &&
Lane_info_cache->pop_uuid_lo == pop->uuid_lo)) {
return Lane_info_cache;
}
if (unlikely(Lane_info_ht == NULL)) {
lane_info_ht_boot();
}
struct lane_info *info = cuckoo_get(Lane_info_ht, pop->uuid_lo);
if (unlikely(info == NULL)) {
info = Malloc(sizeof(struct lane_info));
if (unlikely(info == NULL)) {
FATAL("Malloc");
}
info->pop_uuid_lo = pop->uuid_lo;
info->lane_idx = UINT64_MAX;
info->nest_count = 0;
info->next = Lane_info_records;
info->prev = NULL;
info->primary = 0;
info->primary_attempts = LANE_PRIMARY_ATTEMPTS;
if (Lane_info_records) {
Lane_info_records->prev = info;
}
Lane_info_records = info;
if (unlikely(cuckoo_insert(
Lane_info_ht, pop->uuid_lo, info) != 0)) {
FATAL("cuckoo_insert");
}
}
Lane_info_cache = info;
return info;
}
/*
* lane_hold -- grabs a per-thread lane in a round-robin fashion
*/
unsigned
lane_hold(PMEMobjpool *pop, struct lane **lanep)
{
/*
* Before runtime lane initialization all remote operations are
* executed using RLANE_DEFAULT.
*/
if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
ASSERT(pop->has_remote_replicas);
if (lanep != NULL)
FATAL("cannot obtain section before lane's init");
return RLANE_DEFAULT;
}
struct lane_info *lane = get_lane_info_record(pop);
while (unlikely(lane->lane_idx == UINT64_MAX)) {
/* initial wrap to next CL */
lane->primary = lane->lane_idx = util_fetch_and_add32(
&pop->lanes_desc.next_lane_idx, LANE_JUMP);
} /* handles wraparound */
uint64_t *llocks = pop->lanes_desc.lane_locks;
/* grab next free lane from lanes available at runtime */
if (!lane->nest_count++) {
get_lane(llocks, lane, pop->lanes_desc.runtime_nlanes);
}
struct lane *l = &pop->lanes_desc.lane[lane->lane_idx];
/* reinitialize lane's content only if in outermost hold */
if (lanep && lane->nest_count == 1) {
VALGRIND_ANNOTATE_NEW_MEMORY(l, sizeof(*l));
VALGRIND_ANNOTATE_NEW_MEMORY(l->layout, sizeof(*l->layout));
operation_init(l->external);
operation_init(l->internal);
operation_init(l->undo);
}
if (lanep)
*lanep = l;
return (unsigned)lane->lane_idx;
}
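/*
 * Illustrative sketch of the hold/release pairing (not part of the
 * original sources): holds nest within a thread, and every hold must be
 * matched by exactly one lane_release().
 *
 *	struct lane *lane;
 *	unsigned idx = lane_hold(pop, &lane);	// outermost hold
 *	(void) lane_hold(pop, NULL);		// nested hold, same lane idx
 *	lane_release(pop);			// drops the nested hold
 *	lane_release(pop);			// lane idx is available again
 */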
/*
* lane_attach -- attaches the lane with the given index to the current thread
*/
void
lane_attach(PMEMobjpool *pop, unsigned lane)
{
struct lane_info *info = get_lane_info_record(pop);
info->nest_count = 1;
info->lane_idx = lane;
}
/*
* lane_detach -- detaches the currently held lane from the current thread
*/
unsigned
lane_detach(PMEMobjpool *pop)
{
struct lane_info *lane = get_lane_info_record(pop);
lane->nest_count -= 1;
ASSERTeq(lane->nest_count, 0);
return (unsigned)lane->lane_idx;
}
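/*
 * Illustrative sketch (assumed usage): lane_attach()/lane_detach() let a
 * held lane migrate between threads.
 *
 *	// in the current owner thread:
 *	unsigned idx = lane_detach(pop);
 *	// ... hand idx over to another thread ...
 *	// in the new owner thread:
 *	lane_attach(pop, idx);
 */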
/*
* lane_release -- drops the per-thread lane
*/
void
lane_release(PMEMobjpool *pop)
{
if (unlikely(!pop->lanes_desc.runtime_nlanes)) {
ASSERT(pop->has_remote_replicas);
return;
}
struct lane_info *lane = get_lane_info_record(pop);
ASSERTne(lane, NULL);
ASSERTne(lane->lane_idx, UINT64_MAX);
if (unlikely(lane->nest_count == 0)) {
FATAL("lane_release");
} else if (--(lane->nest_count) == 0) {
if (unlikely(!util_bool_compare_and_swap64(
&pop->lanes_desc.lane_locks[lane->lane_idx],
1, 0))) {
FATAL("util_bool_compare_and_swap64");
}
}
}
| 15,147 | 22.78022 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_lane/obj_lane.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_lane.c -- unit test for lanes
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <inttypes.h>
#include "list.h"
#include "obj.h"
#include "tx.h"
#include "unittest.h"
#include "pmemcommon.h"
#define MAX_MOCK_LANES 5
#define MOCK_LAYOUT (void *)(0xAAA)
static void *base_ptr;
struct mock_pop {
PMEMobjpool p;
struct lane_layout l[MAX_MOCK_LANES];
};
/*
* mock_flush -- mock flush for lanes
*/
static int
mock_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
return 0;
}
/*
 * mock_persist -- mock persist for lanes
*/
static int
mock_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
return 0;
}
/*
* mock_memset -- mock memset for lanes
*/
static void *
mock_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
memset(ptr, c, sz);
return ptr;
}
/*
* mock_drain -- mock drain for lanes
*/
static void
mock_drain(void *ctx)
{
}
static void
test_lane_boot_cleanup_ok(void)
{
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = MAX_MOCK_LANES;
base_ptr = &pop->p;
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
pop->p.p_ops.base = pop;
pop->p.p_ops.flush = mock_flush;
pop->p.p_ops.memset = mock_memset;
pop->p.p_ops.drain = mock_drain;
pop->p.p_ops.persist = mock_persist;
lane_init_data(&pop->p);
lane_info_boot();
UT_ASSERTeq(lane_boot(&pop->p), 0);
for (int i = 0; i < MAX_MOCK_LANES; ++i) {
struct lane *lane = &pop->p.lanes_desc.lane[i];
UT_ASSERTeq(lane->layout, &pop->l[i]);
}
lane_cleanup(&pop->p);
UT_ASSERTeq(pop->p.lanes_desc.lane, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane_locks, NULL);
FREE(pop);
}
static ut_jmp_buf_t Jmp;
static void
signal_handler(int sig)
{
ut_siglongjmp(Jmp);
}
static void
test_lane_hold_release(void)
{
struct ulog *mock_ulog = ZALLOC(SIZEOF_ULOG(1024));
struct pmem_ops p_ops;
struct operation_context *ctx = operation_new(mock_ulog, 1024,
NULL, NULL, &p_ops, LOG_TYPE_REDO);
struct lane mock_lane = {
.layout = MOCK_LAYOUT,
.internal = ctx,
.external = ctx,
.undo = ctx,
};
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = 1;
	pop->p.lanes_desc.runtime_nlanes = 1;
pop->p.lanes_desc.lane = &mock_lane;
pop->p.lanes_desc.next_lane_idx = 0;
pop->p.lanes_desc.lane_locks = CALLOC(OBJ_NLANES, sizeof(uint64_t));
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
pop->p.uuid_lo = 123456;
base_ptr = &pop->p;
struct lane *lane;
lane_hold(&pop->p, &lane);
UT_ASSERTeq(lane->layout, MOCK_LAYOUT);
UT_ASSERTeq(lane->undo, ctx);
lane_hold(&pop->p, &lane);
UT_ASSERTeq(lane->layout, MOCK_LAYOUT);
UT_ASSERTeq(lane->undo, ctx);
lane_release(&pop->p);
lane_release(&pop->p);
struct sigaction v, old;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGABRT, &v, &old);
if (!ut_sigsetjmp(Jmp)) {
lane_release(&pop->p); /* only two sections were held */
UT_ERR("we should not get here");
}
SIGACTION(SIGABRT, &old, NULL);
FREE(pop->p.lanes_desc.lane_locks);
FREE(pop);
operation_delete(ctx);
FREE(mock_ulog);
}
static void
test_lane_sizes(void)
{
UT_COMPILE_ERROR_ON(sizeof(struct lane_layout) != LANE_TOTAL_SIZE);
}
enum thread_work_type {
LANE_INFO_DESTROY,
LANE_CLEANUP
};
struct thread_data {
enum thread_work_type work;
};
/*
 * test_separate_thread -- child thread entry point for multithreaded
* scenarios
*/
static void *
test_separate_thread(void *arg)
{
UT_ASSERTne(arg, NULL);
struct thread_data *data = arg;
switch (data->work) {
case LANE_INFO_DESTROY:
lane_info_destroy();
break;
case LANE_CLEANUP:
UT_ASSERTne(base_ptr, NULL);
lane_cleanup(base_ptr);
break;
default:
UT_FATAL("Unimplemented thread work type: %d", data->work);
}
return NULL;
}
/*
* test_lane_info_destroy_in_separate_thread -- lane info boot from one thread
* and lane info destroy from another
*/
static void
test_lane_info_destroy_in_separate_thread(void)
{
lane_info_boot();
struct thread_data data;
data.work = LANE_INFO_DESTROY;
os_thread_t thread;
os_thread_create(&thread, NULL, test_separate_thread, &data);
os_thread_join(&thread, NULL);
lane_info_destroy();
}
/*
* test_lane_cleanup_in_separate_thread -- lane boot from one thread and lane
* cleanup from another
*/
static void
test_lane_cleanup_in_separate_thread(void)
{
struct mock_pop *pop = MALLOC(sizeof(struct mock_pop));
pop->p.nlanes = MAX_MOCK_LANES;
pop->p.p_ops.base = pop;
pop->p.p_ops.flush = mock_flush;
pop->p.p_ops.memset = mock_memset;
pop->p.p_ops.drain = mock_drain;
pop->p.p_ops.persist = mock_persist;
base_ptr = &pop->p;
pop->p.lanes_offset = (uint64_t)&pop->l - (uint64_t)&pop->p;
lane_init_data(&pop->p);
lane_info_boot();
UT_ASSERTeq(lane_boot(&pop->p), 0);
for (int i = 0; i < MAX_MOCK_LANES; ++i) {
struct lane *lane = &pop->p.lanes_desc.lane[i];
UT_ASSERTeq(lane->layout, &pop->l[i]);
}
struct thread_data data;
data.work = LANE_CLEANUP;
os_thread_t thread;
os_thread_create(&thread, NULL, test_separate_thread, &data);
os_thread_join(&thread, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane, NULL);
UT_ASSERTeq(pop->p.lanes_desc.lane_locks, NULL);
FREE(pop);
}
static void
usage(const char *app)
{
UT_FATAL("usage: %s [scenario: s/m]", app);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_lane");
obj_init();
if (argc != 2)
usage(argv[0]);
switch (argv[1][0]) {
case 's':
/* single thread scenarios */
test_lane_boot_cleanup_ok();
test_lane_hold_release();
test_lane_sizes();
break;
case 'm':
/* multithreaded scenarios */
test_lane_info_destroy_in_separate_thread();
test_lane_cleanup_in_separate_thread();
break;
default:
usage(argv[0]);
}
obj_fini();
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically,
* we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 7,562 | 21.114035 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_common.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_common.h -- common declarations for rpmem_obc test
*/
#include "unittest.h"
#include "out.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#define POOL_SIZE 1024
#define NLANES 32
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool_desc"
#define RKEY 0xabababababababab
#define RADDR 0x0101010101010101
#define PORT 1234
#define BUFF_SIZE 8192
#define POOL_ATTR_INIT {\
.signature = "<RPMEM>",\
.major = 1,\
.compat_features = 2,\
.incompat_features = 3,\
.ro_compat_features = 4,\
.poolset_uuid = "POOLSET_UUID0123",\
.uuid = "UUID0123456789AB",\
.next_uuid = "NEXT_UUID0123456",\
.prev_uuid = "PREV_UUID0123456",\
.user_flags = "USER_FLAGS012345",\
}
#define POOL_ATTR_ALT {\
.signature = "<ALT>",\
.major = 5,\
.compat_features = 6,\
.incompat_features = 7,\
.ro_compat_features = 8,\
.poolset_uuid = "UUID_POOLSET_ALT",\
.uuid = "ALT_UUIDCDEFFEDC",\
.next_uuid = "456UUID_NEXT_ALT",\
.prev_uuid = "UUID012_ALT_PREV",\
.user_flags = "012345USER_FLAGS",\
}
static const struct rpmem_pool_attr POOL_ATTR = POOL_ATTR_INIT;
struct server {
int fd_in;
int fd_out;
};
void set_rpmem_cmd(const char *fmt, ...);
struct server *srv_init(void);
void srv_fini(struct server *s);
void srv_recv(struct server *s, void *buff, size_t len);
void srv_send(struct server *s, const void *buff, size_t len);
void srv_wait_disconnect(struct server *s);
void client_connect_wait(struct rpmem_obc *rpc, char *target);
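/*
 * Illustrative sketch (mirrors the server_*_handle helpers in the
 * individual test files): a server-side test case typically receives a
 * request over the srv_* channel, validates it and sends back a prepared
 * response:
 *
 *	struct server *s = srv_init();
 *	srv_recv(s, &msg, sizeof(msg));
 *	// ... validate msg ...
 *	srv_send(s, &resp, sizeof(resp));
 *	srv_fini(s);
 */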
/*
 * Since, from the client's perspective, the server may close the
 * connection at any moment, execute the test in a loop so that the
 * connection is likely to be dropped at a different moment each time.
*/
#define ECONNRESET_LOOP 10
void server_econnreset(struct server *s, const void *msg, size_t len);
TEST_CASE_DECLARE(client_enotconn);
TEST_CASE_DECLARE(client_connect);
TEST_CASE_DECLARE(client_monitor);
TEST_CASE_DECLARE(server_monitor);
TEST_CASE_DECLARE(server_wait);
TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_create);
TEST_CASE_DECLARE(server_create_econnreset);
TEST_CASE_DECLARE(server_create_eproto);
TEST_CASE_DECLARE(server_create_error);
TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(server_open_econnreset);
TEST_CASE_DECLARE(server_open_eproto);
TEST_CASE_DECLARE(server_open_error);
TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);
TEST_CASE_DECLARE(server_close_econnreset);
TEST_CASE_DECLARE(server_close_eproto);
TEST_CASE_DECLARE(server_close_error);
TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server_set_attr);
TEST_CASE_DECLARE(server_set_attr_econnreset);
TEST_CASE_DECLARE(server_set_attr_eproto);
TEST_CASE_DECLARE(server_set_attr_error);
| 4,466 | 31.369565 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_close.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * rpmem_obc_test_close.c -- test cases for rpmem_obc_close function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_close_resp CLOSE_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_CLOSE_RESP,
.size = sizeof(struct rpmem_msg_close_resp),
.status = 0,
},
};
/*
* check_close_msg -- check close message
*/
static void
check_close_msg(struct rpmem_msg_close *msg)
{
size_t msg_size = sizeof(struct rpmem_msg_close);
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CLOSE);
UT_ASSERTeq(msg->hdr.size, msg_size);
}
/*
* server_close_handle -- handle a close request message
*/
static void
server_close_handle(struct server *s, const struct rpmem_msg_close_resp *resp)
{
struct rpmem_msg_close msg;
srv_recv(s, &msg, sizeof(msg));
rpmem_ntoh_msg_close(&msg);
check_close_msg(&msg);
srv_send(s, resp, sizeof(*resp));
}
/*
* client_close_errno -- perform close request operation and expect
* specified errno
*/
static void
client_close_errno(char *target, int ex_errno)
{
int ret;
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_close(rpc, 0);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
}
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_close_eproto function.
*/
#define CLOSE_EPROTO_COUNT 5
/*
 * server_close_eproto -- send invalid close request responses to a client
*/
int
server_close_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, CLOSE_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_close_resp(&resp);
server_close_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
 * client_close_error -- check that a valid errno is set when an error
 * status is returned
*/
static void
client_close_error(char *target)
{
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_close_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
}
/*
* client_close -- test case for close request operation - client side
*/
int
client_close(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_close_econnreset %d", i % 2);
client_close_errno(target, ECONNRESET);
}
for (int i = 0; i < CLOSE_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_close_eproto %d", i);
client_close_errno(target, EPROTO);
}
client_close_error(target);
set_rpmem_cmd("server_close");
client_close_errno(target, 0);
return 1;
}
/*
* server_close_error -- return error status in close response message
*/
int
server_close_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
resp.hdr.status = e;
rpmem_hton_msg_close_resp(&resp);
server_close_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
 * server_close_econnreset -- test case for closing connection - server side
*/
int
server_close_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
rpmem_hton_msg_close_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_close -- test case for close request operation - server side
*/
int
server_close(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_close_resp resp = CLOSE_RESP;
rpmem_hton_msg_close_resp(&resp);
server_close_handle(s, &resp);
srv_fini(s);
return 0;
}
| 6,240 | 21.777372 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_set_attr.c | /*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * rpmem_obc_test_set_attr.c -- test cases for rpmem_obc_set_attr function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_set_attr_resp SET_ATTR_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_SET_ATTR_RESP,
.size = sizeof(struct rpmem_msg_set_attr_resp),
.status = 0,
}
};
/*
* check_set_attr_msg -- check set attributes message
*/
static void
check_set_attr_msg(struct rpmem_msg_set_attr *msg)
{
size_t msg_size = sizeof(struct rpmem_msg_set_attr);
struct rpmem_pool_attr pool_attr = POOL_ATTR_ALT;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_SET_ATTR);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0);
}
/*
 * server_set_attr_handle -- handle a set attributes request message
*/
static void
server_set_attr_handle(struct server *s,
const struct rpmem_msg_set_attr_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_set_attr);
struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
rpmem_ntoh_msg_set_attr(msg);
check_set_attr_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_set_attr_eproto function.
*/
#define SET_ATTR_EPROTO_COUNT 5
/*
* server_set_attr_eproto -- send invalid set attributes request responses to
* a client
*/
int
server_set_attr_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, SET_ATTR_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_set_attr_resp(&resp);
server_set_attr_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_set_attr_error -- return error status in set attributes response
* message
*/
int
server_set_attr_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
resp.hdr.status = e;
rpmem_hton_msg_set_attr_resp(&resp);
server_set_attr_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_set_attr_econnreset -- test case for closing connection - server side
*/
int
server_set_attr_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
rpmem_hton_msg_set_attr_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
 * server_set_attr -- test case for rpmem_obc_set_attr - server side
*/
int
server_set_attr(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_set_attr_resp resp = SET_ATTR_RESP;
rpmem_hton_msg_set_attr_resp(&resp);
server_set_attr_handle(s, &resp);
srv_fini(s);
return 0;
}
/*
* client_set_attr_init -- initialize communication - client side
*/
static struct rpmem_obc *
client_set_attr_init(char *target)
{
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
return rpc;
}
/*
* client_set_attr_fini -- finalize communication - client side
*/
static void
client_set_attr_fini(struct rpmem_obc *rpc)
{
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
* client_set_attr_errno -- perform set attributes request operation and expect
* specified errno.
*/
static void
client_set_attr_errno(char *target, int ex_errno)
{
struct rpmem_obc *rpc = client_set_attr_init(target);
const struct rpmem_pool_attr pool_attr_alt = POOL_ATTR_ALT;
int ret = rpmem_obc_set_attr(rpc, &pool_attr_alt);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
}
client_set_attr_fini(rpc);
}
/*
 * client_set_attr_error -- check that a valid errno is set when an error
 * status is returned
*/
static void
client_set_attr_error(char *target)
{
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_set_attr_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = client_set_attr_init(target);
const struct rpmem_pool_attr pool_attr_alt = POOL_ATTR_ALT;
ret = rpmem_obc_set_attr(rpc, &pool_attr_alt);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
client_set_attr_fini(rpc);
}
}
/*
* client_set_attr -- test case for set attributes request operation - client
* side
*/
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_set_attr_econnreset %d", i % 2);
client_set_attr_errno(target, ECONNRESET);
}
for (int i = 0; i < SET_ATTR_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_set_attr_eproto %d", i);
client_set_attr_errno(target, EPROTO);
}
client_set_attr_error(target);
set_rpmem_cmd("server_set_attr");
client_set_attr_errno(target, 0);
return 1;
}
| 7,199 | 22.684211 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_create.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_create.c -- test cases for rpmem_obc_create function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_create_resp CREATE_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_CREATE_RESP,
.size = sizeof(struct rpmem_msg_create_resp),
.status = 0,
},
.ibc = {
.port = PORT,
.rkey = RKEY,
.raddr = RADDR,
.persist_method = RPMEM_PM_GPSPM,
.nlanes = NLANES_RESP,
},
};
/*
* check_create_msg -- check create message
*/
static void
check_create_msg(struct rpmem_msg_create *msg)
{
size_t pool_desc_size = strlen(POOL_DESC) + 1;
size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CREATE);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
UT_ASSERTeq(msg->c.provider, PROVIDER);
UT_ASSERTeq(msg->c.nlanes, NLANES);
UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0);
}
/*
* server_create_handle -- handle a create request message
*/
static void
server_create_handle(struct server *s, const struct rpmem_msg_create_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_create) +
strlen(POOL_DESC) + 1;
struct rpmem_msg_create *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
rpmem_ntoh_msg_create(msg);
check_create_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_create_eproto function.
*/
#define CREATE_EPROTO_COUNT 8
/*
* server_create_eproto -- send invalid create request responses to a client
*/
int
server_create_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, CREATE_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
case 5:
resp.ibc.port = 0;
break;
case 6:
resp.ibc.port = UINT16_MAX + 1;
break;
case 7:
resp.ibc.persist_method = MAX_RPMEM_PM;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_create_error -- return an error status in create response message
*/
int
server_create_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
resp.hdr.status = e;
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_create_econnreset -- test case for closing connection - server side
*/
int
server_create_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
rpmem_hton_msg_create_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_create -- test case for rpmem_obc_create function - server side
*/
int
server_create(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 0)
UT_FATAL("usage: %s", tc->name);
struct server *s = srv_init();
struct rpmem_msg_create_resp resp = CREATE_RESP;
rpmem_hton_msg_create_resp(&resp);
server_create_handle(s, &resp);
srv_fini(s);
return 0;
}
/*
 * client_create_errno -- perform a create request operation and expect the
 * specified errno. If ex_errno is zero, expect specific values in the res
 * struct.
*/
static void
client_create_errno(char *target, int ex_errno)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
struct rpmem_resp_attr res;
int ret;
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(res.port, CREATE_RESP.ibc.port);
UT_ASSERTeq(res.rkey, CREATE_RESP.ibc.rkey);
UT_ASSERTeq(res.raddr, CREATE_RESP.ibc.raddr);
UT_ASSERTeq(res.persist_method,
CREATE_RESP.ibc.persist_method);
UT_ASSERTeq(res.nlanes,
CREATE_RESP.ibc.nlanes);
}
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
 * client_create_error -- check that a valid errno is set when an error
 * status is returned
*/
static void
client_create_error(char *target)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;
struct rpmem_resp_attr res;
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_create_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
}
/*
* client_create -- test case for create request operation - client side
*/
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_create_econnreset %d", i % 2);
client_create_errno(target, ECONNRESET);
}
for (int i = 0; i < CREATE_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_create_eproto %d", i);
client_create_errno(target, EPROTO);
}
client_create_error(target);
set_rpmem_cmd("server_create");
client_create_errno(target, 0);
return 1;
}
| 8,157 | 23.136095 | 80 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_common.c | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_common.c -- common definitions for rpmem_obc tests
*/
#include <sys/socket.h>
#include <netinet/in.h>
#include "rpmem_obc_test_common.h"
#include "os.h"
#define CMD_BUFF_SIZE 4096
static const char *rpmem_cmd;
/*
* set_rpmem_cmd -- set RPMEM_CMD variable
*/
void
set_rpmem_cmd(const char *fmt, ...)
{
static char cmd_buff[CMD_BUFF_SIZE];
if (!rpmem_cmd) {
char *cmd = os_getenv(RPMEM_CMD_ENV);
UT_ASSERTne(cmd, NULL);
rpmem_cmd = STRDUP(cmd);
}
ssize_t ret;
size_t cnt = 0;
va_list ap;
va_start(ap, fmt);
ret = snprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt,
"%s ", rpmem_cmd);
UT_ASSERT(ret > 0);
cnt += (size_t)ret;
ret = vsnprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt, fmt, ap);
UT_ASSERT(ret > 0);
cnt += (size_t)ret;
va_end(ap);
ret = os_setenv(RPMEM_CMD_ENV, cmd_buff, 1);
UT_ASSERTeq(ret, 0);
/*
 * Rpmem keeps an internal copy of the RPMEM_CMD variable and it is assumed
 * that RPMEM_CMD will not change its value during execution. To refresh the
 * internal copy it must be destroyed and a new instance must be initialized
 * manually.
*/
rpmem_util_cmds_fini();
rpmem_util_cmds_init();
}
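/*
 * Example taken from the client test cases: select the server-side
 * scenario to be executed for the next connection:
 *
 *	set_rpmem_cmd("server_close_eproto %d", i);
 */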
struct server *
srv_init(void)
{
struct server *s = MALLOC(sizeof(*s));
s->fd_in = STDIN_FILENO;
s->fd_out = STDOUT_FILENO;
uint32_t status = 0;
srv_send(s, &status, sizeof(status));
return s;
}
/*
 * srv_fini -- free the server resources
*/
void
srv_fini(struct server *s)
{
FREE(s);
}
/*
* srv_recv -- read a message from the client
*/
void
srv_recv(struct server *s, void *buff, size_t len)
{
size_t rd = 0;
uint8_t *cbuf = buff;
while (rd < len) {
ssize_t ret = read(s->fd_in, &cbuf[rd], len - rd);
UT_ASSERT(ret > 0);
rd += (size_t)ret;
}
}
/*
* srv_send -- send a message to the client
*/
void
srv_send(struct server *s, const void *buff, size_t len)
{
size_t wr = 0;
const uint8_t *cbuf = buff;
while (wr < len) {
ssize_t ret = write(s->fd_out, &cbuf[wr], len - wr);
UT_ASSERT(ret > 0);
wr += (size_t)ret;
}
}
/*
* client_connect_wait -- wait until client connects to the server
*/
void
client_connect_wait(struct rpmem_obc *rpc, char *target)
{
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
while (rpmem_obc_connect(rpc, info))
;
rpmem_target_free(info);
}
/*
 * server_econnreset -- disconnect from the client while performing an
 * operation
*/
void
server_econnreset(struct server *s, const void *msg, size_t len)
{
for (int i = 0; i < ECONNRESET_LOOP; i++) {
srv_send(s, msg, len);
}
}
| 4,138 | 23.204678 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test.c | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test.c -- unit test for rpmem_obc module
*/
#include "rpmem_obc_test_common.h"
#include "pmemcommon.h"
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(client_enotconn),
TEST_CASE(client_connect),
TEST_CASE(client_create),
TEST_CASE(server_create),
TEST_CASE(server_create_econnreset),
TEST_CASE(server_create_eproto),
TEST_CASE(server_create_error),
TEST_CASE(client_open),
TEST_CASE(server_open),
TEST_CASE(server_open_econnreset),
TEST_CASE(server_open_eproto),
TEST_CASE(server_open_error),
TEST_CASE(client_close),
TEST_CASE(server_close),
TEST_CASE(server_close_econnreset),
TEST_CASE(server_close_eproto),
TEST_CASE(server_close_error),
TEST_CASE(client_monitor),
TEST_CASE(server_monitor),
TEST_CASE(client_set_attr),
TEST_CASE(server_set_attr),
TEST_CASE(server_set_attr_econnreset),
TEST_CASE(server_set_attr_eproto),
TEST_CASE(server_set_attr_error),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "rpmem_obc");
common_init("rpmem_obc",
"RPMEM_LOG_LEVEL",
"RPMEM_LOG_FILE", 0, 0);
rpmem_util_cmds_init();
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
rpmem_util_cmds_fini();
common_fini();
DONE(NULL);
}
| 2,903 | 29.893617 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_open.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_open.c -- test cases for rpmem_obj_open function
*/
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_open_resp OPEN_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_OPEN_RESP,
.size = sizeof(struct rpmem_msg_open_resp),
.status = 0,
},
.ibc = {
.port = PORT,
.rkey = RKEY,
.raddr = RADDR,
.persist_method = RPMEM_PM_GPSPM,
.nlanes = NLANES_RESP,
},
.pool_attr = POOL_ATTR_INIT,
};
/*
 * check_open_msg -- verify fields of a received open request message
*/
static void
check_open_msg(struct rpmem_msg_open *msg)
{
size_t pool_desc_size = strlen(POOL_DESC) + 1;
size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_OPEN);
UT_ASSERTeq(msg->hdr.size, msg_size);
UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
UT_ASSERTeq(msg->c.provider, PROVIDER);
UT_ASSERTeq(msg->c.nlanes, NLANES);
UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
}
/*
* server_open_handle -- handle an open request message
*/
static void
server_open_handle(struct server *s, const struct rpmem_msg_open_resp *resp)
{
size_t msg_size = sizeof(struct rpmem_msg_open) +
strlen(POOL_DESC) + 1;
struct rpmem_msg_open *msg = MALLOC(msg_size);
srv_recv(s, msg, msg_size);
rpmem_ntoh_msg_open(msg);
check_open_msg(msg);
srv_send(s, resp, sizeof(*resp));
FREE(msg);
}
/*
* Number of cases for EPROTO test. Must be kept in sync with the
* server_open_eproto function.
*/
#define OPEN_EPROTO_COUNT 8
/*
* server_open_eproto -- send invalid open request responses to a client
*/
int
server_open_eproto(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, OPEN_EPROTO_COUNT - 1);
int i = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
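	/* corrupt exactly one field of an otherwise valid response */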
switch (i) {
case 0:
resp.hdr.type = MAX_RPMEM_MSG_TYPE;
break;
case 1:
resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP;
break;
case 2:
resp.hdr.size -= 1;
break;
case 3:
resp.hdr.size += 1;
break;
case 4:
resp.hdr.status = MAX_RPMEM_ERR;
break;
case 5:
resp.ibc.port = 0;
break;
case 6:
resp.ibc.port = UINT16_MAX + 1;
break;
case 7:
resp.ibc.persist_method = MAX_RPMEM_PM;
break;
default:
UT_ASSERT(0);
break;
}
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
* server_open_error -- return error status in open response message
*/
int
server_open_error(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);
enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
resp.hdr.status = e;
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 1;
}
/*
 * server_open_econnreset -- disconnect during open request to cause
 * ECONNRESET - server side
*/
int
server_open_econnreset(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s 0|1", tc->name);
int do_send = atoi(argv[0]);
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
rpmem_hton_msg_open_resp(&resp);
if (do_send)
srv_send(s, &resp, sizeof(resp) / 2);
srv_fini(s);
return 1;
}
/*
* server_open -- test case for open request message - server side
*/
int
server_open(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_open_resp resp = OPEN_RESP;
rpmem_hton_msg_open_resp(&resp);
server_open_handle(s, &resp);
srv_fini(s);
return 0;
}
/*
 * client_open_errno -- perform an open request and expect the specified
 * errno. If ex_errno is zero, expect the response and pool attributes
 * to match the values from OPEN_RESP.
*/
static void
client_open_errno(char *target, int ex_errno)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_resp_attr res;
int ret;
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
if (ex_errno) {
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
} else {
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(res.port, OPEN_RESP.ibc.port);
UT_ASSERTeq(res.rkey, OPEN_RESP.ibc.rkey);
UT_ASSERTeq(res.raddr, OPEN_RESP.ibc.raddr);
UT_ASSERTeq(res.persist_method,
OPEN_RESP.ibc.persist_method);
UT_ASSERTeq(res.nlanes,
OPEN_RESP.ibc.nlanes);
UT_ASSERTeq(memcmp(pool_attr.signature,
OPEN_RESP.pool_attr.signature,
RPMEM_POOL_HDR_SIG_LEN), 0);
UT_ASSERTeq(pool_attr.major, OPEN_RESP.pool_attr.major);
UT_ASSERTeq(pool_attr.compat_features,
OPEN_RESP.pool_attr.compat_features);
UT_ASSERTeq(pool_attr.incompat_features,
OPEN_RESP.pool_attr.incompat_features);
UT_ASSERTeq(pool_attr.ro_compat_features,
OPEN_RESP.pool_attr.ro_compat_features);
UT_ASSERTeq(memcmp(pool_attr.poolset_uuid,
OPEN_RESP.pool_attr.poolset_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.uuid,
OPEN_RESP.pool_attr.uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.next_uuid,
OPEN_RESP.pool_attr.next_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.prev_uuid,
OPEN_RESP.pool_attr.prev_uuid,
RPMEM_POOL_HDR_UUID_LEN), 0);
UT_ASSERTeq(memcmp(pool_attr.user_flags,
OPEN_RESP.pool_attr.user_flags,
RPMEM_POOL_USER_FLAGS_LEN), 0);
}
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
/*
 * client_open_error -- check that a valid errno is set when an error
 * status is returned
*/
static void
client_open_error(char *target)
{
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
.buff_size = BUFF_SIZE,
};
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_resp_attr res;
int ret;
for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
set_rpmem_cmd("server_open_error %d", e);
int ex_errno = rpmem_util_proto_errno(e);
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
client_connect_wait(rpc, target);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ex_errno);
rpmem_obc_disconnect(rpc);
rpmem_obc_fini(rpc);
}
}
/*
* client_open -- test case for open request message - client side
*/
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
for (int i = 0; i < ECONNRESET_LOOP; i++) {
set_rpmem_cmd("server_open_econnreset %d", i % 2);
client_open_errno(target, ECONNRESET);
}
for (int i = 0; i < OPEN_EPROTO_COUNT; i++) {
set_rpmem_cmd("server_open_eproto %d", i);
client_open_errno(target, EPROTO);
}
client_open_error(target);
set_rpmem_cmd("server_open");
client_open_errno(target, 0);
return 1;
}
| 8,942 | 23.70442 | 76 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_obc/rpmem_obc_test_misc.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc_test_misc.c -- miscellaneous test cases for rpmem_obc module
*/
#include <netdb.h>
#include "rpmem_obc_test_common.h"
static const struct rpmem_msg_close_resp CLOSE_RESP = {
.hdr = {
.type = RPMEM_MSG_TYPE_CLOSE_RESP,
.size = sizeof(struct rpmem_msg_close_resp),
.status = 0,
},
};
/*
* client_enotconn -- check if ENOTCONN error is returned after
* calling rpmem_obc API without connecting to the server.
*/
int
client_enotconn(const struct test_case *tc, int argc, char *argv[])
{
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
struct rpmem_req_attr req = {
.pool_size = POOL_SIZE,
.nlanes = NLANES,
.provider = PROVIDER,
.pool_desc = POOL_DESC,
};
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
struct rpmem_resp_attr res;
int ret;
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOTCONN);
ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOTCONN);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOTCONN);
ret = rpmem_obc_disconnect(rpc);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOTCONN);
rpmem_obc_fini(rpc);
return 0;
}
/*
* client_connect -- try to connect to the server at specified address and port
*/
int
client_connect(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]...", tc->name);
for (int i = 0; i < argc; i++) {
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
struct rpmem_target_info *info;
info = rpmem_target_parse(argv[i]);
UT_ASSERTne(info, NULL);
int ret = rpmem_obc_connect(rpc, info);
if (ret) {
UT_OUT("not connected: %s: %s", argv[i],
out_get_errormsg());
} else {
UT_OUT(" connected: %s", argv[i]);
rpmem_obc_disconnect(rpc);
}
rpmem_target_free(info);
rpmem_obc_fini(rpc);
}
return argc;
}
/*
 * server_monitor -- test case for rpmem_obc_monitor function - server side
*/
int
server_monitor(const struct test_case *tc, int argc, char *argv[])
{
struct server *s = srv_init();
struct rpmem_msg_close close;
struct rpmem_msg_close_resp resp = CLOSE_RESP;
rpmem_hton_msg_close_resp(&resp);
srv_recv(s, &close, sizeof(close));
srv_send(s, &resp, sizeof(resp));
srv_fini(s);
return 0;
}
/*
 * client_monitor -- test case for rpmem_obc_monitor function - client side
*/
int
client_monitor(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <addr>[:<port>]", tc->name);
char *target = argv[0];
set_rpmem_cmd("server_monitor");
{
/*
* Connect to target node, check connection state before
* and after disconnecting.
*/
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
int ret = rpmem_obc_connect(rpc, info);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_disconnect(rpc);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTne(ret, 1);
rpmem_target_free(info);
rpmem_obc_fini(rpc);
}
{
/*
* Connect to target node and expect that server will
* disconnect.
*/
struct rpmem_obc *rpc = rpmem_obc_init();
UT_ASSERTne(rpc, NULL);
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
int ret = rpmem_obc_connect(rpc, info);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 1);
UT_ASSERTeq(ret, 1);
ret = rpmem_obc_close(rpc, 0);
UT_ASSERTeq(ret, 0);
ret = rpmem_obc_monitor(rpc, 0);
UT_ASSERTne(ret, 1);
rpmem_obc_disconnect(rpc);
rpmem_target_free(info);
rpmem_obc_fini(rpc);
}
return 1;
}
| 5,490 | 23.846154 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/obj_out_of_memory/obj_out_of_memory.c | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_out_of_memory.c -- allocate objects until OOM
*/
#include <stdlib.h>
#include "unittest.h"
#define LAYOUT_NAME "out_of_memory"
struct cargs {
size_t size;
};
static int
test_constructor(PMEMobjpool *pop, void *addr, void *args)
{
struct cargs *a = args;
pmemobj_memset_persist(pop, addr, rand() % 256, a->size / 2);
return 0;
}
static void
test_alloc(PMEMobjpool *pop, size_t size)
{
unsigned long cnt = 0;
while (1) {
struct cargs args = { size };
if (pmemobj_alloc(pop, NULL, size, 0,
test_constructor, &args) != 0)
break;
cnt++;
}
UT_OUT("size: %zu allocs: %lu", size, cnt);
}
static void
test_free(PMEMobjpool *pop)
{
PMEMoid oid;
PMEMoid next;
POBJ_FOREACH_SAFE(pop, oid, next)
pmemobj_free(&oid);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_out_of_memory");
if (argc < 3)
UT_FATAL("usage: %s size filename ...", argv[0]);
size_t size = ATOUL(argv[1]);
for (int i = 2; i < argc; i++) {
const char *path = argv[i];
PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
test_alloc(pop, size);
pmemobj_close(pop);
UT_ASSERTeq(pmemobj_check(path, LAYOUT_NAME), 1);
/*
* To prevent subsequent opens from receiving exactly the same
* volatile memory addresses a dummy malloc has to be made.
* This can expose issues in which traces of previous volatile
* state are leftover in the persistent pool.
*/
void *heap_touch = MALLOC(1);
UT_ASSERTne(pop = pmemobj_open(path, LAYOUT_NAME), NULL);
test_free(pop);
pmemobj_close(pop);
FREE(heap_touch);
}
DONE(NULL);
}
| 3,270 | 25.379032 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmemd_db/rpmemd_db_test.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_db_test.c -- unit test for pool set database
*
* usage: rpmemd_db <log-file> <root_dir> <pool_desc_1> <pool_desc_2>
*/
#include "file.h"
#include "unittest.h"
#include "librpmem.h"
#include "rpmemd_db.h"
#include "rpmemd_log.h"
#include "util_pmem.h"
#include "set.h"
#include "out.h"
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#define POOL_MODE 0644
#define FAILED_FUNC(func_name) \
UT_ERR("!%s(): %s() failed", __func__, func_name);
#define FAILED_FUNC_PARAM(func_name, param) \
UT_ERR("!%s(): %s(%s) failed", __func__, func_name, param);
#define NPOOLS_DUAL 2
#define POOL_ATTR_CREATE 0
#define POOL_ATTR_OPEN 1
#define POOL_ATTR_SET_ATTR 2
#define POOL_STATE_INITIAL 0
#define POOL_STATE_CREATED 1
#define POOL_STATE_OPENED 2
#define POOL_STATE_CLOSED POOL_STATE_CREATED
#define POOL_STATE_REMOVED POOL_STATE_INITIAL
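/*
 * Pool life-cycle states, used to unwind partially completed dual tests.
 */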
/*
* fill_rand -- fill a buffer with random values
*/
static void
fill_rand(void *addr, size_t len)
{
unsigned char *buff = addr;
srand(time(NULL));
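	/* random lowercase letters in the range 'a'..'y' */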
for (unsigned i = 0; i < len; i++)
buff[i] = (rand() % ('z' - 'a')) + 'a';
}
/*
* test_init -- test rpmemd_db_init() and rpmemd_db_fini()
*/
static int
test_init(const char *root_dir)
{
struct rpmemd_db *db;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
rpmemd_db_fini(db);
return 0;
}
/*
* test_check_dir -- test rpmemd_db_check_dir()
*/
static int
test_check_dir(const char *root_dir)
{
struct rpmemd_db *db;
int ret;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
ret = rpmemd_db_check_dir(db);
if (ret) {
FAILED_FUNC("rpmemd_db_check_dir");
}
rpmemd_db_fini(db);
return ret;
}
/*
* test_create -- test rpmemd_db_pool_create()
*/
static int
test_create(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr;
memset(&attr, 0, sizeof(attr));
attr.incompat_features = 2;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto fini;
}
rpmemd_db_pool_close(db, prp);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_create_dual -- dual test for rpmemd_db_pool_create()
*/
static int
test_create_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr1;
memset(&attr1, 0, sizeof(attr1));
attr1.incompat_features = 2;
struct rpmemd_db_pool *prp1, *prp2;
struct rpmemd_db *db;
int ret = -1;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
/* test dual create */
prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1);
goto err_create_1;
}
prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2);
goto err_create_2;
}
rpmemd_db_pool_close(db, prp2);
rpmemd_db_pool_close(db, prp1);
ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2);
goto err_remove_2;
}
ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1);
}
goto fini;
err_create_2:
rpmemd_db_pool_close(db, prp1);
err_remove_2:
rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
err_create_1:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* compare_attr -- compare pool's attributes
*/
static void
compare_attr(struct rpmem_pool_attr *a1, struct rpmem_pool_attr *a2)
{
char *msg;
if (a1->major != a2->major) {
msg = "major";
goto err_mismatch;
}
if (a1->compat_features != a2->compat_features) {
msg = "compat_features";
goto err_mismatch;
}
if (a1->incompat_features != a2->incompat_features) {
msg = "incompat_features";
goto err_mismatch;
}
if (a1->ro_compat_features != a2->ro_compat_features) {
msg = "ro_compat_features";
goto err_mismatch;
}
if (memcmp(a1->signature, a2->signature, RPMEM_POOL_HDR_SIG_LEN)) {
msg = "signature";
goto err_mismatch;
}
if (memcmp(a1->poolset_uuid, a2->poolset_uuid,
RPMEM_POOL_HDR_UUID_LEN)) {
msg = "poolset_uuid";
goto err_mismatch;
}
if (memcmp(a1->uuid, a2->uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "uuid";
goto err_mismatch;
}
if (memcmp(a1->next_uuid, a2->next_uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "next_uuid";
goto err_mismatch;
}
if (memcmp(a1->prev_uuid, a2->prev_uuid, RPMEM_POOL_HDR_UUID_LEN)) {
msg = "prev_uuid";
goto err_mismatch;
}
return;
err_mismatch:
errno = EINVAL;
UT_FATAL("%s(): pool attributes mismatch (%s)", __func__, msg);
}
/*
* test_open -- test rpmemd_db_pool_open()
*/
static int
test_open(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr1, attr2;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr1, sizeof(attr1));
attr1.major = 1;
attr1.incompat_features = 2;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr1);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto fini;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr2);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto fini;
}
rpmemd_db_pool_close(db, prp);
compare_attr(&attr1, &attr2);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_open_dual -- dual test for rpmemd_db_pool_open()
*/
static int
test_open_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr1a, attr2a, attr1b, attr2b;
struct rpmemd_db_pool *prp1, *prp2;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr1a, sizeof(attr1a));
fill_rand(&attr1b, sizeof(attr1b));
attr1a.major = 1;
attr1a.incompat_features = 2;
attr1b.major = 1;
attr1b.incompat_features = 2;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1a);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1);
goto err_create_1;
}
rpmemd_db_pool_close(db, prp1);
prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1b);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2);
goto err_create_2;
}
rpmemd_db_pool_close(db, prp2);
/* test dual open */
prp1 = rpmemd_db_pool_open(db, pool_desc_1, 0, &attr2a);
if (prp1 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_1);
goto err_open_1;
}
prp2 = rpmemd_db_pool_open(db, pool_desc_2, 0, &attr2b);
if (prp2 == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_2);
goto err_open_2;
}
rpmemd_db_pool_close(db, prp1);
rpmemd_db_pool_close(db, prp2);
compare_attr(&attr1a, &attr2a);
compare_attr(&attr1b, &attr2b);
ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2);
goto err_remove_2;
}
ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1);
}
goto fini;
err_open_2:
rpmemd_db_pool_close(db, prp1);
err_open_1:
rpmemd_db_pool_remove(db, pool_desc_2, 0, 0);
err_create_2:
err_remove_2:
rpmemd_db_pool_remove(db, pool_desc_1, 0, 0);
err_create_1:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_set_attr -- test rpmemd_db_pool_set_attr()
*/
static int
test_set_attr(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr[3];
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret = -1;
fill_rand(&attr[POOL_ATTR_CREATE], sizeof(attr[POOL_ATTR_CREATE]));
fill_rand(&attr[POOL_ATTR_SET_ATTR], sizeof(attr[POOL_ATTR_SET_ATTR]));
attr[POOL_ATTR_CREATE].major = 1;
attr[POOL_ATTR_CREATE].incompat_features = 2;
attr[POOL_ATTR_SET_ATTR].major = 1;
attr[POOL_ATTR_SET_ATTR].incompat_features = 2;
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr[POOL_ATTR_CREATE]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_create");
goto err_create;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto err_open;
}
compare_attr(&attr[POOL_ATTR_CREATE], &attr[POOL_ATTR_OPEN]);
ret = rpmemd_db_pool_set_attr(prp, &attr[POOL_ATTR_SET_ATTR]);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_set_attr");
goto err_set_attr;
}
rpmemd_db_pool_close(db, prp);
prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]);
if (prp == NULL) {
FAILED_FUNC("rpmemd_db_pool_open");
goto err_open;
}
compare_attr(&attr[POOL_ATTR_SET_ATTR], &attr[POOL_ATTR_OPEN]);
rpmemd_db_pool_close(db, prp);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
if (ret) {
FAILED_FUNC("rpmemd_db_pool_remove");
}
goto fini;
err_set_attr:
rpmemd_db_pool_close(db, prp);
err_open:
rpmemd_db_pool_remove(db, pool_desc, 0, 0);
err_create:
fini:
rpmemd_db_fini(db);
return ret;
}
/*
* test_set_attr_dual -- dual test for rpmemd_db_pool_set_attr()
*/
static int
test_set_attr_dual(const char *root_dir, const char *pool_desc_1,
const char *pool_desc_2)
{
struct rpmem_pool_attr attr[NPOOLS_DUAL][3];
struct rpmemd_db_pool *prp[NPOOLS_DUAL];
const char *pool_desc[NPOOLS_DUAL] = {pool_desc_1, pool_desc_2};
unsigned pool_state[NPOOLS_DUAL] = {POOL_STATE_INITIAL};
struct rpmemd_db *db;
int ret = -1;
/* initialize rpmem database */
db = rpmemd_db_init(root_dir, POOL_MODE);
if (db == NULL) {
FAILED_FUNC("rpmemd_db_init");
return -1;
}
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
/*
* generate random pool attributes for create and set
* attributes operations
*/
fill_rand(&attr[p][POOL_ATTR_CREATE],
sizeof(attr[p][POOL_ATTR_CREATE]));
fill_rand(&attr[p][POOL_ATTR_SET_ATTR],
sizeof(attr[p][POOL_ATTR_SET_ATTR]));
attr[p][POOL_ATTR_CREATE].major = 1;
attr[p][POOL_ATTR_CREATE].incompat_features = 2;
attr[p][POOL_ATTR_SET_ATTR].major = 1;
attr[p][POOL_ATTR_SET_ATTR].incompat_features = 2;
/* create pool */
prp[p] = rpmemd_db_pool_create(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_CREATE]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_create",
pool_desc[p]);
goto err;
}
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CREATED;
}
/* open pools and check pool attributes */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_OPEN]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]);
goto err;
}
pool_state[p] = POOL_STATE_OPENED;
compare_attr(&attr[p][POOL_ATTR_CREATE],
&attr[p][POOL_ATTR_OPEN]);
}
/* set attributes and close pools */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
ret = rpmemd_db_pool_set_attr(prp[p],
&attr[p][POOL_ATTR_SET_ATTR]);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_set_attr",
pool_desc[p]);
goto err;
}
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CLOSED;
}
/* open pools and check attributes */
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0,
&attr[p][POOL_ATTR_OPEN]);
if (prp[p] == NULL) {
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]);
goto err;
}
pool_state[p] = POOL_STATE_OPENED;
compare_attr(&attr[p][POOL_ATTR_SET_ATTR],
&attr[p][POOL_ATTR_OPEN]);
}
err:
for (unsigned p = 0; p < NPOOLS_DUAL; ++p) {
if (pool_state[p] == POOL_STATE_OPENED) {
rpmemd_db_pool_close(db, prp[p]);
pool_state[p] = POOL_STATE_CLOSED;
}
if (pool_state[p] == POOL_STATE_CREATED) {
ret = rpmemd_db_pool_remove(db, pool_desc[p], 0, 0);
if (ret) {
FAILED_FUNC_PARAM("rpmemd_db_pool_remove",
pool_desc[p]);
}
pool_state[p] = POOL_STATE_REMOVED;
}
}
rpmemd_db_fini(db);
return ret;
}
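/*
 * exists_cb -- poolset part iteration callback, checks that part file exists
 */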
static int
exists_cb(struct part_file *pf, void *arg)
{
return util_file_exists(pf->part->path);
}
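/*
 * noexists_cb -- poolset part iteration callback, checks that part file
 * does not exist
 */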
static int
noexists_cb(struct part_file *pf, void *arg)
{
int exists = util_file_exists(pf->part->path);
if (exists < 0)
return -1;
else
return !exists;
}
/*
* test_remove -- test for rpmemd_db_pool_remove()
*/
static void
test_remove(const char *root_dir, const char *pool_desc)
{
struct rpmem_pool_attr attr;
struct rpmemd_db_pool *prp;
struct rpmemd_db *db;
int ret;
char path[PATH_MAX];
snprintf(path, PATH_MAX, "%s/%s", root_dir, pool_desc);
fill_rand(&attr, sizeof(attr));
strncpy((char *)attr.poolset_uuid, "TEST", sizeof(attr.poolset_uuid));
attr.incompat_features = 2;
db = rpmemd_db_init(root_dir, POOL_MODE);
UT_ASSERTne(db, NULL);
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
UT_ASSERTne(prp, NULL);
rpmemd_db_pool_close(db, prp);
ret = util_poolset_foreach_part(path, exists_cb, NULL);
UT_ASSERTeq(ret, 1);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0);
UT_ASSERTeq(ret, 0);
ret = util_poolset_foreach_part(path, noexists_cb, NULL);
UT_ASSERTeq(ret, 1);
prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr);
UT_ASSERTne(prp, NULL);
rpmemd_db_pool_close(db, prp);
ret = rpmemd_db_pool_remove(db, pool_desc, 0, 1);
UT_ASSERTeq(ret, 0);
ret = util_file_exists(path);
UT_ASSERTne(ret, 1);
rpmemd_db_fini(db);
}
int
main(int argc, char *argv[])
{
char *pool_desc[2], *log_file;
char root_dir[PATH_MAX];
START(argc, argv, "rpmemd_db");
util_init();
out_init("rpmemd_db", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0);
if (argc != 5)
UT_FATAL("usage: %s <log-file> <root_dir> <pool_desc_1>"
" <pool_desc_2>", argv[0]);
log_file = argv[1];
if (realpath(argv[2], root_dir) == NULL)
UT_FATAL("!realpath(%s)", argv[1]);
pool_desc[0] = argv[3];
pool_desc[1] = argv[4];
if (rpmemd_log_init("rpmemd error: ", log_file, 0))
FAILED_FUNC("rpmemd_log_init");
test_init(root_dir);
test_check_dir(root_dir);
test_create(root_dir, pool_desc[0]);
test_create_dual(root_dir, pool_desc[0], pool_desc[1]);
test_open(root_dir, pool_desc[0]);
test_open_dual(root_dir, pool_desc[0], pool_desc[1]);
test_set_attr(root_dir, pool_desc[0]);
test_set_attr_dual(root_dir, pool_desc[0], pool_desc[1]);
test_remove(root_dir, pool_desc[0]);
rpmemd_log_close();
out_fini();
DONE(NULL);
}
| 16,549 | 23.701493 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_fip/rpmem_fip_test.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_fip_test.c -- tests for rpmem_fip and rpmemd_fip modules
*/
#include <netdb.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "unittest.h"
#include "pmemcommon.h"
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_fip_common.h"
#include "rpmem_fip_oob.h"
#include "rpmemd_fip.h"
#include "rpmemd_log.h"
#include "rpmemd_util.h"
#include "rpmem_fip.h"
#include "os.h"
#define SIZE_PER_LANE 64
#define COUNT_PER_LANE 32
#define NLANES 1024
#define SOCK_NLANES 32
#define NTHREADS 32
#define TOTAL_PER_LANE (SIZE_PER_LANE * COUNT_PER_LANE)
#define POOL_SIZE (NLANES * TOTAL_PER_LANE)
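/* each lane operates on COUNT_PER_LANE blocks of SIZE_PER_LANE bytes */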
static uint8_t lpool[POOL_SIZE];
static uint8_t rpool[POOL_SIZE];
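/* local and remote pool buffers, compared after persist/read round-trips */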
TEST_CASE_DECLARE(client_init);
TEST_CASE_DECLARE(server_init);
TEST_CASE_DECLARE(client_connect);
TEST_CASE_DECLARE(server_connect);
TEST_CASE_DECLARE(server_process);
TEST_CASE_DECLARE(client_persist);
TEST_CASE_DECLARE(client_persist_mt);
TEST_CASE_DECLARE(client_read);
/*
* get_persist_method -- parse persist method
*/
static enum rpmem_persist_method
get_persist_method(const char *pm)
{
if (strcmp(pm, "GPSPM") == 0)
return RPMEM_PM_GPSPM;
else if (strcmp(pm, "APM") == 0)
return RPMEM_PM_APM;
else
UT_FATAL("unknown method");
}
/*
* get_provider -- get provider for given target
*/
static enum rpmem_provider
get_provider(const char *target, const char *prov_name, unsigned *nlanes)
{
struct rpmem_fip_probe probe;
int ret;
int any = 0;
if (strcmp(prov_name, "any") == 0)
any = 1;
ret = rpmem_fip_probe_get(target, &probe);
UT_ASSERTeq(ret, 0);
UT_ASSERT(rpmem_fip_probe_any(probe));
enum rpmem_provider provider;
if (any) {
		/* prefer the verbs provider if it is available */
if (rpmem_fip_probe(probe,
RPMEM_PROV_LIBFABRIC_VERBS))
provider = RPMEM_PROV_LIBFABRIC_VERBS;
else if (rpmem_fip_probe(probe,
RPMEM_PROV_LIBFABRIC_SOCKETS))
provider = RPMEM_PROV_LIBFABRIC_SOCKETS;
else
UT_ASSERT(0);
} else {
provider = rpmem_provider_from_str(prov_name);
UT_ASSERTne(provider, RPMEM_PROV_UNKNOWN);
UT_ASSERT(rpmem_fip_probe(probe, provider));
}
/*
	 * Decrease the number of lanes for the sockets provider,
	 * otherwise the test would take too long.
*/
if (provider == RPMEM_PROV_LIBFABRIC_SOCKETS)
*nlanes = min(*nlanes, SOCK_NLANES);
return provider;
}
/*
* set_pool_data -- set pools data to well known values
*/
static void
set_pool_data(uint8_t *pool, int inverse)
{
for (unsigned l = 0; l < NLANES; l++) {
for (unsigned i = 0; i < COUNT_PER_LANE; i++) {
size_t offset = l * TOTAL_PER_LANE + i * SIZE_PER_LANE;
unsigned val = i + l;
if (inverse)
val = ~val;
memset(&pool[offset], (int)val, SIZE_PER_LANE);
}
}
}
/*
* persist_arg -- arguments for client persist thread
*/
struct persist_arg {
struct rpmem_fip *fip;
unsigned lane;
};
/*
* client_persist_thread -- thread callback for persist operation
*/
static void *
client_persist_thread(void *arg)
{
struct persist_arg *args = arg;
int ret;
/* persist with len == 0 should always succeed */
ret = rpmem_fip_persist(args->fip, args->lane * TOTAL_PER_LANE,
0, args->lane, RPMEM_PERSIST_WRITE);
UT_ASSERTeq(ret, 0);
for (unsigned i = 0; i < COUNT_PER_LANE; i++) {
size_t offset = args->lane * TOTAL_PER_LANE + i * SIZE_PER_LANE;
unsigned val = args->lane + i;
memset(&lpool[offset], (int)val, SIZE_PER_LANE);
ret = rpmem_fip_persist(args->fip, offset,
SIZE_PER_LANE, args->lane, RPMEM_PERSIST_WRITE);
UT_ASSERTeq(ret, 0);
}
return NULL;
}
/*
* client_init -- test case for client initialization
*/
int
client_init(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
set_rpmem_cmd("server_init %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
unsigned nlanes = NLANES;
enum rpmem_provider provider = get_provider(info->node,
prov_name, &nlanes);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, nlanes, provider, &resp);
struct rpmem_fip_attr attr = {
.provider = provider,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
/*
* Tune the maximum number of lanes according to environment.
*/
rpmem_util_get_env_max_nlanes(&Rpmem_max_nlanes);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
UT_ASSERTne(fip, NULL);
client_close_begin(client);
client_close_end(client);
rpmem_fip_fini(fip);
rpmem_target_free(info);
return 3;
}
/*
* server_init -- test case for server initialization
*/
int
server_init(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <persist method>", tc->name);
enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
unsigned nlanes;
enum rpmem_provider provider;
char *addr = NULL;
int ret;
server_exchange_begin(&nlanes, &provider, &addr);
UT_ASSERTne(addr, NULL);
struct rpmemd_fip_attr attr = {
.addr = rpool,
.size = POOL_SIZE,
.nlanes = nlanes,
.provider = provider,
.persist_method = persist_method,
.nthreads = NTHREADS,
};
ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
&attr.memcpy_persist,
1 /* is pmem */);
UT_ASSERTeq(ret, 0);
struct rpmem_resp_attr resp;
struct rpmemd_fip *fip;
enum rpmem_err err;
fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
UT_ASSERTne(fip, NULL);
server_exchange_end(resp);
server_close_begin();
server_close_end();
rpmemd_fip_fini(fip);
FREE(addr);
return 1;
}
/*
* client_connect -- test case for establishing connection - client side
*/
int
client_connect(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
set_rpmem_cmd("server_connect %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
int ret;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
unsigned nlanes = NLANES;
enum rpmem_provider provider = get_provider(info->node,
prov_name, &nlanes);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, nlanes, provider, &resp);
struct rpmem_fip_attr attr = {
.provider = provider,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
UT_ASSERTne(fip, NULL);
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
rpmem_target_free(info);
return 3;
}
/*
* server_connect -- test case for establishing connection - server side
*/
int
server_connect(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <persist method>", tc->name);
enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
unsigned nlanes;
enum rpmem_provider provider;
char *addr = NULL;
server_exchange_begin(&nlanes, &provider, &addr);
UT_ASSERTne(addr, NULL);
struct rpmemd_fip_attr attr = {
.addr = rpool,
.size = POOL_SIZE,
.nlanes = nlanes,
.provider = provider,
.persist_method = persist_method,
.nthreads = NTHREADS,
};
int ret;
struct rpmem_resp_attr resp;
struct rpmemd_fip *fip;
enum rpmem_err err;
ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
&attr.memcpy_persist,
1 /* is pmem */);
UT_ASSERTeq(ret, 0);
fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
UT_ASSERTne(fip, NULL);
server_exchange_end(resp);
ret = rpmemd_fip_accept(fip, -1);
UT_ASSERTeq(ret, 0);
server_close_begin();
server_close_end();
ret = rpmemd_fip_wait_close(fip, -1);
UT_ASSERTeq(ret, 0);
ret = rpmemd_fip_close(fip);
UT_ASSERTeq(ret, 0);
rpmemd_fip_fini(fip);
FREE(addr);
return 1;
}
/*
* server_process -- test case for processing data on server side
*/
int
server_process(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: %s <persist method>", tc->name);
enum rpmem_persist_method persist_method = get_persist_method(argv[0]);
set_pool_data(rpool, 1);
unsigned nlanes;
enum rpmem_provider provider;
char *addr = NULL;
server_exchange_begin(&nlanes, &provider, &addr);
UT_ASSERTne(addr, NULL);
struct rpmemd_fip_attr attr = {
.addr = rpool,
.size = POOL_SIZE,
.nlanes = nlanes,
.provider = provider,
.persist_method = persist_method,
.nthreads = NTHREADS,
};
int ret;
struct rpmem_resp_attr resp;
struct rpmemd_fip *fip;
enum rpmem_err err;
ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist,
&attr.memcpy_persist,
1 /* is pmem */);
UT_ASSERTeq(ret, 0);
fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err);
UT_ASSERTne(fip, NULL);
server_exchange_end(resp);
ret = rpmemd_fip_accept(fip, -1);
UT_ASSERTeq(ret, 0);
ret = rpmemd_fip_process_start(fip);
server_close_begin();
ret = rpmemd_fip_process_stop(fip);
UT_ASSERTeq(ret, 0);
server_close_end();
ret = rpmemd_fip_wait_close(fip, -1);
UT_ASSERTeq(ret, 0);
ret = rpmemd_fip_close(fip);
UT_ASSERTeq(ret, 0);
rpmemd_fip_fini(fip);
FREE(addr);
return 1;
}
/*
* client_persist -- test case for single-threaded persist operation
*/
int
client_persist(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
set_rpmem_cmd("server_process %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
int ret;
set_pool_data(lpool, 1);
set_pool_data(rpool, 1);
unsigned nlanes = NLANES;
enum rpmem_provider provider = get_provider(info->node,
prov_name, &nlanes);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, nlanes, provider, &resp);
struct rpmem_fip_attr attr = {
.provider = provider,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
UT_ASSERTne(fip, NULL);
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
struct persist_arg arg = {
.fip = fip,
.lane = 0,
};
client_persist_thread(&arg);
ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
ret = memcmp(rpool, lpool, POOL_SIZE);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
return 3;
}
/*
* client_persist_mt -- test case for multi-threaded persist operation
*/
int
client_persist_mt(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
set_rpmem_cmd("server_process %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
int ret;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
set_pool_data(lpool, 1);
set_pool_data(rpool, 1);
unsigned nlanes = NLANES;
enum rpmem_provider provider = get_provider(info->node,
prov_name, &nlanes);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, nlanes, provider, &resp);
struct rpmem_fip_attr attr = {
.provider = provider,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
UT_ASSERTne(fip, NULL);
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
os_thread_t *persist_thread = MALLOC(resp.nlanes * sizeof(os_thread_t));
struct persist_arg *args = MALLOC(resp.nlanes *
sizeof(struct persist_arg));
for (unsigned i = 0; i < nlanes; i++) {
args[i].fip = fip;
args[i].lane = i;
PTHREAD_CREATE(&persist_thread[i], NULL,
client_persist_thread, &args[i]);
}
for (unsigned i = 0; i < nlanes; i++)
PTHREAD_JOIN(&persist_thread[i], NULL);
ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
FREE(persist_thread);
FREE(args);
ret = memcmp(rpool, lpool, POOL_SIZE);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
return 3;
}
/*
* client_read -- test case for read operation
*/
int
client_read(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL("usage: %s <target> <provider> <persist method>",
tc->name);
char *target = argv[0];
char *prov_name = argv[1];
char *persist_method = argv[2];
set_rpmem_cmd("server_process %s", persist_method);
char fip_service[NI_MAXSERV];
struct rpmem_target_info *info;
int ret;
info = rpmem_target_parse(target);
UT_ASSERTne(info, NULL);
set_pool_data(lpool, 0);
set_pool_data(rpool, 1);
unsigned nlanes = NLANES;
enum rpmem_provider provider = get_provider(info->node,
prov_name, &nlanes);
client_t *client;
struct rpmem_resp_attr resp;
client = client_exchange(info, nlanes, provider, &resp);
struct rpmem_fip_attr attr = {
.provider = provider,
.persist_method = resp.persist_method,
.laddr = lpool,
.size = POOL_SIZE,
.nlanes = resp.nlanes,
.raddr = (void *)resp.raddr,
.rkey = resp.rkey,
};
ssize_t sret = snprintf(fip_service, NI_MAXSERV, "%u", resp.port);
UT_ASSERT(sret > 0);
struct rpmem_fip *fip;
fip = rpmem_fip_init(info->node, fip_service, &attr, &nlanes);
UT_ASSERTne(fip, NULL);
ret = rpmem_fip_connect(fip);
UT_ASSERTeq(ret, 0);
/* read with len == 0 should always succeed */
ret = rpmem_fip_read(fip, lpool, 0, 0, 0);
UT_ASSERTeq(ret, 0);
ret = rpmem_fip_read(fip, lpool, POOL_SIZE, 0, 0);
UT_ASSERTeq(ret, 0);
client_close_begin(client);
ret = rpmem_fip_close(fip);
UT_ASSERTeq(ret, 0);
client_close_end(client);
rpmem_fip_fini(fip);
ret = memcmp(rpool, lpool, POOL_SIZE);
UT_ASSERTeq(ret, 0);
rpmem_target_free(info);
return 3;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(client_init),
TEST_CASE(server_init),
TEST_CASE(client_connect),
TEST_CASE(server_connect),
TEST_CASE(client_persist),
TEST_CASE(client_persist_mt),
TEST_CASE(server_process),
TEST_CASE(client_read),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
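/*
 * A hypothetical invocation pairing client and server test cases (the
 * server side is spawned through the RPMEM_CMD mechanism):
 *	RPMEM_CMD="./rpmem_fip server_process GPSPM" \
 *		./rpmem_fip client_persist localhost any GPSPM
 */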
int
main(int argc, char *argv[])
{
/* workaround for left-opened files by libfabric */
rpmem_fip_probe_get("localhost", NULL);
START(argc, argv, "rpmem_obc");
common_init("rpmem_fip",
"RPMEM_LOG_LEVEL",
"RPMEM_LOG_FILE", 0, 0);
rpmem_util_cmds_init();
rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0);
rpmemd_log_level = rpmemd_log_level_from_str(
os_getenv("RPMEMD_LOG_LEVEL"));
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
common_fini();
rpmemd_log_close();
rpmem_util_cmds_fini();
DONE(NULL);
}
| 17,818 | 21.903599 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_fip/rpmem_fip_oob.h | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * rpmem_fip_oob.h -- simple oob connection implementation for exchanging
* required RDMA related data
*/
#include <stdint.h>
#include <netinet/in.h>
typedef struct rpmem_ssh client_t;
client_t *client_exchange(struct rpmem_target_info *info,
unsigned nlanes,
enum rpmem_provider provider,
struct rpmem_resp_attr *resp);
void client_close_begin(client_t *c);
void client_close_end(client_t *c);
void server_exchange_begin(unsigned *lanes, enum rpmem_provider *provider,
char **addr);
void server_exchange_end(struct rpmem_resp_attr resp);
void server_close_begin(void);
void server_close_end(void);
void set_rpmem_cmd(const char *fmt, ...);
| 2,258 | 37.948276 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmem_fip/rpmem_fip_oob.c | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_fip_oob.c -- simple oob connection implementation for exchanging
* required RDMA related data
*/
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <string.h>
#include "rpmem_common.h"
#include "rpmem_proto.h"
#include "rpmem_fip_oob.h"
#include "rpmem_ssh.h"
#include "unittest.h"
#include "rpmem_util.h"
#include "os.h"
#define CMD_BUFF_SIZE 4096
static const char *rpmem_cmd;
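/* original value of the RPMEM_CMD variable, cached on first use */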
/*
* set_rpmem_cmd -- set RPMEM_CMD variable
*/
void
set_rpmem_cmd(const char *fmt, ...)
{
static char cmd_buff[CMD_BUFF_SIZE];
if (!rpmem_cmd) {
char *cmd = os_getenv(RPMEM_CMD_ENV);
UT_ASSERTne(cmd, NULL);
rpmem_cmd = STRDUP(cmd);
}
ssize_t ret;
size_t cnt = 0;
va_list ap;
va_start(ap, fmt);
ret = snprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt,
"%s ", rpmem_cmd);
UT_ASSERT(ret > 0);
cnt += (size_t)ret;
ret = vsnprintf(&cmd_buff[cnt], CMD_BUFF_SIZE - cnt, fmt, ap);
UT_ASSERT(ret > 0);
cnt += (size_t)ret;
va_end(ap);
ret = os_setenv(RPMEM_CMD_ENV, cmd_buff, 1);
UT_ASSERTeq(ret, 0);
/*
	 * Rpmem keeps an internal copy of the RPMEM_CMD variable and assumes
	 * RPMEM_CMD does not change during execution. To refresh the internal
	 * copy it must be destroyed and a new instance must be initialized
	 * manually.
*/
rpmem_util_cmds_fini();
rpmem_util_cmds_init();
}
/*
* client_exchange -- connect to remote host and exchange required information
*/
client_t *
client_exchange(struct rpmem_target_info *info,
unsigned nlanes,
enum rpmem_provider provider,
struct rpmem_resp_attr *resp)
{
struct rpmem_ssh *ssh = rpmem_ssh_open(info);
UT_ASSERTne(ssh, NULL);
int ret;
ret = rpmem_ssh_send(ssh, &nlanes, sizeof(nlanes));
UT_ASSERTeq(ret, 0);
ret = rpmem_ssh_send(ssh, &provider, sizeof(provider));
UT_ASSERTeq(ret, 0);
ret = rpmem_ssh_recv(ssh, resp, sizeof(*resp));
UT_ASSERTeq(ret, 0);
return ssh;
}
/*
* client_close_begin -- begin closing connection
*/
void
client_close_begin(client_t *c)
{
int cmd = 1;
int ret;
ret = rpmem_ssh_send(c, &cmd, sizeof(cmd));
UT_ASSERTeq(ret, 0);
ret = rpmem_ssh_recv(c, &cmd, sizeof(cmd));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(cmd, 0);
}
/*
* client_close_end -- end closing connection
*/
void
client_close_end(client_t *c)
{
rpmem_ssh_close(c);
}
/*
* server_exchange_begin -- accept a connection and read required information
*/
void
server_exchange_begin(unsigned *lanes, enum rpmem_provider *provider,
char **addr)
{
UT_ASSERTne(addr, NULL);
char *conn = rpmem_get_ssh_conn_addr();
UT_ASSERTne(conn, NULL);
*addr = strdup(conn);
UT_ASSERTne(*addr, NULL);
uint32_t status = 0;
WRITE(STDOUT_FILENO, &status, sizeof(status));
READ(STDIN_FILENO, lanes, sizeof(*lanes));
READ(STDIN_FILENO, provider, sizeof(*provider));
}
/*
* server_exchange_end -- send response to client
*/
void
server_exchange_end(struct rpmem_resp_attr resp)
{
WRITE(STDOUT_FILENO, &resp, sizeof(resp));
}
/*
* server_close_begin -- wait for close command
*/
void
server_close_begin(void)
{
int cmd = 0;
READ(STDIN_FILENO, &cmd, sizeof(cmd));
UT_ASSERTeq(cmd, 1);
}
/*
* server_close_end -- send close response and wait for disconnect
*/
void
server_close_end(void)
{
int cmd = 0;
WRITE(STDOUT_FILENO, &cmd, sizeof(cmd));
}
| 4,863 | 23.199005 | 78 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/rpmemd_log/rpmemd_log_test.c | /*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmemd_log_test.c -- unit tests for rpmemd_log
*/
#include <stddef.h>
#include <sys/param.h>
#include <syslog.h>
#include "unittest.h"
#include "rpmemd_log.h"
#define PREFIX "prefix"
static FILE *syslog_fh;
/*
* openlog -- mock for openlog function which logs its usage
*/
FUNC_MOCK(openlog, void, const char *ident, int option, int facility)
FUNC_MOCK_RUN_DEFAULT {
UT_OUT("openlog: ident = %s, option = %d, facility = %d",
ident, option, facility);
}
FUNC_MOCK_END
/*
* closelog -- mock for closelog function which logs its usage
*/
FUNC_MOCK(closelog, void, void)
FUNC_MOCK_RUN_DEFAULT {
UT_OUT("closelog");
}
FUNC_MOCK_END
/*
* syslog -- mock for syslog function which redirects message to a file
*/
FUNC_MOCK(syslog, void, int priority, const char *format, ...)
FUNC_MOCK_RUN_DEFAULT {
UT_ASSERT(priority == LOG_ERR ||
priority == LOG_WARNING ||
priority == LOG_NOTICE ||
priority == LOG_INFO ||
priority == LOG_DEBUG);
va_list ap;
va_start(ap, format);
vfprintf(syslog_fh, format, ap);
va_end(ap);
}
FUNC_MOCK_END
/*
* vsyslog -- mock for vsyslog function which redirects message to a file
*/
FUNC_MOCK(vsyslog, void, int priority, const char *format, va_list ap)
FUNC_MOCK_RUN_DEFAULT {
UT_ASSERT(priority == LOG_ERR ||
priority == LOG_WARNING ||
priority == LOG_NOTICE ||
priority == LOG_INFO ||
priority == LOG_DEBUG);
vfprintf(syslog_fh, format, ap);
}
FUNC_MOCK_END
/*
* l2s -- level to string
*/
static const char *
l2s(enum rpmemd_log_level level)
{
return rpmemd_log_level_to_str(level);
}
/*
* test_log_messages -- test log messages on specified level
*/
static void
test_log_messages(enum rpmemd_log_level level)
{
rpmemd_log_level = level;
RPMEMD_LOG(ERR, "ERR message on %s level", l2s(level));
RPMEMD_LOG(WARN, "WARN message on %s level", l2s(level));
RPMEMD_LOG(NOTICE, "NOTICE message on %s level", l2s(level));
RPMEMD_LOG(INFO, "INFO message on %s level", l2s(level));
RPMEMD_DBG("DBG message on %s level", l2s(level));
}
/*
* test_all_log_messages -- test log messages on all levels, with and without
* a prefix.
*/
static void
test_all_log_messages(void)
{
rpmemd_prefix(NULL);
test_log_messages(RPD_LOG_ERR);
test_log_messages(RPD_LOG_WARN);
test_log_messages(RPD_LOG_NOTICE);
test_log_messages(RPD_LOG_INFO);
test_log_messages(_RPD_LOG_DBG);
rpmemd_prefix("[%s]", PREFIX);
test_log_messages(RPD_LOG_ERR);
test_log_messages(RPD_LOG_WARN);
test_log_messages(RPD_LOG_NOTICE);
test_log_messages(RPD_LOG_INFO);
test_log_messages(_RPD_LOG_DBG);
}
#define USAGE() do {\
UT_ERR("usage: %s fatal|log|assert "\
"stderr|file|syslog <file>", argv[0]);\
} while (0)
enum test_log_type {
TEST_STDERR,
TEST_FILE,
TEST_SYSLOG,
};
int
main(int argc, char *argv[])
{
START(argc, argv, "rpmemd_log");
if (argc < 4) {
USAGE();
return 1;
}
const char *log_op = argv[1];
const char *log_type = argv[2];
const char *file = argv[3];
int do_fatal = 0;
int do_assert = 0;
if (strcmp(log_op, "fatal") == 0) {
do_fatal = 1;
} else if (strcmp(log_op, "assert") == 0) {
do_assert = 1;
} else if (strcmp(log_op, "log") == 0) {
} else {
USAGE();
return 1;
}
enum test_log_type type;
if (strcmp(log_type, "stderr") == 0) {
type = TEST_STDERR;
} else if (strcmp(log_type, "file") == 0) {
type = TEST_FILE;
} else if (strcmp(log_type, "syslog") == 0) {
type = TEST_SYSLOG;
} else {
USAGE();
return 1;
}
int fd_stderr = -1;
FILE *stderr_fh = NULL;
switch (type) {
case TEST_STDERR:
/*
* Duplicate the stderr file descriptor in order to restore
* it after redirecting stderr to a file.
*/
fd_stderr = dup(2);
UT_ASSERTne(fd_stderr, -1);
os_close(2);
stderr_fh = os_fopen(file, "a");
UT_ASSERTne(stderr_fh, NULL);
break;
case TEST_SYSLOG:
syslog_fh = os_fopen(file, "a");
UT_ASSERTne(syslog_fh, NULL);
break;
default:
break;
}
int ret;
switch (type) {
case TEST_STDERR:
ret = rpmemd_log_init("rpmemd_log", NULL, 0);
UT_ASSERTeq(ret, 0);
break;
case TEST_SYSLOG:
ret = rpmemd_log_init("rpmemd_log", NULL, 1);
UT_ASSERTeq(ret, 0);
break;
case TEST_FILE:
ret = rpmemd_log_init("rpmemd_log", file, 0);
UT_ASSERTeq(ret, 0);
break;
}
if (do_fatal) {
RPMEMD_FATAL("fatal");
} else if (do_assert) {
RPMEMD_ASSERT(1);
RPMEMD_ASSERT(0);
} else {
test_all_log_messages();
}
rpmemd_log_close();
switch (type) {
case TEST_STDERR:
/* restore the original stderr file descriptor */
fclose(stderr_fh);
UT_ASSERTeq(dup2(fd_stderr, 2), 2);
os_close(fd_stderr);
break;
case TEST_SYSLOG:
fclose(syslog_fh);
break;
default:
break;
}
DONE(NULL);
}
| 6,270 | 23.02682 | 77 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/log_pool_win/log_pool_win.c | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* log_pool_win.c -- unit test for pmemlog_create() and pmemlog_open()
*
* usage: log_pool_win op path [poolsize mode]
*
* op can be:
* c - create
* o - open
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#define MB ((size_t)1 << 20)
static void
pool_create(const wchar_t *path, size_t poolsize, unsigned mode)
{
char *upath = ut_toUTF8(path);
PMEMlogpool *plp = pmemlog_createW(path, poolsize, mode);
if (plp == NULL)
UT_OUT("!%s: pmemlog_create", upath);
else {
os_stat_t stbuf;
STATW(path, &stbuf);
UT_OUT("%s: file size %zu usable space %zu mode 0%o",
upath, stbuf.st_size,
pmemlog_nbyte(plp),
stbuf.st_mode & 0777);
pmemlog_close(plp);
int result = pmemlog_checkW(path);
if (result < 0)
UT_OUT("!%s: pmemlog_check", upath);
else if (result == 0)
UT_OUT("%s: pmemlog_check: not consistent", upath);
}
free(upath);
}
static void
pool_open(const wchar_t *path)
{
char *upath = ut_toUTF8(path);
PMEMlogpool *plp = pmemlog_openW(path);
if (plp == NULL)
UT_OUT("!%s: pmemlog_open", upath);
else {
UT_OUT("%s: pmemlog_open: Success", upath);
pmemlog_close(plp);
}
free(upath);
}
int
wmain(int argc, wchar_t *argv[])
{
STARTW(argc, argv, "log_pool_win");
if (argc < 3)
UT_FATAL("usage: %s op path [poolsize mode]",
ut_toUTF8(argv[0]));
size_t poolsize;
unsigned mode;
switch (argv[1][0]) {
case 'c':
poolsize = wcstoul(argv[3], NULL, 0) * MB; /* in megabytes */
mode = wcstoul(argv[4], NULL, 8);
pool_create(argv[2], poolsize, mode);
break;
case 'o':
pool_open(argv[2]);
break;
default:
UT_FATAL("unknown operation");
}
DONEW(NULL);
}
| 3,277 | 25.650407 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/util_poolset_size/util_poolset_size.c | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* util_poolset_size.c -- unit test for util_poolset_size
*
* usage: util_poolset_size file...
*/
#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include <errno.h>
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
int
main(int argc, char *argv[])
{
START(argc, argv, "util_poolset_size");
common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
MAJOR_VERSION, MINOR_VERSION);
if (argc < 2)
UT_FATAL("usage: %s file...",
argv[0]);
for (int i = 1; i < argc; i++) {
char *fname = argv[i];
size_t size = util_poolset_size(fname);
UT_OUT("util_poolset_size(%s): %lu", fname, size);
}
common_fini();
DONE(NULL);
}
| 2,351 | 31.666667 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/ddmap/ddmap.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ddmap.c -- simple app for reading and writing data from/to a regular file or
* dax device using mmap instead of file io API
*/
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include "common.h"
#include "output.h"
#include "mmap.h"
#include "file.h"
#include "util.h"
#include "os.h"
/*
* ddmap_context -- context and arguments
*/
struct ddmap_context {
char *file_in; /* input file name */
char *file_out; /* output file name */
char *str; /* string data to write */
size_t offset_in; /* offset from beginning of input file for */
/* read/write operations expressed in blocks */
size_t offset_out; /* offset from beginning of output file for */
/* read/write operations expressed in blocks */
size_t bytes; /* size of blocks to write at the time */
size_t count; /* number of blocks to read/write */
int checksum; /* compute checksum */
int runlen; /* print bytes as runlen/char sequence */
};
/*
* the default context, with all fields initialized to zero or NULL
*/
static struct ddmap_context ddmap_default;
/*
* print_usage -- print short description of usage
*/
static void
print_usage(void)
{
printf("Usage: ddmap [option] ...\n");
printf("Valid options:\n");
printf("-i FILE - read from FILE\n");
printf("-o FILE - write to FILE\n");
printf("-d STRING - STRING to be written\n");
printf("-s N - skip N blocks at start of input\n");
printf("-q N - skip N blocks at start of output\n");
printf("-b N - read/write N bytes at a time\n");
printf("-n N - copy N input blocks\n");
printf("-c - compute checksum\n");
printf("-r - print file content as runlen/char pairs\n");
printf("-h - print this usage info\n");
}
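/*
* Example invocations (sketches; the file names are hypothetical):
*
* ddmap -o file.out -d "foo" - write "foo" at offset 0
* ddmap -i file.in -s 1 -b 512 -n 1 -r - dump block 1 as runlen pairs
* ddmap -i file.in -b 4096 -n 8 -c - checksum 8 blocks of 4096 bytes
*/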
/*
* long_options -- command line options
*/
static const struct option long_options[] = {
{"input-file", required_argument, NULL, 'i'},
{"output-file", required_argument, NULL, 'o'},
{"string", required_argument, NULL, 'd'},
{"offset-in", required_argument, NULL, 's'},
{"offset-out", required_argument, NULL, 'q'},
{"block-size", required_argument, NULL, 'b'},
{"count", required_argument, NULL, 'n'},
{"checksum", no_argument, NULL, 'c'},
{"runlen", no_argument, NULL, 'r'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0 },
};
/*
* ddmap_print_char -- (internal) print single char
*
* Printable ASCII characters are printed normally,
* NUL character is printed as a little circle (the degree symbol),
* non-printable ASCII characters are printed as centered dots.
*/
static void
ddmap_print_char(char c)
{
if (c == '\0')
/* print the degree symbol for NUL */
printf("\u00B0");
else if (c >= ' ' && c <= '~')
/* print printable ASCII character */
printf("%c", c);
else
/* print centered dot for non-printable character */
printf("\u00B7");
}
/*
* ddmap_print_runlen -- (internal) print file content as length/char pairs
*
* For each sequence of chars of the same value (could be just 1 byte)
* print length of the sequence and the char value.
*/
static void
ddmap_print_runlen(char *addr, size_t len)
{
char c = '\0';
ssize_t cnt = 0;
for (size_t i = 0; i < len; i++) {
if (i > 0 && c != addr[i] && cnt != 0) {
printf("%zd ", cnt);
ddmap_print_char(c);
printf("\n");
cnt = 0;
}
c = addr[i];
cnt++;
}
if (cnt) {
printf("%zd ", cnt);
ddmap_print_char(c);
printf("\n");
}
}
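/*
* For example (a sketch): an 8-byte buffer holding 'a', 'a', 'a', '\0',
* '\0', 'b', 'b', 'b' is printed as three lines: "3 a", "2 °" (the
* degree symbol stands for NUL) and "3 b", which keeps dumps of large
* zeroed ranges readable.
*/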
/*
* ddmap_print_bytes -- (internal) print array of bytes
*/
static void
ddmap_print_bytes(const char *data, size_t len)
{
for (size_t i = 0; i < len; ++i) {
ddmap_print_char(data[i]);
}
printf("\n");
}
/*
* ddmap_read -- (internal) read a string from the file at the offset and
* print it to stdout
*/
static int
ddmap_read(const char *path, size_t offset_in, size_t bytes, size_t count,
int runlen)
{
size_t len = bytes * count;
os_off_t offset = (os_off_t)(bytes * offset_in);
char *read_buff = Zalloc(len + 1);
if (read_buff == NULL) {
outv_err("Zalloc(%zu) failed\n", len + 1);
return -1;
}
ssize_t read_len = util_file_pread(path, read_buff, len, offset);
if (read_len < 0) {
outv_err("pread failed");
Free(read_buff);
return -1;
} else if ((size_t)read_len < len) {
outv(1, "read less bytes than requested: %zd vs. %zu\n",
read_len, len);
}
if (runlen)
ddmap_print_runlen(read_buff, (size_t)read_len);
else
ddmap_print_bytes(read_buff, (size_t)read_len);
Free(read_buff);
return 0;
}
/*
* ddmap_zero -- (internal) zero a range of data in the file
*/
static int
ddmap_zero(const char *path, size_t offset, size_t len)
{
void *addr;
ssize_t filesize = util_file_get_size(path);
if (filesize < 0) {
outv_err("invalid file size");
return -1;
}
if (offset + len > (size_t)filesize)
len = (size_t)filesize - offset;
addr = util_file_map_whole(path);
if (addr == NULL) {
outv_err("map failed");
return -1;
}
memset((char *)addr + offset, 0, len);
util_unmap(addr, (size_t)filesize);
return 0;
}
/*
* ddmap_write_data -- (internal) write data to a file
*/
static int
ddmap_write_data(const char *path, const char *data,
os_off_t offset, size_t len)
{
if (util_file_pwrite(path, data, len, offset) < 0) {
outv_err("pwrite for dax device failed: path %s,"
" len %zu, offset %zd", path, len, offset);
return -1;
}
return 0;
}
/*
* ddmap_write_from_file -- (internal) write data from file to dax device or
* file
*/
static int
ddmap_write_from_file(const char *path_in, const char *path_out,
size_t offset_in, size_t offset_out, size_t bytes,
size_t count)
{
char *src, *tmp_src;
os_off_t offset;
ssize_t file_in_size = util_file_get_size(path_in);
size_t data_left, len;
util_init();
src = util_file_map_whole(path_in);
src += (os_off_t)(offset_in * bytes);
offset = (os_off_t)(offset_out * bytes);
data_left = (size_t)file_in_size;
tmp_src = src;
do {
len = MIN(data_left, bytes);
ddmap_write_data(path_out, tmp_src, offset, len);
tmp_src += len;
data_left -= len;
if (data_left == 0) {
data_left = (size_t)file_in_size;
tmp_src = src;
}
offset += (os_off_t)len;
count--;
} while (count > 0);
util_unmap(src, (size_t)file_in_size);
return 0;
}
/*
* ddmap_write -- (internal) write the string to the file
*/
static int
ddmap_write(const char *path, const char *str, size_t offset_in, size_t bytes,
size_t count)
{
/* calculate how many characters from the string are to be written */
size_t length;
size_t str_len = (str != NULL) ? strlen(str) + 1 : 0;
os_off_t offset = (os_off_t)(bytes * offset_in);
size_t len = bytes * count;
if (len == 0)
length = str_len;
else
length = MIN(len, str_len);
/* write the string */
if (length > 0) {
if (ddmap_write_data(path, str, offset, length))
return -1;
}
/* zero the rest of requested range */
if (length < len) {
if (ddmap_zero(path, (size_t)offset + length, len - length))
return -1;
}
return 0;
}
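/*
* For instance (a sketch): ddmap_write(path, "abc", 0, 512, 1) stores
* the four bytes "abc" plus the terminating NUL at offset 0 and zeroes
* the remaining 508 bytes of the requested 512-byte range.
*/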
/*
* ddmap_checksum -- (internal) compute checksum of a slice of an input file
*/
static int
ddmap_checksum(const char *path, size_t bytes, size_t count, size_t offset_in)
{
char *src;
uint64_t checksum;
ssize_t filesize = util_file_get_size(path);
os_off_t offset = (os_off_t)(bytes * offset_in);
size_t len = bytes * count;
if ((size_t)filesize < len + (size_t)offset) {
outv_err("offset with length exceed file size");
return -1;
}
util_init();
src = util_file_map_whole(path);
util_checksum(src + offset, len, &checksum, 1, 0);
util_unmap(src, (size_t)filesize);
printf("%" PRIu64 "\n", checksum);
return 0;
}
/*
* parse_args -- (internal) parse command line arguments
*/
static int
parse_args(struct ddmap_context *ctx, int argc, char *argv[])
{
int opt;
char *endptr;
size_t offset;
size_t count;
size_t bytes;
while ((opt = getopt_long(argc, argv, "i:o:d:s:q:b:n:crhv",
long_options, NULL)) != -1) {
switch (opt) {
case 'i':
ctx->file_in = optarg;
break;
case 'o':
ctx->file_out = optarg;
break;
case 'd':
ctx->str = optarg;
if (ctx->count == 0)
ctx->count = strlen(ctx->str);
if (ctx->bytes == 0)
ctx->bytes = 1;
break;
case 's':
errno = 0;
offset = strtoul(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
outv_err("'%s' -- invalid input offset",
optarg);
return -1;
}
ctx->offset_in = offset;
break;
case 'q':
errno = 0;
offset = strtoul(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
outv_err("'%s' -- invalid output offset",
optarg);
return -1;
}
ctx->offset_out = offset;
break;
case 'b':
errno = 0;
bytes = strtoull(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
outv_err("'%s' -- invalid block size", optarg);
return -1;
}
ctx->bytes = bytes;
break;
case 'n':
errno = 0;
count = strtoull(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
outv_err("'%s' -- invalid count", optarg);
return -1;
}
ctx->count = count;
break;
case 'c':
ctx->checksum = 1;
break;
case 'r':
ctx->runlen = 1;
break;
case 'h':
print_usage();
exit(EXIT_SUCCESS);
case 'v':
out_set_vlevel(1);
break;
default:
print_usage();
exit(EXIT_FAILURE);
}
}
return 0;
}
/*
* validate_args -- (internal) validate arguments
*/
static int
validate_args(struct ddmap_context *ctx)
{
if ((ctx->file_in == NULL) && (ctx->file_out == NULL)) {
outv_err("an input file and/or an output file must be "
"provided");
return -1;
} else if (ctx->file_out == NULL) {
if (ctx->bytes == 0) {
outv_err("number of bytes to read has to be provided");
return -1;
}
} else if (ctx->file_in == NULL) {
/* ddmap_write requirements */
if (ctx->str == NULL && (ctx->count * ctx->bytes) == 0) {
outv_err("when writing, 'data' or 'count' and 'bytes' "
"have to be provided");
return -1;
}
} else {
/* scenarios other than ddmap_write requirement */
if ((ctx->bytes * ctx->count) == 0) {
outv_err("number of bytes and count must be provided");
return -1;
}
}
return 0;
}
/*
* do_ddmap -- (internal) perform ddmap
*/
static int
do_ddmap(struct ddmap_context *ctx)
{
if ((ctx->file_in != NULL) && (ctx->file_out != NULL)) {
if (ddmap_write_from_file(ctx->file_in, ctx->file_out,
ctx->offset_in, ctx->offset_out, ctx->bytes,
ctx->count))
return -1;
return 0;
}
if ((ctx->checksum == 1) && (ctx->file_in != NULL)) {
if (ddmap_checksum(ctx->file_in, ctx->bytes, ctx->count,
ctx->offset_in))
return -1;
return 0;
}
if (ctx->file_in != NULL) {
if (ddmap_read(ctx->file_in, ctx->offset_in, ctx->bytes,
ctx->count, ctx->runlen))
return -1;
} else { /* ctx->file_out != NULL */
if (ddmap_write(ctx->file_out, ctx->str, ctx->offset_in,
ctx->bytes, ctx->count))
return -1;
}
return 0;
}
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
outv_err("Error during arguments conversion\n");
return 1;
}
}
#endif
int ret = 0;
struct ddmap_context ctx = ddmap_default;
if ((ret = parse_args(&ctx, argc, argv)))
goto out;
if ((ret = validate_args(&ctx)))
goto out;
if ((ret = do_ddmap(&ctx))) {
outv_err("failed to perform ddmap\n");
if (errno)
outv_err("errno: %s\n", strerror(errno));
ret = -1;
goto out;
}
out:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 13,362 | 23.83829 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/bttcreate/bttcreate.c | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* bttcreate.c -- tool for generating BTT layout
*/
#include <stdio.h>
#include <getopt.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include "set.h"
#include "pool_hdr.h"
#include "btt.h"
#include "btt_layout.h"
#include "pmemcommon.h"
#include "os.h"
#define BTT_CREATE_DEF_SIZE (20 * 1UL << 20) /* 20 MB */
#define BTT_CREATE_DEF_BLK_SIZE 512UL
#define BTT_CREATE_DEF_OFFSET_SIZE (4 * 1UL << 10) /* 4 KB */
struct btt_context {
void *addr;
uint64_t len;
};
struct bbtcreate_options {
const char *fpath;
size_t poolsize;
uint32_t blocksize;
unsigned maxlanes;
uuid_t uuid;
bool trunc;
bool verbose;
bool user_uuid;
};
/*
* nsread -- btt callback for reading
*/
static int
nsread(void *ns, unsigned lane, void *buf, size_t count,
uint64_t off)
{
struct btt_context *nsc = (struct btt_context *)ns;
if (off + count > nsc->len) {
errno = EINVAL;
return -1;
}
memcpy(buf, (char *)nsc->addr + off, count);
return 0;
}
/*
* nswrite -- btt callback for writing
*/
static int
nswrite(void *ns, unsigned lane, const void *buf,
size_t count, uint64_t off)
{
struct btt_context *nsc = (struct btt_context *)ns;
if (off + count > nsc->len) {
errno = EINVAL;
return -1;
}
memcpy((char *)nsc->addr + off, buf, count);
return 0;
}
/*
* nsmap -- btt callback for memory mapping
*/
static ssize_t
nsmap(void *ns, unsigned lane, void **addrp, size_t len,
uint64_t off)
{
struct btt_context *nsc = (struct btt_context *)ns;
assert((ssize_t)len >= 0);
if (off + len >= nsc->len) {
errno = EINVAL;
return -1;
}
/*
* Since the entire file is memory-mapped, this callback
* can always provide the entire length requested.
*/
*addrp = (char *)nsc->addr + off;
return (ssize_t)len;
}
/*
* nssync -- btt callback for memory synchronization
*/
static void
nssync(void *ns, unsigned lane, void *addr, size_t len)
{
/* do nothing */
}
/*
* nszero -- btt callback for zeroing memory
*/
static int
nszero(void *ns, unsigned lane, size_t len, uint64_t off)
{
struct btt_context *nsc = (struct btt_context *)ns;
if (off + len >= nsc->len) {
errno = EINVAL;
return -1;
}
memset((char *)nsc->addr + off, 0, len);
return 0;
}
/*
* print_usage -- print usage of program
*/
static void
print_usage(char *name)
{
printf("Usage: %s [-s <pool_file_size>] [-b <block_size>] "
"[-l <max_lanes>] [-u <uuid>] [-t] [-v] "
"<pool_name>\n", name);
}
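/*
* Example invocation (a sketch; the path is hypothetical):
*
* bttcreate -s 32M -b 512 -t -v /tmp/btt.file
*
* truncates the file to 32 MB instead of preallocating it, writes
* a BTT layout with 512 B blocks and prints the result.
*/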
/*
* file_error -- handle file errors
*/
static int
file_error(const int fd, const char *fpath)
{
if (fd != -1)
(void) os_close(fd);
os_unlink(fpath);
return -1;
}
/*
* print_uuid -- print uuid
*/
static void
print_uuid(uuid_t uuid)
{
char uuidstr[POOL_HDR_UUID_STR_LEN];
if (util_uuid_to_string(uuid, uuidstr) == 0) {
printf("uuid\t\t%s\n", uuidstr);
}
}
/*
* print_result -- print result if verbose option is on
*/
static void
print_result(struct bbtcreate_options *opts)
{
if (opts->verbose) {
printf("BTT successfully created: %s\n", opts->fpath);
printf("poolsize\t%zuB\n", opts->poolsize);
printf("blocksize\t%uB\n", opts->blocksize);
printf("maxlanes\t%u\n", opts->maxlanes);
print_uuid(opts->uuid);
putchar('\n');
}
}
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
fprintf(stderr, "Error during arguments conversion\n");
return 1;
}
}
#endif
common_init("", "", "", 0, 0);
int opt;
size_t size;
int fd;
int res = 0;
struct bbtcreate_options opts = {
.poolsize = BTT_CREATE_DEF_SIZE,
.blocksize = BTT_CREATE_DEF_BLK_SIZE,
.maxlanes = BTT_DEFAULT_NFREE,
.trunc = false,
.verbose = false,
.user_uuid = false
};
/* parse option */
while ((opt = getopt(argc, argv, "s:b:l:u:tv")) != -1) {
switch (opt) {
case 's':
if (util_parse_size(optarg, &size) == 0) {
opts.poolsize = size;
} else {
fprintf(stderr, "Wrong size format in pool"
" size option\n");
res = 1;
goto out;
}
break;
case 'b':
if (util_parse_size(optarg, &size) == 0) {
opts.blocksize = (uint32_t)size;
} else {
fprintf(stderr, "Wrong size format in block"
" size option\n");
res = 1;
goto out;
}
break;
case 'l':
opts.maxlanes = (unsigned)strtoul(optarg, NULL, 0);
break;
case 'u':
if (util_uuid_from_string(optarg,
(struct uuid *)&opts.uuid) == 0) {
opts.user_uuid = true;
} else {
fprintf(stderr, "Wrong uuid format.");
res = 1;
goto out;
}
break;
case 't':
opts.trunc = true;
break;
case 'v':
opts.verbose = true;
break;
default:
print_usage(argv[0]);
res = 1;
goto out;
}
}
if (optind < argc) {
opts.fpath = argv[optind];
} else {
print_usage(argv[0]);
res = 1;
goto out;
}
/* check sizes */
if (opts.poolsize - BTT_CREATE_DEF_OFFSET_SIZE < BTT_MIN_SIZE) {
fprintf(stderr, "Pool size is less then %d MB\n",
BTT_MIN_SIZE >> 20);
res = 1;
goto out;
}
if (opts.blocksize < BTT_MIN_LBA_SIZE) {
fprintf(stderr, "Block size is less then %zu B\n",
BTT_MIN_LBA_SIZE);
res = 1;
goto out;
}
/* open file */
if ((fd = os_open(opts.fpath, O_RDWR|O_CREAT,
S_IRUSR|S_IWUSR)) < 0) {
perror(opts.fpath);
res = 1;
goto out;
}
/* allocate file */
if (!opts.trunc) {
if (os_posix_fallocate(fd, 0,
(os_off_t)opts.poolsize) != 0) {
perror("posix_fallocate");
res = file_error(fd, opts.fpath);
goto error;
}
} else {
if (os_ftruncate(fd, (os_off_t)opts.poolsize) != 0) {
perror("ftruncate");
res = file_error(fd, opts.fpath);
goto error;
}
}
/* map created file */
void *base = util_map(fd, opts.poolsize, MAP_SHARED, 0, 0, NULL);
if (!base) {
perror("util_map");
res = file_error(fd, opts.fpath);
goto error_map;
}
/* setup btt context */
struct btt_context btt_context = {
.addr = (void *)((uint64_t)base + BTT_CREATE_DEF_OFFSET_SIZE),
.len = opts.poolsize - BTT_CREATE_DEF_OFFSET_SIZE
};
/* generate uuid */
if (!opts.user_uuid) {
if (util_uuid_generate(opts.uuid) < 0) {
perror("util_uuid_generate");
res = -1;
goto error_map;
}
}
/* init callback structure */
static struct ns_callback btt_ns_callback = {
.nsread = nsread,
.nswrite = nswrite,
.nsmap = nsmap,
.nssync = nssync,
.nszero = nszero,
};
/* init btt in requested area */
struct btt *bttp = btt_init(opts.poolsize - BTT_CREATE_DEF_OFFSET_SIZE,
opts.blocksize, opts.uuid, opts.maxlanes,
(void *)&btt_context,
&btt_ns_callback);
if (!bttp) {
printf("Error: Cannot initialize BTT layer\n");
res = -1;
goto error_map;
}
/* initialize metadata */
if (btt_set_error(bttp, 0, 0)) {
perror("btt_set_error");
res = -1;
goto error_btt;
}
if (btt_set_zero(bttp, 0, 0)) {
perror("btt_set_zero");
res = -1;
goto error_btt;
}
/* print results */
print_result(&opts);
error_btt:
btt_fini(bttp);
error_map:
common_fini();
error:
os_close(fd);
out:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return res;
}
| 8,869 | 21.009926 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/dllview/dllview.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* dllview.c -- a simple utility displaying the list of symbols exported by DLL
*
* usage: dllview filename
*/
#include <windows.h>
#include <stdio.h>
#include <winnt.h>
#include <imagehlp.h>
int
main(int argc, char *argv[])
{
if (argc < 2) {
fprintf(stderr, "usage: %s dllname\n", argv[0]);
exit(1);
}
const char *dllname = argv[1];
LOADED_IMAGE img;
if (MapAndLoad(dllname, NULL, &img, 1, 1) == FALSE) {
fprintf(stderr, "cannot load DLL image\n");
exit(2);
}
IMAGE_EXPORT_DIRECTORY *dir;
ULONG dirsize;
dir = (IMAGE_EXPORT_DIRECTORY *)ImageDirectoryEntryToData(
img.MappedAddress, 0 /* mapped as image */,
IMAGE_DIRECTORY_ENTRY_EXPORT, &dirsize);
if (dir == NULL) {
fprintf(stderr, "cannot read image directory\n");
UnMapAndLoad(&img);
exit(3);
}
DWORD *rva;
rva = (DWORD *)ImageRvaToVa(img.FileHeader, img.MappedAddress,
dir->AddressOfNames, NULL);
for (DWORD i = 0; i < dir->NumberOfNames; i++) {
char *name = (char *)ImageRvaToVa(img.FileHeader,
img.MappedAddress, rva[i], NULL);
printf("%s\n", name);
}
UnMapAndLoad(&img);
return 0;
}
| 2,705 | 31.214286 | 79 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/pmemalloc/pmemalloc.c | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemalloc.c -- simple tool for allocating objects from pmemobj
*
* usage: pmemalloc [-r <size>] [-o <size>] [-t <type_num>]
* [-c <alloc_class_size>] [-s] [-f] [-e a|f|s] <file>
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <libpmemobj.h>
#include <util.h>
#define USAGE()\
printf("usage: pmemalloc"\
" [-r <size>] [-o <size>] [-t <type_num>]"\
" [-s] [-f] [-e a|f|s] <file>\n")
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
fprintf(stderr, "Error during arguments conversion\n");
return 1;
}
}
#endif
int opt;
int tmpi;
long long tmpl;
int ret = 0;
size_t size = 0;
size_t root_size = 0;
unsigned type_num = 0;
char exit_at = '\0';
int do_set = 0;
int do_free = 0;
size_t alloc_class_size = 0;
if (argc < 2) {
USAGE();
ret = 1;
goto end;
}
while ((opt = getopt(argc, argv, "r:o:c:t:e:sf")) != -1) {
switch (opt) {
case 'r':
tmpl = atoll(optarg);
if (tmpl < 0) {
USAGE();
ret = 1;
goto end;
}
root_size = (size_t)tmpl;
break;
case 'o':
tmpl = atoll(optarg);
if (tmpl < 0) {
USAGE();
ret = 1;
goto end;
}
size = (size_t)tmpl;
break;
case 'c':
tmpl = atoll(optarg);
if (tmpl < 0) {
USAGE();
ret = 1;
goto end;
}
alloc_class_size = (size_t)tmpl;
break;
case 't':
tmpi = atoi(optarg);
if (tmpi < 0) {
USAGE();
ret = 1;
goto end;
}
type_num = (unsigned)tmpi;
break;
case 'e':
exit_at = optarg[0];
break;
case 's':
do_set = 1;
break;
case 'f':
do_free = 1;
break;
default:
USAGE();
ret = 1;
goto end;
}
}
char *file = argv[optind];
PMEMobjpool *pop;
if ((pop = pmemobj_open(file, NULL)) == NULL) {
fprintf(stderr, "pmemobj_open: %s\n", pmemobj_errormsg());
ret = 1;
goto end;
}
if (root_size) {
PMEMoid oid = pmemobj_root(pop, root_size);
if (OID_IS_NULL(oid)) {
fprintf(stderr, "pmemobj_root: %s\n",
pmemobj_errormsg());
ret = 1;
goto end;
}
}
if (alloc_class_size) {
PMEMoid oid;
struct pobj_alloc_class_desc desc;
desc.alignment = 0;
desc.class_id = 0;
desc.header_type = POBJ_HEADER_COMPACT;
desc.unit_size = alloc_class_size;
desc.units_per_block = 1;
ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc);
if (ret != 0)
goto end;
ret = pmemobj_xalloc(pop, &oid, 1, type_num,
POBJ_CLASS_ID(desc.class_id), NULL, NULL);
if (ret != 0)
goto end;
}
if (size) {
PMEMoid oid;
TX_BEGIN(pop) {
oid = pmemobj_tx_alloc(size, type_num);
if (exit_at == 'a')
exit(1);
} TX_END
if (OID_IS_NULL(oid)) {
fprintf(stderr, "pmemobj_tx_alloc: %s\n",
pmemobj_errormsg());
ret = 1;
goto end;
}
if (do_set) {
TX_BEGIN(pop) {
pmemobj_tx_add_range(oid, 0, size);
if (exit_at == 's')
exit(1);
} TX_END
}
if (do_free) {
TX_BEGIN(pop) {
pmemobj_tx_free(oid);
if (exit_at == 'f')
exit(1);
} TX_END
}
}
pmemobj_close(pop);
end:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 4,870 | 21.761682 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/pmemdetect/pmemdetect.c | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemdetect.c -- detect PMEM/Device DAX device or Device DAX alignment
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <getopt.h>
#include <errno.h>
#include "mmap.h"
#include "libpmem.h"
#include "file.h"
#include "os.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#define SIZE 4096
#define DEVDAX_DETECT (1 << 0)
#define DEVDAX_ALIGN (1 << 1)
#define MAP_SYNC_SUPP (1 << 2)
#define DAX_REGION_DETECT (1 << 3)
#define FILE_SIZE (1 << 4)
#define err(fmt, ...) fprintf(stderr, "pmemdetect: " fmt, __VA_ARGS__)
/* arguments */
static int Opts;
static char *Path;
static size_t Align;
/*
* print_usage -- print short description of usage
*/
static void
print_usage(void)
{
printf("Usage: pmemdetect [options] <path>\n");
printf("Valid options:\n");
printf("-d, --devdax - check if <path> is Device DAX\n");
printf("-a, --align=N - check Device DAX alignment\n");
printf("-r, --dax-region - check if Dev DAX <path> has region id\n");
printf("-s, --map-sync - check if <path> supports MAP_SYNC\n");
printf("-z, --size - print file/Device DAX size\n");
printf("-h, --help - print this usage info\n");
}
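/*
* Example invocations (sketches; the paths are hypothetical). The exit
* code is 0 for "true", 1 for "false" and 2 when an error occurred:
*
* pmemdetect /mnt/pmem/file - is the file on a pmem-aware fs?
* pmemdetect -d /dev/dax0.0 - is the path a Device DAX?
* pmemdetect -a 2097152 /dev/dax0.0 - is its alignment 2 MB?
*/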
/*
* long_options -- command line options
*/
static const struct option long_options[] = {
{"devdax", no_argument, NULL, 'd'},
{"align", required_argument, NULL, 'a'},
{"dax-region", no_argument, NULL, 'r'},
{"map-sync", no_argument, NULL, 's'},
{"size", no_argument, NULL, 'z'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0 },
};
/*
* parse_args -- (internal) parse command line arguments
*/
static int
parse_args(int argc, char *argv[])
{
int opt;
while ((opt = getopt_long(argc, argv, "a:dshrz",
long_options, NULL)) != -1) {
switch (opt) {
case 'd':
Opts |= DEVDAX_DETECT;
break;
case 'r':
Opts |= DAX_REGION_DETECT;
break;
case 'a':
Opts |= DEVDAX_ALIGN;
char *endptr;
errno = 0;
size_t align = strtoull(optarg, &endptr, 0);
if ((endptr && *endptr != '\0') || errno) {
err("'%s' -- invalid alignment", optarg);
return -1;
}
Align = align;
break;
case 's':
Opts |= MAP_SYNC_SUPP;
break;
case 'z':
Opts |= FILE_SIZE;
break;
case 'h':
print_usage();
exit(EXIT_SUCCESS);
default:
print_usage();
exit(EXIT_FAILURE);
}
}
if (optind < argc) {
Path = argv[optind];
} else {
print_usage();
exit(EXIT_FAILURE);
}
return 0;
}
/*
* get_params -- get parameters for pmem_map_file
*/
static int
get_params(const char *path, int *flags, size_t *size)
{
int ret;
os_stat_t buf;
ret = os_stat(path, &buf);
if (ret && errno != ENOENT) {
/* error other than no such file */
perror(path);
return -1;
}
if (ret) {
/* no such file */
*flags = PMEM_FILE_CREATE;
*size = SIZE;
} else if (S_ISDIR(buf.st_mode)) {
*flags = PMEM_FILE_CREATE | PMEM_FILE_TMPFILE;
*size = SIZE;
} else {
/* file exist */
*size = 0;
*flags = 0;
}
return 0;
}
/*
* is_pmem -- checks if given path points to pmem-aware filesystem
*/
static int
is_pmem(const char *path)
{
int ret;
int flags;
size_t size;
ret = get_params(path, &flags, &size);
if (ret)
return ret;
int is_pmem;
void *addr = pmem_map_file(path, size, flags, 0, &size, &is_pmem);
if (addr == NULL) {
perror("pmem_map_file failed");
return -1;
}
pmem_unmap(addr, size);
return is_pmem;
}
/*
* is_dev_dax -- checks if given path points to Device DAX
*/
static int
is_dev_dax(const char *path)
{
enum file_type type = util_file_get_type(path);
if (type < 0) {
printf("%s -- not accessible\n", path);
return -1;
}
if (os_access(path, W_OK|R_OK)) {
printf("%s -- permission denied\n", path);
return -1;
}
if (type == TYPE_DEVDAX)
return 1;
printf("%s -- not device dax\n", path);
return 0;
}
/*
* is_dev_dax_align -- checks if Device DAX alignment is as specified
*/
static int
is_dev_dax_align(const char *path, size_t req_align)
{
if (is_dev_dax(path) != 1)
return -1;
size_t align = util_file_device_dax_alignment(path);
return (req_align == align) ? 1 : 0;
}
/*
* supports_map_sync -- checks if MAP_SYNC is supported on a filesystem
* from given path
*/
static int
supports_map_sync(const char *path)
{
int ret;
int flags;
size_t size;
ret = get_params(path, &flags, &size);
if (ret)
return ret;
int fd;
if (flags & PMEM_FILE_TMPFILE)
fd = util_tmpfile(path, "/pmemdetect.XXXXXX", 0);
else if (flags & PMEM_FILE_CREATE)
fd = os_open(path, O_CREAT|O_RDWR, S_IWUSR|S_IRUSR);
else
fd = os_open(path, O_RDWR);
if (fd < 0) {
perror(path);
return -1;
}
if (flags & PMEM_FILE_CREATE) {
ret = os_ftruncate(fd, (off_t)size);
if (ret) {
perror(path);
os_close(fd);
return -1;
}
}
void *addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_SYNC|MAP_SHARED_VALIDATE, fd, 0);
if (addr != MAP_FAILED) {
ret = 1;
} else if (addr == MAP_FAILED &&
(errno == EOPNOTSUPP || errno == EINVAL)) {
ret = 0;
} else {
err("mmap: %s\n", strerror(errno));
ret = -1;
}
os_close(fd);
if (flags & PMEM_FILE_CREATE && !(flags & PMEM_FILE_TMPFILE))
util_unlink(path);
return ret;
}
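/*
* Note: the probe above relies on MAP_SHARED_VALIDATE rejecting unknown
* flags, so an mmap() failure with EOPNOTSUPP or EINVAL is interpreted
* as "MAP_SYNC unsupported" (return 0) rather than as an error. A caller
* sketch:
*
* int r = supports_map_sync("/mnt/pmem/file");
* r == 1 means MAP_SYNC mappings can be used on this filesystem.
*/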
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
err("error during arguments conversion\n");
return 2;
}
}
#endif
int ret;
if (parse_args(argc, argv)) {
ret = 2;
goto out;
}
util_init();
util_mmap_init();
if (Opts & DEVDAX_DETECT)
ret = is_dev_dax(Path);
else if (Opts & DAX_REGION_DETECT) {
ret = util_ddax_region_find(Path);
if (ret < 0) {
printf("Sysfs id file for dax_region is not supported:"
" %s\n", Path);
ret = 0;
} else {
ret = 1;
}
} else if (Opts & DEVDAX_ALIGN) {
ret = is_dev_dax_align(Path, Align);
} else if (Opts & FILE_SIZE) {
printf("%zu", (size_t)util_file_get_size(Path));
ret = 1;
} else if (Opts & MAP_SYNC_SUPP) {
ret = supports_map_sync(Path);
} else {
ret = is_pmem(Path);
}
/*
* Return 0 on 'true'. Otherwise return 1.
* If any problem occurred return 2.
*/
switch (ret) {
case 0:
ret = 1;
break;
case 1:
ret = 0;
break;
default:
ret = 2;
break;
}
util_mmap_fini();
out:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 8,121 | 20.601064 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/pmemwrite/write.c | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* write.c -- simple app for writing data to pool used by pmempool tests
*/
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <libgen.h>
#include <string.h>
#include <inttypes.h>
#include <err.h>
#include "common.h"
#include "output.h"
#include <libpmemlog.h>
#include <libpmemblk.h>
#include "mmap.h"
#include "queue.h"
/*
* pmemwrite -- context and arguments
*/
struct pmemwrite
{
char *fname; /* file name */
int nargs; /* number of arguments */
char **args; /* list of arguments */
};
static struct pmemwrite pmemwrite = {
.fname = NULL,
.nargs = 0,
.args = NULL,
};
/*
* print_usage -- print short description of usage
*/
static void
print_usage(char *appname)
{
printf("Usage: %s <file> <args>...\n", appname);
printf("Valid arguments:\n");
printf("<blockno>:w:<string> - write <string> to <blockno> block\n");
printf("<blockno>:z - set zero flag on <blockno> block\n");
printf("<blockno>:z - set error flag on <blockno> block\n");
}
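/*
* Example invocations (sketches; the pool files are hypothetical):
*
* pmemwrite pool.log "first entry" "second entry"
* pmemwrite pool.blk 0:w:DATA 1:z 2:e
*
* The first appends two log entries; the second writes "DATA" to block
* 0, zeroes block 1 and marks block 2 as erroneous.
*/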
/*
* pmemwrite_log -- write data to pmemlog pool file
*/
static int
pmemwrite_log(struct pmemwrite *pwp)
{
PMEMlogpool *plp = pmemlog_open(pwp->fname);
if (!plp) {
warn("%s", pwp->fname);
return -1;
}
int i;
int ret = 0;
for (i = 0; i < pwp->nargs; i++) {
size_t len = strlen(pwp->args[i]);
if (pmemlog_append(plp, pwp->args[i], len)) {
warn("%s", pwp->fname);
ret = -1;
break;
}
}
pmemlog_close(plp);
return ret;
}
/*
* pmemwrite_blk -- write data to pmemblk pool file
*/
static int
pmemwrite_blk(struct pmemwrite *pwp)
{
PMEMblkpool *pbp = pmemblk_open(pwp->fname, 0);
if (!pbp) {
warn("%s", pwp->fname);
return -1;
}
int i;
int ret = 0;
size_t blksize = pmemblk_bsize(pbp);
char *blk = malloc(blksize);
if (!blk) {
ret = -1;
outv_err("malloc(%lu) failed\n", blksize);
goto nomem;
}
for (i = 0; i < pwp->nargs; i++) {
int64_t blockno;
char *buff;
size_t buffsize = strlen(pwp->args[i]) + 1;
buff = malloc(buffsize);
if (buff == NULL) {
ret = -1;
outv_err("malloc(%lu) failed\n", buffsize);
goto end;
}
char flag;
/* <blockno>:w:<string> - write string to <blockno> */
if (sscanf(pwp->args[i], "%" SCNi64 ":w:%[^:]",
&blockno, buff) == 2) {
memset(blk, 0, blksize);
size_t bufflen = strlen(buff);
if (bufflen == 0) {
free(buff);
goto end;
}
if (bufflen > blksize) {
outv_err("String is longer than block size. "
"Truncating.\n");
bufflen = blksize;
}
memcpy(blk, buff, bufflen);
ret = pmemblk_write(pbp, blk, blockno);
free(buff);
if (ret)
goto end;
/* <blockno>:<flag> - set <flag> flag on <blockno> */
} else if (sscanf(pwp->args[i], "%" SCNi64 ":%c",
&blockno, &flag) == 2) {
free(buff);
switch (flag) {
case 'z':
ret = pmemblk_set_zero(pbp, blockno);
break;
case 'e':
ret = pmemblk_set_error(pbp, blockno);
break;
default:
outv_err("Invalid flag '%c'\n", flag);
ret = -1;
goto end;
}
if (ret) {
warn("%s", pwp->fname);
goto end;
}
} else {
free(buff);
outv_err("Invalid argument '%s'\n", pwp->args[i]);
ret = -1;
goto end;
}
}
end:
free(blk);
nomem:
pmemblk_close(pbp);
return ret;
}
int
main(int argc, char *argv[])
{
#ifdef _WIN32
wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);
for (int i = 0; i < argc; i++) {
argv[i] = util_toUTF8(wargv[i]);
if (argv[i] == NULL) {
for (i--; i >= 0; i--)
free(argv[i]);
outv_err("Error during arguments conversion\n");
return 1;
}
}
#endif
int opt;
int ret = 0;
util_init();
char *appname = basename(argv[0]);
while ((opt = getopt(argc, argv, "h")) != -1) {
switch (opt) {
case 'h':
print_usage(appname);
ret = 0;
goto end;
default:
print_usage(appname);
ret = 1;
goto end;
}
}
if (optind + 1 < argc) {
pmemwrite.fname = argv[optind];
optind++;
pmemwrite.nargs = argc - optind;
pmemwrite.args = &argv[optind];
} else {
print_usage(appname);
ret = 1;
goto end;
}
out_set_vlevel(1);
struct pmem_pool_params params;
/* parse pool type from file */
pmem_pool_parse_params(pmemwrite.fname, ¶ms, 1);
switch (params.type) {
case PMEM_POOL_TYPE_BLK:
ret = pmemwrite_blk(&pmemwrite);
break;
case PMEM_POOL_TYPE_LOG:
ret = pmemwrite_log(&pmemwrite);
break;
default:
ret = 1;
}
end:
#ifdef _WIN32
for (int i = argc; i > 0; i--)
free(argv[i - 1]);
#endif
return ret;
}
| 6,084 | 21.620818 | 74 | c |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/test/tools/extents/extents.c | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* extents -- extents listing
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include "extent.h"
#define B2SEC(n) ((n) >> 9) /* convert bytes to sectors */
enum modes {
MODE_PRINT_ALL_EXTENTS = 0,
MODE_PRINT_ONE_PHY_OF_LOG,
};
static const char *usage_str =
"usage: %s "
"[-h] "
"[-l <logical_offset>] "
"<file>\n";
int
main(int argc, char *argv[])
{
long unsigned offset = 0;
unsigned extent = 0;
char *error;
int ret = -1;
int opt;
enum modes mode = MODE_PRINT_ALL_EXTENTS;
while ((opt = getopt(argc, argv, "hl:")) != -1) {
switch (opt) {
case 'h':
printf(usage_str, argv[0]);
return 0;
case 'l':
mode = MODE_PRINT_ONE_PHY_OF_LOG;
errno = 0;
offset = strtoul(optarg, &error, 10 /* base */);
if (errno || *error != '\0') {
if (errno)
perror("strtoul");
if (*error != '\0') {
fprintf(stderr,
"error: invalid character(s) in the given logical offset: %s\n",
error);
}
return -1;
}
break;
default:
fprintf(stderr, usage_str, argv[0]);
return -1;
}
}
if (optind + 1 < argc) {
fprintf(stderr, "error: unknown option: %s\n",
argv[optind + 1]);
fprintf(stderr, usage_str, argv[0]);
return -1;
}
if (optind >= argc) {
fprintf(stderr, usage_str, argv[0]);
return -1;
}
const char *file = argv[optind];
struct extents *exts = malloc(sizeof(struct extents));
if (exts == NULL)
return -1;
long count = os_extents_count(file, exts);
if (count < 0)
goto exit_free;
if (count == 0) {
ret = 0;
goto exit_free;
}
exts->extents = malloc(exts->extents_count * sizeof(struct extent));
if (exts->extents == NULL)
goto exit_free;
ret = os_extents_get(file, exts);
if (ret)
goto exit_free;
switch (mode) {
case MODE_PRINT_ALL_EXTENTS:
for (unsigned e = 0; e < exts->extents_count; e++) {
/* extents are in bytes, convert them to sectors */
printf("%lu %lu\n",
B2SEC(exts->extents[e].offset_physical),
B2SEC(exts->extents[e].length));
}
break;
case MODE_PRINT_ONE_PHY_OF_LOG:
/* print the physical offset of the given logical one */
for (unsigned e = 0; e < exts->extents_count; e++) {
if (B2SEC(exts->extents[e].offset_logical) > offset)
break;
extent = e;
}
if (extent == exts->extents_count - 1) {
long unsigned max_log;
max_log = B2SEC(exts->extents[extent].offset_logical) +
B2SEC(exts->extents[extent].length);
if (offset > max_log) {
fprintf(stderr,
"error: maximum logical offset is %lu\n",
max_log);
ret = -1;
goto exit_free;
}
}
offset += B2SEC(exts->extents[extent].offset_physical) -
B2SEC(exts->extents[extent].offset_logical);
printf("%lu\n", offset);
break;
default:
fprintf(stderr, usage_str, argv[0]);
return -1;
}
exit_free:
if (exts->extents)
free(exts->extents);
free(exts);
return ret;
}
| 4,484 | 23.642857 | 74 | c |