repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
stringlengths 1-152 ⌀ | stringlengths 14-221 | stringlengths 501-25k | int64 501-25k | float64 20-99.5 | int64 21-134 | stringclasses 2 values
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/uuid.c |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* uuid.c -- uuid utilities
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "uuid.h"
#include "out.h"
/*
* util_uuid_to_string -- generate a string form of the uuid
*/
int
util_uuid_to_string(const uuid_t u, char *buf)
{
int len; /* length returned by the snprintf() call */
if (buf == NULL) {
LOG(2, "invalid buffer for uuid string");
return -1;
}
if (u == NULL) {
LOG(2, "invalid uuid structure");
return -1;
}
struct uuid *uuid = (struct uuid *)u;
len = snprintf(buf, POOL_HDR_UUID_STR_LEN,
"%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
uuid->time_low, uuid->time_mid, uuid->time_hi_and_ver,
uuid->clock_seq_hi, uuid->clock_seq_low, uuid->node[0],
uuid->node[1], uuid->node[2], uuid->node[3], uuid->node[4],
uuid->node[5]);
if (len != POOL_HDR_UUID_STR_LEN - 1) {
LOG(2, "snprintf(uuid): %d", len);
return -1;
}
return 0;
}
/*
* util_uuid_from_string -- generate a binary form of the uuid
*
* uuid string read from /proc/sys/kernel/random/uuid. UUID string
* format example:
* f81d4fae-7dec-11d0-a765-00a0c91e6bf6
*/
int
util_uuid_from_string(const char *uuid, struct uuid *ud)
{
if (strlen(uuid) != 36) {
LOG(2, "invalid uuid string");
return -1;
}
if (uuid[8] != '-' || uuid[13] != '-' || uuid[18] != '-' ||
uuid[23] != '-') {
LOG(2, "invalid uuid string");
return -1;
}
int n = sscanf(uuid,
"%08x-%04hx-%04hx-%02hhx%02hhx-"
"%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
&ud->time_low, &ud->time_mid, &ud->time_hi_and_ver,
&ud->clock_seq_hi, &ud->clock_seq_low, &ud->node[0],
&ud->node[1], &ud->node[2], &ud->node[3], &ud->node[4],
&ud->node[5]);
if (n != 11) {
LOG(2, "sscanf(uuid)");
return -1;
}
return 0;
}
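/*
 * Example (illustrative sketch, not part of the original file): a round trip
 * through the two helpers above. It assumes the struct uuid layout from
 * "uuid.h" matches the raw 16-byte uuid_t form, which is what the cast
 * inside util_uuid_to_string() already relies on; example_uuid_round_trip
 * is a hypothetical name.
 */
static inline int
example_uuid_round_trip(const char *str)
{
	struct uuid u;
	char buf[POOL_HDR_UUID_STR_LEN];
	if (util_uuid_from_string(str, &u))
		return -1; /* wrong length or misplaced dashes */
	if (util_uuid_to_string((const unsigned char *)&u, buf))
		return -1; /* formatting failed */
	/* both forms use lowercase hex, so the strings should compare equal */
	return strcmp(str, buf) == 0 ? 0 : -1;
}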
| 1,818 | 20.654762 | 66 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/queue.h |
/*
* Source: glibc 2.24 (git://sourceware.org/glibc.git /misc/sys/queue.h)
*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _PMDK_QUEUE_H_
#define _PMDK_QUEUE_H_
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
*
* A singly-linked list is headed by a single forward pointer. The
* elements are singly linked for minimum space and pointer manipulation
* overhead at the expense of O(n) removal for arbitrary elements. New
* elements can be added to the list after an existing element or at the
* head of the list. Elements being removed from the head of the list
* should use the explicit macro for this purpose for optimum
* efficiency. A singly-linked list may only be traversed in the forward
* direction. Singly-linked lists are ideal for applications with large
* datasets and few or no removals or for implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* A circle queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the list.
* A circle queue may be traversed in either direction, but has a more
* complex end of list detection.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
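/*
 * Example usage (illustrative, not part of the original header), in the
 * style of the queue(3) manual page referenced above -- a tail queue:
 *
 *	struct entry {
 *		int value;
 *		PMDK_TAILQ_ENTRY(entry) entries;	(queue linkage)
 *	};
 *	PMDK_TAILQ_HEAD(tailhead, entry) head =
 *		PMDK_TAILQ_HEAD_INITIALIZER(head);
 *
 *	struct entry *n1 = malloc(sizeof(struct entry));
 *	PMDK_TAILQ_INSERT_TAIL(&head, n1, entries);
 *
 *	struct entry *np;
 *	PMDK_TAILQ_FOREACH(np, &head, entries)	(forward traversal)
 *		np->value++;
 */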
/*
* XXX This is a workaround for a bug in the llvm's static analyzer. For more
* info see https://github.com/pmem/issues/issues/309.
*/
#ifdef __clang_analyzer__
static void custom_assert(void)
{
abort();
}
#define ANALYZER_ASSERT(x) (__builtin_expect(!(x), 0) ? custom_assert() : (void)0)
#else
#define ANALYZER_ASSERT(x) do {} while (0)
#endif
/*
* List definitions.
*/
#define PMDK_LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define PMDK_LIST_HEAD_INITIALIZER(head) \
{ NULL }
#ifdef __cplusplus
#define PMDK__CAST_AND_ASSIGN(x, y) x = (__typeof__(x))y;
#else
#define PMDK__CAST_AND_ASSIGN(x, y) x = (void *)(y);
#endif
#define PMDK_LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List functions.
*/
#define PMDK_LIST_INIT(head) do { \
(head)->lh_first = NULL; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_INSERT_AFTER(listelm, elm, field) do { \
if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
(listelm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
(listelm)->field.le_next = (elm); \
(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.le_next = (head)->lh_first) != NULL) \
(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
(head)->lh_first = (elm); \
(elm)->field.le_prev = &(head)->lh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_REMOVE(elm, field) do { \
ANALYZER_ASSERT((elm) != NULL); \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_FOREACH(var, head, field) \
for ((var) = ((head)->lh_first); \
(var); \
(var) = ((var)->field.le_next))
/*
* List access methods.
*/
#define PMDK_LIST_EMPTY(head) ((head)->lh_first == NULL)
#define PMDK_LIST_FIRST(head) ((head)->lh_first)
#define PMDK_LIST_NEXT(elm, field) ((elm)->field.le_next)
/*
* Singly-linked List definitions.
*/
#define PMDK_SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define PMDK_SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define PMDK_SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
* Singly-linked List functions.
*/
#define PMDK_SLIST_INIT(head) do { \
(head)->slh_first = NULL; \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_INSERT_AFTER(slistelm, elm, field) do { \
(elm)->field.sle_next = (slistelm)->field.sle_next; \
(slistelm)->field.sle_next = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_INSERT_HEAD(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
(head)->slh_first = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_REMOVE_HEAD(head, field) do { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_REMOVE(head, elm, type, field) do { \
if ((head)->slh_first == (elm)) { \
PMDK_SLIST_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = (head)->slh_first; \
while(curelm->field.sle_next != (elm)) \
curelm = curelm->field.sle_next; \
curelm->field.sle_next = \
curelm->field.sle_next->field.sle_next; \
} \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_FOREACH(var, head, field) \
for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
/*
* Singly-linked List access methods.
*/
#define PMDK_SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define PMDK_SLIST_FIRST(head) ((head)->slh_first)
#define PMDK_SLIST_NEXT(elm, field) ((elm)->field.sle_next)
/*
* Singly-linked Tail queue declarations.
*/
#define PMDK_STAILQ_HEAD(name, type) \
struct name { \
struct type *stqh_first; /* first element */ \
struct type **stqh_last; /* addr of last next element */ \
}
#define PMDK_STAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).stqh_first }
#define PMDK_STAILQ_ENTRY(type) \
struct { \
struct type *stqe_next; /* next element */ \
}
/*
* Singly-linked Tail queue functions.
*/
#define PMDK_STAILQ_INIT(head) do { \
(head)->stqh_first = NULL; \
(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
(head)->stqh_last = &(elm)->field.stqe_next; \
(head)->stqh_first = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.stqe_next = NULL; \
*(head)->stqh_last = (elm); \
(head)->stqh_last = &(elm)->field.stqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
(head)->stqh_last = &(elm)->field.stqe_next; \
(listelm)->field.stqe_next = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_REMOVE_HEAD(head, field) do { \
if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_REMOVE(head, elm, type, field) do { \
if ((head)->stqh_first == (elm)) { \
PMDK_STAILQ_REMOVE_HEAD((head), field); \
} else { \
struct type *curelm = (head)->stqh_first; \
while (curelm->field.stqe_next != (elm)) \
curelm = curelm->field.stqe_next; \
if ((curelm->field.stqe_next = \
curelm->field.stqe_next->field.stqe_next) == NULL) \
(head)->stqh_last = &(curelm)->field.stqe_next; \
} \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_FOREACH(var, head, field) \
for ((var) = ((head)->stqh_first); \
(var); \
(var) = ((var)->field.stqe_next))
#define PMDK_STAILQ_CONCAT(head1, head2) do { \
if (!PMDK_STAILQ_EMPTY((head2))) { \
*(head1)->stqh_last = (head2)->stqh_first; \
(head1)->stqh_last = (head2)->stqh_last; \
PMDK_STAILQ_INIT((head2)); \
} \
} while (/*CONSTCOND*/0)
/*
* Singly-linked Tail queue access methods.
*/
#define PMDK_STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
#define PMDK_STAILQ_FIRST(head) ((head)->stqh_first)
#define PMDK_STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
/*
* Simple queue definitions.
*/
#define PMDK_SIMPLEQ_HEAD(name, type) \
struct name { \
struct type *sqh_first; /* first element */ \
struct type **sqh_last; /* addr of last next element */ \
}
#define PMDK_SIMPLEQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).sqh_first }
#define PMDK_SIMPLEQ_ENTRY(type) \
struct { \
struct type *sqe_next; /* next element */ \
}
/*
* Simple queue functions.
*/
#define PMDK_SIMPLEQ_INIT(head) do { \
(head)->sqh_first = NULL; \
(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
(head)->sqh_first = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
*(head)->sqh_last = (elm); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
(head)->sqh_last = &(elm)->field.sqe_next; \
(listelm)->field.sqe_next = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_REMOVE_HEAD(head, field) do { \
if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_REMOVE(head, elm, type, field) do { \
if ((head)->sqh_first == (elm)) { \
PMDK_SIMPLEQ_REMOVE_HEAD((head), field); \
} else { \
struct type *curelm = (head)->sqh_first; \
while (curelm->field.sqe_next != (elm)) \
curelm = curelm->field.sqe_next; \
if ((curelm->field.sqe_next = \
curelm->field.sqe_next->field.sqe_next) == NULL) \
(head)->sqh_last = &(curelm)->field.sqe_next; \
} \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_FOREACH(var, head, field) \
for ((var) = ((head)->sqh_first); \
(var); \
(var) = ((var)->field.sqe_next))
/*
* Simple queue access methods.
*/
#define PMDK_SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
#define PMDK_SIMPLEQ_FIRST(head) ((head)->sqh_first)
#define PMDK_SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
/*
* Tail queue definitions.
*/
#define PMDK__TAILQ_HEAD(name, type, qual) \
struct name { \
qual type *tqh_first; /* first element */ \
qual type *qual *tqh_last; /* addr of last next element */ \
}
#define PMDK_TAILQ_HEAD(name, type) PMDK__TAILQ_HEAD(name, struct type,)
#define PMDK_TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define PMDK__TAILQ_ENTRY(type, qual) \
struct { \
qual type *tqe_next; /* next element */ \
qual type *qual *tqe_prev; /* address of previous next element */\
}
#define PMDK_TAILQ_ENTRY(type) PMDK__TAILQ_ENTRY(struct type,)
/*
* Tail queue functions.
*/
#define PMDK_TAILQ_INIT(head) do { \
(head)->tqh_first = NULL; \
(head)->tqh_last = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
(head)->tqh_first->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(head)->tqh_first = (elm); \
(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
(elm)->field.tqe_next->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(listelm)->field.tqe_next = (elm); \
(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_REMOVE(head, elm, field) do { \
ANALYZER_ASSERT((elm) != NULL); \
if (((elm)->field.tqe_next) != NULL) \
(elm)->field.tqe_next->field.tqe_prev = \
(elm)->field.tqe_prev; \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_FOREACH(var, head, field) \
for ((var) = ((head)->tqh_first); \
(var); \
(var) = ((var)->field.tqe_next))
#define PMDK_TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
(var); \
(var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
#define PMDK_TAILQ_CONCAT(head1, head2, field) do { \
if (!PMDK_TAILQ_EMPTY(head2)) { \
*(head1)->tqh_last = (head2)->tqh_first; \
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
(head1)->tqh_last = (head2)->tqh_last; \
PMDK_TAILQ_INIT((head2)); \
} \
} while (/*CONSTCOND*/0)
/*
* Tail queue access methods.
*/
#define PMDK_TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define PMDK_TAILQ_FIRST(head) ((head)->tqh_first)
#define PMDK_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define PMDK_TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define PMDK_TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
/*
* Circular queue definitions.
*/
#define PMDK_CIRCLEQ_HEAD(name, type) \
struct name { \
struct type *cqh_first; /* first element */ \
struct type *cqh_last; /* last element */ \
}
#define PMDK_CIRCLEQ_HEAD_INITIALIZER(head) \
{ (void *)&(head), (void *)&(head) }
#define PMDK_CIRCLEQ_ENTRY(type) \
struct { \
struct type *cqe_next; /* next element */ \
struct type *cqe_prev; /* previous element */ \
}
/*
* Circular queue functions.
*/
#define PMDK_CIRCLEQ_INIT(head) do { \
PMDK__CAST_AND_ASSIGN((head)->cqh_first, (head)); \
PMDK__CAST_AND_ASSIGN((head)->cqh_last, (head)); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
(elm)->field.cqe_prev = (listelm); \
if ((listelm)->field.cqe_next == (void *)(head)) \
(head)->cqh_last = (elm); \
else \
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
(listelm)->field.cqe_next = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm); \
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
if ((listelm)->field.cqe_prev == (void *)(head)) \
(head)->cqh_first = (elm); \
else \
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
(listelm)->field.cqe_prev = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
(elm)->field.cqe_next = (head)->cqh_first; \
(elm)->field.cqe_prev = (void *)(head); \
if ((head)->cqh_last == (void *)(head)) \
(head)->cqh_last = (elm); \
else \
(head)->cqh_first->field.cqe_prev = (elm); \
(head)->cqh_first = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
PMDK__CAST_AND_ASSIGN((elm)->field.cqe_next, (head)); \
(elm)->field.cqe_prev = (head)->cqh_last; \
if ((head)->cqh_first == (void *)(head)) \
(head)->cqh_first = (elm); \
else \
(head)->cqh_last->field.cqe_next = (elm); \
(head)->cqh_last = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_REMOVE(head, elm, field) do { \
if ((elm)->field.cqe_next == (void *)(head)) \
(head)->cqh_last = (elm)->field.cqe_prev; \
else \
(elm)->field.cqe_next->field.cqe_prev = \
(elm)->field.cqe_prev; \
if ((elm)->field.cqe_prev == (void *)(head)) \
(head)->cqh_first = (elm)->field.cqe_next; \
else \
(elm)->field.cqe_prev->field.cqe_next = \
(elm)->field.cqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_FOREACH(var, head, field) \
for ((var) = ((head)->cqh_first); \
(var) != (const void *)(head); \
(var) = ((var)->field.cqe_next))
#define PMDK_CIRCLEQ_FOREACH_REVERSE(var, head, field) \
for ((var) = ((head)->cqh_last); \
(var) != (const void *)(head); \
(var) = ((var)->field.cqe_prev))
/*
* Circular queue access methods.
*/
#define PMDK_CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
#define PMDK_CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define PMDK_CIRCLEQ_LAST(head) ((head)->cqh_last)
#define PMDK_CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define PMDK_CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
#define PMDK_CIRCLEQ_LOOP_NEXT(head, elm, field) \
(((elm)->field.cqe_next == (void *)(head)) \
? ((head)->cqh_first) \
: ((elm)->field.cqe_next))
#define PMDK_CIRCLEQ_LOOP_PREV(head, elm, field) \
(((elm)->field.cqe_prev == (void *)(head)) \
? ((head)->cqh_last) \
: ((elm)->field.cqe_prev))
/*
* Sorted queue functions.
*/
#define PMDK_SORTEDQ_HEAD(name, type) PMDK_CIRCLEQ_HEAD(name, type)
#define PMDK_SORTEDQ_HEAD_INITIALIZER(head) PMDK_CIRCLEQ_HEAD_INITIALIZER(head)
#define PMDK_SORTEDQ_ENTRY(type) PMDK_CIRCLEQ_ENTRY(type)
#define PMDK_SORTEDQ_INIT(head) PMDK_CIRCLEQ_INIT(head)
#define PMDK_SORTEDQ_INSERT(head, elm, field, type, comparer) { \
type *_elm_it; \
for (_elm_it = (head)->cqh_first; \
((_elm_it != (void *)(head)) && \
(comparer(_elm_it, (elm)) < 0)); \
_elm_it = _elm_it->field.cqe_next) \
/*NOTHING*/; \
if (_elm_it == (void *)(head)) \
PMDK_CIRCLEQ_INSERT_TAIL(head, elm, field); \
else \
PMDK_CIRCLEQ_INSERT_BEFORE(head, _elm_it, elm, field); \
}
#define PMDK_SORTEDQ_REMOVE(head, elm, field) PMDK_CIRCLEQ_REMOVE(head, elm, field)
#define PMDK_SORTEDQ_FOREACH(var, head, field) PMDK_CIRCLEQ_FOREACH(var, head, field)
#define PMDK_SORTEDQ_FOREACH_REVERSE(var, head, field) \
PMDK_CIRCLEQ_FOREACH_REVERSE(var, head, field)
/*
* Sorted queue access methods.
*/
#define PMDK_SORTEDQ_EMPTY(head) PMDK_CIRCLEQ_EMPTY(head)
#define PMDK_SORTEDQ_FIRST(head) PMDK_CIRCLEQ_FIRST(head)
#define PMDK_SORTEDQ_LAST(head) PMDK_CIRCLEQ_LAST(head)
#define PMDK_SORTEDQ_NEXT(elm, field) PMDK_CIRCLEQ_NEXT(elm, field)
#define PMDK_SORTEDQ_PREV(elm, field) PMDK_CIRCLEQ_PREV(elm, field)
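/*
 * Example (illustrative sketch, not part of the original header): a sorted
 * queue of ints. PMDK_SORTEDQ_INSERT calls comparer(iterator, elm) with two
 * element pointers and inserts before the first element that does not
 * compare less. The PMDK_QUEUE_EXAMPLE guard and the sq_* names are
 * hypothetical, added only so the sketch does not affect normal builds.
 */
#ifdef PMDK_QUEUE_EXAMPLE
struct sq_item {
	int value;
	PMDK_SORTEDQ_ENTRY(sq_item) entry;
};
PMDK_SORTEDQ_HEAD(sq_head, sq_item);
static int
sq_item_cmp(struct sq_item *lhs, struct sq_item *rhs)
{
	return (lhs->value > rhs->value) - (lhs->value < rhs->value);
}
static void
sq_insert_sorted(struct sq_head *head, struct sq_item *item)
{
	/* head must have been set up with PMDK_SORTEDQ_INIT() */
	PMDK_SORTEDQ_INSERT(head, item, entry, struct sq_item, sq_item_cmp);
}
#endif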
#endif /* _PMDK_QUEUE_H_ */
| 22,165 | 33.907087 | 85 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/set.h |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* set.h -- internal definitions for set module
*/
#ifndef PMDK_SET_H
#define PMDK_SET_H 1
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include "out.h"
#include "vec.h"
#include "pool_hdr.h"
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* pool sets & replicas
*/
#define POOLSET_HDR_SIG "PMEMPOOLSET"
#define POOLSET_HDR_SIG_LEN 11 /* does NOT include '\0' */
#define POOLSET_REPLICA_SIG "REPLICA"
#define POOLSET_REPLICA_SIG_LEN 7 /* does NOT include '\0' */
#define POOLSET_OPTION_SIG "OPTION"
#define POOLSET_OPTION_SIG_LEN 6 /* does NOT include '\0' */
/* pool set option flags */
enum pool_set_option_flag {
OPTION_UNKNOWN = 0x0,
OPTION_SINGLEHDR = 0x1, /* pool headers only in the first part */
OPTION_NOHDRS = 0x2, /* no pool headers, remote replicas only */
};
struct pool_set_option {
const char *name;
enum pool_set_option_flag flag;
};
#define POOL_LOCAL 0
#define POOL_REMOTE 1
#define REPLICAS_DISABLED 0
#define REPLICAS_ENABLED 1
/* util_pool_open flags */
#define POOL_OPEN_COW 1 /* copy-on-write mode */
#define POOL_OPEN_IGNORE_SDS 2 /* ignore shutdown state */
#define POOL_OPEN_IGNORE_BAD_BLOCKS 4 /* ignore bad blocks */
#define POOL_OPEN_CHECK_BAD_BLOCKS 8 /* check bad blocks */
enum del_parts_mode {
DO_NOT_DELETE_PARTS, /* do not delete part files */
DELETE_CREATED_PARTS, /* delete only newly created parts files */
DELETE_ALL_PARTS /* force delete all parts files */
};
struct pool_set_part {
/* populated by a pool set file parser */
const char *path;
size_t filesize; /* aligned to page size */
int fd;
int flags; /* stores flags used when opening the file */
/* valid only if fd >= 0 */
int is_dev_dax; /* indicates if the part is on device dax */
size_t alignment; /* internal alignment (Device DAX only) */
int created; /* indicates newly created (zeroed) file */
/* util_poolset_open/create */
void *remote_hdr; /* allocated header for remote replica */
void *hdr; /* base address of header */
size_t hdrsize; /* size of the header mapping */
int hdr_map_sync; /* header mapped with MAP_SYNC */
void *addr; /* base address of the mapping */
size_t size; /* size of the mapping - page aligned */
int map_sync; /* part has been mapped with MAP_SYNC flag */
int rdonly; /* is set based on compat features, affects */
/* the whole poolset */
uuid_t uuid;
int has_bad_blocks; /* part file contains bad blocks */
int sds_dirty_modified; /* sds dirty flag was set */
};
struct pool_set_directory {
const char *path;
size_t resvsize; /* size of the address space reservation */
};
struct remote_replica {
void *rpp; /* RPMEMpool opaque handle */
char *node_addr; /* address of a remote node */
/* poolset descriptor is a pool set file name on a remote node */
char *pool_desc; /* descriptor of a poolset */
};
struct pool_replica {
unsigned nparts;
unsigned nallocated;
unsigned nhdrs; /* should be 0, 1 or nparts */
size_t repsize; /* total size of all the parts (mappings) */
size_t resvsize; /* min size of the address space reservation */
int is_pmem; /* true if all the parts are in PMEM */
struct remote_replica *remote; /* not NULL if the replica */
/* is a remote one */
VEC(, struct pool_set_directory) directory;
struct pool_set_part part[];
};
struct pool_set {
char *path; /* path of the poolset file */
unsigned nreplicas;
uuid_t uuid;
int rdonly;
int zeroed; /* true if all the parts are new files */
size_t poolsize; /* the smallest replica size */
int has_bad_blocks; /* pool set contains bad blocks */
int remote; /* true if contains a remote replica */
unsigned options; /* enabled pool set options */
int directory_based;
size_t resvsize;
unsigned next_id;
unsigned next_directory_id;
int ignore_sds; /* don't use shutdown state */
struct pool_replica *replica[];
};
struct part_file {
int is_remote;
/*
* Pointer to the part file structure -
* - not-NULL only for a local part file
*/
struct pool_set_part *part;
/*
* Pointer to the replica structure -
* - not-NULL only for a remote replica
*/
struct remote_replica *remote;
};
struct pool_attr {
char signature[POOL_HDR_SIG_LEN]; /* pool signature */
uint32_t major; /* format major version number */
features_t features; /* features flags */
unsigned char poolset_uuid[POOL_HDR_UUID_LEN]; /* pool uuid */
unsigned char first_part_uuid[POOL_HDR_UUID_LEN]; /* first part uuid */
unsigned char prev_repl_uuid[POOL_HDR_UUID_LEN]; /* prev replica uuid */
unsigned char next_repl_uuid[POOL_HDR_UUID_LEN]; /* next replica uuid */
unsigned char arch_flags[POOL_HDR_ARCH_LEN]; /* arch flags */
};
/* get index of the (r)th replica */
static inline unsigned
REPidx(const struct pool_set *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return r % set->nreplicas;
}
/* get index of the (r + 1)th replica */
static inline unsigned
REPNidx(const struct pool_set *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (r + 1) % set->nreplicas;
}
/* get index of the (r - 1)th replica */
static inline unsigned
REPPidx(const struct pool_set *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (set->nreplicas + r - 1) % set->nreplicas;
}
/* get index of the (p)th part */
static inline unsigned
PARTidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return p % rep->nparts;
}
/* get index of the (p + 1)th part */
static inline unsigned
PARTNidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return (p + 1) % rep->nparts;
}
/* get index of the (p - 1)th part */
static inline unsigned
PARTPidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return (rep->nparts + p - 1) % rep->nparts;
}
/* get index of the (p)th header */
static inline unsigned
HDRidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nhdrs, 0);
return p % rep->nhdrs;
}
/* get index of the (p + 1)th header */
static inline unsigned
HDRNidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nhdrs, 0);
return (p + 1) % rep->nhdrs;
}
/* get index of the (p - 1)th header */
static inline unsigned
HDRPidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nhdrs, 0);
return (rep->nhdrs + p - 1) % rep->nhdrs;
}
/* get (r)th replica */
static inline struct pool_replica *
REP(const struct pool_set *set, unsigned r)
{
return set->replica[REPidx(set, r)];
}
/* get (r + 1)th replica */
static inline struct pool_replica *
REPN(const struct pool_set *set, unsigned r)
{
return set->replica[REPNidx(set, r)];
}
/* get (r - 1)th replica */
static inline struct pool_replica *
REPP(const struct pool_set *set, unsigned r)
{
return set->replica[REPPidx(set, r)];
}
/* get (p)th part */
static inline struct pool_set_part *
PART(struct pool_replica *rep, unsigned p)
{
return &rep->part[PARTidx(rep, p)];
}
/* get (p + 1)th part */
static inline struct pool_set_part *
PARTN(struct pool_replica *rep, unsigned p)
{
return &rep->part[PARTNidx(rep, p)];
}
/* get (p - 1)th part */
static inline struct pool_set_part *
PARTP(struct pool_replica *rep, unsigned p)
{
return &rep->part[PARTPidx(rep, p)];
}
/* get (p)th header */
static inline struct pool_hdr *
HDR(struct pool_replica *rep, unsigned p)
{
return (struct pool_hdr *)(rep->part[HDRidx(rep, p)].hdr);
}
/* get (p + 1)th header */
static inline struct pool_hdr *
HDRN(struct pool_replica *rep, unsigned p)
{
return (struct pool_hdr *)(rep->part[HDRNidx(rep, p)].hdr);
}
/* get (p - 1)th header */
static inline struct pool_hdr *
HDRP(struct pool_replica *rep, unsigned p)
{
return (struct pool_hdr *)(rep->part[HDRPidx(rep, p)].hdr);
}
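/*
 * Example (illustrative, not part of the original header): because the
 * helpers above index modulo nreplicas/nparts, the replica ring can be
 * walked pairwise with no special case at the wrap-around;
 * example_walk_replica_ring is a hypothetical name.
 */
static inline void
example_walk_replica_ring(const struct pool_set *set)
{
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *cur = REP(set, r);
		struct pool_replica *next = REPN(set, r); /* r + 1, wraps to 0 */
		(void) cur;
		(void) next;
	}
}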
extern int Prefault_at_open;
extern int Prefault_at_create;
extern int SDS_at_create;
extern int Fallocate_at_create;
extern int COW_at_open;
int util_poolset_parse(struct pool_set **setp, const char *path, int fd);
int util_poolset_read(struct pool_set **setp, const char *path);
int util_poolset_create_set(struct pool_set **setp, const char *path,
size_t poolsize, size_t minsize, int ignore_sds);
int util_poolset_open(struct pool_set *set);
void util_poolset_close(struct pool_set *set, enum del_parts_mode del);
void util_poolset_free(struct pool_set *set);
int util_poolset_chmod(struct pool_set *set, mode_t mode);
void util_poolset_fdclose(struct pool_set *set);
void util_poolset_fdclose_always(struct pool_set *set);
int util_is_poolset_file(const char *path);
int util_poolset_foreach_part_struct(struct pool_set *set,
int (*cb)(struct part_file *pf, void *arg), void *arg);
int util_poolset_foreach_part(const char *path,
int (*cb)(struct part_file *pf, void *arg), void *arg);
size_t util_poolset_size(const char *path);
int util_replica_deep_common(const void *addr, size_t len,
struct pool_set *set, unsigned replica_id, int flush);
int util_replica_deep_persist(const void *addr, size_t len,
struct pool_set *set, unsigned replica_id);
int util_replica_deep_drain(const void *addr, size_t len,
struct pool_set *set, unsigned replica_id);
int util_pool_create(struct pool_set **setp, const char *path, size_t poolsize,
size_t minsize, size_t minpartsize, const struct pool_attr *attr,
unsigned *nlanes, int can_have_rep);
int util_pool_create_uuids(struct pool_set **setp, const char *path,
size_t poolsize, size_t minsize, size_t minpartsize,
const struct pool_attr *attr, unsigned *nlanes, int can_have_rep,
int remote);
int util_part_open(struct pool_set_part *part, size_t minsize, int create_part);
void util_part_fdclose(struct pool_set_part *part);
int util_replica_open(struct pool_set *set, unsigned repidx, int flags);
int util_replica_set_attr(struct pool_replica *rep,
const struct rpmem_pool_attr *rattr);
void util_pool_hdr2attr(struct pool_attr *attr, struct pool_hdr *hdr);
void util_pool_attr2hdr(struct pool_hdr *hdr,
const struct pool_attr *attr);
int util_replica_close(struct pool_set *set, unsigned repidx);
int util_map_part(struct pool_set_part *part, void *addr, size_t size,
size_t offset, int flags, int rdonly);
int util_unmap_part(struct pool_set_part *part);
int util_unmap_parts(struct pool_replica *rep, unsigned start_index,
unsigned end_index);
int util_header_create(struct pool_set *set, unsigned repidx, unsigned partidx,
const struct pool_attr *attr, int overwrite);
int util_map_hdr(struct pool_set_part *part, int flags, int rdonly);
void util_unmap_hdr(struct pool_set_part *part);
int util_pool_has_device_dax(struct pool_set *set);
int util_pool_open_nocheck(struct pool_set *set, unsigned flags);
int util_pool_open(struct pool_set **setp, const char *path, size_t minpartsize,
const struct pool_attr *attr, unsigned *nlanes, void *addr,
unsigned flags);
int util_pool_open_remote(struct pool_set **setp, const char *path, int cow,
size_t minpartsize, struct rpmem_pool_attr *rattr);
void *util_pool_extend(struct pool_set *set, size_t *size, size_t minpartsize);
void util_remote_init(void);
void util_remote_fini(void);
int util_update_remote_header(struct pool_set *set, unsigned repn);
void util_remote_init_lock(void);
void util_remote_destroy_lock(void);
int util_pool_close_remote(RPMEMpool *rpp);
void util_remote_unload(void);
void util_replica_fdclose(struct pool_replica *rep);
int util_poolset_remote_open(struct pool_replica *rep, unsigned repidx,
size_t minsize, int create, void *pool_addr,
size_t pool_size, unsigned *nlanes);
int util_remote_load(void);
int util_replica_open_remote(struct pool_set *set, unsigned repidx, int flags);
int util_poolset_remote_replica_open(struct pool_set *set, unsigned repidx,
size_t minsize, int create, unsigned *nlanes);
int util_replica_close_local(struct pool_replica *rep, unsigned repn,
enum del_parts_mode del);
int util_replica_close_remote(struct pool_replica *rep, unsigned repn,
enum del_parts_mode del);
extern int (*Rpmem_persist)(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags);
extern int (*Rpmem_deep_persist)(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane);
extern int (*Rpmem_read)(RPMEMpool *rpp, void *buff, size_t offset,
size_t length, unsigned lane);
extern int (*Rpmem_close)(RPMEMpool *rpp);
extern int (*Rpmem_remove)(const char *target,
const char *pool_set_name, int flags);
extern int (*Rpmem_set_attr)(RPMEMpool *rpp,
const struct rpmem_pool_attr *rattr);
#ifdef __cplusplus
}
#endif
#endif
| 14,145 | 31.077098 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/shutdown_state.c |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* shutdown_state.c -- unsafe shutdown detection
*/
#include <string.h>
#include <stdbool.h>
#include <endian.h>
#include "shutdown_state.h"
#include "out.h"
#include "util.h"
#include "os_deep.h"
#include "set.h"
#include "libpmem2.h"
#include "badblocks.h"
#include "../libpmem2/pmem2_utils.h"
#define FLUSH_SDS(sds, rep) \
if ((rep) != NULL) os_part_deep_common(rep, 0, sds, sizeof(*(sds)), 1)
/*
* shutdown_state_checksum -- (internal) computes the SDS checksum and flushes it
*/
static void
shutdown_state_checksum(struct shutdown_state *sds, struct pool_replica *rep)
{
LOG(3, "sds %p", sds);
util_checksum(sds, sizeof(*sds), &sds->checksum, 1, 0);
FLUSH_SDS(sds, rep);
}
/*
* shutdown_state_init -- initializes shutdown_state struct
*/
int
shutdown_state_init(struct shutdown_state *sds, struct pool_replica *rep)
{
/* check if we didn't change size of shutdown_state accidentally */
COMPILE_ERROR_ON(sizeof(struct shutdown_state) != 64);
LOG(3, "sds %p", sds);
memset(sds, 0, sizeof(*sds));
shutdown_state_checksum(sds, rep);
return 0;
}
/*
* shutdown_state_add_part -- adds a part file's uuid and unsafe shutdown
* count (usc) to the shutdown_state struct
*
* If the device cannot be queried, this call fails, which does NOT mean
* an unsafe shutdown was detected.
*/
int
shutdown_state_add_part(struct shutdown_state *sds, int fd,
struct pool_replica *rep)
{
LOG(3, "sds %p, fd %d", sds, fd);
size_t len = 0;
char *uid;
uint64_t usc;
struct pmem2_source *src;
if (pmem2_source_from_fd(&src, fd))
return 1;
int ret = pmem2_source_device_usc(src, &usc);
if (ret == PMEM2_E_NOSUPP) {
usc = 0;
} else if (ret != 0) {
if (ret == -EPERM) {
/* overwrite error message */
ERR(
"Cannot read unsafe shutdown count. For more information please check https://github.com/pmem/pmdk/issues/4207");
}
LOG(2, "cannot read unsafe shutdown count for %d", fd);
goto err;
}
ret = pmem2_source_device_id(src, NULL, &len);
if (ret != PMEM2_E_NOSUPP && ret != 0) {
ERR("cannot read uuid of %d", fd);
goto err;
}
len += 4 - len % 4;
uid = Zalloc(len);
if (uid == NULL) {
ERR("!Zalloc");
goto err;
}
ret = pmem2_source_device_id(src, uid, &len);
if (ret != PMEM2_E_NOSUPP && ret != 0) {
ERR("cannot read uuid of %d", fd);
Free(uid);
goto err;
}
sds->usc = htole64(le64toh(sds->usc) + usc);
uint64_t tmp;
util_checksum(uid, len, &tmp, 1, 0);
sds->uuid = htole64(le64toh(sds->uuid) + tmp);
FLUSH_SDS(sds, rep);
Free(uid);
pmem2_source_delete(&src);
shutdown_state_checksum(sds, rep);
return 0;
err:
pmem2_source_delete(&src);
return 1;
}
/*
* shutdown_state_set_dirty -- sets dirty pool flag
*/
void
shutdown_state_set_dirty(struct shutdown_state *sds, struct pool_replica *rep)
{
LOG(3, "sds %p", sds);
sds->dirty = 1;
rep->part[0].sds_dirty_modified = 1;
FLUSH_SDS(sds, rep);
shutdown_state_checksum(sds, rep);
}
/*
* shutdown_state_clear_dirty -- clears dirty pool flag
*/
void
shutdown_state_clear_dirty(struct shutdown_state *sds, struct pool_replica *rep)
{
LOG(3, "sds %p", sds);
/* take a pointer, not a copy -- the flag must be cleared in place */
struct pool_set_part *part = &rep->part[0];
/*
 * If a dirty flag was set in previous program execution it should be
 * preserved as it stores information about potential ADR failure.
 */
if (part->sds_dirty_modified != 1)
return;
sds->dirty = 0;
part->sds_dirty_modified = 0;
FLUSH_SDS(sds, rep);
shutdown_state_checksum(sds, rep);
}
/*
* shutdown_state_reinit -- (internal) reinitializes shutdown_state struct
*/
static void
shutdown_state_reinit(struct shutdown_state *curr_sds,
struct shutdown_state *pool_sds, struct pool_replica *rep)
{
LOG(3, "curr_sds %p, pool_sds %p", curr_sds, pool_sds);
shutdown_state_init(pool_sds, rep);
pool_sds->uuid = htole64(curr_sds->uuid);
pool_sds->usc = htole64(curr_sds->usc);
pool_sds->dirty = 0;
FLUSH_SDS(pool_sds, rep);
shutdown_state_checksum(pool_sds, rep);
}
/*
* shutdown_state_check -- compares and fixes shutdown state
*/
int
shutdown_state_check(struct shutdown_state *curr_sds,
struct shutdown_state *pool_sds, struct pool_replica *rep)
{
LOG(3, "curr_sds %p, pool_sds %p", curr_sds, pool_sds);
if (util_is_zeroed(pool_sds, sizeof(*pool_sds)) &&
!util_is_zeroed(curr_sds, sizeof(*curr_sds))) {
shutdown_state_reinit(curr_sds, pool_sds, rep);
return 0;
}
bool is_uuid_usc_correct =
le64toh(pool_sds->usc) == le64toh(curr_sds->usc) &&
le64toh(pool_sds->uuid) == le64toh(curr_sds->uuid);
bool is_checksum_correct = util_checksum(pool_sds,
sizeof(*pool_sds), &pool_sds->checksum, 0, 0);
int dirty = pool_sds->dirty;
if (!is_checksum_correct) {
/* the program was killed during opening or closing the pool */
LOG(2, "incorrect checksum - SDS will be reinitialized");
shutdown_state_reinit(curr_sds, pool_sds, rep);
return 0;
}
if (is_uuid_usc_correct) {
if (dirty == 0)
return 0;
/*
* the program was killed when the pool was opened
* but there wasn't an ADR failure
*/
LOG(2,
"the pool was not closed - SDS will be reinitialized");
shutdown_state_reinit(curr_sds, pool_sds, rep);
return 0;
}
if (dirty == 0) {
/* an ADR failure but the pool was closed */
LOG(2,
"an ADR failure was detected but the pool was closed - SDS will be reinitialized");
shutdown_state_reinit(curr_sds, pool_sds, rep);
return 0;
}
/* an ADR failure - the pool might be corrupted */
ERR("an ADR failure was detected, the pool might be corrupted");
return 1;
}
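/*
 * Example (illustrative sketch, not part of the original file): the typical
 * call sequence when opening a pool. The helper below is hypothetical; it
 * assumes 'pool_sds' points at the SDS stored in the pool header of the
 * given replica and that all part files are already open.
 */
static inline int
example_sds_open(struct shutdown_state *pool_sds, struct pool_replica *rep)
{
	struct shutdown_state curr;
	shutdown_state_init(&curr, NULL); /* volatile copy, NULL rep - no flush */
	for (unsigned p = 0; p < rep->nparts; p++)
		if (shutdown_state_add_part(&curr, rep->part[p].fd, NULL))
			return 1;
	/* reconciles usc/uuid/dirty; reinitializes the on-media SDS if safe */
	if (shutdown_state_check(&curr, pool_sds, rep))
		return 1; /* possible ADR failure - the pool may be corrupted */
	shutdown_state_set_dirty(pool_sds, rep);
	return 0;
}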
| 5,491 | 22.370213 | 117 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/mmap.h |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* mmap.h -- internal definitions for mmap module
*/
#ifndef PMDK_MMAP_H
#define PMDK_MMAP_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include "out.h"
#include "queue.h"
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
extern int Mmap_no_random;
extern void *Mmap_hint;
extern char *Mmap_mapfile;
void *util_map_sync(void *addr, size_t len, int proto, int flags, int fd,
os_off_t offset, int *map_sync);
void *util_map(int fd, os_off_t off, size_t len, int flags, int rdonly,
size_t req_align, int *map_sync);
int util_unmap(void *addr, size_t len);
#ifdef __FreeBSD__
#define MAP_NORESERVE 0
#define OS_MAPFILE "/proc/curproc/map"
#else
#define OS_MAPFILE "/proc/self/maps"
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
/*
* macros for micromanaging range protections for the debug version
*/
#ifdef DEBUG
#define RANGE(addr, len, is_dev_dax, type) do {\
if (!is_dev_dax) ASSERT(util_range_##type(addr, len) >= 0);\
} while (0)
#else
#define RANGE(addr, len, is_dev_dax, type) do {} while (0)
#endif
#define RANGE_RO(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, ro)
#define RANGE_RW(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, rw)
#define RANGE_NONE(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, none)
/* pmem mapping type */
enum pmem_map_type {
PMEM_DEV_DAX, /* device dax */
PMEM_MAP_SYNC, /* mapping with MAP_SYNC flag on dax fs */
MAX_PMEM_TYPE
};
/*
* this structure tracks the file mappings outstanding per file handle
*/
struct map_tracker {
PMDK_SORTEDQ_ENTRY(map_tracker) entry;
uintptr_t base_addr;
uintptr_t end_addr;
unsigned region_id;
enum pmem_map_type type;
#ifdef _WIN32
/* Windows-specific data */
HANDLE FileHandle;
HANDLE FileMappingHandle;
DWORD Access;
os_off_t Offset;
size_t FileLen;
#endif
};
void util_mmap_init(void);
void util_mmap_fini(void);
int util_range_ro(void *addr, size_t len);
int util_range_rw(void *addr, size_t len);
int util_range_none(void *addr, size_t len);
char *util_map_hint_unused(void *minaddr, size_t len, size_t align);
char *util_map_hint(size_t len, size_t req_align);
#define KILOBYTE ((uintptr_t)1 << 10)
#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)
/*
* util_map_hint_align -- choose the desired mapping alignment
*
* The smallest supported alignment is 2 megabytes because of the object
* alignment requirements. Changing this value to 4 kilobytes constitutes a
* layout change.
*
* Use 1GB page alignment only if the mapping length is at least
* twice as big as the 1GB page size (i.e. at least 2 gigabytes).
*/
static inline size_t
util_map_hint_align(size_t len, size_t req_align)
{
size_t align = 2 * MEGABYTE;
if (req_align)
align = req_align;
else if (len >= 2 * GIGABYTE)
align = GIGABYTE;
return align;
}
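/*
 * Example (illustrative, not part of the original header) -- hints chosen
 * by util_map_hint_align() for a few inputs:
 *
 *	util_map_hint_align(4 * KILOBYTE, 0) == 2 * MEGABYTE
 *	util_map_hint_align(GIGABYTE, 0)     == 2 * MEGABYTE
 *	util_map_hint_align(2 * GIGABYTE, 0) == GIGABYTE
 *	util_map_hint_align(GIGABYTE, 4096)  == 4096 (req_align wins)
 */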
int util_range_register(const void *addr, size_t len, const char *path,
enum pmem_map_type type);
int util_range_unregister(const void *addr, size_t len);
struct map_tracker *util_range_find(uintptr_t addr, size_t len);
int util_range_is_pmem(const void *addr, size_t len);
#ifdef __cplusplus
}
#endif
#endif
| 3,328 | 22.27972 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/ravl.c |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* ravl.c -- implementation of a RAVL tree
* https://sidsen.azurewebsites.net//papers/ravl-trees-journal.pdf
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "out.h"
#include "ravl.h"
#include "alloc.h"
#define RAVL_DEFAULT_DATA_SIZE (sizeof(void *))
enum ravl_slot_type {
RAVL_LEFT,
RAVL_RIGHT,
MAX_SLOTS,
RAVL_ROOT
};
struct ravl_node {
struct ravl_node *parent;
struct ravl_node *slots[MAX_SLOTS];
int32_t rank; /* cannot be greater than height of the subtree */
int32_t pointer_based;
char data[];
};
struct ravl {
struct ravl_node *root;
ravl_compare *compare;
size_t data_size;
};
/*
* ravl_new_sized -- creates a new ravl tree instance with a custom data size
*/
struct ravl *
ravl_new_sized(ravl_compare *compare, size_t data_size)
{
struct ravl *r = Malloc(sizeof(*r));
if (r == NULL) {
ERR("!Malloc");
return r;
}
r->compare = compare;
r->root = NULL;
r->data_size = data_size;
return r;
}
/*
* ravl_new -- creates a new tree that stores data pointers
*/
struct ravl *
ravl_new(ravl_compare *compare)
{
return ravl_new_sized(compare, RAVL_DEFAULT_DATA_SIZE);
}
/*
* ravl_foreach_node -- (internal) recursively traverses the given subtree,
* calls the callback in an in-order fashion and optionally frees each node.
*/
static void
ravl_foreach_node(struct ravl_node *n, ravl_cb cb, void *arg, int free_node)
{
if (n == NULL)
return;
ravl_foreach_node(n->slots[RAVL_LEFT], cb, arg, free_node);
if (cb)
cb((void *)n->data, arg);
ravl_foreach_node(n->slots[RAVL_RIGHT], cb, arg, free_node);
if (free_node)
Free(n);
}
/*
* ravl_clear -- clears the entire tree, starting from the root
*/
void
ravl_clear(struct ravl *ravl)
{
ravl_foreach_node(ravl->root, NULL, NULL, 1);
ravl->root = NULL;
}
/*
* ravl_delete_cb -- clears and deletes the given ravl instance, calls callback
*/
void
ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg)
{
ravl_foreach_node(ravl->root, cb, arg, 1);
Free(ravl);
}
/*
* ravl_delete -- clears and deletes the given ravl instance
*/
void
ravl_delete(struct ravl *ravl)
{
ravl_delete_cb(ravl, NULL, NULL);
}
/*
* ravl_foreach -- traverses the entire tree, calling callback for every node
*/
void
ravl_foreach(struct ravl *ravl, ravl_cb cb, void *arg)
{
ravl_foreach_node(ravl->root, cb, arg, 0);
}
/*
* ravl_empty -- checks whether the given tree is empty
*/
int
ravl_empty(struct ravl *ravl)
{
return ravl->root == NULL;
}
/*
* ravl_node_insert_constructor -- node data constructor for ravl_insert
*/
static void
ravl_node_insert_constructor(void *data, size_t data_size, const void *arg)
{
/* copy only the 'arg' pointer */
memcpy(data, &arg, sizeof(arg));
}
/*
* ravl_node_copy_constructor -- node data constructor for ravl_emplace_copy
*/
static void
ravl_node_copy_constructor(void *data, size_t data_size, const void *arg)
{
memcpy(data, arg, data_size);
}
/*
* ravl_new_node -- (internal) allocates and initializes a new node
*/
static struct ravl_node *
ravl_new_node(struct ravl *ravl, ravl_constr constr, const void *arg)
{
struct ravl_node *n = Malloc(sizeof(*n) + ravl->data_size);
if (n == NULL) {
ERR("!Malloc");
return n;
}
n->parent = NULL;
n->slots[RAVL_LEFT] = NULL;
n->slots[RAVL_RIGHT] = NULL;
n->rank = 0;
n->pointer_based = constr == ravl_node_insert_constructor;
constr(n->data, ravl->data_size, arg);
return n;
}
/*
* ravl_slot_opposite -- (internal) returns the opposite slot type, cannot be
* called for root type
*/
static enum ravl_slot_type
ravl_slot_opposite(enum ravl_slot_type t)
{
ASSERTne(t, RAVL_ROOT);
return t == RAVL_LEFT ? RAVL_RIGHT : RAVL_LEFT;
}
/*
* ravl_node_slot_type -- (internal) returns the type of the given node:
* left child, right child or root
*/
static enum ravl_slot_type
ravl_node_slot_type(struct ravl_node *n)
{
if (n->parent == NULL)
return RAVL_ROOT;
return n->parent->slots[RAVL_LEFT] == n ? RAVL_LEFT : RAVL_RIGHT;
}
/*
* ravl_node_sibling -- (internal) returns the sibling of the given node,
* NULL if the node is root (has no parent)
*/
static struct ravl_node *
ravl_node_sibling(struct ravl_node *n)
{
enum ravl_slot_type t = ravl_node_slot_type(n);
if (t == RAVL_ROOT)
return NULL;
return n->parent->slots[t == RAVL_LEFT ? RAVL_RIGHT : RAVL_LEFT];
}
/*
* ravl_node_ref -- (internal) returns the pointer to the memory location in
* which the given node resides
*/
static struct ravl_node **
ravl_node_ref(struct ravl *ravl, struct ravl_node *n)
{
enum ravl_slot_type t = ravl_node_slot_type(n);
return t == RAVL_ROOT ? &ravl->root : &n->parent->slots[t];
}
/*
* ravl_rotate -- (internal) performs a rotation around a given node
*
* The node n swaps place with its parent. If n is right child, parent becomes
* the left child of n, otherwise parent becomes right child of n.
*/
static void
ravl_rotate(struct ravl *ravl, struct ravl_node *n)
{
ASSERTne(n->parent, NULL);
struct ravl_node *p = n->parent;
struct ravl_node **pref = ravl_node_ref(ravl, p);
enum ravl_slot_type t = ravl_node_slot_type(n);
enum ravl_slot_type t_opposite = ravl_slot_opposite(t);
n->parent = p->parent;
p->parent = n;
*pref = n;
if ((p->slots[t] = n->slots[t_opposite]) != NULL)
p->slots[t]->parent = p;
n->slots[t_opposite] = p;
}
/*
* ravl_node_rank -- (internal) returns the rank of the node
*
* For the purpose of balancing, NULL nodes have rank -1.
*/
static int
ravl_node_rank(struct ravl_node *n)
{
return n == NULL ? -1 : n->rank;
}
/*
* ravl_node_rank_difference_parent -- (internal) returns the rank difference
* between parent node p and its child n
*
* Every rank difference must be positive.
*
* Either of these can be NULL.
*/
static int
ravl_node_rank_difference_parent(struct ravl_node *p, struct ravl_node *n)
{
return ravl_node_rank(p) - ravl_node_rank(n);
}
/*
* ravl_node_rank_difference -- (internal) returns the rank difference between
* parent and its child
*
* Can be used to check if a given node is an i-child.
*/
static int
ravl_node_rank_difference(struct ravl_node *n)
{
return ravl_node_rank_difference_parent(n->parent, n);
}
/*
* ravl_node_is_i_j -- (internal) checks if a given node is strictly i,j-node
*/
static int
ravl_node_is_i_j(struct ravl_node *n, int i, int j)
{
return (ravl_node_rank_difference_parent(n, n->slots[RAVL_LEFT]) == i &&
ravl_node_rank_difference_parent(n, n->slots[RAVL_RIGHT]) == j);
}
/*
* ravl_node_is -- (internal) checks if a given node is i,j-node or j,i-node
*/
static int
ravl_node_is(struct ravl_node *n, int i, int j)
{
return ravl_node_is_i_j(n, i, j) || ravl_node_is_i_j(n, j, i);
}
/*
* ravl_node_promote -- promotes a given node by increasing its rank
*/
static void
ravl_node_promote(struct ravl_node *n)
{
n->rank += 1;
}
/*
* ravl_node_demote -- demotes a given node by decreasing its rank
*/
static void
ravl_node_demote(struct ravl_node *n)
{
ASSERT(n->rank > 0);
n->rank -= 1;
}
/*
* ravl_balance -- balances the tree after insert
*
* This function must restore the invariant that every rank
* difference is positive.
*/
static void
ravl_balance(struct ravl *ravl, struct ravl_node *n)
{
/* walk up the tree, promoting nodes */
while (n->parent && ravl_node_is(n->parent, 0, 1)) {
ravl_node_promote(n->parent);
n = n->parent;
}
/*
* Either the rank rule holds or n is a 0-child whose sibling is an
* i-child with i > 1.
*/
struct ravl_node *s = ravl_node_sibling(n);
if (!(ravl_node_rank_difference(n) == 0 &&
ravl_node_rank_difference_parent(n->parent, s) > 1))
return;
struct ravl_node *y = n->parent;
/* if n is a left child, let z be n's right child and vice versa */
enum ravl_slot_type t = ravl_slot_opposite(ravl_node_slot_type(n));
struct ravl_node *z = n->slots[t];
if (z == NULL || ravl_node_rank_difference(z) == 2) {
ravl_rotate(ravl, n);
ravl_node_demote(y);
} else if (ravl_node_rank_difference(z) == 1) {
ravl_rotate(ravl, z);
ravl_rotate(ravl, z);
ravl_node_promote(z);
ravl_node_demote(n);
ravl_node_demote(y);
}
}
/*
* ravl_insert -- insert data into the tree
*/
int
ravl_insert(struct ravl *ravl, const void *data)
{
return ravl_emplace(ravl, ravl_node_insert_constructor, data);
}
/*
* ravl_emplace_copy -- copy constructs data inside of a new tree node
*/
int
ravl_emplace_copy(struct ravl *ravl, const void *data)
{
return ravl_emplace(ravl, ravl_node_copy_constructor, data);
}
/*
* ravl_emplace -- construct data inside of a new tree node
*/
int
ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg)
{
LOG(6, NULL);
struct ravl_node *n = ravl_new_node(ravl, constr, arg);
if (n == NULL)
return -1;
/* walk down the tree and insert the new node into a missing slot */
struct ravl_node **dstp = &ravl->root;
struct ravl_node *dst = NULL;
while (*dstp != NULL) {
dst = (*dstp);
int cmp_result = ravl->compare(ravl_data(n), ravl_data(dst));
if (cmp_result == 0)
goto error_duplicate;
dstp = &dst->slots[cmp_result > 0];
}
n->parent = dst;
*dstp = n;
ravl_balance(ravl, n);
return 0;
error_duplicate:
errno = EEXIST;
Free(n);
return -1;
}
/*
* ravl_node_type_most -- (internal) returns left-most or right-most node in
* the subtree
*/
static struct ravl_node *
ravl_node_type_most(struct ravl_node *n, enum ravl_slot_type t)
{
while (n->slots[t] != NULL)
n = n->slots[t];
return n;
}
/*
* ravl_node_cessor -- (internal) returns the successor or predecessor of the
* node
*/
static struct ravl_node *
ravl_node_cessor(struct ravl_node *n, enum ravl_slot_type t)
{
/*
* If t child is present, we are looking for t-opposite-most node
* in t child subtree
*/
if (n->slots[t])
return ravl_node_type_most(n->slots[t], ravl_slot_opposite(t));
/* otherwise get the first parent on the t path */
while (n->parent != NULL && n == n->parent->slots[t])
n = n->parent;
return n->parent;
}
/*
* ravl_node_successor -- (internal) returns node's successor
*
* It's the first node larger than n.
*/
static struct ravl_node *
ravl_node_successor(struct ravl_node *n)
{
return ravl_node_cessor(n, RAVL_RIGHT);
}
/*
* ravl_node_predecessor -- (internal) returns node's predecessor
*
* It's the first node smaller than n.
*/
static struct ravl_node *
ravl_node_predecessor(struct ravl_node *n)
{
return ravl_node_cessor(n, RAVL_LEFT);
}
/*
* ravl_predicate_holds -- (internal) verifies the given predicate for
* the current node in the search path
*
* If the predicate holds for the given node or a node that can be directly
* derived from it, returns 1. Otherwise returns 0.
*/
static int
ravl_predicate_holds(struct ravl *ravl, int result, struct ravl_node **ret,
struct ravl_node *n, const void *data, enum ravl_predicate flags)
{
if (flags & RAVL_PREDICATE_EQUAL) {
if (result == 0) {
*ret = n;
return 1;
}
}
if (flags & RAVL_PREDICATE_GREATER) {
if (result < 0) { /* data < n->data */
*ret = n;
return 0;
} else if (result == 0) {
*ret = ravl_node_successor(n);
return 1;
}
}
if (flags & RAVL_PREDICATE_LESS) {
if (result > 0) { /* data > n->data */
*ret = n;
return 0;
} else if (result == 0) {
*ret = ravl_node_predecessor(n);
return 1;
}
}
return 0;
}
/*
* ravl_find -- searches for the node in the tree
*/
struct ravl_node *
ravl_find(struct ravl *ravl, const void *data, enum ravl_predicate flags)
{
LOG(6, NULL);
struct ravl_node *r = NULL;
struct ravl_node *n = ravl->root;
while (n) {
int result = ravl->compare(data, ravl_data(n));
if (ravl_predicate_holds(ravl, result, &r, n, data, flags))
return r;
n = n->slots[result > 0];
}
return r;
}
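/*
 * Example (illustrative sketch, not part of the original source): assuming
 * a tree created with ravl_new_sized() over int keys and a subtracting
 * comparator, a lower-bound lookup can be expressed as:
 *
 *	int key = 7;
 *	struct ravl_node *n = ravl_find(tree, &key,
 *			RAVL_PREDICATE_GREATER_EQUAL);
 *	if (n != NULL)
 *		printf("%d\n", *(int *)ravl_data(n));
 *
 * With RAVL_PREDICATE_GREATER_EQUAL, an exact match is returned when
 * present, otherwise the first node larger than the key.
 */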
/*
* ravl_remove -- removes the given node from the tree
*/
void
ravl_remove(struct ravl *ravl, struct ravl_node *n)
{
LOG(6, NULL);
if (n->slots[RAVL_LEFT] != NULL && n->slots[RAVL_RIGHT] != NULL) {
/* if both children are present, remove the successor instead */
struct ravl_node *s = ravl_node_successor(n);
memcpy(n->data, s->data, ravl->data_size);
ravl_remove(ravl, s);
} else {
/* swap n with the child that may exist */
struct ravl_node *r = n->slots[RAVL_LEFT] ?
n->slots[RAVL_LEFT] : n->slots[RAVL_RIGHT];
if (r != NULL)
r->parent = n->parent;
*ravl_node_ref(ravl, n) = r;
Free(n);
}
}
/*
* ravl_data -- returns the data contained within the node
*/
void *
ravl_data(struct ravl_node *node)
{
if (node->pointer_based) {
void *data;
memcpy(&data, node->data, sizeof(void *));
return data;
} else {
return (void *)node->data;
}
}
| 12,600 | 20.801038 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/vecq.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* vecq.h -- vector queue (FIFO) interface
*/
#ifndef PMDK_VECQ_H
#define PMDK_VECQ_H 1
#include <stddef.h>
#include "util.h"
#include "out.h"
#include "alloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define VECQ_INIT_SIZE (64)
#define VECQ(name, type)\
struct name {\
type *buffer;\
size_t capacity;\
size_t front;\
size_t back;\
}
#define VECQ_INIT(vec) do {\
(vec)->buffer = NULL;\
(vec)->capacity = 0;\
(vec)->front = 0;\
(vec)->back = 0;\
} while (0)
#define VECQ_REINIT(vec) do {\
VALGRIND_ANNOTATE_NEW_MEMORY((vec), sizeof(*vec));\
VALGRIND_ANNOTATE_NEW_MEMORY((vec)->buffer,\
(sizeof(*(vec)->buffer) * ((vec)->capacity)));\
(vec)->front = 0;\
(vec)->back = 0;\
} while (0)
#define VECQ_FRONT_POS(vec)\
((vec)->front & ((vec)->capacity - 1))
#define VECQ_BACK_POS(vec)\
((vec)->back & ((vec)->capacity - 1))
#define VECQ_FRONT(vec)\
(vec)->buffer[VECQ_FRONT_POS(vec)]
#define VECQ_BACK(vec)\
(vec)->buffer[VECQ_BACK_POS(vec)]
#define VECQ_DEQUEUE(vec)\
((vec)->buffer[(((vec)->front++) & ((vec)->capacity - 1))])
#define VECQ_SIZE(vec)\
((vec)->back - (vec)->front)
static inline int
realloc_set(void **buf, size_t s)
{
void *tbuf = Realloc(*buf, s);
if (tbuf == NULL) {
ERR("!Realloc");
return -1;
}
*buf = tbuf;
return 0;
}
#define VECQ_NCAPACITY(vec)\
((vec)->capacity == 0 ? VECQ_INIT_SIZE : (vec)->capacity * 2)
#define VECQ_GROW(vec)\
(realloc_set((void **)&(vec)->buffer,\
VECQ_NCAPACITY(vec) * sizeof(*(vec)->buffer)) ? -1 :\
(memcpy((vec)->buffer + (vec)->capacity, (vec)->buffer,\
VECQ_FRONT_POS(vec) * sizeof(*(vec)->buffer)),\
(vec)->front = VECQ_FRONT_POS(vec),\
(vec)->back = (vec)->front + (vec)->capacity,\
(vec)->capacity = VECQ_NCAPACITY(vec),\
0\
))
#define VECQ_INSERT(vec, element)\
(VECQ_BACK(vec) = element, (vec)->back += 1, 0)
#define VECQ_ENQUEUE(vec, element)\
((vec)->capacity == VECQ_SIZE(vec) ?\
(VECQ_GROW(vec) == 0 ? VECQ_INSERT(vec, element) : -1) :\
VECQ_INSERT(vec, element))
#define VECQ_CAPACITY(vec)\
((vec)->capacity)
#define VECQ_FOREACH(el, vec)\
for (size_t _vec_i = 0;\
_vec_i < VECQ_SIZE(vec) &&\
(((el) = (vec)->buffer[_vec_i & ((vec)->capacity - 1)]), 1);\
++_vec_i)
#define VECQ_FOREACH_REVERSE(el, vec)\
for (size_t _vec_i = VECQ_SIZE(vec);\
_vec_i > 0 &&\
(((el) = (vec)->buffer[(_vec_i - 1) & ((vec)->capacity - 1)]), 1);\
--_vec_i)
#define VECQ_CLEAR(vec) do {\
(vec)->front = 0;\
(vec)->back = 0;\
} while (0)
#define VECQ_DELETE(vec) do {\
Free((vec)->buffer);\
(vec)->buffer = NULL;\
(vec)->capacity = 0;\
(vec)->front = 0;\
(vec)->back = 0;\
} while (0)
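/*
 * Example usage (illustrative sketch, not part of the original source):
 *
 *	VECQ(intq, int) q;
 *	VECQ_INIT(&q);
 *	if (VECQ_ENQUEUE(&q, 5) == 0 && VECQ_ENQUEUE(&q, 7) == 0) {
 *		while (VECQ_SIZE(&q) > 0) {
 *			int v = VECQ_DEQUEUE(&q);
 *			// v is 5, then 7 (FIFO order)
 *		}
 *	}
 *	VECQ_DELETE(&q);
 *
 * The buffer capacity is always a power of two, which is what allows the
 * monotonically growing front/back counters to be mapped to slots with a
 * simple mask.
 */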
#ifdef __cplusplus
}
#endif
#endif /* PMDK_VECQ_H */
| 2,731 | 20.178295 | 68 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/os_deep_linux.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* os_deep_linux.c -- Linux abstraction layer
*/
#define _GNU_SOURCE
#include <inttypes.h>
#include <fcntl.h>
#include <sys/stat.h>
#include "out.h"
#include "os.h"
#include "mmap.h"
#include "file.h"
#include "libpmem.h"
#include "os_deep.h"
#include "../libpmem2/deep_flush.h"
/*
* os_deep_type -- (internal) perform deep operation based on a pmem
* mapping type
*/
static int
os_deep_type(const struct map_tracker *mt, void *addr, size_t len)
{
LOG(15, "mt %p addr %p len %zu", mt, addr, len);
switch (mt->type) {
case PMEM_DEV_DAX:
pmem_drain();
int ret = pmem2_deep_flush_write(mt->region_id);
if (ret < 0) {
if (ret == PMEM2_E_NOSUPP) {
errno = ENOTSUP;
LOG(1, "!deep_flush not supported");
} else {
errno = pmem2_err_to_errno(ret);
LOG(2, "cannot write to deep_flush"
"in region %u", mt->region_id);
}
return -1;
}
return 0;
case PMEM_MAP_SYNC:
return pmem_msync(addr, len);
default:
ASSERT(0);
return -1;
}
}
/*
* os_range_deep_common -- perform deep action of given address range
*/
int
os_range_deep_common(uintptr_t addr, size_t len)
{
LOG(3, "addr 0x%016" PRIxPTR " len %zu", addr, len);
while (len != 0) {
const struct map_tracker *mt = util_range_find(addr, len);
/* no more overlapping track regions or NOT a device DAX */
if (mt == NULL) {
LOG(15, "pmem_msync addr %p, len %lu",
(void *)addr, len);
return pmem_msync((void *)addr, len);
}
/*
* For range that intersects with the found mapping
* write to (Device DAX) deep_flush file.
* Call msync for the non-intersecting part.
*/
if (mt->base_addr > addr) {
size_t curr_len = mt->base_addr - addr;
if (curr_len > len)
curr_len = len;
if (pmem_msync((void *)addr, curr_len) != 0)
return -1;
len -= curr_len;
if (len == 0)
return 0;
addr = mt->base_addr;
}
size_t mt_in_len = mt->end_addr - addr;
size_t persist_len = MIN(len, mt_in_len);
if (os_deep_type(mt, (void *)addr, persist_len))
return -1;
if (mt->end_addr >= addr + len)
return 0;
len -= mt_in_len;
addr = mt->end_addr;
}
return 0;
}
/*
* os_part_deep_common -- common function to handle both
* deep_persist and deep_drain part flush cases.
*/
int
os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
size_t len, int flush)
{
LOG(3, "part %p part %d addr %p len %lu flush %d",
rep, partidx, addr, len, flush);
if (!rep->is_pmem) {
/*
* In case of part on non-pmem call msync on the range
* to deep flush the data. Deep drain is empty as all
* data is msynced to persistence.
*/
if (!flush)
return 0;
if (pmem_msync(addr, len)) {
LOG(1, "pmem_msync(%p, %lu)", addr, len);
return -1;
}
return 0;
}
struct pool_set_part part = rep->part[partidx];
/* Call deep flush if it was requested */
if (flush) {
LOG(15, "pmem_deep_flush addr %p, len %lu", addr, len);
pmem_deep_flush(addr, len);
}
/*
* Before deep drain call normal drain to ensure that data
* is at least in WPQ.
*/
pmem_drain();
if (part.is_dev_dax) {
/*
* During deep_drain for part on device DAX search for
* device region id, and perform WPQ flush on found
* device DAX region.
*/
unsigned region_id;
int ret = util_ddax_region_find(part.path, ®ion_id);
if (ret < 0) {
if (errno == ENOENT) {
errno = ENOTSUP;
LOG(1, "!deep_flush not supported");
} else {
LOG(1, "invalid dax_region id %u", region_id);
}
return -1;
}
if (pmem2_deep_flush_write(region_id)) {
LOG(1, "pmem2_deep_flush_write(%u)",
region_id);
return -1;
}
} else {
/*
* For deep_drain on normal pmem it is enough to
* call msync on one page.
*/
if (pmem_msync(addr, MIN(Pagesize, len))) {
LOG(1, "pmem_msync(%p, %lu)", addr, len);
return -1;
}
}
return 0;
}
| 3,932 | 21.095506 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/file_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* file_windows.c -- Windows emulation of Linux-specific system calls
*/
/*
* XXX - The initial approach to PMDK for Windows port was to minimize the
* amount of changes required in the core part of the library, and to avoid
* preprocessor conditionals, if possible. For that reason, some of the
* Linux system calls that have no equivalents on Windows have been emulated
* using Windows API.
* Note that it was not a goal to fully emulate POSIX-compliant behavior
* of mentioned functions. They are used only internally, so current
* implementation is just good enough to satisfy PMDK needs and to make it
* work on Windows.
*/
#include <windows.h>
#include <sys/stat.h>
#include <sys/file.h>
#include "alloc.h"
#include "file.h"
#include "out.h"
#include "os.h"
/*
* util_tmpfile -- create a temporary file
*/
int
util_tmpfile(const char *dir, const char *templ, int flags)
{
LOG(3, "dir \"%s\" template \"%s\" flags %x", dir, templ, flags);
/* only O_EXCL is allowed here */
ASSERT(flags == 0 || flags == O_EXCL);
int oerrno;
int fd = -1;
size_t len = strlen(dir) + strlen(templ) + 1;
char *fullname = Malloc(sizeof(*fullname) * len);
if (fullname == NULL) {
ERR("!Malloc");
return -1;
}
int ret = _snprintf(fullname, len, "%s%s", dir, templ);
	if (ret < 0 || (size_t)ret >= len) {
ERR("snprintf: %d", ret);
goto err;
}
LOG(4, "fullname \"%s\"", fullname);
/*
* XXX - block signals and modify file creation mask for the time
	 * of mkstemp() execution. Restore previous settings once the file
* is created.
*/
fd = os_mkstemp(fullname);
if (fd < 0) {
ERR("!os_mkstemp");
goto err;
}
/*
* There is no point to use unlink() here. First, because it does not
* work on open files. Second, because the file is created with
* O_TEMPORARY flag, and it looks like such temp files cannot be open
* from another process, even though they are visible on
* the filesystem.
*/
Free(fullname);
return fd;
err:
Free(fullname);
oerrno = errno;
if (fd != -1)
(void) os_close(fd);
errno = oerrno;
return -1;
}
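/*
 * Example (illustrative sketch, not part of the original source; the
 * directory is hypothetical). The template must end in "XXXXXX" so that
 * os_mkstemp() can substitute the unique suffix:
 *
 *	int fd = util_tmpfile("C:\\Temp", "\\pmdk.XXXXXX", O_EXCL);
 *	if (fd >= 0)
 *		(void) os_close(fd);
 */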
/*
* util_is_absolute_path -- check if the path is absolute
*/
int
util_is_absolute_path(const char *path)
{
LOG(3, "path \"%s\"", path);
if (path == NULL || path[0] == '\0')
return 0;
if (path[0] == '\\' || path[1] == ':')
return 1;
return 0;
}
/*
* util_file_mkdir -- creates new dir
*/
int
util_file_mkdir(const char *path, mode_t mode)
{
/*
* On windows we cannot create read only dir so mode
* parameter is useless.
*/
UNREFERENCED_PARAMETER(mode);
LOG(3, "path: %s mode: %d", path, mode);
return _mkdir(path);
}
/*
* util_file_dir_open -- open a directory
*/
int
util_file_dir_open(struct dir_handle *handle, const char *path)
{
/* init handle */
handle->handle = NULL;
handle->path = path;
return 0;
}
/*
* util_file_dir_next - read next file in directory
*/
int
util_file_dir_next(struct dir_handle *handle, struct file_info *info)
{
WIN32_FIND_DATAA data;
	if (handle->handle == NULL) {
		handle->handle = FindFirstFileA(handle->path, &data);
		/* FindFirstFileA reports failure with INVALID_HANDLE_VALUE */
		if (handle->handle == INVALID_HANDLE_VALUE) {
			handle->handle = NULL;
			return 1;
		}
	} else {
		if (FindNextFileA(handle->handle, &data) == 0)
			return 1;
	}
info->filename[NAME_MAX] = '\0';
strncpy(info->filename, data.cFileName, NAME_MAX + 1);
if (info->filename[NAME_MAX] != '\0')
return -1; /* filename truncated */
	/* dwFileAttributes is a bitmask -- test only the directory bit */
	info->is_dir = (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
return 0;
}
/*
* util_file_dir_close -- close a directory
*/
int
util_file_dir_close(struct dir_handle *handle)
{
return FindClose(handle->handle);
}
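/*
 * Example iteration (illustrative sketch, not part of the original source).
 * Note that the path given to util_file_dir_open() is passed directly to
 * FindFirstFileA() as a search pattern, so it typically needs a wildcard
 * suffix such as "\\*":
 *
 *	struct dir_handle it;
 *	struct file_info info;
 *	util_file_dir_open(&it, "C:\\Temp\\*");
 *	while (util_file_dir_next(&it, &info) == 0)
 *		printf("%s%s\n", info.filename, info.is_dir ? "/" : "");
 *	util_file_dir_close(&it);
 */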
/*
* util_file_dir_remove -- remove directory
*/
int
util_file_dir_remove(const char *path)
{
return RemoveDirectoryA(path) == 0 ? -1 : 0;
}
/*
* util_file_device_dax_alignment -- returns internal Device DAX alignment
*/
size_t
util_file_device_dax_alignment(const char *path)
{
LOG(3, "path \"%s\"", path);
return 0;
}
/*
* util_ddax_region_find -- returns DEV dax region id that contains file
*/
int
util_ddax_region_find(const char *path, unsigned *region_id)
{
LOG(3, "path \"%s\"", path);
return -1;
}
| 4,186 | 20.253807 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/mmap.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* mmap.c -- mmap utilities
*/
#include <errno.h>
#include <inttypes.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include "file.h"
#include "queue.h"
#include "mmap.h"
#include "sys_util.h"
#include "os.h"
#include "alloc.h"
#include "libpmem2.h"
int Mmap_no_random;
void *Mmap_hint;
static os_rwlock_t Mmap_list_lock;
static PMDK_SORTEDQ_HEAD(map_list_head, map_tracker) Mmap_list =
PMDK_SORTEDQ_HEAD_INITIALIZER(Mmap_list);
/*
* util_mmap_init -- initialize the mmap utils
*
* This is called from the library initialization code.
*/
void
util_mmap_init(void)
{
LOG(3, NULL);
util_rwlock_init(&Mmap_list_lock);
/*
* For testing, allow overriding the default mmap() hint address.
* If hint address is defined, it also disables address randomization.
*/
char *e = os_getenv("PMEM_MMAP_HINT");
if (e) {
char *endp;
errno = 0;
unsigned long long val = strtoull(e, &endp, 16);
if (errno || endp == e) {
LOG(2, "Invalid PMEM_MMAP_HINT");
} else if (os_access(OS_MAPFILE, R_OK)) {
LOG(2, "No /proc, PMEM_MMAP_HINT ignored");
} else {
Mmap_hint = (void *)val;
Mmap_no_random = 1;
LOG(3, "PMEM_MMAP_HINT set to %p", Mmap_hint);
}
}
}
/*
* util_mmap_fini -- clean up the mmap utils
*
* This is called before process stop.
*/
void
util_mmap_fini(void)
{
LOG(3, NULL);
util_rwlock_destroy(&Mmap_list_lock);
}
/*
* util_map -- memory map a file
*
* This is just a convenience function that calls mmap() with the
* appropriate arguments and includes our trace points.
*/
void *
util_map(int fd, os_off_t off, size_t len, int flags, int rdonly,
size_t req_align, int *map_sync)
{
LOG(3, "fd %d len %zu flags %d rdonly %d req_align %zu map_sync %p",
fd, len, flags, rdonly, req_align, map_sync);
void *base;
void *addr = util_map_hint(len, req_align);
if (addr == MAP_FAILED) {
LOG(1, "cannot find a contiguous region of given size");
return NULL;
}
if (req_align)
ASSERTeq((uintptr_t)addr % req_align, 0);
int proto = rdonly ? PROT_READ : PROT_READ|PROT_WRITE;
base = util_map_sync(addr, len, proto, flags, fd, off, map_sync);
if (base == MAP_FAILED) {
ERR("!mmap %zu bytes", len);
return NULL;
}
LOG(3, "mapped at %p", base);
return base;
}
/*
* util_unmap -- unmap a file
*
* This is just a convenience function that calls munmap() with the
* appropriate arguments and includes our trace points.
*/
int
util_unmap(void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
/*
* XXX Workaround for https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=169608
*/
#ifdef __FreeBSD__
if (!IS_PAGE_ALIGNED((uintptr_t)addr)) {
errno = EINVAL;
ERR("!munmap");
return -1;
}
#endif
int retval = munmap(addr, len);
if (retval < 0)
ERR("!munmap");
return retval;
}
/*
* util_range_ro -- set a memory range read-only
*/
int
util_range_ro(void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
uintptr_t uptr;
int retval;
/*
* mprotect requires addr to be a multiple of pagesize, so
* adjust addr and len to represent the full 4k chunks
* covering the given range.
*/
/* increase len by the amount we gain when we round addr down */
len += (uintptr_t)addr & (Pagesize - 1);
/* round addr down to page boundary */
uptr = (uintptr_t)addr & ~(Pagesize - 1);
if ((retval = mprotect((void *)uptr, len, PROT_READ)) < 0)
ERR("!mprotect: PROT_READ");
return retval;
}
/*
* util_range_rw -- set a memory range read-write
*/
int
util_range_rw(void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
uintptr_t uptr;
int retval;
/*
* mprotect requires addr to be a multiple of pagesize, so
* adjust addr and len to represent the full 4k chunks
* covering the given range.
*/
/* increase len by the amount we gain when we round addr down */
len += (uintptr_t)addr & (Pagesize - 1);
/* round addr down to page boundary */
uptr = (uintptr_t)addr & ~(Pagesize - 1);
if ((retval = mprotect((void *)uptr, len, PROT_READ|PROT_WRITE)) < 0)
ERR("!mprotect: PROT_READ|PROT_WRITE");
return retval;
}
/*
* util_range_none -- set a memory range for no access allowed
*/
int
util_range_none(void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
uintptr_t uptr;
int retval;
/*
* mprotect requires addr to be a multiple of pagesize, so
* adjust addr and len to represent the full 4k chunks
* covering the given range.
*/
/* increase len by the amount we gain when we round addr down */
len += (uintptr_t)addr & (Pagesize - 1);
/* round addr down to page boundary */
uptr = (uintptr_t)addr & ~(Pagesize - 1);
if ((retval = mprotect((void *)uptr, len, PROT_NONE)) < 0)
ERR("!mprotect: PROT_NONE");
return retval;
}
/*
* util_range_comparer -- (internal) compares the two mapping trackers
*/
static intptr_t
util_range_comparer(struct map_tracker *a, struct map_tracker *b)
{
return ((intptr_t)a->base_addr - (intptr_t)b->base_addr);
}
/*
* util_range_find_unlocked -- (internal) find the map tracker
* for given address range
*
* Returns the first entry at least partially overlapping given range.
* It's up to the caller to check whether the entry exactly matches the range,
* or if the range spans multiple entries.
*/
static struct map_tracker *
util_range_find_unlocked(uintptr_t addr, size_t len)
{
LOG(10, "addr 0x%016" PRIxPTR " len %zu", addr, len);
uintptr_t end = addr + len;
struct map_tracker *mt;
PMDK_SORTEDQ_FOREACH(mt, &Mmap_list, entry) {
if (addr < mt->end_addr &&
(addr >= mt->base_addr || end > mt->base_addr))
goto out;
/* break if there is no chance to find matching entry */
if (addr < mt->base_addr)
break;
}
mt = NULL;
out:
return mt;
}
/*
* util_range_find -- find the map tracker for given address range
* the same as util_range_find_unlocked but locked
*/
struct map_tracker *
util_range_find(uintptr_t addr, size_t len)
{
LOG(10, "addr 0x%016" PRIxPTR " len %zu", addr, len);
util_rwlock_rdlock(&Mmap_list_lock);
struct map_tracker *mt = util_range_find_unlocked(addr, len);
util_rwlock_unlock(&Mmap_list_lock);
return mt;
}
/*
* util_range_register -- add a memory range into a map tracking list
*/
int
util_range_register(const void *addr, size_t len, const char *path,
enum pmem_map_type type)
{
LOG(3, "addr %p len %zu path %s type %d", addr, len, path, type);
/* check if not tracked already */
if (util_range_find((uintptr_t)addr, len) != NULL) {
ERR(
"duplicated persistent memory range; presumably unmapped with munmap() instead of pmem_unmap(): addr %p len %zu",
addr, len);
errno = ENOMEM;
return -1;
}
struct map_tracker *mt;
mt = Malloc(sizeof(struct map_tracker));
if (mt == NULL) {
ERR("!Malloc");
return -1;
}
mt->base_addr = (uintptr_t)addr;
mt->end_addr = mt->base_addr + len;
mt->type = type;
if (type == PMEM_DEV_DAX) {
unsigned region_id;
int ret = util_ddax_region_find(path, ®ion_id);
if (ret < 0) {
ERR("Cannot find DAX device region id");
return -1;
}
mt->region_id = region_id;
}
util_rwlock_wrlock(&Mmap_list_lock);
PMDK_SORTEDQ_INSERT(&Mmap_list, mt, entry, struct map_tracker,
util_range_comparer);
util_rwlock_unlock(&Mmap_list_lock);
return 0;
}
/*
* util_range_split -- (internal) remove or split a map tracking entry
*/
static int
util_range_split(struct map_tracker *mt, const void *addrp, const void *endp)
{
LOG(3, "begin %p end %p", addrp, endp);
uintptr_t addr = (uintptr_t)addrp;
uintptr_t end = (uintptr_t)endp;
ASSERTne(mt, NULL);
if (addr == end || addr % Mmap_align != 0 || end % Mmap_align != 0) {
ERR(
"invalid munmap length, must be non-zero and page aligned");
return -1;
}
struct map_tracker *mtb = NULL;
struct map_tracker *mte = NULL;
/*
* 1) b e b e
* xxxxxxxxxxxxx => xxx.......xxxx - mtb+mte
* 2) b e b e
* xxxxxxxxxxxxx => xxxxxxx....... - mtb
* 3) b e b e
* xxxxxxxxxxxxx => ........xxxxxx - mte
* 4) b e b e
* xxxxxxxxxxxxx => .............. - <none>
*/
if (addr > mt->base_addr) {
/* case #1/2 */
/* new mapping at the beginning */
mtb = Malloc(sizeof(struct map_tracker));
if (mtb == NULL) {
ERR("!Malloc");
goto err;
}
mtb->base_addr = mt->base_addr;
mtb->end_addr = addr;
mtb->region_id = mt->region_id;
mtb->type = mt->type;
}
if (end < mt->end_addr) {
/* case #1/3 */
/* new mapping at the end */
mte = Malloc(sizeof(struct map_tracker));
if (mte == NULL) {
ERR("!Malloc");
goto err;
}
mte->base_addr = end;
mte->end_addr = mt->end_addr;
mte->region_id = mt->region_id;
mte->type = mt->type;
}
PMDK_SORTEDQ_REMOVE(&Mmap_list, mt, entry);
if (mtb) {
PMDK_SORTEDQ_INSERT(&Mmap_list, mtb, entry,
struct map_tracker, util_range_comparer);
}
if (mte) {
PMDK_SORTEDQ_INSERT(&Mmap_list, mte, entry,
struct map_tracker, util_range_comparer);
}
/* free entry for the original mapping */
Free(mt);
return 0;
err:
Free(mtb);
Free(mte);
return -1;
}
/*
* util_range_unregister -- remove a memory range
* from map tracking list
*
* Remove the region between [begin,end]. If it's in a middle of the existing
* mapping, it results in two new map trackers.
*/
int
util_range_unregister(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
int ret = 0;
util_rwlock_wrlock(&Mmap_list_lock);
/*
* Changes in the map tracker list must match the underlying behavior.
*
* $ man 2 mmap:
* The address addr must be a multiple of the page size (but length
* need not be). All pages containing a part of the indicated range
* are unmapped.
*
* This means that we must align the length to the page size.
*/
len = PAGE_ALIGNED_UP_SIZE(len);
void *end = (char *)addr + len;
/* XXX optimize the loop */
struct map_tracker *mt;
while ((mt = util_range_find_unlocked((uintptr_t)addr, len)) != NULL) {
if (util_range_split(mt, addr, end) != 0) {
ret = -1;
break;
}
}
util_rwlock_unlock(&Mmap_list_lock);
return ret;
}
/*
* util_range_is_pmem -- return true if entire range
* is persistent memory
*/
int
util_range_is_pmem(const void *addrp, size_t len)
{
LOG(10, "addr %p len %zu", addrp, len);
uintptr_t addr = (uintptr_t)addrp;
int retval = 1;
util_rwlock_rdlock(&Mmap_list_lock);
do {
struct map_tracker *mt = util_range_find(addr, len);
if (mt == NULL) {
LOG(4, "address not found 0x%016" PRIxPTR, addr);
retval = 0;
break;
}
LOG(10, "range found - begin 0x%016" PRIxPTR
" end 0x%016" PRIxPTR,
mt->base_addr, mt->end_addr);
if (mt->base_addr > addr) {
LOG(10, "base address doesn't match: "
"0x%" PRIxPTR " > 0x%" PRIxPTR,
mt->base_addr, addr);
retval = 0;
break;
}
uintptr_t map_len = mt->end_addr - addr;
if (map_len > len)
map_len = len;
len -= map_len;
addr += map_len;
} while (len > 0);
util_rwlock_unlock(&Mmap_list_lock);
return retval;
}
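/*
 * Example flow (illustrative sketch, not part of the original source;
 * addr, len and the device path are hypothetical). Once a persistent
 * mapping is registered, the tracking list answers is-pmem queries for
 * any subrange of it:
 *
 *	util_range_register(addr, len, "/dev/dax0.0", PMEM_DEV_DAX);
 *	int is_pmem = util_range_is_pmem(addr, len);	// now returns 1
 *	util_range_unregister(addr, len);
 */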
| 11,141 | 21.063366 | 115 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/mmap_posix.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* mmap_posix.c -- memory-mapped files for Posix
*/
#include <stdio.h>
#include <sys/mman.h>
#include <sys/param.h>
#include "mmap.h"
#include "out.h"
#include "os.h"
#define PROCMAXLEN 2048 /* maximum expected line length in /proc files */
char *Mmap_mapfile = OS_MAPFILE; /* Should be modified only for testing */
#ifdef __FreeBSD__
static const char * const sscanf_os = "%p %p";
#else
static const char * const sscanf_os = "%p-%p";
#endif
/*
* util_map_hint_unused -- use /proc to determine a hint address for mmap()
*
* This is a helper function for util_map_hint().
* It opens up /proc/self/maps and looks for the first unused address
* in the process address space that is:
* - greater or equal 'minaddr' argument,
* - large enough to hold range of given length,
* - aligned to the specified unit.
*
* Asking for aligned address like this will allow the DAX code to use large
* mappings. It is not an error if mmap() ignores the hint and chooses
* different address.
*/
char *
util_map_hint_unused(void *minaddr, size_t len, size_t align)
{
LOG(3, "minaddr %p len %zu align %zu", minaddr, len, align);
ASSERT(align > 0);
FILE *fp;
if ((fp = os_fopen(Mmap_mapfile, "r")) == NULL) {
ERR("!%s", Mmap_mapfile);
return MAP_FAILED;
}
char line[PROCMAXLEN]; /* for fgets() */
char *lo = NULL; /* beginning of current range in maps file */
char *hi = NULL; /* end of current range in maps file */
char *raddr = minaddr; /* ignore regions below 'minaddr' */
if (raddr == NULL)
raddr += Pagesize;
raddr = (char *)roundup((uintptr_t)raddr, align);
while (fgets(line, PROCMAXLEN, fp) != NULL) {
/* check for range line */
if (sscanf(line, sscanf_os, &lo, &hi) == 2) {
LOG(4, "%p-%p", lo, hi);
if (lo > raddr) {
if ((uintptr_t)(lo - raddr) >= len) {
LOG(4, "unused region of size %zu "
"found at %p",
lo - raddr, raddr);
break;
} else {
LOG(4, "region is too small: %zu < %zu",
lo - raddr, len);
}
}
if (hi > raddr) {
raddr = (char *)roundup((uintptr_t)hi, align);
LOG(4, "nearest aligned addr %p", raddr);
}
if (raddr == NULL) {
LOG(4, "end of address space reached");
break;
}
}
}
/*
* Check for a case when this is the last unused range in the address
* space, but is not large enough. (very unlikely)
*/
if ((raddr != NULL) && (UINTPTR_MAX - (uintptr_t)raddr < len)) {
ERR("end of address space reached");
raddr = MAP_FAILED;
}
fclose(fp);
LOG(3, "returning %p", raddr);
return raddr;
}
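/*
 * Worked example (illustrative, not part of the original source): with
 * minaddr 0x7f0000000000, a 1GB alignment and these two maps entries:
 *
 *	7f0000000000-7f0040000000 ...
 *	7f00c0000000-7f00e0000000 ...
 *
 * the scan rounds raddr up past the first mapping to 0x7f0040000000; the
 * 2GB gap before the second mapping is large enough for any smaller len,
 * so that aligned address is returned as the hint.
 */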
/*
* util_map_hint -- determine hint address for mmap()
*
* If PMEM_MMAP_HINT environment variable is not set, we let the system to pick
* the randomized mapping address. Otherwise, a user-defined hint address
* is used.
*
 * ASLR in the 64-bit Linux kernel uses 28 bits of randomness for mmap
* (bit positions 12-39), which means the base mapping address is randomized
* within [0..1024GB] range, with 4KB granularity. Assuming additional
* 1GB alignment, it results in 1024 possible locations.
*
* Configuring the hint address via PMEM_MMAP_HINT environment variable
* disables address randomization. In such case, the function will search for
* the first unused, properly aligned region of given size, above the specified
* address.
*/
char *
util_map_hint(size_t len, size_t req_align)
{
LOG(3, "len %zu req_align %zu", len, req_align);
char *hint_addr = MAP_FAILED;
/* choose the desired alignment based on the requested length */
size_t align = util_map_hint_align(len, req_align);
if (Mmap_no_random) {
LOG(4, "user-defined hint %p", Mmap_hint);
hint_addr = util_map_hint_unused(Mmap_hint, len, align);
} else {
/*
* Create dummy mapping to find an unused region of given size.
* Request for increased size for later address alignment.
* Use MAP_PRIVATE with read-only access to simulate
* zero cost for overcommit accounting. Note: MAP_NORESERVE
* flag is ignored if overcommit is disabled (mode 2).
*/
char *addr = mmap(NULL, len + align, PROT_READ,
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
ERR("!mmap MAP_ANONYMOUS");
} else {
LOG(4, "system choice %p", addr);
hint_addr = (char *)roundup((uintptr_t)addr, align);
munmap(addr, len + align);
}
}
LOG(4, "hint %p", hint_addr);
return hint_addr;
}
/*
* util_map_sync -- memory map given file into memory, if MAP_SHARED flag is
* provided it attempts to use MAP_SYNC flag. Otherwise it fallbacks to
* mmap(2).
*/
void *
util_map_sync(void *addr, size_t len, int proto, int flags, int fd,
os_off_t offset, int *map_sync)
{
LOG(15, "addr %p len %zu proto %x flags %x fd %d offset %ld "
"map_sync %p", addr, len, proto, flags, fd, offset, map_sync);
if (map_sync)
*map_sync = 0;
/* if map_sync is NULL do not even try to mmap with MAP_SYNC flag */
if (!map_sync || flags & MAP_PRIVATE)
return mmap(addr, len, proto, flags, fd, offset);
/* MAP_SHARED */
void *ret = mmap(addr, len, proto,
flags | MAP_SHARED_VALIDATE | MAP_SYNC,
fd, offset);
if (ret != MAP_FAILED) {
LOG(4, "mmap with MAP_SYNC succeeded");
*map_sync = 1;
return ret;
}
if (errno == EINVAL || errno == ENOTSUP) {
LOG(4, "mmap with MAP_SYNC not supported");
return mmap(addr, len, proto, flags, fd, offset);
}
/* other error */
return MAP_FAILED;
}
| 5,438 | 27.036082 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/ravl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* ravl.h -- internal definitions for ravl tree
*/
#ifndef LIBPMEMOBJ_RAVL_H
#define LIBPMEMOBJ_RAVL_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct ravl;
struct ravl_node;
enum ravl_predicate {
RAVL_PREDICATE_EQUAL = 1 << 0,
RAVL_PREDICATE_GREATER = 1 << 1,
RAVL_PREDICATE_LESS = 1 << 2,
RAVL_PREDICATE_LESS_EQUAL =
RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_LESS,
RAVL_PREDICATE_GREATER_EQUAL =
RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_GREATER,
};
typedef int ravl_compare(const void *lhs, const void *rhs);
typedef void ravl_cb(void *data, void *arg);
typedef void ravl_constr(void *data, size_t data_size, const void *arg);
struct ravl *ravl_new(ravl_compare *compare);
struct ravl *ravl_new_sized(ravl_compare *compare, size_t data_size);
void ravl_delete(struct ravl *ravl);
void ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg);
void ravl_foreach(struct ravl *ravl, ravl_cb cb, void *arg);
int ravl_empty(struct ravl *ravl);
void ravl_clear(struct ravl *ravl);
int ravl_insert(struct ravl *ravl, const void *data);
int ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg);
int ravl_emplace_copy(struct ravl *ravl, const void *data);
struct ravl_node *ravl_find(struct ravl *ravl, const void *data,
enum ravl_predicate predicate_flags);
void *ravl_data(struct ravl_node *node);
void ravl_remove(struct ravl *ravl, struct ravl_node *node);
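/*
 * Minimal usage sketch (illustrative, not part of the original source):
 *
 *	static int
 *	cmp_int(const void *lhs, const void *rhs)
 *	{
 *		return *(const int *)lhs - *(const int *)rhs;
 *	}
 *
 *	struct ravl *t = ravl_new_sized(cmp_int, sizeof(int));
 *	int v = 5;
 *	ravl_emplace_copy(t, &v);	// copies the int into the node
 *	ravl_delete(t);
 *
 * With ravl_new() (no data size) the tree stores the user pointers
 * themselves, and ravl_insert() should be used instead.
 */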
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_RAVL_H */
| 1,556 | 27.309091 | 73 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/pool_hdr.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* pool_hdr.h -- internal definitions for pool header module
*/
#ifndef PMDK_POOL_HDR_H
#define PMDK_POOL_HDR_H 1
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "uuid.h"
#include "shutdown_state.h"
#include "util.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Number of bits per type in alignment descriptor
*/
#define ALIGNMENT_DESC_BITS 4
/*
* architecture identification flags
*
* These flags allow to unambiguously determine the architecture
* on which the pool was created.
*
* The alignment_desc field contains information about alignment
* of the following basic types:
* - char
* - short
* - int
* - long
* - long long
* - size_t
* - os_off_t
* - float
* - double
* - long double
* - void *
*
* The alignment of each type is computed as an offset of field
* of specific type in the following structure:
* struct {
* char byte;
* type field;
* };
*
* The value is decremented by 1 and masked by 4 bits.
* Multiple alignments are stored on consecutive 4 bits of each
* type in the order specified above.
*
* The values used in the machine, and machine_class fields are in
* principle independent of operating systems, and object formats.
* In practice they happen to match constants used in ELF object headers.
*/
struct arch_flags {
uint64_t alignment_desc; /* alignment descriptor */
uint8_t machine_class; /* address size -- 64 bit or 32 bit */
uint8_t data; /* data encoding -- LE or BE */
uint8_t reserved[4];
uint16_t machine; /* required architecture */
};
#define POOL_HDR_ARCH_LEN sizeof(struct arch_flags)
/* possible values of the machine class field in the above struct */
#define PMDK_MACHINE_CLASS_64 2 /* 64 bit pointers, 64 bit size_t */
/* possible values of the machine field in the above struct */
#define PMDK_MACHINE_X86_64 62
#define PMDK_MACHINE_AARCH64 183
#define PMDK_MACHINE_PPC64 21
/* possible values of the data field in the above struct */
#define PMDK_DATA_LE 1 /* 2's complement, little endian */
#define PMDK_DATA_BE 2 /* 2's complement, big endian */
/*
* features flags
*/
typedef struct {
uint32_t compat; /* mask: compatible "may" features */
uint32_t incompat; /* mask: "must support" features */
uint32_t ro_compat; /* mask: force RO if unsupported */
} features_t;
/*
* header used at the beginning of all types of memory pools
*
* for pools build on persistent memory, the integer types
* below are stored in little-endian byte order.
*/
#define POOL_HDR_SIG_LEN 8
#define POOL_HDR_UNUSED_SIZE 1904
#define POOL_HDR_UNUSED2_SIZE 1976
#define POOL_HDR_ALIGN_PAD (PMEM_PAGESIZE - 4096)
struct pool_hdr {
char signature[POOL_HDR_SIG_LEN];
uint32_t major; /* format major version number */
features_t features; /* features flags */
uuid_t poolset_uuid; /* pool set UUID */
uuid_t uuid; /* UUID of this file */
uuid_t prev_part_uuid; /* prev part */
uuid_t next_part_uuid; /* next part */
uuid_t prev_repl_uuid; /* prev replica */
uuid_t next_repl_uuid; /* next replica */
uint64_t crtime; /* when created (seconds since epoch) */
struct arch_flags arch_flags; /* architecture identification flags */
unsigned char unused[POOL_HDR_UNUSED_SIZE]; /* must be zero */
	/* not checksummed */
unsigned char unused2[POOL_HDR_UNUSED2_SIZE]; /* must be zero */
struct shutdown_state sds; /* shutdown status */
uint64_t checksum; /* checksum of above fields */
#if PMEM_PAGESIZE > 4096 /* prevent zero size array */
unsigned char align_pad[POOL_HDR_ALIGN_PAD]; /* alignment pad */
#endif
};
#define POOL_HDR_SIZE (sizeof(struct pool_hdr))
#define POOL_DESC_SIZE PMEM_PAGESIZE
void util_convert2le_hdr(struct pool_hdr *hdrp);
void util_convert2h_hdr_nocheck(struct pool_hdr *hdrp);
void util_get_arch_flags(struct arch_flags *arch_flags);
int util_check_arch_flags(const struct arch_flags *arch_flags);
features_t util_get_unknown_features(features_t features, features_t known);
int util_feature_check(struct pool_hdr *hdrp, features_t features);
int util_feature_cmp(features_t features, features_t ref);
int util_feature_is_zero(features_t features);
int util_feature_is_set(features_t features, features_t flag);
void util_feature_enable(features_t *features, features_t new_feature);
void util_feature_disable(features_t *features, features_t new_feature);
const char *util_feature2str(features_t feature, features_t *found);
features_t util_str2feature(const char *str);
uint32_t util_str2pmempool_feature(const char *str);
uint32_t util_feature2pmempool_feature(features_t feat);
/*
* set of macros for determining the alignment descriptor
*/
#define DESC_MASK ((1 << ALIGNMENT_DESC_BITS) - 1)
#define alignment_of(t) offsetof(struct { char c; t x; }, x)
#define alignment_desc_of(t) (((uint64_t)alignment_of(t) - 1) & DESC_MASK)
#define alignment_desc()\
(alignment_desc_of(char) << 0 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(short) << 1 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(int) << 2 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(long) << 3 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(long long) << 4 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(size_t) << 5 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(off_t) << 6 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(float) << 7 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(double) << 8 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(long double) << 9 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(void *) << 10 * ALIGNMENT_DESC_BITS)
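/*
 * Worked example (illustrative, not part of the original source): on a
 * typical LP64 x86-64 ABI, alignment_of(double) == 8, so
 * alignment_desc_of(double) == (8 - 1) & DESC_MASK == 7, and that value
 * occupies bits 32..35 (the 8th 4-bit slot) of the final descriptor.
 */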
#define POOL_FEAT_ZERO 0x0000U
static const features_t features_zero =
{POOL_FEAT_ZERO, POOL_FEAT_ZERO, POOL_FEAT_ZERO};
/*
* compat features
*/
#define POOL_FEAT_CHECK_BAD_BLOCKS 0x0001U /* check bad blocks in a pool */
#define POOL_FEAT_COMPAT_ALL \
(POOL_FEAT_CHECK_BAD_BLOCKS)
#define FEAT_COMPAT(X) \
{POOL_FEAT_##X, POOL_FEAT_ZERO, POOL_FEAT_ZERO}
/*
* incompat features
*/
#define POOL_FEAT_SINGLEHDR 0x0001U /* pool header only in the first part */
#define POOL_FEAT_CKSUM_2K 0x0002U /* only first 2K of hdr checksummed */
#define POOL_FEAT_SDS 0x0004U /* check shutdown state */
#define POOL_FEAT_INCOMPAT_ALL \
(POOL_FEAT_SINGLEHDR | POOL_FEAT_CKSUM_2K | POOL_FEAT_SDS)
/*
* incompat features effective values (if applicable)
*/
#ifdef SDS_ENABLED
#define POOL_E_FEAT_SDS POOL_FEAT_SDS
#else
#define POOL_E_FEAT_SDS 0x0000U /* empty */
#endif
#define POOL_FEAT_COMPAT_VALID \
(POOL_FEAT_CHECK_BAD_BLOCKS)
#define POOL_FEAT_INCOMPAT_VALID \
(POOL_FEAT_SINGLEHDR | POOL_FEAT_CKSUM_2K | POOL_E_FEAT_SDS)
#if defined(_WIN32) || NDCTL_ENABLED
#define POOL_FEAT_INCOMPAT_DEFAULT \
(POOL_FEAT_CKSUM_2K | POOL_E_FEAT_SDS)
#else
/*
* shutdown state support on Linux requires root access on kernel < 4.20 with
* ndctl < 63 so it is disabled by default
*/
#define POOL_FEAT_INCOMPAT_DEFAULT \
(POOL_FEAT_CKSUM_2K)
#endif
#if NDCTL_ENABLED
#define POOL_FEAT_COMPAT_DEFAULT \
(POOL_FEAT_CHECK_BAD_BLOCKS)
#else
#define POOL_FEAT_COMPAT_DEFAULT \
(POOL_FEAT_ZERO)
#endif
#define FEAT_INCOMPAT(X) \
{POOL_FEAT_ZERO, POOL_FEAT_##X, POOL_FEAT_ZERO}
#define POOL_FEAT_VALID \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, POOL_FEAT_ZERO}
/*
* defines the first not checksummed field - all fields after this will be
* ignored during checksum calculations.
*/
#define POOL_HDR_CSUM_2K_END_OFF offsetof(struct pool_hdr, unused2)
#define POOL_HDR_CSUM_4K_END_OFF offsetof(struct pool_hdr, checksum)
/*
* pick the first not checksummed field. 2K variant is used if
* POOL_FEAT_CKSUM_2K incompat feature is set.
*/
#define POOL_HDR_CSUM_END_OFF(hdrp) \
((hdrp)->features.incompat & POOL_FEAT_CKSUM_2K) \
? POOL_HDR_CSUM_2K_END_OFF : POOL_HDR_CSUM_4K_END_OFF
/* ignore shutdown state if incompat feature is disabled */
#define IGNORE_SDS(hdrp) \
(((hdrp) != NULL) && (((hdrp)->features.incompat & POOL_FEAT_SDS) == 0))
#ifdef __cplusplus
}
#endif
#endif
| 7,980 | 29.696154 | 77 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/os_deep_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* os_deep_windows.c -- Windows abstraction layer for deep_* functions
*/
#include <windows.h>
#include "out.h"
#include "os.h"
#include "set.h"
#include "libpmem.h"
/*
 * os_range_deep_common -- call msync for non Device DAX ranges
*/
int
os_range_deep_common(uintptr_t addr, size_t len)
{
LOG(3, "os_range_deep_common addr %p len %lu", addr, len);
if (len == 0)
return 0;
return pmem_msync((void *)addr, len);
}
/*
* os_part_deep_common -- common function to handle both
* deep_persist and deep_drain part flush cases.
*/
int
os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
size_t len, int flush)
{
LOG(3, "part %p part %d addr %p len %lu flush %d",
rep, partidx, addr, len, flush);
if (!rep->is_pmem) {
/*
* In case of part on non-pmem call msync on the range
* to deep flush the data. Deep drain is empty as all
* data is msynced to persistence.
*/
if (!flush)
return 0;
if (pmem_msync(addr, len)) {
LOG(1, "pmem_msync(%p, %lu)", addr, len);
return -1;
}
return 0;
}
/* Call deep flush if it was requested */
if (flush) {
LOG(15, "pmem_deep_flush addr %p, len %lu", addr, len);
pmem_deep_flush(addr, len);
}
/*
* Before deep drain call normal drain to ensure that data
* is at least in WPQ.
*/
pmem_drain();
/*
* For deep_drain on normal pmem it is enough to
* call msync on one page.
*/
if (pmem_msync(addr, MIN(Pagesize, len))) {
LOG(1, "pmem_msync(%p, %lu)", addr, len);
return -1;
}
return 0;
}
| 1,598 | 20.039474 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/mmap_windows.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* Copyright (c) 2015-2017, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* mmap_windows.c -- memory-mapped files for Windows
*/
#include <sys/mman.h>
#include "mmap.h"
#include "out.h"
/*
* util_map_hint_unused -- use VirtualQuery to determine hint address
*
* This is a helper function for util_map_hint().
* It iterates through memory regions and looks for the first unused address
* in the process address space that is:
* - greater or equal 'minaddr' argument,
* - large enough to hold range of given length,
* - aligned to the specified unit.
*/
char *
util_map_hint_unused(void *minaddr, size_t len, size_t align)
{
LOG(3, "minaddr %p len %zu align %zu", minaddr, len, align);
ASSERT(align > 0);
MEMORY_BASIC_INFORMATION mi;
char *lo = NULL; /* beginning of current range in maps file */
char *hi = NULL; /* end of current range in maps file */
char *raddr = minaddr; /* ignore regions below 'minaddr' */
if (raddr == NULL)
raddr += Pagesize;
raddr = (char *)roundup((uintptr_t)raddr, align);
while ((uintptr_t)raddr < UINTPTR_MAX - len) {
size_t ret = VirtualQuery(raddr, &mi, sizeof(mi));
if (ret == 0) {
ERR("VirtualQuery %p", raddr);
return MAP_FAILED;
}
LOG(4, "addr %p len %zu state %d",
mi.BaseAddress, mi.RegionSize, mi.State);
if ((mi.State != MEM_FREE) || (mi.RegionSize < len)) {
raddr = (char *)mi.BaseAddress + mi.RegionSize;
raddr = (char *)roundup((uintptr_t)raddr, align);
LOG(4, "nearest aligned addr %p", raddr);
} else {
LOG(4, "unused region of size %zu found at %p",
mi.RegionSize, mi.BaseAddress);
return mi.BaseAddress;
}
}
LOG(4, "end of address space reached");
return MAP_FAILED;
}
/*
* util_map_hint -- determine hint address for mmap()
*
* XXX - Windows doesn't support large DAX pages yet, so there is
 * no point in aligning for large pages.
*/
char *
util_map_hint(size_t len, size_t req_align)
{
LOG(3, "len %zu req_align %zu", len, req_align);
char *hint_addr = MAP_FAILED;
/* choose the desired alignment based on the requested length */
size_t align = util_map_hint_align(len, req_align);
if (Mmap_no_random) {
LOG(4, "user-defined hint %p", Mmap_hint);
hint_addr = util_map_hint_unused(Mmap_hint, len, align);
} else {
/*
* Create dummy mapping to find an unused region of given size.
* Request for increased size for later address alignment.
*
* Use MAP_NORESERVE flag to only reserve the range of pages
* rather than commit. We don't want the pages to be actually
* backed by the operating system paging file, as the swap
* file is usually too small to handle terabyte pools.
*/
char *addr = mmap(NULL, len + align, PROT_READ,
MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
if (addr != MAP_FAILED) {
LOG(4, "system choice %p", addr);
hint_addr = (char *)roundup((uintptr_t)addr, align);
munmap(addr, len + align);
}
}
LOG(4, "hint %p", hint_addr);
return hint_addr;
}
/*
* util_map_sync -- memory map given file into memory
*/
void *
util_map_sync(void *addr, size_t len, int proto, int flags, int fd,
os_off_t offset, int *map_sync)
{
LOG(15, "addr %p len %zu proto %x flags %x fd %d offset %ld",
addr, len, proto, flags, fd, offset);
if (map_sync)
*map_sync = 0;
return mmap(addr, len, proto, flags, fd, offset);
}
| 4,965 | 31.887417 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemlog/logfile/addlog.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* addlog -- given a log file, append a log entry
*
* Usage:
* fallocate -l 1G /path/to/pm-aware/file
* addlog /path/to/pm-aware/file "first line of entry" "second line"
*/
#include <ex_common.h>
#include <sys/stat.h>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <libpmemlog.h>
#include "logentry.h"
int
main(int argc, char *argv[])
{
PMEMlogpool *plp;
struct logentry header;
struct iovec *iovp;
struct iovec *next_iovp;
int iovcnt;
if (argc < 3) {
fprintf(stderr, "usage: %s filename lines...\n", argv[0]);
exit(1);
}
const char *path = argv[1];
/* create the log in the given file, or open it if already created */
plp = pmemlog_create(path, 0, CREATE_MODE_RW);
if (plp == NULL &&
(plp = pmemlog_open(path)) == NULL) {
perror(path);
exit(1);
}
/* fill in the header */
time(&header.timestamp);
header.pid = getpid();
/*
* Create an iov for pmemlog_appendv(). For each argument given,
* allocate two entries (one for the string, one for the newline
* appended to the string). Allocate 1 additional entry for the
* header that gets prepended to the entry.
*/
iovcnt = (argc - 2) * 2 + 2;
if ((iovp = malloc(sizeof(*iovp) * iovcnt)) == NULL) {
perror("malloc");
exit(1);
}
next_iovp = iovp;
/* put the header into iov first */
next_iovp->iov_base = &header;
next_iovp->iov_len = sizeof(header);
next_iovp++;
/*
* Now put each arg in, following it with the string "\n".
* Calculate a total character count in header.len along the way.
*/
header.len = 0;
for (int arg = 2; arg < argc; arg++) {
/* add the string given */
next_iovp->iov_base = argv[arg];
next_iovp->iov_len = strlen(argv[arg]);
header.len += next_iovp->iov_len;
next_iovp++;
/* add the newline */
next_iovp->iov_base = "\n";
next_iovp->iov_len = 1;
header.len += 1;
next_iovp++;
}
/*
* pad with NULs (at least one) to align next entry to sizeof(long long)
* bytes
*/
int a = sizeof(long long);
int len_to_round = 1 + (a - (header.len + 1) % a) % a;
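	/*
	 * e.g. (illustrative): for header.len == 13 and a == 8,
	 * len_to_round == 1 + (8 - 14 % 8) % 8 == 3, giving a 16-byte entry
	 */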
	char buf[sizeof(long long)] = {0};
next_iovp->iov_base = buf;
next_iovp->iov_len = len_to_round;
header.len += len_to_round;
next_iovp++;
/* atomically add it all to the log */
if (pmemlog_appendv(plp, iovp, iovcnt) < 0) {
perror("pmemlog_appendv");
free(iovp);
exit(1);
}
free(iovp);
pmemlog_close(plp);
return 0;
}
| 2,511 | 21.230088 | 73 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemblk/assetdb/asset_checkout.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* asset_checkout -- mark an asset as checked out to someone
*
* Usage:
* asset_checkin /path/to/pm-aware/file asset-ID name
*/
#include <ex_common.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <libpmemblk.h>
#include "asset.h"
int
main(int argc, char *argv[])
{
PMEMblkpool *pbp;
struct asset asset;
int assetid;
if (argc < 4) {
fprintf(stderr, "usage: %s assetdb asset-ID name\n", argv[0]);
exit(1);
}
const char *path = argv[1];
assetid = atoi(argv[2]);
assert(assetid > 0);
/* open an array of atomically writable elements */
if ((pbp = pmemblk_open(path, sizeof(struct asset))) == NULL) {
perror("pmemblk_open");
exit(1);
}
/* read a required element in */
if (pmemblk_read(pbp, &asset, assetid) < 0) {
perror("pmemblk_read");
exit(1);
}
/* check if it contains any data */
if ((asset.state != ASSET_FREE) &&
(asset.state != ASSET_CHECKED_OUT)) {
fprintf(stderr, "Asset ID %d not found", assetid);
exit(1);
}
if (asset.state == ASSET_CHECKED_OUT) {
fprintf(stderr, "Asset ID %d already checked out\n", assetid);
exit(1);
}
/* update user name, set checked out state, and take timestamp */
strncpy(asset.user, argv[3], ASSET_USER_NAME_MAX - 1);
asset.user[ASSET_USER_NAME_MAX - 1] = '\0';
asset.state = ASSET_CHECKED_OUT;
time(&asset.time);
/* put it back in the block */
if (pmemblk_write(pbp, &asset, assetid) < 0) {
perror("pmemblk_write");
exit(1);
}
pmemblk_close(pbp);
return 0;
}
| 1,634 | 20.233766 | 66 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/buffons_needle_problem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* buffons_needle_problem.c <path> [<n>] -- example illustrating
* usage of libpmemobj
*
* Calculates pi number by solving Buffon's needle problem.
* Takes one/two arguments -- path of the file and integer amount of trials
* or only path when continuing simulation after interruption.
* The greater number of trials, the higher calculation precision.
*/
#include <ex_common.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#ifdef _WIN32
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#include <time.h>
#include <libpmemobj.h>
/*
* Layout definition
*/
POBJ_LAYOUT_BEGIN(pi);
POBJ_LAYOUT_ROOT(pi, struct my_root)
POBJ_LAYOUT_END(pi)
/*
* Used for changing degrees into radians
*/
#define RADIAN_CALCULATE M_PI / 180.0
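/*
 * Buffon's estimator (illustrative note, not part of the original source):
 * a needle of length l crosses a line among parallels spaced d apart with
 * probability P = 2l / (pi * d) when l <= d, so with p hits in n throws
 * pi is approximated by 2 * l * n / (p * d).
 */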
static PMEMobjpool *pop;
struct my_root {
double x; /* coordinate of the needle's center */
double angle; /* angle between vertical position and the needle */
double l; /* length of the needle */
double sin_angle_l; /* sin(angle) * l */
double pi; /* calculated pi number */
double d; /* distance between lines on the board */
uint64_t i; /* variable used in for loop */
uint64_t p; /* amount of the positive trials */
uint64_t n; /* amount of the trials */
};
static void
print_usage(char *argv_main[])
{
printf("usage: %s <path> [<n>]\n",
argv_main[0]);
}
/*
* random_number -- randomizes number in range [0,1]
*/
static double
random_number(void)
{
return (double)rand() / (double)RAND_MAX;
}
int
main(int argc, char *argv[])
{
if (argc < 2 || argc > 3) {
print_usage(argv);
return 1;
}
const char *path = argv[1];
if (file_exists(path) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(pi),
PMEMOBJ_MIN_POOL, 0666)) == NULL) {
perror("failed to create pool\n");
return 1;
}
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(pi))) == NULL) {
perror("failed to open pool\n");
return 1;
}
}
srand((unsigned int)time(NULL));
TOID(struct my_root) root = POBJ_ROOT(pop, struct my_root);
struct my_root *const rootp_rw = D_RW(root);
if (argc == 3) {
const char *n = argv[2];
char *endptr;
errno = 0;
uint64_t ull_n = strtoull(n, &endptr, 10);
if (*endptr != '\0' ||
(ull_n == ULLONG_MAX && errno == ERANGE)) {
perror("wrong n parameter\n");
print_usage(argv);
pmemobj_close(pop);
return 1;
}
TX_BEGIN(pop) {
TX_ADD(root);
rootp_rw->l = 0.9;
rootp_rw->d = 1.0;
rootp_rw->i = 0;
rootp_rw->p = 0;
rootp_rw->n = ull_n;
} TX_END
}
for (; rootp_rw->i < rootp_rw->n; ) {
TX_BEGIN(pop) {
TX_ADD(root);
rootp_rw->angle = random_number() *
90 * RADIAN_CALCULATE;
rootp_rw->x = random_number() * rootp_rw->d / 2;
rootp_rw->sin_angle_l = rootp_rw->l /
2 * sin(rootp_rw->angle);
if (rootp_rw->x <= rootp_rw->sin_angle_l) {
rootp_rw->p++;
}
rootp_rw->pi = (2 * rootp_rw->l *
rootp_rw->n) / (rootp_rw->p *
rootp_rw->d);
rootp_rw->i++;
} TX_END
}
printf("%f\n", D_RO(root)->pi);
pmemobj_close(pop);
return 0;
}
| 3,119 | 20.22449 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/pi.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* pi.c -- example usage of user lists
*
* Calculates pi number with multiple threads using Leibniz formula.
*/
#include <ex_common.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <assert.h>
#include <inttypes.h>
#include <libpmemobj.h>
#ifndef _WIN32
#include <pthread.h>
#endif
/*
* Layout definition
*/
POBJ_LAYOUT_BEGIN(pi);
POBJ_LAYOUT_ROOT(pi, struct pi);
POBJ_LAYOUT_TOID(pi, struct pi_task);
POBJ_LAYOUT_END(pi);
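/*
 * Leibniz series (illustrative note, not part of the original source):
 * pi / 4 = 1 - 1/3 + 1/5 - 1/7 + ... = sum((-1)^i / (2i + 1)),
 * which is why each worker accumulates pow(-1, i) / (2 * i + 1) over its
 * assigned range and the final result is multiplied by 4 when printed.
 */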
static PMEMobjpool *pop;
struct pi_task_proto {
uint64_t start;
uint64_t stop;
long double result;
};
struct pi_task {
struct pi_task_proto proto;
POBJ_LIST_ENTRY(struct pi_task) todo;
POBJ_LIST_ENTRY(struct pi_task) done;
};
struct pi {
POBJ_LIST_HEAD(todo, struct pi_task) todo;
POBJ_LIST_HEAD(done, struct pi_task) done;
};
/*
* pi_task_construct -- task constructor
*/
static int
pi_task_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
struct pi_task *t = (struct pi_task *)ptr;
struct pi_task_proto *p = (struct pi_task_proto *)arg;
t->proto = *p;
pmemobj_persist(pop, t, sizeof(*t));
return 0;
}
/*
* calc_pi -- worker for pi calculation
*/
#ifndef _WIN32
static void *
calc_pi(void *arg)
#else
static DWORD WINAPI
calc_pi(LPVOID arg)
#endif
{
TOID(struct pi) pi = POBJ_ROOT(pop, struct pi);
TOID(struct pi_task) task = *((TOID(struct pi_task) *)arg);
long double result = 0;
for (uint64_t i = D_RO(task)->proto.start;
i < D_RO(task)->proto.stop; ++i) {
result += (pow(-1, (double)i) / (2 * i + 1));
}
D_RW(task)->proto.result = result;
pmemobj_persist(pop, &D_RW(task)->proto.result, sizeof(result));
POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(pi)->todo, &D_RW(pi)->done,
task, todo, done);
return NULL;
}
/*
* calc_pi_mt -- calculate all the pending to-do tasks
*/
static void
calc_pi_mt(void)
{
TOID(struct pi) pi = POBJ_ROOT(pop, struct pi);
int pending = 0;
TOID(struct pi_task) iter;
POBJ_LIST_FOREACH(iter, &D_RO(pi)->todo, todo)
pending++;
if (pending == 0)
return;
int i = 0;
TOID(struct pi_task) *tasks = (TOID(struct pi_task) *)malloc(
sizeof(TOID(struct pi_task)) * pending);
if (tasks == NULL) {
fprintf(stderr, "failed to allocate tasks\n");
return;
}
POBJ_LIST_FOREACH(iter, &D_RO(pi)->todo, todo)
tasks[i++] = iter;
#ifndef _WIN32
pthread_t workers[pending];
for (i = 0; i < pending; ++i)
if (pthread_create(&workers[i], NULL, calc_pi, &tasks[i]) != 0)
break;
for (i = i - 1; i >= 0; --i)
pthread_join(workers[i], NULL);
#else
HANDLE *workers = (HANDLE *) malloc(sizeof(HANDLE) * pending);
for (i = 0; i < pending; ++i) {
workers[i] = CreateThread(NULL, 0, calc_pi,
&tasks[i], 0, NULL);
if (workers[i] == NULL)
break;
}
WaitForMultipleObjects(i, workers, TRUE, INFINITE);
for (i = i - 1; i >= 0; --i)
CloseHandle(workers[i]);
free(workers);
#endif
free(tasks);
}
/*
* prep_todo_list -- create tasks to be done
*/
static int
prep_todo_list(int threads, int ops)
{
TOID(struct pi) pi = POBJ_ROOT(pop, struct pi);
if (!POBJ_LIST_EMPTY(&D_RO(pi)->todo))
return -1;
int ops_per_thread = ops / threads;
uint64_t last = 0; /* last calculated denominator */
TOID(struct pi_task) iter;
POBJ_LIST_FOREACH(iter, &D_RO(pi)->done, done) {
if (last < D_RO(iter)->proto.stop)
last = D_RO(iter)->proto.stop;
}
int i;
for (i = 0; i < threads; ++i) {
uint64_t start = last + (i * ops_per_thread);
struct pi_task_proto proto;
proto.start = start;
proto.stop = start + ops_per_thread;
proto.result = 0;
POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(pi)->todo, todo,
sizeof(struct pi_task), pi_task_construct, &proto);
}
return 0;
}
int
main(int argc, char *argv[])
{
if (argc < 3) {
printf("usage: %s file-name "
"[print|done|todo|finish|calc <# of threads> <ops>]\n",
argv[0]);
return 1;
}
const char *path = argv[1];
pop = NULL;
if (file_exists(path) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(pi),
PMEMOBJ_MIN_POOL, CREATE_MODE_RW)) == NULL) {
printf("failed to create pool\n");
return 1;
}
} else {
if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(pi))) == NULL) {
printf("failed to open pool\n");
return 1;
}
}
TOID(struct pi) pi = POBJ_ROOT(pop, struct pi);
char op = argv[2][0];
switch (op) {
case 'p': { /* print pi */
long double pi_val = 0;
TOID(struct pi_task) iter;
POBJ_LIST_FOREACH(iter, &D_RO(pi)->done, done) {
pi_val += D_RO(iter)->proto.result;
}
printf("pi: %Lf\n", pi_val * 4);
} break;
case 'd': { /* print done list */
TOID(struct pi_task) iter;
POBJ_LIST_FOREACH(iter, &D_RO(pi)->done, done) {
printf("(%" PRIu64 " - %" PRIu64 ") = %Lf\n",
D_RO(iter)->proto.start,
D_RO(iter)->proto.stop,
D_RO(iter)->proto.result);
}
} break;
case 't': { /* print to-do list */
TOID(struct pi_task) iter;
POBJ_LIST_FOREACH(iter, &D_RO(pi)->todo, todo) {
printf("(%" PRIu64 " - %" PRIu64 ") = %Lf\n",
D_RO(iter)->proto.start,
D_RO(iter)->proto.stop,
D_RO(iter)->proto.result);
}
} break;
case 'c': { /* calculate pi */
if (argc < 5) {
printf("usage: %s file-name "
"calc <# of threads> <ops>\n",
argv[0]);
return 1;
}
int threads = atoi(argv[3]);
int ops = atoi(argv[4]);
assert((threads > 0) && (ops > 0));
if (prep_todo_list(threads, ops) == -1)
printf("pending todo tasks\n");
else
calc_pi_mt();
} break;
case 'f': { /* finish to-do tasks */
calc_pi_mt();
} break;
}
pmemobj_close(pop);
return 0;
}
| 5,620 | 20.786822 | 68 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/setjmp.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* setjmp.c -- example illustrating an issue with indeterminate value
* of non-volatile automatic variables after transaction abort.
* See libpmemobj(7) for details.
*
* NOTE: To observe the problem (likely segfault on a second call to free()),
* the example program should be compiled with optimizations enabled (-O2).
*/
#include <stdlib.h>
#include <stdio.h>
#include <libpmemobj.h>
/* name of our layout in the pool */
#define LAYOUT_NAME "setjmp_example"
int
main(int argc, const char *argv[])
{
const char path[] = "/pmem-fs/myfile";
PMEMobjpool *pop;
/* create the pmemobj pool */
pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL, 0666);
if (pop == NULL) {
perror(path);
exit(1);
}
/* initialize pointer variables with invalid addresses */
int *bad_example_1 = (int *)0xBAADF00D;
int *bad_example_2 = (int *)0xBAADF00D;
int *bad_example_3 = (int *)0xBAADF00D;
int *volatile good_example = (int *)0xBAADF00D;
TX_BEGIN(pop) {
bad_example_1 = malloc(sizeof(int));
bad_example_2 = malloc(sizeof(int));
bad_example_3 = malloc(sizeof(int));
good_example = malloc(sizeof(int));
/* manual or library abort called here */
pmemobj_tx_abort(EINVAL);
} TX_ONCOMMIT {
/*
* This section is longjmp-safe
*/
} TX_ONABORT {
/*
* This section is not longjmp-safe
*/
free(good_example); /* OK */
free(bad_example_1); /* undefined behavior */
} TX_FINALLY {
/*
* This section is not longjmp-safe on transaction abort only
*/
free(bad_example_2); /* undefined behavior */
} TX_END
free(bad_example_3); /* undefined behavior */
pmemobj_close(pop);
return 0;
}
| 1,723 | 23.985507 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/pmemblk/obj_pmemblk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_pmemblk.c -- alternate pmemblk implementation based on pmemobj
*
* usage: obj_pmemblk [co] file blk_size [cmd[:blk_num[:data]]...]
*
* c - create file
* o - open file
*
* The "cmd" arguments match the pmemblk functions:
* w - write to a block
* r - read a block
* z - zero a block
* n - write out number of available blocks
* e - put a block in error state
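 *
 * An example session (hypothetical pool file and data):
 *	obj_pmemblk c /mnt/pmem/blkpool 512 w:5:hello r:5 n
 * creates a pool with 512-byte blocks, writes "hello" to block 5, reads
 * the block back and prints the number of available blocks.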
*/
#include <ex_common.h>
#include <sys/stat.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include "libpmemobj.h"
#include "libpmem.h"
#include "libpmemblk.h"
#define USABLE_SIZE (7.0 / 10)
#define POOL_SIZE ((size_t)(1024 * 1024 * 100))
#define MAX_POOL_SIZE ((size_t)1024 * 1024 * 1024 * 16)
#define MAX_THREADS 256
#define BSIZE_MAX ((size_t)(1024 * 1024 * 10))
#define ZERO_MASK (1 << 0)
#define ERROR_MASK (1 << 1)
POBJ_LAYOUT_BEGIN(obj_pmemblk);
POBJ_LAYOUT_ROOT(obj_pmemblk, struct base);
POBJ_LAYOUT_TOID(obj_pmemblk, uint8_t);
POBJ_LAYOUT_END(obj_pmemblk);
/* The root object struct holding all necessary data */
struct base {
TOID(uint8_t) data; /* contiguous memory region */
TOID(uint8_t) flags; /* block flags */
size_t bsize; /* block size */
size_t nblocks; /* number of available blocks */
PMEMmutex locks[MAX_THREADS]; /* thread synchronization locks */
};
/*
* pmemblk_map -- (internal) read or initialize the blk pool
*/
static int
pmemblk_map(PMEMobjpool *pop, size_t bsize, size_t fsize)
{
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* read pool descriptor and validate user provided values */
if (D_RO(bp)->bsize) {
if (bsize && D_RO(bp)->bsize != bsize)
return -1;
else
return 0;
}
/* new pool, calculate and store metadata */
TX_BEGIN(pop) {
TX_ADD(bp);
D_RW(bp)->bsize = bsize;
size_t pool_size = (size_t)(fsize * USABLE_SIZE);
D_RW(bp)->nblocks = pool_size / bsize;
D_RW(bp)->data = TX_ZALLOC(uint8_t, pool_size);
D_RW(bp)->flags = TX_ZALLOC(uint8_t,
sizeof(uint8_t) * D_RO(bp)->nblocks);
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemblk_open -- open a block memory pool
*/
PMEMblkpool *
pmemblk_open(const char *path, size_t bsize)
{
PMEMobjpool *pop = pmemobj_open(path, POBJ_LAYOUT_NAME(obj_pmemblk));
if (pop == NULL)
return NULL;
struct stat buf;
if (stat(path, &buf)) {
perror("stat");
return NULL;
}
return pmemblk_map(pop, bsize, buf.st_size) ? NULL : (PMEMblkpool *)pop;
}
/*
* pmemblk_create -- create a block memory pool
*/
PMEMblkpool *
pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
/* max size of a single allocation is 16GB */
if (poolsize > MAX_POOL_SIZE) {
errno = EINVAL;
return NULL;
}
PMEMobjpool *pop = pmemobj_create(path, POBJ_LAYOUT_NAME(obj_pmemblk),
poolsize, mode);
if (pop == NULL)
return NULL;
return pmemblk_map(pop, bsize, poolsize) ? NULL : (PMEMblkpool *)pop;
}
/*
* pmemblk_close -- close a block memory pool
*/
void
pmemblk_close(PMEMblkpool *pbp)
{
pmemobj_close((PMEMobjpool *)pbp);
}
/*
* pmemblk_check -- block memory pool consistency check
*/
int
pmemblk_check(const char *path, size_t bsize)
{
int ret = pmemobj_check(path, POBJ_LAYOUT_NAME(obj_pmemblk));
if (ret)
return ret;
/* open just to validate block size */
PMEMblkpool *pop = pmemblk_open(path, bsize);
if (!pop)
return -1;
pmemblk_close(pop);
return 0;
}
/*
 * pmemblk_set_error -- put a block in error state
*/
int
pmemblk_set_error(PMEMblkpool *pbp, long long blockno)
{
PMEMobjpool *pop = (PMEMobjpool *)pbp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
int retval = 0;
if (blockno >= (long long)D_RO(bp)->nblocks)
return 1;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX,
&D_RW(bp)->locks[blockno % MAX_THREADS], TX_PARAM_NONE) {
uint8_t *flags = D_RW(D_RW(bp)->flags) + blockno;
/* add the modified flags to the undo log */
pmemobj_tx_add_range_direct(flags, sizeof(*flags));
*flags |= ERROR_MASK;
} TX_ONABORT {
retval = 1;
} TX_END
return retval;
}
/*
* pmemblk_nblock -- return number of usable blocks in a block memory pool
*/
size_t
pmemblk_nblock(PMEMblkpool *pbp)
{
PMEMobjpool *pop = (PMEMobjpool *)pbp;
return ((struct base *)pmemobj_direct(pmemobj_root(pop,
sizeof(struct base))))->nblocks;
}
/*
* pmemblk_read -- read a block in a block memory pool
*/
int
pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno)
{
PMEMobjpool *pop = (PMEMobjpool *)pbp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
if (blockno >= (long long)D_RO(bp)->nblocks)
return 1;
pmemobj_mutex_lock(pop, &D_RW(bp)->locks[blockno % MAX_THREADS]);
/* check the error mask */
uint8_t *flags = D_RW(D_RW(bp)->flags) + blockno;
if ((*flags & ERROR_MASK) != 0) {
pmemobj_mutex_unlock(pop,
&D_RW(bp)->locks[blockno % MAX_THREADS]);
errno = EIO;
return 1;
}
	/* ZERO_MASK uses reverse logic: a cleared flag means the block reads as zeros */
if ((*flags & ZERO_MASK) == 0) {
memset(buf, 0, D_RO(bp)->bsize);
} else {
size_t block_off = blockno * D_RO(bp)->bsize;
uint8_t *src = D_RW(D_RW(bp)->data) + block_off;
memcpy(buf, src, D_RO(bp)->bsize);
}
pmemobj_mutex_unlock(pop, &D_RW(bp)->locks[blockno % MAX_THREADS]);
return 0;
}
/*
* pmemblk_write -- write a block (atomically) in a block memory pool
*/
int
pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno)
{
PMEMobjpool *pop = (PMEMobjpool *)pbp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
if (blockno >= (long long)D_RO(bp)->nblocks)
return 1;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX,
&D_RW(bp)->locks[blockno % MAX_THREADS], TX_PARAM_NONE) {
size_t block_off = blockno * D_RO(bp)->bsize;
uint8_t *dst = D_RW(D_RW(bp)->data) + block_off;
/* add the modified block to the undo log */
pmemobj_tx_add_range_direct(dst, D_RO(bp)->bsize);
memcpy(dst, buf, D_RO(bp)->bsize);
/* clear the error flag and set the zero flag */
uint8_t *flags = D_RW(D_RW(bp)->flags) + blockno;
/* add the modified flags to the undo log */
pmemobj_tx_add_range_direct(flags, sizeof(*flags));
*flags &= ~ERROR_MASK;
/* use reverse logic for zero mask */
*flags |= ZERO_MASK;
} TX_ONABORT {
retval = 1;
} TX_END
return retval;
}
/*
* pmemblk_set_zero -- zero a block in a block memory pool
*/
int
pmemblk_set_zero(PMEMblkpool *pbp, long long blockno)
{
PMEMobjpool *pop = (PMEMobjpool *)pbp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
if (blockno >= (long long)D_RO(bp)->nblocks)
return 1;
TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX,
&D_RW(bp)->locks[blockno % MAX_THREADS], TX_PARAM_NONE) {
uint8_t *flags = D_RW(D_RW(bp)->flags) + blockno;
/* add the modified flags to the undo log */
pmemobj_tx_add_range_direct(flags, sizeof(*flags));
/* use reverse logic for zero mask */
*flags &= ~ZERO_MASK;
} TX_ONABORT {
retval = 1;
} TX_END
return retval;
}
int
main(int argc, char *argv[])
{
if (argc < 4) {
fprintf(stderr, "usage: %s [co] file blk_size"\
" [cmd[:blk_num[:data]]...]\n", argv[0]);
return 1;
}
unsigned long bsize = strtoul(argv[3], NULL, 10);
assert(bsize <= BSIZE_MAX);
if (bsize == 0) {
perror("blk_size cannot be 0");
return 1;
}
PMEMblkpool *pbp;
if (strncmp(argv[1], "c", 1) == 0) {
pbp = pmemblk_create(argv[2], bsize, POOL_SIZE,
CREATE_MODE_RW);
} else if (strncmp(argv[1], "o", 1) == 0) {
pbp = pmemblk_open(argv[2], bsize);
} else {
fprintf(stderr, "usage: %s [co] file blk_size"
" [cmd[:blk_num[:data]]...]\n", argv[0]);
return 1;
}
if (pbp == NULL) {
perror("pmemblk_create/pmemblk_open");
return 1;
}
/* process the command line arguments */
for (int i = 4; i < argc; i++) {
switch (*argv[i]) {
case 'w': {
printf("write: %s\n", argv[i] + 2);
const char *block_str = strtok(argv[i] + 2,
":");
const char *data = strtok(NULL, ":");
assert(block_str != NULL);
assert(data != NULL);
unsigned long block = strtoul(block_str,
NULL, 10);
if (pmemblk_write(pbp, data, block))
perror("pmemblk_write failed");
break;
}
case 'r': {
printf("read: %s\n", argv[i] + 2);
char *buf = (char *)malloc(bsize);
assert(buf != NULL);
const char *block_str = strtok(argv[i] + 2,
":");
assert(block_str != NULL);
if (pmemblk_read(pbp, buf, strtoul(block_str,
NULL, 10))) {
perror("pmemblk_read failed");
free(buf);
break;
}
buf[bsize - 1] = '\0';
printf("%s\n", buf);
free(buf);
break;
}
case 'z': {
printf("zero: %s\n", argv[i] + 2);
const char *block_str = strtok(argv[i] + 2,
":");
assert(block_str != NULL);
if (pmemblk_set_zero(pbp, strtoul(block_str,
NULL, 10)))
perror("pmemblk_set_zero failed");
break;
}
case 'e': {
printf("error: %s\n", argv[i] + 2);
const char *block_str = strtok(argv[i] + 2,
":");
assert(block_str != NULL);
if (pmemblk_set_error(pbp, strtoul(block_str,
NULL, 10)))
perror("pmemblk_set_error failed");
break;
}
case 'n': {
printf("nblocks: ");
printf("%zu\n", pmemblk_nblock(pbp));
break;
}
default: {
fprintf(stderr, "unrecognized command %s\n",
argv[i]);
break;
}
};
}
/* all done */
pmemblk_close(pbp);
return 0;
}
| 9,447 | 22.62 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/slab_allocator/main.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* main.c -- example usage of a slab-like mechanism implemented in libpmemobj
*
* This application does nothing besides demonstrating the example slab
* allocator mechanism.
*
* By using the CTL alloc class API we can instrument libpmemobj to optimally
* manage memory for the pool.
*/
#include <ex_common.h>
#include <assert.h>
#include <stdio.h>
#include "slab_allocator.h"
POBJ_LAYOUT_BEGIN(slab_allocator);
POBJ_LAYOUT_ROOT(slab_allocator, struct root);
POBJ_LAYOUT_TOID(slab_allocator, struct bar);
POBJ_LAYOUT_TOID(slab_allocator, struct foo);
POBJ_LAYOUT_END(slab_allocator);
struct foo {
char data[100];
};
struct bar {
char data[500];
};
struct root {
TOID(struct foo) foop;
TOID(struct bar) barp;
};
int
main(int argc, char *argv[])
{
if (argc < 2) {
printf("usage: %s file-name\n", argv[0]);
return 1;
}
const char *path = argv[1];
PMEMobjpool *pop;
if (file_exists(path) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(btree),
PMEMOBJ_MIN_POOL, 0666)) == NULL) {
perror("failed to create pool\n");
return 1;
}
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(btree))) == NULL) {
perror("failed to open pool\n");
return 1;
}
}
struct slab_allocator *foo_producer = slab_new(pop, sizeof(struct foo));
assert(foo_producer != NULL);
struct slab_allocator *bar_producer = slab_new(pop, sizeof(struct bar));
assert(bar_producer != NULL);
TOID(struct root) root = POBJ_ROOT(pop, struct root);
if (TOID_IS_NULL(D_RO(root)->foop)) {
TX_BEGIN(pop) {
TX_SET(root, foop.oid, slab_tx_alloc(foo_producer));
} TX_END
}
if (TOID_IS_NULL(D_RO(root)->barp)) {
slab_alloc(bar_producer, &D_RW(root)->barp.oid, NULL, NULL);
}
assert(pmemobj_alloc_usable_size(D_RO(root)->foop.oid) ==
sizeof(struct foo));
assert(pmemobj_alloc_usable_size(D_RO(root)->barp.oid) ==
sizeof(struct bar));
slab_delete(foo_producer);
slab_delete(bar_producer);
pmemobj_close(pop);
return 0;
}
| 2,066 | 21.225806 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/slab_allocator/slab_allocator.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* slab_allocator.h -- slab-like mechanism for libpmemobj
*/
#ifndef SLAB_ALLOCATOR_H
#define SLAB_ALLOCATOR_H
#include <libpmemobj.h>
struct slab_allocator;
struct slab_allocator *slab_new(PMEMobjpool *pop, size_t size);
void slab_delete(struct slab_allocator *slab);
int slab_alloc(struct slab_allocator *slab, PMEMoid *oid,
pmemobj_constr constructor, void *arg);
PMEMoid slab_tx_alloc(struct slab_allocator *slab);
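/*
 * Usage sketch (assuming an open pool "pop"; names are illustrative):
 *
 *	struct slab_allocator *slab = slab_new(pop, sizeof(struct foo));
 *	PMEMoid oid = slab_tx_alloc(slab);	(inside TX_BEGIN/TX_END)
 *	slab_delete(slab);
 */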
#endif /* SLAB_ALLOCATOR_H */
| 542 | 22.608696 | 63 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/array/array.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* array.c -- example of arrays usage
*/
#include <ex_common.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/stat.h>
#include <libpmemobj.h>
#define TOID_ARRAY(x) TOID(x)
#define COUNT_OF(x) (sizeof(x) / sizeof(x[0]))
#define MAX_BUFFLEN 30
#define MAX_TYPE_NUM 8
POBJ_LAYOUT_BEGIN(array);
POBJ_LAYOUT_TOID(array, struct array_elm);
POBJ_LAYOUT_TOID(array, int);
POBJ_LAYOUT_TOID(array, PMEMoid);
POBJ_LAYOUT_TOID(array, TOID(struct array_elm));
POBJ_LAYOUT_TOID(array, struct array_info);
POBJ_LAYOUT_END(array);
static PMEMobjpool *pop;
enum array_types {
UNKNOWN_ARRAY_TYPE,
INT_ARRAY_TYPE,
PMEMOID_ARRAY_TYPE,
TOID_ARRAY_TYPE,
MAX_ARRAY_TYPE
};
struct array_elm {
int id;
};
struct array_info {
char name[MAX_BUFFLEN];
size_t size;
enum array_types type;
PMEMoid array;
};
/*
* print_usage -- print general usage
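 *
 * For instance (hypothetical file name):
 *	./array /mnt/pmem/testfile alloc myarray 5 int
 *	./array /mnt/pmem/testfile print myarray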
*/
static void
print_usage(void)
{
printf("usage: ./array <file-name> "
"<alloc|realloc|free|print>"
" <array-name> [<size> [<TOID|PMEMoid|int>]]\n");
}
/*
* get type -- parse argument given as type of array
*/
static enum array_types
get_type(const char *type_name)
{
const char *names[MAX_ARRAY_TYPE] = {"", "int", "PMEMoid", "TOID"};
enum array_types type;
for (type = (enum array_types)(MAX_ARRAY_TYPE - 1);
type > UNKNOWN_ARRAY_TYPE;
type = (enum array_types)(type - 1)) {
if (strcmp(names[type], type_name) == 0)
break;
}
if (type == UNKNOWN_ARRAY_TYPE)
fprintf(stderr, "unknown type: %s\n", type_name);
return type;
}
/*
 * find_array -- returns info about the array with the given name
*/
static TOID(struct array_info)
find_array(const char *name)
{
TOID(struct array_info) info;
POBJ_FOREACH_TYPE(pop, info) {
if (strncmp(D_RO(info)->name, name, MAX_BUFFLEN) == 0)
return info;
}
return TOID_NULL(struct array_info);
}
/*
* elm_constructor -- constructor of array_elm type object
*/
static int
elm_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
struct array_elm *obj = (struct array_elm *)ptr;
int *id = (int *)arg;
obj->id = *id;
pmemobj_persist(pop, obj, sizeof(*obj));
return 0;
}
/*
* print_int -- print array of int type
*/
static void
print_int(struct array_info *info)
{
TOID(int) array;
TOID_ASSIGN(array, info->array);
for (size_t i = 0; i < info->size; i++)
printf("%d ", D_RO(array)[i]);
}
/*
* print_pmemoid -- print array of PMEMoid type
*/
static void
print_pmemoid(struct array_info *info)
{
TOID(PMEMoid) array;
TOID(struct array_elm) elm;
TOID_ASSIGN(array, info->array);
for (size_t i = 0; i < info->size; i++) {
TOID_ASSIGN(elm, D_RW(array)[i]);
printf("%d ", D_RO(elm)->id);
}
}
/*
* print_toid -- print array of TOID(struct array_elm) type
*/
static void
print_toid(struct array_info *info)
{
TOID_ARRAY(TOID(struct array_elm)) array;
TOID_ASSIGN(array, info->array);
for (size_t i = 0; i < info->size; i++)
printf("%d ", D_RO(D_RO(array)[i])->id);
}
typedef void (*fn_print)(struct array_info *info);
static fn_print print_array[] = {NULL, print_int, print_pmemoid, print_toid};
/*
* free_int -- de-allocate array of int type
*/
static void
free_int(struct array_info *info)
{
TOID(int) array;
TOID_ASSIGN(array, info->array);
/*
	 * For a persistent array of a simple type, it is enough to
	 * free the persistent pointer itself
*/
POBJ_FREE(&array);
}
/*
* free_pmemoid -- de-allocate array of PMEMoid type
*/
static void
free_pmemoid(struct array_info *info)
{
TOID(PMEMoid) array;
TOID_ASSIGN(array, info->array);
/*
	 * For a persistent array of persistent pointers, each element
	 * must be freed individually (if it was allocated earlier)
	 * before the array itself is freed
*/
for (size_t i = 0; i < info->size; i++)
pmemobj_free(&D_RW(array)[i]);
POBJ_FREE(&array);
}
/*
* free_toid -- de-allocate array of TOID(struct array_elm) type
*/
static void
free_toid(struct array_info *info)
{
TOID_ARRAY(TOID(struct array_elm)) array;
TOID_ASSIGN(array, info->array);
/*
	 * For a persistent array of persistent pointers, each element
	 * must be freed individually (if it was allocated earlier)
	 * before the array itself is freed
*/
for (size_t i = 0; i < info->size; i++)
POBJ_FREE(&D_RW(array)[i]);
POBJ_FREE(&array);
}
typedef void (*fn_free)(struct array_info *info);
static fn_free free_array[] = {NULL, free_int, free_pmemoid, free_toid};
/*
* realloc_int -- reallocate array of int type
*/
static PMEMoid
realloc_int(PMEMoid *info, size_t prev_size, size_t size)
{
TOID(int) array;
TOID_ASSIGN(array, *info);
POBJ_REALLOC(pop, &array, int, size * sizeof(int));
if (size > prev_size) {
for (size_t i = prev_size; i < size; i++)
D_RW(array)[i] = (int)i;
pmemobj_persist(pop,
D_RW(array) + prev_size,
(size - prev_size) * sizeof(*D_RW(array)));
}
return array.oid;
}
/*
* realloc_pmemoid -- reallocate array of PMEMoid type
*/
static PMEMoid
realloc_pmemoid(PMEMoid *info, size_t prev_size, size_t size)
{
TOID(PMEMoid) array;
TOID_ASSIGN(array, *info);
pmemobj_zrealloc(pop, &array.oid, sizeof(PMEMoid) * size,
TOID_TYPE_NUM(PMEMoid));
for (size_t i = prev_size; i < size; i++) {
if (pmemobj_alloc(pop, &D_RW(array)[i],
sizeof(struct array_elm), TOID_TYPE_NUM(PMEMoid),
elm_constructor, &i)) {
fprintf(stderr, "pmemobj_alloc\n");
assert(0);
}
}
return array.oid;
}
/*
* realloc_toid -- reallocate array of TOID(struct array_elm) type
*/
static PMEMoid
realloc_toid(PMEMoid *info, size_t prev_size, size_t size)
{
TOID_ARRAY(TOID(struct array_elm)) array;
TOID_ASSIGN(array, *info);
pmemobj_zrealloc(pop, &array.oid,
sizeof(TOID(struct array_elm)) * size,
TOID_TYPE_NUM_OF(array));
for (size_t i = prev_size; i < size; i++) {
POBJ_NEW(pop, &D_RW(array)[i], struct array_elm,
elm_constructor, &i);
if (TOID_IS_NULL(D_RW(array)[i])) {
fprintf(stderr, "POBJ_ALLOC\n");
assert(0);
}
}
return array.oid;
}
typedef PMEMoid (*fn_realloc)(PMEMoid *info, size_t prev_size, size_t size);
static fn_realloc realloc_array[] =
{NULL, realloc_int, realloc_pmemoid, realloc_toid};
/*
* alloc_int -- allocate array of int type
*/
static PMEMoid
alloc_int(size_t size)
{
TOID(int) array;
/*
	 * To allocate a persistent array of a simple type it is enough
	 * to allocate a region whose size equals the number of elements
	 * multiplied by the element size.
*/
POBJ_ALLOC(pop, &array, int, sizeof(int) * size,
NULL, NULL);
if (TOID_IS_NULL(array)) {
fprintf(stderr, "POBJ_ALLOC\n");
return OID_NULL;
}
for (size_t i = 0; i < size; i++)
D_RW(array)[i] = (int)i;
pmemobj_persist(pop, D_RW(array), size * sizeof(*D_RW(array)));
return array.oid;
}
/*
* alloc_pmemoid -- allocate array of PMEMoid type
*/
static PMEMoid
alloc_pmemoid(size_t size)
{
TOID(PMEMoid) array;
/*
	 * To allocate a persistent array of PMEMoid type it is necessary
	 * to allocate a region whose size equals the number of elements
	 * multiplied by sizeof(PMEMoid), and to allocate each element
	 * separately.
*/
POBJ_ALLOC(pop, &array, PMEMoid, sizeof(PMEMoid) * size,
NULL, NULL);
if (TOID_IS_NULL(array)) {
fprintf(stderr, "POBJ_ALLOC\n");
return OID_NULL;
}
for (size_t i = 0; i < size; i++) {
if (pmemobj_alloc(pop, &D_RW(array)[i],
sizeof(struct array_elm),
TOID_TYPE_NUM(PMEMoid), elm_constructor, &i)) {
fprintf(stderr, "pmemobj_alloc\n");
}
}
return array.oid;
}
/*
* alloc_toid -- allocate array of TOID(struct array_elm) type
*/
static PMEMoid
alloc_toid(size_t size)
{
TOID_ARRAY(TOID(struct array_elm)) array;
/*
	 * To allocate a persistent array of TOIDs to a user-defined
	 * structure it is necessary to allocate a region whose size
	 * equals the number of elements multiplied by the size of the
	 * typed TOID, and to allocate each element separately.
*/
POBJ_ALLOC(pop, &array, TOID(struct array_elm),
sizeof(TOID(struct array_elm)) * size, NULL, NULL);
if (TOID_IS_NULL(array)) {
fprintf(stderr, "POBJ_ALLOC\n");
return OID_NULL;
}
for (size_t i = 0; i < size; i++) {
POBJ_NEW(pop, &D_RW(array)[i], struct array_elm,
elm_constructor, &i);
if (TOID_IS_NULL(D_RW(array)[i])) {
fprintf(stderr, "POBJ_ALLOC\n");
assert(0);
}
}
return array.oid;
}
typedef PMEMoid (*fn_alloc)(size_t size);
static fn_alloc alloc_array[] = {NULL, alloc_int, alloc_pmemoid, alloc_toid};
/*
* do_print -- print values stored by proper array
*/
static void
do_print(int argc, char *argv[])
{
if (argc != 1) {
printf("usage: ./array <file-name> print <array-name>\n");
return;
}
TOID(struct array_info) array_info = find_array(argv[0]);
if (TOID_IS_NULL(array_info)) {
printf("%s doesn't exist\n", argv[0]);
return;
}
printf("%s:\n", argv[0]);
print_array[D_RO(array_info)->type](D_RW(array_info));
printf("\n");
}
/*
* do_free -- de-allocate proper array and proper TOID of array_info type
*/
static void
do_free(int argc, char *argv[])
{
if (argc != 1) {
printf("usage: ./array <file-name> free <array-name>\n");
return;
}
TOID(struct array_info) array_info = find_array(argv[0]);
if (TOID_IS_NULL(array_info)) {
printf("%s doesn't exist\n", argv[0]);
return;
}
free_array[D_RO(array_info)->type](D_RW(array_info));
POBJ_FREE(&array_info);
}
/*
* do_realloc -- reallocate proper array to given size and update information
* in array_info structure
*/
static void
do_realloc(int argc, char *argv[])
{
if (argc != 2) {
printf("usage: ./array <file-name> realloc"
" <array-name> <size>\n");
return;
}
size_t size = atoi(argv[1]);
TOID(struct array_info) array_info = find_array(argv[0]);
if (TOID_IS_NULL(array_info)) {
printf("%s doesn't exist\n", argv[0]);
return;
}
struct array_info *info = D_RW(array_info);
info->array = realloc_array[info->type](&info->array, info->size, size);
if (OID_IS_NULL(info->array)) {
if (size != 0)
printf("POBJ_REALLOC\n");
}
info->size = size;
pmemobj_persist(pop, info, sizeof(*info));
}
/*
* do_alloc -- allocate persistent array and TOID of array_info type
* and set it with information about new array
*/
static void
do_alloc(int argc, char *argv[])
{
if (argc != 3) {
printf("usage: ./array <file-name> alloc <array-name>"
"<size> <type>\n");
return;
}
enum array_types type = get_type(argv[2]);
if (type == UNKNOWN_ARRAY_TYPE)
return;
size_t size = atoi(argv[1]);
TOID(struct array_info) array_info = find_array(argv[0]);
if (!TOID_IS_NULL(array_info))
POBJ_FREE(&array_info);
POBJ_ZNEW(pop, &array_info, struct array_info);
struct array_info *info = D_RW(array_info);
strncpy(info->name, argv[0], MAX_BUFFLEN - 1);
info->name[MAX_BUFFLEN - 1] = '\0';
info->size = size;
info->type = type;
info->array = alloc_array[type](size);
if (OID_IS_NULL(info->array))
assert(0);
pmemobj_persist(pop, info, sizeof(*info));
}
typedef void (*fn_op)(int argc, char *argv[]);
static fn_op operations[] = {do_alloc, do_realloc, do_free, do_print};
int
main(int argc, char *argv[])
{
if (argc < 3) {
print_usage();
return 1;
}
const char *path = argv[1];
pop = NULL;
if (file_exists(path) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(array),
PMEMOBJ_MIN_POOL, CREATE_MODE_RW)) == NULL) {
printf("failed to create pool\n");
return 1;
}
} else {
if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(array)))
== NULL) {
printf("failed to open pool\n");
return 1;
}
}
const char *option = argv[2];
argv += 3;
argc -= 3;
const char *names[] = {"alloc", "realloc", "free", "print"};
unsigned i = 0;
for (; i < COUNT_OF(names) && strcmp(option, names[i]) != 0; i++);
if (i != COUNT_OF(names))
operations[i](argc, argv);
else
print_usage();
pmemobj_close(pop);
return 0;
}
| 11,844 | 22.595618 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/list_map/skiplist_map.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* skiplist_map.c -- Skiplist implementation
*/
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include "skiplist_map.h"
#define SKIPLIST_LEVELS_NUM 4
#define NULL_NODE TOID_NULL(struct skiplist_map_node)
#include <x86intrin.h>
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
struct skiplist_map_entry {
uint64_t key;
PMEMoid value;
};
struct skiplist_map_node {
TOID(struct skiplist_map_node) next[SKIPLIST_LEVELS_NUM];
struct skiplist_map_entry entry;
};
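/*
 * Layout sketch (illustrative keys only): every node keeps up to
 * SKIPLIST_LEVELS_NUM forward pointers; level 0 links all nodes in key
 * order and each higher level skips over more of them, e.g.:
 *
 *	level 2: head ------------> 5 ----------> 9
 *	level 1: head ----> 3 ----> 5 ----------> 9
 *	level 0: head > 1 > 3 > 4 > 5 > 7 > 8 > 9
 */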
/*
* skiplist_map_create -- allocates a new skiplist instance
*/
int
skiplist_map_create(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map,
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct skiplist_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* skiplist_map_clear -- removes all elements from the map
*/
int
skiplist_map_clear(PMEMobjpool *pop, TOID(struct skiplist_map_node) map)
{
while (!TOID_EQUALS(D_RO(map)->next[0], NULL_NODE)) {
TOID(struct skiplist_map_node) next = D_RO(map)->next[0];
skiplist_map_remove_free(pop, map, D_RO(next)->entry.key);
}
return 0;
}
/*
* skiplist_map_destroy -- cleanups and frees skiplist instance
*/
int
skiplist_map_destroy(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map)
{
int ret = 0;
TX_BEGIN(pop) {
skiplist_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct skiplist_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* skiplist_map_insert_new -- allocates a new object and inserts it into
* the list
*/
int
skiplist_map_insert_new(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
skiplist_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* skiplist_map_insert_node -- (internal) adds new node in selected place
*/
static void
skiplist_map_insert_node(TOID(struct skiplist_map_node) new_node,
TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM])
{
unsigned current_level = 0;
do {
TX_ADD_FIELD(path[current_level], next[current_level]);
D_RW(new_node)->next[current_level] =
D_RO(path[current_level])->next[current_level];
D_RW(path[current_level])->next[current_level] = new_node;
} while (++current_level < SKIPLIST_LEVELS_NUM && rand() % 2 == 0);
}
/*
 * skiplist_map_find -- (internal) returns the path to the searched node,
 * or, if the node doesn't exist, the path to the place where the key
 * should be inserted.
*/
static void
skiplist_map_find(uint64_t key, TOID(struct skiplist_map_node) map,
TOID(struct skiplist_map_node) *path)
{
int current_level;
TOID(struct skiplist_map_node) active = map;
for (current_level = SKIPLIST_LEVELS_NUM - 1;
current_level >= 0; current_level--) {
for (TOID(struct skiplist_map_node) next =
D_RO(active)->next[current_level];
!TOID_EQUALS(next, NULL_NODE) &&
D_RO(next)->entry.key < key;
next = D_RO(active)->next[current_level]) {
active = next;
}
path[current_level] = active;
}
}
/*
* skiplist_map_insert -- inserts a new key-value pair into the map
*/
#ifdef GET_NDP_BREAKDOWN
uint64_t ulogCycles;
uint64_t waitCycles;
uint64_t resetCycles;
#endif
int
skiplist_map_insert(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key, PMEMoid value)
{
int ret = 0;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
TOID(struct skiplist_map_node) new_node;
TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM];
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
new_node = TX_ZNEW(struct skiplist_map_node);
D_RW(new_node)->entry.key = key;
D_RW(new_node)->entry.value = value;
skiplist_map_find(key, map, path);
skiplist_map_insert_node(new_node, path);
} TX_ONABORT {
ret = 1;
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000));
printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* skiplist_map_remove_free -- removes and frees an object from the list
*/
int
skiplist_map_remove_free(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = skiplist_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* skiplist_map_remove_node -- (internal) removes selected node
*/
static void
skiplist_map_remove_node(
TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM])
{
TOID(struct skiplist_map_node) to_remove = D_RO(path[0])->next[0];
int i;
for (i = 0; i < SKIPLIST_LEVELS_NUM; i++) {
if (TOID_EQUALS(D_RO(path[i])->next[i], to_remove)) {
TX_ADD_FIELD(path[i], next[i]);
D_RW(path[i])->next[i] = D_RO(to_remove)->next[i];
}
}
}
/*
* skiplist_map_remove -- removes key-value pair from the map
*/
PMEMoid
skiplist_map_remove(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key)
{
PMEMoid ret = OID_NULL;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM];
TOID(struct skiplist_map_node) to_remove;
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
skiplist_map_find(key, map, path);
to_remove = D_RO(path[0])->next[0];
if (!TOID_EQUALS(to_remove, NULL_NODE) &&
D_RO(to_remove)->entry.key == key) {
ret = D_RO(to_remove)->entry.value;
skiplist_map_remove_node(path);
}
} TX_ONABORT {
ret = OID_NULL;
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000));
printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* skiplist_map_get -- searches for a value of the key
*/
PMEMoid
skiplist_map_get(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key)
{
PMEMoid ret = OID_NULL;
TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM], found;
skiplist_map_find(key, map, path);
found = D_RO(path[0])->next[0];
if (!TOID_EQUALS(found, NULL_NODE) &&
D_RO(found)->entry.key == key) {
ret = D_RO(found)->entry.value;
}
return ret;
}
/*
* skiplist_map_lookup -- searches if a key exists
*/
int
skiplist_map_lookup(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key)
{
int ret = 0;
TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM], found;
skiplist_map_find(key, map, path);
found = D_RO(path[0])->next[0];
if (!TOID_EQUALS(found, NULL_NODE) &&
D_RO(found)->entry.key == key) {
ret = 1;
}
return ret;
}
/*
* skiplist_map_foreach -- calls function for each node on a list
*/
int
skiplist_map_foreach(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
TOID(struct skiplist_map_node) next = map;
while (!TOID_EQUALS(D_RO(next)->next[0], NULL_NODE)) {
next = D_RO(next)->next[0];
cb(D_RO(next)->entry.key, D_RO(next)->entry.value, arg);
}
return 0;
}
/*
* skiplist_map_is_empty -- checks whether the list map is empty
*/
int
skiplist_map_is_empty(PMEMobjpool *pop, TOID(struct skiplist_map_node) map)
{
return TOID_IS_NULL(D_RO(map)->next[0]);
}
/*
* skiplist_map_check -- check if given persistent object is a skiplist
*/
int
skiplist_map_check(PMEMobjpool *pop, TOID(struct skiplist_map_node) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
| 8,913 | 23.899441 | 83 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/list_map/skiplist_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* skiplist_map.h -- sorted list collection implementation
*/
#ifndef SKIPLIST_MAP_H
#define SKIPLIST_MAP_H
#include <libpmemobj.h>
#ifndef SKIPLIST_MAP_TYPE_OFFSET
#define SKIPLIST_MAP_TYPE_OFFSET 2020
#endif
struct skiplist_map_node;
TOID_DECLARE(struct skiplist_map_node, SKIPLIST_MAP_TYPE_OFFSET + 0);
int skiplist_map_check(PMEMobjpool *pop, TOID(struct skiplist_map_node) map);
int skiplist_map_create(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map,
void *arg);
int skiplist_map_destroy(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map);
int skiplist_map_insert(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key, PMEMoid value);
int skiplist_map_insert_new(PMEMobjpool *pop,
TOID(struct skiplist_map_node) map, uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid skiplist_map_remove(PMEMobjpool *pop,
TOID(struct skiplist_map_node) map, uint64_t key);
int skiplist_map_remove_free(PMEMobjpool *pop,
TOID(struct skiplist_map_node) map, uint64_t key);
int skiplist_map_clear(PMEMobjpool *pop, TOID(struct skiplist_map_node) map);
PMEMoid skiplist_map_get(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key);
int skiplist_map_lookup(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key);
int skiplist_map_foreach(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int skiplist_map_is_empty(PMEMobjpool *pop, TOID(struct skiplist_map_node) map);
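/*
 * Usage sketch (assuming an open pool "pop"; values are illustrative):
 *
 *	TOID(struct skiplist_map_node) map;
 *	skiplist_map_create(pop, &map, NULL);
 *	skiplist_map_insert(pop, map, 42, value_oid);
 *	PMEMoid v = skiplist_map_get(pop, map, 42);
 */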
#endif /* SKIPLIST_MAP_H */
| 1,688 | 36.533333 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_tx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
#ifndef HASHMAP_TX_H
#define HASHMAP_TX_H
#include <stddef.h>
#include <stdint.h>
#include <hashmap.h>
#include <libpmemobj.h>
#ifndef HASHMAP_TX_TYPE_OFFSET
#define HASHMAP_TX_TYPE_OFFSET 1004
#endif
struct hashmap_tx;
TOID_DECLARE(struct hashmap_tx, HASHMAP_TX_TYPE_OFFSET + 0);
int hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap);
int hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg);
int hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap);
int hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key, PMEMoid value);
PMEMoid hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key);
PMEMoid hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key);
int hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key);
int hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
size_t hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap);
int hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
unsigned cmd, uint64_t arg);
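/*
 * Usage sketch (assuming an open pool "pop"; values are illustrative):
 *
 *	TOID(struct hashmap_tx) map;
 *	hm_tx_create(pop, &map, NULL);
 *	hm_tx_insert(pop, map, 42, value_oid);
 *	PMEMoid v = hm_tx_get(pop, map, 42);
 */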
#endif /* HASHMAP_TX_H */
| 1,270 | 34.305556 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_rp.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
#ifndef HASHMAP_RP_H
#define HASHMAP_RP_H
#include <stddef.h>
#include <stdint.h>
#include <hashmap.h>
#include <libpmemobj.h>
#ifndef HASHMAP_RP_TYPE_OFFSET
#define HASHMAP_RP_TYPE_OFFSET 1008
#endif
/* Flags to indicate if insertion is being made during rebuild process */
#define HASHMAP_RP_REBUILD 1
#define HASHMAP_RP_NO_REBUILD 0
/* Initial number of entries for hashmap_rp */
#define INIT_ENTRIES_NUM_RP 16
/* Load factor to indicate resize threshold */
#define HASHMAP_RP_LOAD_FACTOR 0.5f
/* Maximum number of swaps allowed during single insertion */
#define HASHMAP_RP_MAX_SWAPS 150
/* Size of an action array used during single insertion */
#define HASHMAP_RP_MAX_ACTIONS (4 * HASHMAP_RP_MAX_SWAPS + 5)
struct hashmap_rp;
TOID_DECLARE(struct hashmap_rp, HASHMAP_RP_TYPE_OFFSET + 0);
int hm_rp_check(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap);
int hm_rp_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *map, void *arg);
int hm_rp_init(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap);
int hm_rp_insert(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key, PMEMoid value);
PMEMoid hm_rp_remove(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key);
PMEMoid hm_rp_get(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key);
int hm_rp_lookup(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key);
int hm_rp_foreach(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
size_t hm_rp_count(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap);
int hm_rp_cmd(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
unsigned cmd, uint64_t arg);
#endif /* HASHMAP_RP_H */
| 1,780 | 36.104167 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_internal.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
#ifndef HASHSET_INTERNAL_H
#define HASHSET_INTERNAL_H
/* large prime number used as a hashing function coefficient */
#define HASH_FUNC_COEFF_P 32212254719ULL
/* initial number of buckets */
#define INIT_BUCKETS_NUM 10
/* number of values in a bucket which triggers a hashtable rebuild check */
#define MIN_HASHSET_THRESHOLD 5
/* number of values in a bucket which forces a hashtable rebuild */
#define MAX_HASHSET_THRESHOLD 10
#endif
| 521 | 25.1 | 72 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_tx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/* integer hash set implementation which uses only transaction APIs */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "hashmap_tx.h"
#include "hashmap_internal.h"
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
/* layout definition */
TOID_DECLARE(struct buckets, HASHMAP_TX_TYPE_OFFSET + 1);
TOID_DECLARE(struct entry, HASHMAP_TX_TYPE_OFFSET + 2);
struct entry {
uint64_t key;
PMEMoid value;
/* next entry list pointer */
TOID(struct entry) next;
};
struct buckets {
/* number of buckets */
size_t nbuckets;
/* array of lists */
TOID(struct entry) bucket[];
};
struct hashmap_tx {
/* random number generator seed */
uint32_t seed;
/* hash function coefficients */
uint32_t hash_fun_a;
uint32_t hash_fun_b;
uint64_t hash_fun_p;
/* number of values inserted */
uint64_t count;
/* buckets */
TOID(struct buckets) buckets;
};
/*
* create_hashmap -- hashmap initializer
*/
static void
create_hashmap(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint32_t seed)
{
size_t len = INIT_BUCKETS_NUM;
size_t sz = sizeof(struct buckets) +
len * sizeof(TOID(struct entry));
TX_BEGIN(pop) {
TX_ADD(hashmap);
D_RW(hashmap)->seed = seed;
do {
D_RW(hashmap)->hash_fun_a = (uint32_t)rand();
} while (D_RW(hashmap)->hash_fun_a == 0);
D_RW(hashmap)->hash_fun_b = (uint32_t)rand();
D_RW(hashmap)->hash_fun_p = HASH_FUNC_COEFF_P;
D_RW(hashmap)->buckets = TX_ZALLOC(struct buckets, sz);
D_RW(D_RW(hashmap)->buckets)->nbuckets = len;
} TX_ONABORT {
fprintf(stderr, "%s: transaction aborted: %s\n", __func__,
pmemobj_errormsg());
abort();
} TX_END
}
/*
* hash -- the simplest hashing function,
* see https://en.wikipedia.org/wiki/Universal_hashing#Hashing_integers
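 *
 * A worked instance (illustrative coefficients, not the pool's actual
 * ones): with a = 3, b = 7, p = HASH_FUNC_COEFF_P and len = 10 buckets,
 * key 42 maps to ((3 * 42 + 7) % p) % 10 = 133 % 10 = 3.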
*/
static uint64_t
hash(const TOID(struct hashmap_tx) *hashmap,
const TOID(struct buckets) *buckets, uint64_t value)
{
uint32_t a = D_RO(*hashmap)->hash_fun_a;
uint32_t b = D_RO(*hashmap)->hash_fun_b;
uint64_t p = D_RO(*hashmap)->hash_fun_p;
size_t len = D_RO(*buckets)->nbuckets;
return ((a * value + b) % p) % len;
}
/*
* hm_tx_rebuild -- rebuilds the hashmap with a new number of buckets
*/
static void
hm_tx_rebuild(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, size_t new_len)
{
TOID(struct buckets) buckets_old = D_RO(hashmap)->buckets;
if (new_len == 0)
new_len = D_RO(buckets_old)->nbuckets;
size_t sz_old = sizeof(struct buckets) +
D_RO(buckets_old)->nbuckets *
sizeof(TOID(struct entry));
size_t sz_new = sizeof(struct buckets) +
new_len * sizeof(TOID(struct entry));
TX_BEGIN(pop) {
TX_ADD_FIELD(hashmap, buckets);
TOID(struct buckets) buckets_new =
TX_ZALLOC(struct buckets, sz_new);
D_RW(buckets_new)->nbuckets = new_len;
pmemobj_tx_add_range(buckets_old.oid, 0, sz_old);
for (size_t i = 0; i < D_RO(buckets_old)->nbuckets; ++i) {
while (!TOID_IS_NULL(D_RO(buckets_old)->bucket[i])) {
TOID(struct entry) en =
D_RO(buckets_old)->bucket[i];
uint64_t h = hash(&hashmap, &buckets_new,
D_RO(en)->key);
D_RW(buckets_old)->bucket[i] = D_RO(en)->next;
TX_ADD_FIELD(en, next);
D_RW(en)->next = D_RO(buckets_new)->bucket[h];
D_RW(buckets_new)->bucket[h] = en;
}
}
D_RW(hashmap)->buckets = buckets_new;
TX_FREE(buckets_old);
} TX_ONABORT {
fprintf(stderr, "%s: transaction aborted: %s\n", __func__,
pmemobj_errormsg());
/*
* We don't need to do anything here, because everything is
* consistent. The only thing affected is performance.
*/
} TX_END
}
/*
* hm_tx_insert -- inserts specified value into the hashmap,
* returns:
* - 0 if successful,
* - 1 if value already existed,
* - -1 if something bad happened
*/
#ifdef GET_NDP_BREAKDOWN
uint64_t ulogCycles;
uint64_t waitCycles;
uint64_t resetCycles;
#endif
int
hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key, PMEMoid value)
{
int ret = 0;
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
// TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
int num = 0;
/*
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next) {
if (D_RO(var)->key == key)
return 1;
num++;
}
*/
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
//uint64_t startCycles1,endCycles1;
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]);
TX_ADD_FIELD(hashmap, count);
TOID(struct entry) e = TX_NEW(struct entry);
D_RW(e)->key = key;
D_RW(e)->value = value;
D_RW(e)->next = D_RO(buckets)->bucket[h];
D_RW(buckets)->bucket[h] = e;
D_RW(hashmap)->count++;
num++;
//printf("parallel time = %f\n", (((double)(endCycles1 - startCycles1))));
} TX_ONABORT {
fprintf(stderr, "transaction aborted: %s\n",
pmemobj_errormsg());
ret = -1;
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\ntotal tx total time = %f\n", RUN_COUNT/totTime, totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("total tx cmd issue time = %f\n", (((double)ulogCycles)/2000000000));
printf("total tx wait time = %f\n", (((double)waitCycles)/2000000000));
printf("total tx reset time = %f\n", (((double)resetCycles)/2000000000));
#endif
if (ret)
return ret;
return 0;
}
/*
* hm_tx_remove -- removes specified value from the hashmap,
* returns:
* - key's value if successful,
* - OID_NULL if value didn't exist or if something bad happened
*/
PMEMoid
hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
int ret = 0;
PMEMoid retoid;
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var, prev = TOID_NULL(struct entry);
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
prev = var, var = D_RO(var)->next) {
if (D_RO(var)->key == key)
break;
}
if (TOID_IS_NULL(var))
return OID_NULL;
retoid = D_RO(var)->value;
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
if (TOID_IS_NULL(prev))
TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]);
else
TX_ADD_FIELD(prev, next);
TX_ADD_FIELD(hashmap, count);
if (TOID_IS_NULL(prev))
D_RW(buckets)->bucket[h] = D_RO(var)->next;
else
D_RW(prev)->next = D_RO(var)->next;
D_RW(hashmap)->count--;
TX_FREE(var);
} TX_ONABORT {
fprintf(stderr, "transaction aborted: %s\n",
pmemobj_errormsg());
ret = -1;
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\ntotal tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("total tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("total tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
if (ret)
return OID_NULL;
if (D_RO(hashmap)->count < D_RO(buckets)->nbuckets)
hm_tx_rebuild(pop, hashmap, D_RO(buckets)->nbuckets / 2);
return retoid;
}
/*
* hm_tx_foreach -- prints all values from the hashmap
*/
int
hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
int ret = 0;
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) {
if (TOID_IS_NULL(D_RO(buckets)->bucket[i]))
continue;
for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var);
var = D_RO(var)->next) {
ret = cb(D_RO(var)->key, D_RO(var)->value, arg);
if (ret)
break;
}
}
return ret;
}
/*
* hm_tx_debug -- prints complete hashmap state
*/
static void
hm_tx_debug(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, FILE *out)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
fprintf(out, "a: %u b: %u p: %" PRIu64 "\n", D_RO(hashmap)->hash_fun_a,
D_RO(hashmap)->hash_fun_b, D_RO(hashmap)->hash_fun_p);
fprintf(out, "count: %" PRIu64 ", buckets: %zu\n",
D_RO(hashmap)->count, D_RO(buckets)->nbuckets);
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) {
if (TOID_IS_NULL(D_RO(buckets)->bucket[i]))
continue;
int num = 0;
fprintf(out, "%zu: ", i);
for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var);
var = D_RO(var)->next) {
fprintf(out, "%" PRIu64 " ", D_RO(var)->key);
num++;
}
fprintf(out, "(%d)\n", num);
}
}
/*
* hm_tx_get -- checks whether specified value is in the hashmap
*/
PMEMoid
hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next)
if (D_RO(var)->key == key)
return D_RO(var)->value;
return OID_NULL;
}
/*
* hm_tx_lookup -- checks whether specified value exists
*/
int
hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next)
if (D_RO(var)->key == key)
return 1;
return 0;
}
/*
* hm_tx_count -- returns number of elements
*/
size_t
hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
return D_RO(hashmap)->count;
}
/*
* hm_tx_init -- recovers hashmap state, called after pmemobj_open
*/
int
hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
srand(D_RO(hashmap)->seed);
return 0;
}
/*
* hm_tx_create -- allocates new hashmap
*/
int
hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg)
{
struct hashmap_args *args = (struct hashmap_args *)arg;
int ret = 0;
TX_BEGIN(pop) {
TX_ADD_DIRECT(map);
*map = TX_ZNEW(struct hashmap_tx);
uint32_t seed = args ? args->seed : 0;
create_hashmap(pop, *map, seed);
} TX_ONABORT {
ret = -1;
} TX_END
return ret;
}
/*
* hm_tx_check -- checks if specified persistent object is an
* instance of hashmap
*/
int
hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap);
}
/*
* hm_tx_cmd -- execute cmd for hashmap
*/
int
hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
unsigned cmd, uint64_t arg)
{
switch (cmd) {
case HASHMAP_CMD_REBUILD:
hm_tx_rebuild(pop, hashmap, arg);
return 0;
case HASHMAP_CMD_DEBUG:
if (!arg)
return -EINVAL;
hm_tx_debug(pop, hashmap, (FILE *)arg);
return 0;
default:
return -EINVAL;
}
}
| 11,602 | 22.825462 | 83 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_rp.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* Integer hash set implementation with open addressing Robin Hood collision
* resolution which uses action.h reserve/publish API.
*/
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "hashmap_rp.h"
#define TOMBSTONE_MASK (1ULL << 63)
#ifdef DEBUG
#define HM_ASSERT(cnd) assert(cnd)
#else
#define HM_ASSERT(cnd)
#endif
/* layout definition */
TOID_DECLARE(struct entry, HASHMAP_RP_TYPE_OFFSET + 1);
struct entry {
uint64_t key;
PMEMoid value;
uint64_t hash;
};
struct add_entry {
struct entry data;
/* position index in hashmap, where data should be inserted/updated */
size_t pos;
/* Action array to perform addition in set of actions */
struct pobj_action *actv;
/* Action array index counter */
size_t actv_cnt;
#ifdef DEBUG
/* Swaps counter for current insertion. Enabled in debug mode */
int swaps;
#endif
};
struct hashmap_rp {
/* number of values inserted */
uint64_t count;
/* container capacity */
uint64_t capacity;
/* resize threshold */
uint64_t resize_threshold;
/* entries */
TOID(struct entry) entries;
};
int *swaps_array = NULL;
#ifdef DEBUG
static inline int
is_power_of_2(uint64_t v)
{
return v && !(v & (v - 1));
}
#endif
/*
* entry_is_deleted -- checks 'tombstone' bit if hash is deleted
*/
static inline int
entry_is_deleted(uint64_t hash)
{
return (hash & TOMBSTONE_MASK) > 0;
}
/*
* entry_is_empty -- checks if entry is empty
*/
static inline int
entry_is_empty(uint64_t hash)
{
return hash == 0 || entry_is_deleted(hash);
}
/*
* increment_pos -- increment position index, skip 0
*/
static uint64_t
increment_pos(const struct hashmap_rp *hashmap, uint64_t pos)
{
HM_ASSERT(is_power_of_2(hashmap->capacity));
pos = (pos + 1) & (hashmap->capacity - 1);
return pos == 0 ? 1 : pos;
}
/*
 * probe_distance -- returns the probe number, an indicator of how far
 * from its desired position a given hash is stored in the hashmap
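 *
 * Example (illustrative values): with capacity 16, an entry whose hash
 * is 5 but which resides in slot 7 has probe distance
 * (7 + 16 - 5) & 15 = 2.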
*/
static uint64_t
probe_distance(const struct hashmap_rp *hashmap, uint64_t hash_key,
uint64_t slot_index)
{
uint64_t capacity = hashmap->capacity;
HM_ASSERT(is_power_of_2(hashmap->capacity));
	return (slot_index + capacity - hash_key) & (capacity - 1);
}
/*
 * hash -- hash function based on Austin Appleby's MurmurHash3 64-bit
 * finalizer. The returned value is adjusted to avoid the special values
 * reserved for unused and deleted hashes.
*/
static uint64_t
hash(const struct hashmap_rp *hashmap, uint64_t key)
{
key ^= key >> 33;
key *= 0xff51afd7ed558ccd;
key ^= key >> 33;
key *= 0xc4ceb9fe1a85ec53;
key ^= key >> 33;
HM_ASSERT(is_power_of_2(hashmap->capacity));
key &= hashmap->capacity - 1;
	/* the most significant ('tombstone') bit is reserved to mark deleted items */
key &= ~TOMBSTONE_MASK;
/*
* Ensure that we never return 0 as a hash, since we use 0 to
* indicate that element has never been used at all.
*/
return key == 0 ? 1 : key;
}
/*
* hashmap_create -- hashmap initializer
*/
static void
hashmap_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *hashmap_p,
uint32_t seed)
{
struct pobj_action actv[4];
size_t actv_cnt = 0;
TOID(struct hashmap_rp) hashmap =
POBJ_RESERVE_NEW(pop, struct hashmap_rp, &actv[actv_cnt]);
if (TOID_IS_NULL(hashmap))
goto reserve_err;
actv_cnt++;
D_RW(hashmap)->count = 0;
D_RW(hashmap)->capacity = INIT_ENTRIES_NUM_RP;
D_RW(hashmap)->resize_threshold = (uint64_t)(INIT_ENTRIES_NUM_RP *
HASHMAP_RP_LOAD_FACTOR);
size_t sz = sizeof(struct entry) * D_RO(hashmap)->capacity;
/* init entries with zero in order to track unused hashes */
D_RW(hashmap)->entries = POBJ_XRESERVE_ALLOC(pop, struct entry, sz,
&actv[actv_cnt], POBJ_XALLOC_ZERO);
if (TOID_IS_NULL(D_RO(hashmap)->entries))
goto reserve_err;
actv_cnt++;
pmemobj_persist(pop, D_RW(hashmap), sizeof(struct hashmap_rp));
pmemobj_set_value(pop, &actv[actv_cnt++], &hashmap_p->oid.pool_uuid_lo,
hashmap.oid.pool_uuid_lo);
pmemobj_set_value(pop, &actv[actv_cnt++], &hashmap_p->oid.off,
hashmap.oid.off);
pmemobj_publish(pop, actv, actv_cnt);
#ifdef DEBUG
swaps_array = (int *)calloc(INIT_ENTRIES_NUM_RP, sizeof(int));
if (!swaps_array)
abort();
#endif
return;
reserve_err:
fprintf(stderr, "hashmap alloc failed: %s\n", pmemobj_errormsg());
pmemobj_cancel(pop, actv, actv_cnt);
abort();
}
/*
* entry_update -- updates entry in given hashmap with given arguments
*/
static void
entry_update(PMEMobjpool *pop, struct hashmap_rp *hashmap,
struct add_entry *args, int rebuild)
{
HM_ASSERT(HASHMAP_RP_MAX_ACTIONS > args->actv_cnt + 4);
struct entry *entry_p = D_RW(hashmap->entries);
entry_p += args->pos;
if (rebuild == HASHMAP_RP_REBUILD) {
entry_p->key = args->data.key;
entry_p->value = args->data.value;
entry_p->hash = args->data.hash;
} else {
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&entry_p->key, args->data.key);
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&entry_p->value.pool_uuid_lo,
args->data.value.pool_uuid_lo);
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&entry_p->value.off, args->data.value.off);
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&entry_p->hash, args->data.hash);
}
#ifdef DEBUG
	/* swaps_array is allocated with hashmap->capacity elements */
	assert(args->pos < hashmap->capacity);
swaps_array[args->pos] = args->swaps;
#endif
}
/*
* entry_add -- increments given hashmap's elements counter and calls
* entry_update
*/
static void
entry_add(PMEMobjpool *pop, struct hashmap_rp *hashmap, struct add_entry *args,
int rebuild)
{
HM_ASSERT(HASHMAP_RP_MAX_ACTIONS > args->actv_cnt + 1);
if (rebuild == HASHMAP_RP_REBUILD)
hashmap->count++;
else {
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&hashmap->count, hashmap->count + 1);
}
entry_update(pop, hashmap, args, rebuild);
}
/*
* insert_helper -- inserts specified value into the hashmap
* If function was called during rebuild process, no redo logs will be used.
* returns:
* - 0 if successful,
* - 1 if value already existed
* - -1 on error
*/
static int
insert_helper(PMEMobjpool *pop, struct hashmap_rp *hashmap, uint64_t key,
PMEMoid value, int rebuild)
{
HM_ASSERT(hashmap->count + 1 < hashmap->resize_threshold);
struct pobj_action actv[HASHMAP_RP_MAX_ACTIONS];
struct add_entry args;
args.data.key = key;
args.data.value = value;
args.data.hash = hash(hashmap, key);
args.pos = args.data.hash;
if (rebuild != HASHMAP_RP_REBUILD) {
args.actv = actv;
args.actv_cnt = 0;
}
uint64_t dist = 0;
struct entry *entry_p = NULL;
#ifdef DEBUG
int swaps = 0;
#endif
for (int n = 0; n < HASHMAP_RP_MAX_SWAPS; ++n) {
entry_p = D_RW(hashmap->entries);
entry_p += args.pos;
#ifdef DEBUG
args.swaps = swaps;
#endif
/* Case 1: key already exists, override value */
if (!entry_is_empty(entry_p->hash) &&
entry_p->key == args.data.key) {
entry_update(pop, hashmap, &args, rebuild);
if (rebuild != HASHMAP_RP_REBUILD)
pmemobj_publish(pop, args.actv, args.actv_cnt);
return 1;
}
/* Case 2: slot is empty from the beginning */
if (entry_p->hash == 0) {
entry_add(pop, hashmap, &args, rebuild);
if (rebuild != HASHMAP_RP_REBUILD)
pmemobj_publish(pop, args.actv, args.actv_cnt);
return 0;
}
/*
* Case 3: existing element (or tombstone) has probed less than
* current element. Swap them (or put into tombstone slot) and
* keep going to find another slot for that element.
*/
uint64_t existing_dist = probe_distance(hashmap, entry_p->hash,
args.pos);
if (existing_dist < dist) {
if (entry_is_deleted(entry_p->hash)) {
entry_add(pop, hashmap, &args, rebuild);
if (rebuild != HASHMAP_RP_REBUILD)
pmemobj_publish(pop, args.actv,
args.actv_cnt);
return 0;
}
struct entry temp = *entry_p;
entry_update(pop, hashmap, &args, rebuild);
args.data = temp;
#ifdef DEBUG
swaps++;
#endif
dist = existing_dist;
}
/*
* Case 4: increment slot number and probe counter, keep going
* to find free slot
*/
args.pos = increment_pos(hashmap, args.pos);
dist += 1;
}
fprintf(stderr, "insertion requires too many swaps\n");
if (rebuild != HASHMAP_RP_REBUILD)
pmemobj_cancel(pop, args.actv, args.actv_cnt);
return -1;
}
/*
* index_lookup -- checks if given key exists in hashmap.
* Returns index number if key was found, 0 otherwise.
*/
static uint64_t
index_lookup(const struct hashmap_rp *hashmap, uint64_t key)
{
const uint64_t hash_lookup = hash(hashmap, key);
uint64_t pos = hash_lookup;
uint64_t dist = 0;
const struct entry *entry_p = NULL;
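	/*
	 * Robin Hood invariant: no resident entry is further from its home
	 * slot than the probing element would be, so the scan may stop at
	 * an empty slot or once a resident's probe distance drops below
	 * the distance travelled so far.
	 */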
do {
entry_p = D_RO(hashmap->entries);
entry_p += pos;
if (entry_p->hash == hash_lookup && entry_p->key == key)
return pos;
pos = increment_pos(hashmap, pos);
} while (entry_p->hash != 0 &&
(dist++) <= probe_distance(hashmap, entry_p->hash, pos) - 1);
return 0;
}
/*
 * entries_cache -- re-inserts all entries from the source hashmap (second
 * argument) into the destination hashmap (first argument)
*/
static int
entries_cache(PMEMobjpool *pop, struct hashmap_rp *dest,
const struct hashmap_rp *src)
{
const struct entry *e_begin = D_RO(src->entries);
const struct entry *e_end = e_begin + src->capacity;
for (const struct entry *e = e_begin; e != e_end; ++e) {
if (entry_is_empty(e->hash))
continue;
if (insert_helper(pop, dest, e->key,
e->value, HASHMAP_RP_REBUILD) == -1)
return -1;
}
HM_ASSERT(src->count == dest->count);
return 0;
}
/*
* hm_rp_rebuild -- rebuilds the hashmap with a new capacity.
* Returns 0 on success, -1 otherwise.
*/
static int
hm_rp_rebuild(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
size_t capacity_new)
{
/*
* We will need 6 actions:
* - 1 action to set new capacity
* - 1 action to set new resize threshold
* - 1 action to alloc memory for new entries
* - 1 action to free old entries
* - 2 actions to set new oid pointing to new entries
*/
struct pobj_action actv[6];
size_t actv_cnt = 0;
size_t sz_alloc = sizeof(struct entry) * capacity_new;
uint64_t resize_threshold_new = (uint64_t)(capacity_new *
HASHMAP_RP_LOAD_FACTOR);
pmemobj_set_value(pop, &actv[actv_cnt++], &D_RW(hashmap)->capacity,
capacity_new);
pmemobj_set_value(pop, &actv[actv_cnt++],
&D_RW(hashmap)->resize_threshold, resize_threshold_new);
struct hashmap_rp hashmap_rebuild;
hashmap_rebuild.count = 0;
hashmap_rebuild.capacity = capacity_new;
hashmap_rebuild.resize_threshold = resize_threshold_new;
hashmap_rebuild.entries = POBJ_XRESERVE_ALLOC(pop, struct entry,
sz_alloc, &actv[actv_cnt],
POBJ_XALLOC_ZERO);
if (TOID_IS_NULL(hashmap_rebuild.entries)) {
fprintf(stderr, "hashmap rebuild failed: %s\n",
pmemobj_errormsg());
goto rebuild_err;
}
actv_cnt++;
#ifdef DEBUG
free(swaps_array);
swaps_array = (int *)calloc(capacity_new, sizeof(int));
if (!swaps_array)
goto rebuild_err;
#endif
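	/*
	 * Rehash every entry into the reserved (still unpublished) entries
	 * array; HASHMAP_RP_REBUILD makes insert_helper write directly,
	 * without logging, and the array is persisted in one go below.
	 */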
if (entries_cache(pop, &hashmap_rebuild, D_RW(hashmap)) == -1)
goto rebuild_err;
pmemobj_persist(pop, D_RW(hashmap_rebuild.entries), sz_alloc);
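	/*
	 * Publishing frees the old array and redirects the oid to the new
	 * one in a single fail-safe atomic step, so a crash can never
	 * expose a half-built table.
	 */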
pmemobj_defer_free(pop, D_RW(hashmap)->entries.oid, &actv[actv_cnt++]);
pmemobj_set_value(pop, &actv[actv_cnt++],
&D_RW(hashmap)->entries.oid.pool_uuid_lo,
hashmap_rebuild.entries.oid.pool_uuid_lo);
pmemobj_set_value(pop, &actv[actv_cnt++],
&D_RW(hashmap)->entries.oid.off,
hashmap_rebuild.entries.oid.off);
HM_ASSERT(sizeof(actv) / sizeof(actv[0]) >= actv_cnt);
pmemobj_publish(pop, actv, actv_cnt);
return 0;
rebuild_err:
pmemobj_cancel(pop, actv, actv_cnt);
#ifdef DEBUG
free(swaps_array);
#endif
return -1;
}
/*
* hm_rp_create -- initializes hashmap state, called after pmemobj_create
*/
int
hm_rp_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *map, void *arg)
{
struct hashmap_args *args = (struct hashmap_args *)arg;
uint32_t seed = args ? args->seed : 0;
hashmap_create(pop, map, seed);
return 0;
}
/*
* hm_rp_check -- checks if specified persistent object is an instance of
* hashmap
*/
int
hm_rp_check(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap)
{
return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap);
}
/*
* hm_rp_init -- recovers hashmap state, called after pmemobj_open.
 * Since hashmap_rp performs rebuilds and insertions either completely or not
 * at all, there is no state to recover and the function simply returns 0.
*/
int
hm_rp_init(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap)
{
return 0;
}
/*
* hm_rp_insert -- rebuilds hashmap if necessary and wraps insert_helper.
* returns:
* - 0 if successful,
* - 1 if value already existed
* - -1 if something bad happened
*/
int
hm_rp_insert(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key, PMEMoid value)
{
if (D_RO(hashmap)->count + 1 >= D_RO(hashmap)->resize_threshold) {
uint64_t capacity_new = D_RO(hashmap)->capacity * 2;
if (hm_rp_rebuild(pop, hashmap, capacity_new) != 0)
return -1;
}
return insert_helper(pop, D_RW(hashmap), key, value,
HASHMAP_RP_NO_REBUILD);
}
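/*
 * A minimal usage sketch (not part of the original file; 'pop' and 'map' are
 * assumed to come from pmemobj_create() and hm_rp_create()):
 *
 *	PMEMoid val;
 *	if (pmemobj_alloc(pop, &val, 64, 0, NULL, NULL) == 0 &&
 *	    hm_rp_insert(pop, map, 42, val) == 0)
 *		assert(!OID_IS_NULL(hm_rp_get(pop, map, 42)));
 */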
/*
* hm_rp_remove -- removes specified key from the hashmap,
* returns:
* - key's value if successful,
* - OID_NULL if value didn't exist or if something bad happened
*/
PMEMoid
hm_rp_remove(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key)
{
const uint64_t pos = index_lookup(D_RO(hashmap), key);
if (pos == 0)
return OID_NULL;
struct entry *entry_p = D_RW(D_RW(hashmap)->entries);
entry_p += pos;
PMEMoid ret = entry_p->value;
size_t actvcnt = 0;
struct pobj_action actv[5];
pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->hash,
entry_p->hash | TOMBSTONE_MASK);
pmemobj_set_value(pop, &actv[actvcnt++],
&entry_p->value.pool_uuid_lo, 0);
pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->value.off, 0);
pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->key, 0);
pmemobj_set_value(pop, &actv[actvcnt++], &D_RW(hashmap)->count,
D_RW(hashmap)->count - 1);
HM_ASSERT(sizeof(actv) / sizeof(actv[0]) >= actvcnt);
pmemobj_publish(pop, actv, actvcnt);
uint64_t reduced_threshold = (uint64_t)
(((uint64_t)(D_RO(hashmap)->capacity / 2))
* HASHMAP_RP_LOAD_FACTOR);
if (reduced_threshold >= INIT_ENTRIES_NUM_RP &&
D_RW(hashmap)->count < reduced_threshold &&
hm_rp_rebuild(pop, hashmap, D_RO(hashmap)->capacity / 2))
return OID_NULL;
return ret;
}
/*
* hm_rp_get -- checks whether specified key is in the hashmap.
* Returns associated value if key exists, OID_NULL otherwise.
*/
PMEMoid
hm_rp_get(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key)
{
struct entry *entry_p =
(struct entry *)pmemobj_direct(D_RW(hashmap)->entries.oid);
uint64_t pos = index_lookup(D_RO(hashmap), key);
return pos == 0 ? OID_NULL : (entry_p + pos)->value;
}
/*
* hm_rp_lookup -- checks whether specified key is in the hashmap.
* Returns 1 if key was found, 0 otherwise.
*/
int
hm_rp_lookup(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key)
{
return index_lookup(D_RO(hashmap), key) != 0;
}
/*
* hm_rp_foreach -- calls cb for all values from the hashmap
*/
int
hm_rp_foreach(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
struct entry *entry_p =
(struct entry *)pmemobj_direct(D_RO(hashmap)->entries.oid);
int ret = 0;
for (size_t i = 0; i < D_RO(hashmap)->capacity; ++i, ++entry_p) {
uint64_t hash = entry_p->hash;
if (entry_is_empty(hash))
continue;
ret = cb(entry_p->key, entry_p->value, arg);
if (ret)
return ret;
}
return 0;
}
/*
* hm_rp_debug -- prints complete hashmap state
*/
static void
hm_rp_debug(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, FILE *out)
{
#ifdef DEBUG
fprintf(out, "debug: true, ");
#endif
fprintf(out, "capacity: %" PRIu64 ", count: %" PRIu64 "\n",
D_RO(hashmap)->capacity, D_RO(hashmap)->count);
struct entry *entry_p = D_RW((D_RW(hashmap)->entries));
for (size_t i = 0; i < D_RO(hashmap)->capacity; ++i, ++entry_p) {
uint64_t hash = entry_p->hash;
if (entry_is_empty(hash))
continue;
uint64_t key = entry_p->key;
#ifdef DEBUG
fprintf(out, "%zu: %" PRIu64 " hash: %" PRIu64 " dist:%" PRIu32
" swaps:%u\n", i, key, hash,
probe_distance(D_RO(hashmap), hash, i),
swaps_array[i]);
#else
fprintf(out, "%zu: %" PRIu64 " dist:%" PRIu64 "\n", i, key,
probe_distance(D_RO(hashmap), hash, i));
#endif
}
}
/*
* hm_rp_count -- returns number of elements
*/
size_t
hm_rp_count(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap)
{
return D_RO(hashmap)->count;
}
/*
* hm_rp_cmd -- execute cmd for hashmap
*/
int
hm_rp_cmd(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
unsigned cmd, uint64_t arg)
{
switch (cmd) {
case HASHMAP_CMD_REBUILD:
hm_rp_rebuild(pop, hashmap, D_RO(hashmap)->capacity);
return 0;
case HASHMAP_CMD_DEBUG:
if (!arg)
return -EINVAL;
hm_rp_debug(pop, hashmap, (FILE *)arg);
return 0;
default:
return -EINVAL;
}
}
| 16,890 | 23.338617 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_atomic.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
#ifndef HASHMAP_ATOMIC_H
#define HASHMAP_ATOMIC_H
#include <stddef.h>
#include <stdint.h>
#include <hashmap.h>
#include <libpmemobj.h>
#ifndef HASHMAP_ATOMIC_TYPE_OFFSET
#define HASHMAP_ATOMIC_TYPE_OFFSET 1000
#endif
struct hashmap_atomic;
TOID_DECLARE(struct hashmap_atomic, HASHMAP_ATOMIC_TYPE_OFFSET + 0);
int hm_atomic_check(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_create(PMEMobjpool *pop, TOID(struct hashmap_atomic) *map,
void *arg);
int hm_atomic_init(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_insert(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key, PMEMoid value);
PMEMoid hm_atomic_remove(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
PMEMoid hm_atomic_get(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
int hm_atomic_lookup(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
int hm_atomic_foreach(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
size_t hm_atomic_count(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_cmd(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
unsigned cmd, uint64_t arg);
#endif /* HASHMAP_ATOMIC_H */
| 1,384 | 36.432432 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_atomic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/* integer hash set implementation which uses only atomic APIs */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "hashmap_atomic.h"
#include "hashmap_internal.h"
/* layout definition */
TOID_DECLARE(struct buckets, HASHMAP_ATOMIC_TYPE_OFFSET + 1);
TOID_DECLARE(struct entry, HASHMAP_ATOMIC_TYPE_OFFSET + 2);
struct entry {
uint64_t key;
PMEMoid value;
/* list pointer */
POBJ_LIST_ENTRY(struct entry) list;
};
struct entry_args {
uint64_t key;
PMEMoid value;
};
POBJ_LIST_HEAD(entries_head, struct entry);
struct buckets {
/* number of buckets */
size_t nbuckets;
/* array of lists */
struct entries_head bucket[];
};
struct hashmap_atomic {
/* random number generator seed */
uint32_t seed;
/* hash function coefficients */
uint32_t hash_fun_a;
uint32_t hash_fun_b;
uint64_t hash_fun_p;
/* number of values inserted */
uint64_t count;
/* whether "count" should be updated */
uint32_t count_dirty;
/* buckets */
TOID(struct buckets) buckets;
/* buckets, used during rehashing, null otherwise */
TOID(struct buckets) buckets_tmp;
};
/*
* create_entry -- entry initializer
*/
static int
create_entry(PMEMobjpool *pop, void *ptr, void *arg)
{
struct entry *e = (struct entry *)ptr;
struct entry_args *args = (struct entry_args *)arg;
e->key = args->key;
e->value = args->value;
memset(&e->list, 0, sizeof(e->list));
pmemobj_persist(pop, e, sizeof(*e));
return 0;
}
/*
* create_buckets -- buckets initializer
*/
static int
create_buckets(PMEMobjpool *pop, void *ptr, void *arg)
{
struct buckets *b = (struct buckets *)ptr;
b->nbuckets = *((size_t *)arg);
pmemobj_memset_persist(pop, &b->bucket, 0,
b->nbuckets * sizeof(b->bucket[0]));
pmemobj_persist(pop, &b->nbuckets, sizeof(b->nbuckets));
return 0;
}
/*
* create_hashmap -- hashmap initializer
*/
static void
create_hashmap(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint32_t seed)
{
D_RW(hashmap)->seed = seed;
do {
D_RW(hashmap)->hash_fun_a = (uint32_t)rand();
} while (D_RW(hashmap)->hash_fun_a == 0);
D_RW(hashmap)->hash_fun_b = (uint32_t)rand();
D_RW(hashmap)->hash_fun_p = HASH_FUNC_COEFF_P;
size_t len = INIT_BUCKETS_NUM;
size_t sz = sizeof(struct buckets) +
len * sizeof(struct entries_head);
if (POBJ_ALLOC(pop, &D_RW(hashmap)->buckets, struct buckets, sz,
create_buckets, &len)) {
fprintf(stderr, "root alloc failed: %s\n", pmemobj_errormsg());
abort();
}
pmemobj_persist(pop, D_RW(hashmap), sizeof(*D_RW(hashmap)));
}
/*
* hash -- the simplest hashing function,
* see https://en.wikipedia.org/wiki/Universal_hashing#Hashing_integers
*/
static uint64_t
hash(const TOID(struct hashmap_atomic) *hashmap,
const TOID(struct buckets) *buckets,
uint64_t value)
{
uint32_t a = D_RO(*hashmap)->hash_fun_a;
uint32_t b = D_RO(*hashmap)->hash_fun_b;
uint64_t p = D_RO(*hashmap)->hash_fun_p;
size_t len = D_RO(*buckets)->nbuckets;
return ((a * value + b) % p) % len;
}
/*
* hm_atomic_rebuild_finish -- finishes rebuild, assumes buckets_tmp is not null
*/
static void
hm_atomic_rebuild_finish(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap)
{
TOID(struct buckets) cur = D_RO(hashmap)->buckets;
TOID(struct buckets) tmp = D_RO(hashmap)->buckets_tmp;
for (size_t i = 0; i < D_RO(cur)->nbuckets; ++i) {
while (!POBJ_LIST_EMPTY(&D_RO(cur)->bucket[i])) {
TOID(struct entry) en =
POBJ_LIST_FIRST(&D_RO(cur)->bucket[i]);
uint64_t h = hash(&hashmap, &tmp, D_RO(en)->key);
if (POBJ_LIST_MOVE_ELEMENT_HEAD(pop,
&D_RW(cur)->bucket[i],
&D_RW(tmp)->bucket[h],
en, list, list)) {
fprintf(stderr, "move failed: %s\n",
pmemobj_errormsg());
abort();
}
}
}
POBJ_FREE(&D_RO(hashmap)->buckets);
D_RW(hashmap)->buckets = D_RO(hashmap)->buckets_tmp;
pmemobj_persist(pop, &D_RW(hashmap)->buckets,
sizeof(D_RW(hashmap)->buckets));
/*
	 * We have to set the offset manually instead of substituting
	 * OID_NULL, because we wouldn't be able to recover easily if a
	 * crash happened after pool_uuid_lo was cleared but before the
	 * offset was. Another reason why everyone should use the
	 * transaction API.
	 * See the recovery process in hm_atomic_init and the TOID_IS_NULL
	 * macro definition.
*/
D_RW(hashmap)->buckets_tmp.oid.off = 0;
pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp,
sizeof(D_RW(hashmap)->buckets_tmp));
}
/*
* hm_atomic_rebuild -- rebuilds the hashmap with a new number of buckets
*/
static void
hm_atomic_rebuild(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
size_t new_len)
{
if (new_len == 0)
new_len = D_RO(D_RO(hashmap)->buckets)->nbuckets;
size_t sz = sizeof(struct buckets) +
new_len * sizeof(struct entries_head);
POBJ_ALLOC(pop, &D_RW(hashmap)->buckets_tmp, struct buckets, sz,
create_buckets, &new_len);
if (TOID_IS_NULL(D_RO(hashmap)->buckets_tmp)) {
fprintf(stderr,
"failed to allocate temporary space of size: %zu"
", %s\n",
new_len, pmemobj_errormsg());
return;
}
hm_atomic_rebuild_finish(pop, hashmap);
}
/*
* hm_atomic_insert -- inserts specified value into the hashmap,
* returns:
* - 0 if successful,
* - 1 if value already existed,
* - -1 if something bad happened
*/
int
hm_atomic_insert(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key, PMEMoid value)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
int num = 0;
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list) {
if (D_RO(var)->key == key)
return 1;
num++;
}
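	/*
	 * Set the dirty flag before touching the list; if a crash lands
	 * between the list insert and the counter update, hm_atomic_init()
	 * sees the flag and recounts the entries on the next open.
	 */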
D_RW(hashmap)->count_dirty = 1;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
struct entry_args args;
args.key = key;
args.value = value;
PMEMoid oid = POBJ_LIST_INSERT_NEW_HEAD(pop,
&D_RW(buckets)->bucket[h],
list, sizeof(struct entry), create_entry, &args);
if (OID_IS_NULL(oid)) {
fprintf(stderr, "failed to allocate entry: %s\n",
pmemobj_errormsg());
return -1;
}
D_RW(hashmap)->count++;
pmemobj_persist(pop, &D_RW(hashmap)->count,
sizeof(D_RW(hashmap)->count));
D_RW(hashmap)->count_dirty = 0;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
num++;
if (num > MAX_HASHSET_THRESHOLD ||
(num > MIN_HASHSET_THRESHOLD &&
D_RO(hashmap)->count > 2 * D_RO(buckets)->nbuckets))
hm_atomic_rebuild(pop, hashmap, D_RW(buckets)->nbuckets * 2);
return 0;
}
/*
* hm_atomic_remove -- removes specified value from the hashmap,
* returns:
* - key's value if successful,
* - OID_NULL if value didn't exist or if something bad happened
*/
PMEMoid
hm_atomic_remove(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
POBJ_LIST_FOREACH(var, &D_RW(buckets)->bucket[h], list) {
if (D_RO(var)->key == key)
break;
}
if (TOID_IS_NULL(var))
return OID_NULL;
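	/* same dirty-flag crash-consistency protocol as in hm_atomic_insert */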
D_RW(hashmap)->count_dirty = 1;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
if (POBJ_LIST_REMOVE_FREE(pop, &D_RW(buckets)->bucket[h],
var, list)) {
fprintf(stderr, "list remove failed: %s\n",
pmemobj_errormsg());
return OID_NULL;
}
D_RW(hashmap)->count--;
pmemobj_persist(pop, &D_RW(hashmap)->count,
sizeof(D_RW(hashmap)->count));
D_RW(hashmap)->count_dirty = 0;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
if (D_RO(hashmap)->count < D_RO(buckets)->nbuckets)
hm_atomic_rebuild(pop, hashmap, D_RO(buckets)->nbuckets / 2);
return D_RO(var)->value;
}
/*
 * hm_atomic_foreach -- calls cb for all values from the hashmap
*/
int
hm_atomic_foreach(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
int ret = 0;
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i)
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list) {
ret = cb(D_RO(var)->key, D_RO(var)->value, arg);
if (ret)
return ret;
}
return 0;
}
/*
* hm_atomic_debug -- prints complete hashmap state
*/
static void
hm_atomic_debug(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
FILE *out)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
fprintf(out, "a: %u b: %u p: %" PRIu64 "\n", D_RO(hashmap)->hash_fun_a,
D_RO(hashmap)->hash_fun_b, D_RO(hashmap)->hash_fun_p);
fprintf(out, "count: %" PRIu64 ", buckets: %zu\n",
D_RO(hashmap)->count, D_RO(buckets)->nbuckets);
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) {
if (POBJ_LIST_EMPTY(&D_RO(buckets)->bucket[i]))
continue;
int num = 0;
fprintf(out, "%zu: ", i);
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list) {
fprintf(out, "%" PRIu64 " ", D_RO(var)->key);
num++;
}
fprintf(out, "(%d)\n", num);
}
}
/*
 * hm_atomic_get -- returns the value of the specified key, if present
*/
PMEMoid
hm_atomic_get(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list)
if (D_RO(var)->key == key)
return D_RO(var)->value;
return OID_NULL;
}
/*
 * hm_atomic_lookup -- checks whether the specified key is in the hashmap
*/
int
hm_atomic_lookup(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list)
if (D_RO(var)->key == key)
return 1;
return 0;
}
/*
* hm_atomic_create -- initializes hashmap state, called after pmemobj_create
*/
int
hm_atomic_create(PMEMobjpool *pop, TOID(struct hashmap_atomic) *map, void *arg)
{
struct hashmap_args *args = (struct hashmap_args *)arg;
uint32_t seed = args ? args->seed : 0;
POBJ_ZNEW(pop, map, struct hashmap_atomic);
create_hashmap(pop, *map, seed);
return 0;
}
/*
* hm_atomic_init -- recovers hashmap state, called after pmemobj_open
*/
int
hm_atomic_init(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap)
{
srand(D_RO(hashmap)->seed);
/* handle rebuild interruption */
if (!TOID_IS_NULL(D_RO(hashmap)->buckets_tmp)) {
printf("rebuild, previous attempt crashed\n");
if (TOID_EQUALS(D_RO(hashmap)->buckets,
D_RO(hashmap)->buckets_tmp)) {
			/* see comment in hm_atomic_rebuild_finish */
D_RW(hashmap)->buckets_tmp.oid.off = 0;
pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp,
sizeof(D_RW(hashmap)->buckets_tmp));
} else if (TOID_IS_NULL(D_RW(hashmap)->buckets)) {
D_RW(hashmap)->buckets = D_RW(hashmap)->buckets_tmp;
pmemobj_persist(pop, &D_RW(hashmap)->buckets,
sizeof(D_RW(hashmap)->buckets));
			/* see comment in hm_atomic_rebuild_finish */
D_RW(hashmap)->buckets_tmp.oid.off = 0;
pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp,
sizeof(D_RW(hashmap)->buckets_tmp));
} else {
hm_atomic_rebuild_finish(pop, hashmap);
}
}
/* handle insert or remove interruption */
if (D_RO(hashmap)->count_dirty) {
printf("count dirty, recalculating\n");
TOID(struct entry) var;
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
uint64_t cnt = 0;
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i)
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list)
cnt++;
printf("old count: %" PRIu64 ", new count: %" PRIu64 "\n",
D_RO(hashmap)->count, cnt);
D_RW(hashmap)->count = cnt;
pmemobj_persist(pop, &D_RW(hashmap)->count,
sizeof(D_RW(hashmap)->count));
D_RW(hashmap)->count_dirty = 0;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
}
return 0;
}
/*
* hm_atomic_check -- checks if specified persistent object is an
* instance of hashmap
*/
int
hm_atomic_check(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap)
{
return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap);
}
/*
* hm_atomic_count -- returns number of elements
*/
size_t
hm_atomic_count(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap)
{
return D_RO(hashmap)->count;
}
/*
* hm_atomic_cmd -- execute cmd for hashmap
*/
int
hm_atomic_cmd(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
unsigned cmd, uint64_t arg)
{
switch (cmd) {
case HASHMAP_CMD_REBUILD:
hm_atomic_rebuild(pop, hashmap, arg);
return 0;
case HASHMAP_CMD_DEBUG:
if (!arg)
return -EINVAL;
hm_atomic_debug(pop, hashmap, (FILE *)arg);
return 0;
default:
return -EINVAL;
}
}
| 12,825 | 24.001949 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/pmemlog/obj_pmemlog_simple.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_pmemlog_simple.c -- alternate pmemlog implementation based on pmemobj
*
* usage: obj_pmemlog_simple [co] file [cmd[:param]...]
*
* c - create file
* o - open file
*
* The "cmd" arguments match the pmemlog functions:
* a - append
* v - appendv
* r - rewind
* w - walk
* n - nbyte
* t - tell
* "a", "w" and "v" require a parameter string(s) separated by a colon
*/
#include <ex_common.h>
#include <sys/stat.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include "libpmemobj.h"
#include "libpmem.h"
#include "libpmemlog.h"
#define USABLE_SIZE (9.0 / 10)
#define MAX_POOL_SIZE (((size_t)1024 * 1024 * 1024 * 16))
#define POOL_SIZE ((size_t)(1024 * 1024 * 100))
POBJ_LAYOUT_BEGIN(obj_pmemlog_simple);
POBJ_LAYOUT_ROOT(obj_pmemlog_simple, struct base);
POBJ_LAYOUT_TOID(obj_pmemlog_simple, struct log);
POBJ_LAYOUT_END(obj_pmemlog_simple);
/* log entry header */
struct log_hdr {
uint64_t write_offset; /* data write offset */
size_t data_size; /* size available for data */
};
/* struct log stores the entire log entry */
struct log {
struct log_hdr hdr;
char data[];
};
/* struct base has the lock and log OID */
struct base {
PMEMrwlock rwlock; /* lock covering entire log */
TOID(struct log) log;
};
/*
 * pmemlog_map -- (internal) read or initialize the log pool
*/
static int
pmemlog_map(PMEMobjpool *pop, size_t fsize)
{
int retval = 0;
	TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* log already initialized */
if (!TOID_IS_NULL(D_RO(bp)->log))
return retval;
size_t pool_size = (size_t)(fsize * USABLE_SIZE);
/* max size of a single allocation is 16GB */
if (pool_size > MAX_POOL_SIZE) {
errno = EINVAL;
return 1;
}
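	/*
	 * Allocate and size the log in a single transaction: after commit
	 * the root points at a fully initialized log; on abort the root is
	 * rolled back untouched.
	 */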
TX_BEGIN(pop) {
TX_ADD(bp);
D_RW(bp)->log = TX_ZALLOC(struct log, pool_size);
D_RW(D_RW(bp)->log)->hdr.data_size =
pool_size - sizeof(struct log_hdr);
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemlog_open -- pool open wrapper
*/
PMEMlogpool *
pmemlog_open(const char *path)
{
PMEMobjpool *pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(obj_pmemlog_simple));
assert(pop != NULL);
struct stat buf;
if (stat(path, &buf)) {
perror("stat");
return NULL;
}
return pmemlog_map(pop, buf.st_size) ? NULL : (PMEMlogpool *)pop;
}
/*
* pmemlog_create -- pool create wrapper
*/
PMEMlogpool *
pmemlog_create(const char *path, size_t poolsize, mode_t mode)
{
PMEMobjpool *pop = pmemobj_create(path,
POBJ_LAYOUT_NAME(obj_pmemlog_simple),
poolsize, mode);
assert(pop != NULL);
struct stat buf;
if (stat(path, &buf)) {
perror("stat");
return NULL;
}
return pmemlog_map(pop, buf.st_size) ? NULL : (PMEMlogpool *)pop;
}
/*
 * pmemlog_close -- pool close wrapper
*/
void
pmemlog_close(PMEMlogpool *plp)
{
pmemobj_close((PMEMobjpool *)plp);
}
/*
* pmemlog_nbyte -- return usable size of a log memory pool
*/
size_t
pmemlog_nbyte(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct log) logp;
logp = D_RO(POBJ_ROOT(pop, struct base))->log;
return D_RO(logp)->hdr.data_size;
}
/*
* pmemlog_append -- add data to a log memory pool
*/
int
pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
TOID(struct log) logp;
logp = D_RW(bp)->log;
/* check for overrun */
if ((D_RO(logp)->hdr.write_offset + count)
> D_RO(logp)->hdr.data_size) {
errno = ENOMEM;
return 1;
}
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
char *dst = D_RW(logp)->data + D_RO(logp)->hdr.write_offset;
/* add hdr to undo log */
TX_ADD_FIELD(logp, hdr);
/* copy and persist data */
pmemobj_memcpy_persist(pop, dst, buf, count);
/* set the new offset */
D_RW(logp)->hdr.write_offset += count;
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemlog_appendv -- add gathered data to a log memory pool
*/
int
pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
uint64_t total_count = 0;
/* calculate required space */
for (int i = 0; i < iovcnt; ++i)
total_count += iov[i].iov_len;
TOID(struct log) logp;
logp = D_RW(bp)->log;
/* check for overrun */
if ((D_RO(logp)->hdr.write_offset + total_count)
> D_RO(logp)->hdr.data_size) {
errno = ENOMEM;
return 1;
}
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
TX_ADD(D_RW(bp)->log);
/* append the data */
for (int i = 0; i < iovcnt; ++i) {
char *buf = (char *)iov[i].iov_base;
size_t count = iov[i].iov_len;
char *dst = D_RW(logp)->data
+ D_RO(logp)->hdr.write_offset;
/* copy and persist data */
pmemobj_memcpy_persist(pop, dst, buf, count);
/* set the new offset */
D_RW(logp)->hdr.write_offset += count;
}
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemlog_tell -- return current write point in a log memory pool
*/
long long
pmemlog_tell(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct log) logp;
logp = D_RO(POBJ_ROOT(pop, struct base))->log;
return D_RO(logp)->hdr.write_offset;
}
/*
* pmemlog_rewind -- discard all data, resetting a log memory pool to empty
*/
void
pmemlog_rewind(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
/* add the hdr to the undo log */
TX_ADD_FIELD(D_RW(bp)->log, hdr);
/* reset the write offset */
D_RW(D_RW(bp)->log)->hdr.write_offset = 0;
} TX_END
}
/*
* pmemlog_walk -- walk through all data in a log memory pool
*
* chunksize of 0 means process_chunk gets called once for all data
* as a single chunk.
*/
void
pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* acquire a rdlock here */
int err;
if ((err = pmemobj_rwlock_rdlock(pop, &D_RW(bp)->rwlock)) != 0) {
errno = err;
return;
}
TOID(struct log) logp;
logp = D_RW(bp)->log;
size_t read_size = chunksize ? chunksize : D_RO(logp)->hdr.data_size;
char *read_ptr = D_RW(logp)->data;
const char *write_ptr = (D_RO(logp)->data
+ D_RO(logp)->hdr.write_offset);
while (read_ptr < write_ptr) {
read_size = MIN(read_size, (size_t)(write_ptr - read_ptr));
(*process_chunk)(read_ptr, read_size, arg);
read_ptr += read_size;
}
pmemobj_rwlock_unlock(pop, &D_RW(bp)->rwlock);
}
/*
* process_chunk -- (internal) process function for log_walk
*/
static int
process_chunk(const void *buf, size_t len, void *arg)
{
char *tmp = (char *)malloc(len + 1);
if (tmp == NULL) {
fprintf(stderr, "malloc error\n");
return 0;
}
memcpy(tmp, buf, len);
tmp[len] = '\0';
printf("log contains:\n");
printf("%s\n", tmp);
free(tmp);
return 1; /* continue */
}
/*
* count_iovec -- (internal) count the number of iovec items
*/
static int
count_iovec(char *arg)
{
int count = 1;
char *pch = strchr(arg, ':');
while (pch != NULL) {
++count;
pch = strchr(++pch, ':');
}
return count;
}
/*
* fill_iovec -- (internal) fill out the iovec
*/
static void
fill_iovec(struct iovec *iov, char *arg)
{
char *pch = strtok(arg, ":");
while (pch != NULL) {
iov->iov_base = pch;
iov->iov_len = strlen((char *)iov->iov_base);
++iov;
pch = strtok(NULL, ":");
}
}
int
main(int argc, char *argv[])
{
if (argc < 2) {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
PMEMlogpool *plp;
if (strncmp(argv[1], "c", 1) == 0) {
plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW);
} else if (strncmp(argv[1], "o", 1) == 0) {
plp = pmemlog_open(argv[2]);
} else {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
if (plp == NULL) {
perror("pmemlog_create/pmemlog_open");
return 1;
}
/* process the command line arguments */
for (int i = 3; i < argc; i++) {
switch (*argv[i]) {
case 'a': {
printf("append: %s\n", argv[i] + 2);
if (pmemlog_append(plp, argv[i] + 2,
strlen(argv[i] + 2)))
fprintf(stderr, "pmemlog_append"
" error\n");
break;
}
case 'v': {
printf("appendv: %s\n", argv[i] + 2);
int count = count_iovec(argv[i] + 2);
struct iovec *iov = (struct iovec *)malloc(
count * sizeof(struct iovec));
if (iov == NULL) {
fprintf(stderr, "malloc error\n");
return 1;
}
fill_iovec(iov, argv[i] + 2);
if (pmemlog_appendv(plp, iov, count))
fprintf(stderr, "pmemlog_appendv"
" error\n");
free(iov);
break;
}
case 'r': {
printf("rewind\n");
pmemlog_rewind(plp);
break;
}
case 'w': {
printf("walk\n");
unsigned long walksize = strtoul(argv[i] + 2,
NULL, 10);
pmemlog_walk(plp, walksize, process_chunk,
NULL);
break;
}
case 'n': {
printf("nbytes: %zu\n", pmemlog_nbyte(plp));
break;
}
case 't': {
printf("offset: %lld\n", pmemlog_tell(plp));
break;
}
default: {
fprintf(stderr, "unrecognized command %s\n",
argv[i]);
break;
}
};
}
/* all done */
pmemlog_close(plp);
return 0;
}
| 9,720 | 21.043084 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/pmemlog/obj_pmemlog.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_pmemlog.c -- alternate pmemlog implementation based on pmemobj
*
* usage: obj_pmemlog [co] file [cmd[:param]...]
*
* c - create file
* o - open file
*
* The "cmd" arguments match the pmemlog functions:
* a - append
* v - appendv
* r - rewind
* w - walk
* n - nbyte
* t - tell
* "a" and "v" require a parameter string(s) separated by a colon
*/
#include <ex_common.h>
#include <sys/stat.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include "libpmemobj.h"
#include "libpmem.h"
#include "libpmemlog.h"
#define LAYOUT_NAME "obj_pmemlog"
#define POOL_SIZE ((size_t)(1024 * 1024 * 100))
/* types of allocations */
enum types {
LOG_TYPE,
LOG_HDR_TYPE,
BASE_TYPE,
MAX_TYPES
};
/* log entry header */
struct log_hdr {
PMEMoid next; /* object ID of the next log buffer */
size_t size; /* size of this log buffer */
};
/* struct log stores the entire log entry */
struct log {
struct log_hdr hdr; /* entry header */
char data[]; /* log entry data */
};
/* struct base keeps track of the beginning of the log list */
struct base {
PMEMoid head; /* object ID of the first log buffer */
PMEMoid tail; /* object ID of the last log buffer */
PMEMrwlock rwlock; /* lock covering entire log */
size_t bytes_written; /* number of bytes stored in the pool */
};
/*
* pmemlog_open -- pool open wrapper
*/
PMEMlogpool *
pmemlog_open(const char *path)
{
return (PMEMlogpool *)pmemobj_open(path, LAYOUT_NAME);
}
/*
* pmemlog_create -- pool create wrapper
*/
PMEMlogpool *
pmemlog_create(const char *path, size_t poolsize, mode_t mode)
{
return (PMEMlogpool *)pmemobj_create(path, LAYOUT_NAME,
poolsize, mode);
}
/*
* pmemlog_close -- pool close wrapper
*/
void
pmemlog_close(PMEMlogpool *plp)
{
pmemobj_close((PMEMobjpool *)plp);
}
/*
* pmemlog_nbyte -- not available in this implementation
*/
size_t
pmemlog_nbyte(PMEMlogpool *plp)
{
/* N/A */
return 0;
}
/*
* pmemlog_append -- add data to a log memory pool
*/
int
pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base));
struct base *bp = pmemobj_direct(baseoid);
/* set the return point */
jmp_buf env;
if (setjmp(env)) {
/* end the transaction */
(void) pmemobj_tx_end();
return 1;
}
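	/*
	 * pmemobj_tx_begin() registers 'env'; any later transaction abort
	 * longjmps back to the setjmp() above with a nonzero value.
	 */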
/* begin a transaction, also acquiring the write lock for the log */
if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock,
TX_PARAM_NONE))
return -1;
/* allocate the new node to be inserted */
PMEMoid log = pmemobj_tx_alloc(count + sizeof(struct log_hdr),
LOG_TYPE);
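	/*
	 * No NULL check is needed here: on failure pmemobj_tx_alloc()
	 * aborts the transaction, which longjmps back to the setjmp().
	 */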
struct log *logp = pmemobj_direct(log);
logp->hdr.size = count;
memcpy(logp->data, buf, count);
logp->hdr.next = OID_NULL;
/* add the modified root object to the undo log */
pmemobj_tx_add_range(baseoid, 0, sizeof(struct base));
if (bp->tail.off == 0) {
/* update head */
bp->head = log;
} else {
/* add the modified tail entry to the undo log */
pmemobj_tx_add_range(bp->tail, 0, sizeof(struct log));
((struct log *)pmemobj_direct(bp->tail))->hdr.next = log;
}
bp->tail = log; /* update tail */
bp->bytes_written += count;
pmemobj_tx_commit();
(void) pmemobj_tx_end();
return 0;
}
/*
* pmemlog_appendv -- add gathered data to a log memory pool
*/
int
pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base));
struct base *bp = pmemobj_direct(baseoid);
/* set the return point */
jmp_buf env;
if (setjmp(env)) {
/* end the transaction */
pmemobj_tx_end();
return 1;
}
/* begin a transaction, also acquiring the write lock for the log */
if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock,
TX_PARAM_NONE))
return -1;
/* add the base object to the undo log - once for the transaction */
pmemobj_tx_add_range(baseoid, 0, sizeof(struct base));
/* add the tail entry once to the undo log, if it is set */
if (!OID_IS_NULL(bp->tail))
pmemobj_tx_add_range(bp->tail, 0, sizeof(struct log));
/* append the data */
for (int i = 0; i < iovcnt; ++i) {
char *buf = iov[i].iov_base;
size_t count = iov[i].iov_len;
/* allocate the new node to be inserted */
PMEMoid log = pmemobj_tx_alloc(count + sizeof(struct log_hdr),
LOG_TYPE);
struct log *logp = pmemobj_direct(log);
logp->hdr.size = count;
memcpy(logp->data, buf, count);
logp->hdr.next = OID_NULL;
if (bp->tail.off == 0) {
bp->head = log; /* update head */
} else {
((struct log *)pmemobj_direct(bp->tail))->hdr.next =
log;
}
bp->tail = log; /* update tail */
bp->bytes_written += count;
}
pmemobj_tx_commit();
(void) pmemobj_tx_end();
return 0;
}
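/*
 * A hedged usage sketch (not part of the original file; 'plp' is assumed to
 * come from pmemlog_create()):
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = "hello ", .iov_len = 6 },
 *		{ .iov_base = "world", .iov_len = 5 },
 *	};
 *	if (pmemlog_appendv(plp, iov, 2) != 0)
 *		fprintf(stderr, "appendv failed\n");
 */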
/*
* pmemlog_tell -- returns the current write point for the log
*/
long long
pmemlog_tell(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
struct base *bp = pmemobj_direct(pmemobj_root(pop,
sizeof(struct base)));
if (pmemobj_rwlock_rdlock(pop, &bp->rwlock) != 0)
return 0;
long long bytes_written = bp->bytes_written;
pmemobj_rwlock_unlock(pop, &bp->rwlock);
return bytes_written;
}
/*
* pmemlog_rewind -- discard all data, resetting a log memory pool to empty
*/
void
pmemlog_rewind(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base));
struct base *bp = pmemobj_direct(baseoid);
/* set the return point */
jmp_buf env;
if (setjmp(env)) {
/* end the transaction */
pmemobj_tx_end();
return;
}
/* begin a transaction, also acquiring the write lock for the log */
if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock,
TX_PARAM_NONE))
return;
/* add the root object to the undo log */
pmemobj_tx_add_range(baseoid, 0, sizeof(struct base));
/* free all log nodes */
while (bp->head.off != 0) {
PMEMoid nextoid =
((struct log *)pmemobj_direct(bp->head))->hdr.next;
pmemobj_tx_free(bp->head);
bp->head = nextoid;
}
bp->head = OID_NULL;
bp->tail = OID_NULL;
bp->bytes_written = 0;
pmemobj_tx_commit();
(void) pmemobj_tx_end();
}
/*
* pmemlog_walk -- walk through all data in a log memory pool
*
* As this implementation holds the size of each entry, the chunksize is ignored
* and the process_chunk function gets the actual entry length.
*/
void
pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
struct base *bp = pmemobj_direct(pmemobj_root(pop,
sizeof(struct base)));
if (pmemobj_rwlock_rdlock(pop, &bp->rwlock) != 0)
return;
/* process all chunks */
struct log *next = pmemobj_direct(bp->head);
while (next != NULL) {
(*process_chunk)(next->data, next->hdr.size, arg);
next = pmemobj_direct(next->hdr.next);
}
pmemobj_rwlock_unlock(pop, &bp->rwlock);
}
/*
* process_chunk -- (internal) process function for log_walk
*/
static int
process_chunk(const void *buf, size_t len, void *arg)
{
char *tmp = malloc(len + 1);
if (tmp == NULL) {
fprintf(stderr, "malloc error\n");
return 0;
}
memcpy(tmp, buf, len);
tmp[len] = '\0';
printf("log contains:\n");
printf("%s\n", tmp);
free(tmp);
return 1;
}
/*
* count_iovec -- (internal) count the number of iovec items
*/
static int
count_iovec(char *arg)
{
int count = 1;
char *pch = strchr(arg, ':');
while (pch != NULL) {
++count;
pch = strchr(++pch, ':');
}
return count;
}
/*
* fill_iovec -- (internal) fill out the iovec
*/
static void
fill_iovec(struct iovec *iov, char *arg)
{
char *pch = strtok(arg, ":");
while (pch != NULL) {
iov->iov_base = pch;
iov->iov_len = strlen((char *)iov->iov_base);
++iov;
pch = strtok(NULL, ":");
}
}
int
main(int argc, char *argv[])
{
if (argc < 2) {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
PMEMlogpool *plp;
if (strncmp(argv[1], "c", 1) == 0) {
plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW);
} else if (strncmp(argv[1], "o", 1) == 0) {
plp = pmemlog_open(argv[2]);
} else {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
if (plp == NULL) {
perror("pmemlog_create/pmemlog_open");
return 1;
}
/* process the command line arguments */
for (int i = 3; i < argc; i++) {
switch (*argv[i]) {
case 'a': {
printf("append: %s\n", argv[i] + 2);
if (pmemlog_append(plp, argv[i] + 2,
strlen(argv[i] + 2)))
fprintf(stderr, "pmemlog_append"
" error\n");
break;
}
case 'v': {
printf("appendv: %s\n", argv[i] + 2);
int count = count_iovec(argv[i] + 2);
			struct iovec *iov = calloc(count,
				sizeof(struct iovec));
			if (iov == NULL) {
				fprintf(stderr, "malloc error\n");
				break;
			}
			fill_iovec(iov, argv[i] + 2);
if (pmemlog_appendv(plp, iov, count))
fprintf(stderr, "pmemlog_appendv"
" error\n");
free(iov);
break;
}
case 'r': {
printf("rewind\n");
pmemlog_rewind(plp);
break;
}
case 'w': {
printf("walk\n");
pmemlog_walk(plp, 0, process_chunk, NULL);
break;
}
case 'n': {
printf("nbytes: %zu\n", pmemlog_nbyte(plp));
break;
}
case 't': {
printf("offset: %lld\n", pmemlog_tell(plp));
break;
}
default: {
fprintf(stderr, "unrecognized command %s\n",
argv[i]);
break;
}
};
}
/* all done */
pmemlog_close(plp);
return 0;
}
| 9,486 | 20.960648 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/pmemlog/obj_pmemlog_macros.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_pmemlog_macros.c -- alternate pmemlog implementation based on pmemobj
*
* usage: obj_pmemlog_macros [co] file [cmd[:param]...]
*
* c - create file
* o - open file
*
* The "cmd" arguments match the pmemlog functions:
* a - append
* v - appendv
* r - rewind
* w - walk
* n - nbyte
* t - tell
* "a" and "v" require a parameter string(s) separated by a colon
*/
#include <ex_common.h>
#include <sys/stat.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include "libpmemobj.h"
#include "libpmem.h"
#include "libpmemlog.h"
#define POOL_SIZE ((size_t)(1024 * 1024 * 100))
POBJ_LAYOUT_BEGIN(obj_pmemlog_macros);
POBJ_LAYOUT_ROOT(obj_pmemlog_macros, struct base);
POBJ_LAYOUT_TOID(obj_pmemlog_macros, struct log);
POBJ_LAYOUT_END(obj_pmemlog_macros);
/* log entry header */
struct log_hdr {
TOID(struct log) next; /* object ID of the next log buffer */
size_t size; /* size of this log buffer */
};
/* struct log stores the entire log entry */
struct log {
struct log_hdr hdr; /* entry header */
char data[]; /* log entry data */
};
/* struct base keeps track of the beginning of the log list */
struct base {
TOID(struct log) head; /* object ID of the first log buffer */
TOID(struct log) tail; /* object ID of the last log buffer */
PMEMrwlock rwlock; /* lock covering entire log */
size_t bytes_written; /* number of bytes stored in the pool */
};
/*
* pmemlog_open -- pool open wrapper
*/
PMEMlogpool *
pmemlog_open(const char *path)
{
return (PMEMlogpool *)pmemobj_open(path,
POBJ_LAYOUT_NAME(obj_pmemlog_macros));
}
/*
* pmemlog_create -- pool create wrapper
*/
PMEMlogpool *
pmemlog_create(const char *path, size_t poolsize, mode_t mode)
{
return (PMEMlogpool *)pmemobj_create(path,
POBJ_LAYOUT_NAME(obj_pmemlog_macros),
poolsize, mode);
}
/*
 * pmemlog_close -- pool close wrapper
*/
void
pmemlog_close(PMEMlogpool *plp)
{
pmemobj_close((PMEMobjpool *)plp);
}
/*
* pmemlog_nbyte -- not available in this implementation
*/
size_t
pmemlog_nbyte(PMEMlogpool *plp)
{
/* N/A */
return 0;
}
/*
* pmemlog_append -- add data to a log memory pool
*/
int
pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
/* allocate the new node to be inserted */
TOID(struct log) logp;
logp = TX_ALLOC(struct log, count + sizeof(struct log_hdr));
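		/*
		 * TX_ALLOC aborts the transaction on failure, transferring
		 * control to TX_ONABORT below, so no NULL check is needed.
		 */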
D_RW(logp)->hdr.size = count;
memcpy(D_RW(logp)->data, buf, count);
D_RW(logp)->hdr.next = TOID_NULL(struct log);
/* add the modified root object to the undo log */
TX_ADD(bp);
if (TOID_IS_NULL(D_RO(bp)->tail)) {
/* update head */
D_RW(bp)->head = logp;
} else {
/* add the modified tail entry to the undo log */
TX_ADD(D_RW(bp)->tail);
D_RW(D_RW(bp)->tail)->hdr.next = logp;
}
D_RW(bp)->tail = logp; /* update tail */
D_RW(bp)->bytes_written += count;
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemlog_appendv -- add gathered data to a log memory pool
*/
int
pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
/* add the base object and tail entry to the undo log */
TX_ADD(bp);
if (!TOID_IS_NULL(D_RO(bp)->tail))
TX_ADD(D_RW(bp)->tail);
/* append the data */
for (int i = 0; i < iovcnt; ++i) {
char *buf = (char *)iov[i].iov_base;
size_t count = iov[i].iov_len;
/* allocate the new node to be inserted */
TOID(struct log) logp;
logp = TX_ALLOC(struct log,
count + sizeof(struct log_hdr));
D_RW(logp)->hdr.size = count;
memcpy(D_RW(logp)->data, buf, count);
D_RW(logp)->hdr.next = TOID_NULL(struct log);
/* update head or tail accordingly */
if (TOID_IS_NULL(D_RO(bp)->tail))
D_RW(bp)->head = logp;
else
D_RW(D_RW(bp)->tail)->hdr.next = logp;
/* update tail */
D_RW(bp)->tail = logp;
D_RW(bp)->bytes_written += count;
}
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemlog_tell -- returns the current write point for the log
*/
long long
pmemlog_tell(PMEMlogpool *plp)
{
TOID(struct base) bp;
bp = POBJ_ROOT((PMEMobjpool *)plp, struct base);
return D_RO(bp)->bytes_written;
}
/*
* pmemlog_rewind -- discard all data, resetting a log memory pool to empty
*/
void
pmemlog_rewind(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
/* add the root object to the undo log */
TX_ADD(bp);
while (!TOID_IS_NULL(D_RO(bp)->head)) {
TOID(struct log) nextp;
nextp = D_RW(D_RW(bp)->head)->hdr.next;
TX_FREE(D_RW(bp)->head);
D_RW(bp)->head = nextp;
}
D_RW(bp)->head = TOID_NULL(struct log);
D_RW(bp)->tail = TOID_NULL(struct log);
D_RW(bp)->bytes_written = 0;
} TX_END
}
/*
* pmemlog_walk -- walk through all data in a log memory pool
*
* As this implementation holds the size of each entry, the chunksize is ignored
* and the process_chunk function gets the actual entry length.
*/
void
pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* acquire a read lock */
if (pmemobj_rwlock_rdlock(pop, &D_RW(bp)->rwlock) != 0)
return;
TOID(struct log) next;
next = D_RO(bp)->head;
/* process all chunks */
while (!TOID_IS_NULL(next)) {
(*process_chunk)(D_RO(next)->data,
D_RO(next)->hdr.size, arg);
next = D_RO(next)->hdr.next;
}
pmemobj_rwlock_unlock(pop, &D_RW(bp)->rwlock);
}
/*
* process_chunk -- (internal) process function for log_walk
*/
static int
process_chunk(const void *buf, size_t len, void *arg)
{
char *tmp = (char *)malloc(len + 1);
if (tmp == NULL) {
fprintf(stderr, "malloc error\n");
return 0;
}
memcpy(tmp, buf, len);
tmp[len] = '\0';
printf("log contains:\n");
printf("%s\n", tmp);
free(tmp);
return 1; /* continue */
}
/*
* count_iovec -- (internal) count the number of iovec items
*/
static int
count_iovec(char *arg)
{
int count = 1;
char *pch = strchr(arg, ':');
while (pch != NULL) {
++count;
pch = strchr(++pch, ':');
}
return count;
}
/*
* fill_iovec -- (internal) fill out the iovec
*/
static void
fill_iovec(struct iovec *iov, char *arg)
{
char *pch = strtok(arg, ":");
while (pch != NULL) {
iov->iov_base = pch;
iov->iov_len = strlen((char *)iov->iov_base);
++iov;
pch = strtok(NULL, ":");
}
}
int
main(int argc, char *argv[])
{
if (argc < 2) {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
PMEMlogpool *plp;
if (strncmp(argv[1], "c", 1) == 0) {
plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW);
} else if (strncmp(argv[1], "o", 1) == 0) {
plp = pmemlog_open(argv[2]);
} else {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
if (plp == NULL) {
perror("pmemlog_create/pmemlog_open");
return 1;
}
/* process the command line arguments */
for (int i = 3; i < argc; i++) {
switch (*argv[i]) {
case 'a': {
printf("append: %s\n", argv[i] + 2);
if (pmemlog_append(plp, argv[i] + 2,
strlen(argv[i] + 2)))
fprintf(stderr, "pmemlog_append"
" error\n");
break;
}
case 'v': {
printf("appendv: %s\n", argv[i] + 2);
int count = count_iovec(argv[i] + 2);
struct iovec *iov = (struct iovec *)malloc(
count * sizeof(struct iovec));
if (iov == NULL) {
fprintf(stderr, "malloc error\n");
break;
}
fill_iovec(iov, argv[i] + 2);
if (pmemlog_appendv(plp, iov, count))
fprintf(stderr, "pmemlog_appendv"
" error\n");
free(iov);
break;
}
case 'r': {
printf("rewind\n");
pmemlog_rewind(plp);
break;
}
case 'w': {
printf("walk\n");
pmemlog_walk(plp, 0, process_chunk, NULL);
break;
}
case 'n': {
printf("nbytes: %zu\n", pmemlog_nbyte(plp));
break;
}
case 't': {
printf("offset: %lld\n", pmemlog_tell(plp));
break;
}
default: {
fprintf(stderr, "unrecognized command %s\n",
argv[i]);
break;
}
};
}
/* all done */
pmemlog_close(plp);
return 0;
}
| 8,866 | 21.448101 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/arttree_examine.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_examine.c
*
* Description: implementation of examine function for ART tree structures
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#include <stdio.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdint.h>
#include <stdbool.h>
#include "arttree_structures.h"
/*
* examine context
*/
struct examine_ctx {
struct pmem_context *pmem_ctx;
char *offset_string;
uint64_t offset;
char *type_name;
int32_t type;
int32_t hexdump;
};
static struct examine_ctx *ex_ctx = NULL;
struct examine {
const char *name;
const char *brief;
int (*func)(char *, struct examine_ctx *, off_t);
void (*help)(char *);
};
/* local functions */
static int examine_parse_args(char *appname, int ac, char *av[],
struct examine_ctx *ex_ctx);
static struct examine *get_examine(char *type_name);
static void print_usage(char *appname);
static void dump_PMEMoid(char *prefix, PMEMoid *oid);
static int examine_PMEMoid(char *appname, struct examine_ctx *ctx, off_t off);
static int examine_art_tree_root(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node_u(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node4(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node16(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node48(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node256(char *appname,
struct examine_ctx *ctx, off_t off);
#if 0 /* XXX */
static int examine_art_node(char *appname,
struct examine_ctx *ctx, off_t off);
#else
static int examine_art_node(art_node *an);
#endif
static int examine_art_leaf(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_var_string(char *appname,
struct examine_ctx *ctx, off_t off);
/* global visible interface */
void arttree_examine_help(char *appname);
int arttree_examine_func(char *appname,
struct pmem_context *ctx, int ac, char *av[]);
static const char *arttree_examine_help_str =
"Examine data structures (objects) of ART tree\n"
"Arguments: <offset> <type>\n"
" <offset> offset of object in pmem file\n"
" <type> one of art_tree_root, art_node_u, art_node,"
" art_node4, art_node16, art_node48, art_node256, art_leaf\n"
;
static const struct option long_options[] = {
{"hexdump", no_argument, NULL, 'x'},
{NULL, 0, NULL, 0 },
};
static struct examine ex_funcs[] = {
{
.name = "PMEMobj",
.brief = "examine PMEMoid structure",
.func = examine_PMEMoid,
.help = NULL,
},
{
.name = "art_tree_root",
.brief = "examine art_tree_root structure",
.func = examine_art_tree_root,
.help = NULL,
},
{
.name = "art_node_u",
.brief = "examine art_node_u structure",
.func = examine_art_node_u,
.help = NULL,
},
{
.name = "art_node4",
.brief = "examine art_node4 structure",
.func = examine_art_node4,
.help = NULL,
},
{
.name = "art_node16",
.brief = "examine art_node16 structure",
.func = examine_art_node16,
.help = NULL,
},
{
.name = "art_node48",
.brief = "examine art_node48 structure",
.func = examine_art_node48,
.help = NULL,
},
{
.name = "art_node256",
.brief = "examine art_node256 structure",
.func = examine_art_node256,
.help = NULL,
},
{
.name = "art_leaf",
.brief = "examine art_leaf structure",
.func = examine_art_leaf,
.help = NULL,
},
{
.name = "var_string",
.brief = "examine var_string structure",
.func = examine_var_string,
.help = NULL,
},
};
/*
* number of arttree examine commands
*/
#define COMMANDS_NUMBER (sizeof(ex_funcs) / sizeof(ex_funcs[0]))
void
arttree_examine_help(char *appname)
{
printf("%s %s\n", appname, arttree_examine_help_str);
}
int
arttree_examine_func(char *appname, struct pmem_context *ctx,
int ac, char *av[])
{
int errors = 0;
off_t offset;
struct examine *ex;
if (ctx == NULL) {
return -1;
}
if (ex_ctx == NULL) {
ex_ctx = (struct examine_ctx *)
calloc(1, sizeof(struct examine_ctx));
if (ex_ctx == NULL) {
return -1;
}
}
ex_ctx->pmem_ctx = ctx;
if (examine_parse_args(appname, ac, av, ex_ctx) != 0) {
fprintf(stderr, "%s::%s: error parsing arguments\n",
appname, __FUNCTION__);
errors++;
}
if (!errors) {
offset = (off_t)strtol(ex_ctx->offset_string, NULL, 0);
ex = get_examine(ex_ctx->type_name);
if (ex != NULL) {
ex->func(appname, ex_ctx, offset);
}
}
return errors;
}
static int
examine_parse_args(char *appname, int ac, char *av[],
struct examine_ctx *ex_ctx)
{
int ret = 0;
int opt;
optind = 0;
while ((opt = getopt_long(ac, av, "x", long_options, NULL)) != -1) {
switch (opt) {
case 'x':
ex_ctx->hexdump = 1;
break;
default:
print_usage(appname);
ret = 1;
}
}
	if (ret == 0) {
		/* both <offset> and <type> arguments must be present */
		if (optind + 2 > ac) {
			print_usage(appname);
			return 1;
		}
		ex_ctx->offset_string = strdup(av[optind + 0]);
		ex_ctx->type_name = strdup(av[optind + 1]);
	}
return ret;
}
static void
print_usage(char *appname)
{
printf("%s: examine <offset> <type>\n", appname);
}
/*
* get_command -- returns command for specified command name
*/
static struct examine *
get_examine(char *type_name)
{
if (type_name == NULL) {
return NULL;
}
for (size_t i = 0; i < COMMANDS_NUMBER; i++) {
if (strcmp(type_name, ex_funcs[i].name) == 0)
return &ex_funcs[i];
}
return NULL;
}
static void
dump_PMEMoid(char *prefix, PMEMoid *oid)
{
printf("%s { PMEMoid pool_uuid_lo %" PRIx64
" off 0x%" PRIx64 " = %" PRId64 " }\n",
prefix, oid->pool_uuid_lo, oid->off, oid->off);
}
static int
examine_PMEMoid(char *appname, struct examine_ctx *ctx, off_t off)
{
void *p = (void *)(ctx->pmem_ctx->addr + off);
dump_PMEMoid("PMEMoid", p);
return 0;
}
static int
examine_art_tree_root(char *appname, struct examine_ctx *ctx, off_t off)
{
art_tree_root *tree_root = (art_tree_root *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_tree_root {\n", (long long)off);
printf(" size %d\n", tree_root->size);
dump_PMEMoid(" art_node_u", (PMEMoid *)&(tree_root->root));
printf("\n};\n");
return 0;
}
static int
examine_art_node_u(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node_u *node_u = (art_node_u *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node_u {\n", (long long)off);
printf(" type %d [%s]\n", node_u->art_node_type,
art_node_names[node_u->art_node_type]);
printf(" tag %d\n", node_u->art_node_tag);
switch (node_u->art_node_type) {
case ART_NODE4:
dump_PMEMoid(" art_node4 oid",
&(node_u->u.an4.oid));
break;
case ART_NODE16:
dump_PMEMoid(" art_node16 oid",
&(node_u->u.an16.oid));
break;
case ART_NODE48:
dump_PMEMoid(" art_node48 oid",
&(node_u->u.an48.oid));
break;
case ART_NODE256:
dump_PMEMoid(" art_node256 oid",
&(node_u->u.an256.oid));
break;
case ART_LEAF:
dump_PMEMoid(" art_leaf oid",
&(node_u->u.al.oid));
break;
default: printf("ERROR: unknown node type\n");
break;
}
printf("\n};\n");
return 0;
}
static int
examine_art_node4(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node4 *an4 = (art_node4 *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node4 {\n", (long long)off);
examine_art_node(&(an4->n));
printf("keys [");
for (int i = 0; i < 4; i++) {
printf("%c ", an4->keys[i]);
}
printf("]\nnodes [\n");
for (int i = 0; i < 4; i++) {
dump_PMEMoid(" art_node_u oid",
&(an4->children[i].oid));
}
printf("\n]");
printf("\n};\n");
return 0;
}
static int
examine_art_node16(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node16 *an16 = (art_node16 *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node16 {\n", (long long)off);
examine_art_node(&(an16->n));
printf("keys [");
for (int i = 0; i < 16; i++) {
printf("%c ", an16->keys[i]);
}
printf("]\nnodes [\n");
for (int i = 0; i < 16; i++) {
dump_PMEMoid(" art_node_u oid",
&(an16->children[i].oid));
}
printf("\n]");
printf("\n};\n");
return 0;
}
static int
examine_art_node48(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node48 *an48 = (art_node48 *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node48 {\n", (long long)off);
examine_art_node(&(an48->n));
printf("keys [");
for (int i = 0; i < 256; i++) {
printf("%c ", an48->keys[i]);
}
printf("]\nnodes [\n");
for (int i = 0; i < 48; i++) {
dump_PMEMoid(" art_node_u oid",
&(an48->children[i].oid));
}
printf("\n]");
printf("\n};\n");
return 0;
}
static int
examine_art_node256(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node256 *an256 = (art_node256 *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node256 {\n", (long long)off);
examine_art_node(&(an256->n));
printf("nodes [\n");
for (int i = 0; i < 256; i++) {
dump_PMEMoid(" art_node_u oid",
&(an256->children[i].oid));
}
printf("\n]");
printf("\n};\n");
return 0;
}
#if 0 /* XXX */
static int
examine_art_node(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node *an = (art_node *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node {\n", (long long)off);
printf(" num_children %d\n", an->num_children);
printf(" partial_len %d\n", an->partial_len);
printf(" partial [");
for (int i = 0; i < 10; i++) {
printf("%c ", an->partial[i]);
}
printf("\n]");
printf("\n};\n");
return 0;
}
#else
static int
examine_art_node(art_node *an)
{
printf("art_node {\n");
printf(" num_children %d\n", an->num_children);
printf(" partial_len %" PRIu32 "\n", an->partial_len);
printf(" partial [");
for (int i = 0; i < 10; i++) {
printf("%c ", an->partial[i]);
}
printf("\n]");
printf("\n};\n");
return 0;
}
#endif
static int
examine_art_leaf(char *appname, struct examine_ctx *ctx, off_t off)
{
art_leaf *al = (art_leaf *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_leaf {\n", (long long)off);
dump_PMEMoid(" var_string key oid ", &(al->key.oid));
dump_PMEMoid(" var_string value oid", &(al->value.oid));
printf("\n};\n");
return 0;
}
static int
examine_var_string(char *appname, struct examine_ctx *ctx, off_t off)
{
var_string *vs = (var_string *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, var_string {\n", (long long)off);
printf(" len %zu s [%s]", vs->len, vs->s);
printf("\n};\n");
return 0;
}
| 12,509 | 24.478615 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/arttree_structures.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_structures.c
*
* Description: Examine pmem structures; structures and unions taken from
* the preprocessor output of a libpmemobj compatible program.
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#ifdef __FreeBSD__
#define _WITH_GETLINE
#endif
#include <stdio.h>
#include <fcntl.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include "arttree_structures.h"
#include <stdarg.h>
#define APPNAME "examine_arttree"
#define SRCVERSION "0.2"
size_t art_node_sizes[art_node_types] = {
sizeof(art_node4),
sizeof(art_node16),
sizeof(art_node48),
sizeof(art_node256),
sizeof(art_leaf),
sizeof(art_node_u),
sizeof(art_node),
sizeof(art_tree_root),
sizeof(var_string),
};
char *art_node_names[art_node_types] = {
"art_node4",
"art_node16",
"art_node48",
"art_node256",
"art_leaf",
"art_node_u",
"art_node",
"art_tree_root",
"var_string"
};
/*
* long_options -- command line arguments
*/
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0 },
};
/*
* command -- struct for commands definition
*/
struct command {
const char *name;
const char *brief;
int (*func)(char *, struct pmem_context *, int, char *[]);
void (*help)(char *);
};
/*
* number of arttree_structures commands
*/
#define COMMANDS_NUMBER (sizeof(commands) / sizeof(commands[0]))
static void print_help(char *appname);
static void print_usage(char *appname);
static void print_version(char *appname);
static int quit_func(char *appname, struct pmem_context *ctx,
int argc, char *argv[]);
static void quit_help(char *appname);
static int set_root_func(char *appname, struct pmem_context *ctx,
int argc, char *argv[]);
static void set_root_help(char *appname);
static int help_func(char *appname, struct pmem_context *ctx,
int argc, char *argv[]);
static void help_help(char *appname);
static struct command *get_command(char *cmd_str);
static int ctx_init(struct pmem_context *ctx, char *filename);
static int arttree_structures_func(char *appname, struct pmem_context *ctx,
int ac, char *av[]);
static void arttree_structures_help(char *appname);
static int arttree_info_func(char *appname, struct pmem_context *ctx,
int ac, char *av[]);
static void arttree_info_help(char *appname);
extern int arttree_examine_func(char *appname, struct pmem_context *ctx,
	int ac, char *av[]);
extern void arttree_examine_help(char *appname);
extern int arttree_search_func(char *appname, struct pmem_context *ctx,
	int ac, char *av[]);
extern void arttree_search_help(char *appname);
void outv_err(const char *fmt, ...);
void outv_err_vargs(const char *fmt, va_list ap);
static struct command commands[] = {
{
.name = "structures",
.brief = "print information about ART structures",
.func = arttree_structures_func,
.help = arttree_structures_help,
},
{
.name = "info",
.brief = "print information and statistics"
" about an ART tree pool",
.func = arttree_info_func,
.help = arttree_info_help,
},
{
.name = "examine",
.brief = "examine data structures from an ART tree",
.func = arttree_examine_func,
.help = arttree_examine_help,
},
{
.name = "search",
.brief = "search for a key in an ART tree",
.func = arttree_search_func,
.help = arttree_search_help,
},
{
.name = "set_root",
.brief = "define offset of root of an ART tree",
.func = set_root_func,
.help = set_root_help,
},
{
.name = "help",
.brief = "print help text about a command",
.func = help_func,
.help = help_help,
},
{
.name = "quit",
.brief = "quit ART tree structure examiner",
.func = quit_func,
.help = quit_help,
},
};
static struct pmem_context ctx;
/*
* outv_err -- print error message
*/
void
outv_err(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
outv_err_vargs(fmt, ap);
va_end(ap);
}
/*
* outv_err_vargs -- print error message
*/
void
outv_err_vargs(const char *fmt, va_list ap)
{
fprintf(stderr, "error: ");
vfprintf(stderr, fmt, ap);
if (!strchr(fmt, '\n'))
fprintf(stderr, "\n");
}
/*
* print_usage -- prints usage message
*/
static void
print_usage(char *appname)
{
printf("usage: %s [--help] <pmem file> <command> [<args>]\n", appname);
}
/*
* print_version -- prints version message
*/
static void
print_version(char *appname)
{
printf("%s %s\n", appname, SRCVERSION);
}
/*
* print_help -- prints help message
*/
static void
print_help(char *appname)
{
print_usage(appname);
print_version(appname);
printf("\n");
printf("Options:\n");
printf(" -h, --help display this help and exit\n");
printf("\n");
printf("The available commands are:\n");
for (size_t i = 0; i < COMMANDS_NUMBER; i++)
printf("%s\t- %s\n", commands[i].name, commands[i].brief);
printf("\n");
}
/*
* set_root_help -- prints help message for set root command
*/
static void
set_root_help(char *appname)
{
printf("Usage: set_root <offset>\n");
printf(" define the offset of the art tree root\n");
}
/*
* set_root_func -- set_root define the offset of the art tree root
*/
static int
set_root_func(char *appname, struct pmem_context *ctx, int argc, char *argv[])
{
int retval = 0;
uint64_t root_offset;
if (argc == 2) {
		root_offset = strtoul(argv[1], NULL, 0);
ctx->art_tree_root_offset = root_offset;
} else {
set_root_help(appname);
retval = 1;
}
return retval;
}
/*
* quit_help -- prints help message for quit command
*/
static void
quit_help(char *appname)
{
printf("Usage: quit\n");
printf(" terminate arttree structure examiner\n");
}
/*
* quit_func -- quit arttree structure examiner
*/
static int
quit_func(char *appname, struct pmem_context *ctx, int argc, char *argv[])
{
printf("\n");
exit(0);
return 0;
}
/*
* help_help -- prints help message for help command
*/
static void
help_help(char *appname)
{
printf("Usage: %s help <command>\n", appname);
}
/*
* help_func -- prints help message for specified command
*/
static int
help_func(char *appname, struct pmem_context *ctx, int argc, char *argv[])
{
if (argc > 1) {
char *cmd_str = argv[1];
struct command *cmdp = get_command(cmd_str);
if (cmdp && cmdp->help) {
cmdp->help(appname);
return 0;
} else {
outv_err("No help text for '%s' command\n", cmd_str);
return -1;
}
} else {
print_help(appname);
return -1;
}
}
static const char *arttree_structures_help_str =
"Show information about known ART tree structures\n"
;
static void
arttree_structures_help(char *appname)
{
printf("%s %s\n", appname, arttree_structures_help_str);
}
static int
arttree_structures_func(char *appname, struct pmem_context *ctx,
int ac, char *av[])
{
(void) appname;
(void) ac;
(void) av;
printf(
"typedef struct pmemoid {\n"
" uint64_t pool_uuid_lo;\n"
" uint64_t off;\n"
"} PMEMoid;\n");
printf("sizeof(PMEMoid) = %zu\n\n\n", sizeof(PMEMoid));
printf(
"struct _art_node_u; typedef struct _art_node_u art_node_u;\n"
"struct _art_node_u { \n"
" uint8_t art_node_type; \n"
" uint8_t art_node_tag; \n"
"};\n");
printf("sizeof(art_node_u) = %zu\n\n\n", sizeof(art_node_u));
printf(
"struct _art_node; typedef struct _art_node art_node;\n"
"struct _art_node {\n"
" uint8_t type;\n"
" uint8_t num_children;\n"
" uint32_t partial_len;\n"
" unsigned char partial[10];\n"
"};\n");
printf("sizeof(art_node) = %zu\n\n\n", sizeof(art_node));
	printf(
	    "typedef uint8_t _toid_art_node_u_toid_type_num[2];\n");
	printf("sizeof(_toid_art_node_u_toid_type_num) = %zu\n\n\n",
	    sizeof(_toid_art_node_u_toid_type_num));
printf(
"union _toid_art_node_u_toid {\n"
" PMEMoid oid;\n"
" art_node_u *_type;\n"
" _toid_art_node_u_toid_type_num *_type_num;\n"
"};\n");
printf("sizeof(union _toid_art_node_u_toid) = %zu\n\n\n",
sizeof(union _toid_art_node_u_toid));
	printf(
	    "typedef uint8_t _toid_art_node_toid_type_num[3];\n");
	printf("sizeof(_toid_art_node_toid_type_num) = %zu\n\n\n",
	    sizeof(_toid_art_node_toid_type_num));
printf(
"union _toid_art_node_toid {\n"
" PMEMoid oid; \n"
" art_node *_type; \n"
" _toid_art_node_toid_type_num *_type_num;\n"
"};\n");
printf("sizeof(union _toid_art_node_toid) = %zu\n\n\n",
sizeof(union _toid_art_node_toid));
printf(
"struct _art_node4; typedef struct _art_node4 art_node4;\n"
"struct _art_node4 {\n"
" art_node n;\n"
" unsigned char keys[4];\n"
" union _toid_art_node_u_toid children[4];\n"
"};\n");
printf("sizeof(art_node4) = %zu\n\n\n", sizeof(art_node4));
printf(
"struct _art_node16; typedef struct _art_node16 art_node16;\n"
"struct _art_node16 {\n"
" art_node n;\n"
" unsigned char keys[16];\n"
" union _toid_art_node_u_toid children[16];\n"
"};\n");
printf("sizeof(art_node16) = %zu\n\n\n", sizeof(art_node16));
printf(
"struct _art_node48; typedef struct _art_node48 art_node48;\n"
"struct _art_node48 {\n"
" art_node n;\n"
" unsigned char keys[256];\n"
" union _toid_art_node_u_toid children[48];\n"
"};\n");
printf("sizeof(art_node48) = %zu\n\n\n", sizeof(art_node48));
printf(
"struct _art_node256; typedef struct _art_node256 art_node256;\n"
"struct _art_node256 {\n"
" art_ndoe n;\n"
" union _toid_art_node_u_toid children[256];\n"
"};\n");
printf("sizeof(art_node256) = %zu\n\n\n", sizeof(art_node256));
printf(
"struct _art_leaf; typedef struct _art_leaf art_leaf;\n"
"struct _art_leaf {\n"
" union _toid_var_string_toid value;\n"
" union _toid_var_string_toid key;\n"
"};\n");
printf("sizeof(art_leaf) = %zu\n\n\n", sizeof(art_leaf));
return 0;
}
static const char *arttree_info_help_str =
"Show information about known ART tree structures\n"
;
static void
arttree_info_help(char *appname)
{
printf("%s %s\n", appname, arttree_info_help_str);
}
static int
arttree_info_func(char *appname, struct pmem_context *ctx, int ac, char *av[])
{
printf("%s: %s not yet implemented\n", appname, __FUNCTION__);
return 0;
}
/*
* get_command -- returns command for specified command name
*/
static struct command *
get_command(char *cmd_str)
{
if (cmd_str == NULL) {
return NULL;
}
for (size_t i = 0; i < COMMANDS_NUMBER; i++) {
if (strcmp(cmd_str, commands[i].name) == 0)
return &commands[i];
}
return NULL;
}
static int
ctx_init(struct pmem_context *ctx, char *filename)
{
int errors = 0;
if (filename == NULL)
errors++;
if (ctx == NULL)
errors++;
if (errors)
return errors;
ctx->filename = strdup(filename);
assert(ctx->filename != NULL);
ctx->fd = -1;
ctx->addr = NULL;
ctx->art_tree_root_offset = 0;
if (access(ctx->filename, F_OK) != 0)
return 1;
if ((ctx->fd = open(ctx->filename, O_RDONLY)) == -1)
return 1;
struct stat stbuf;
if (fstat(ctx->fd, &stbuf) < 0)
return 1;
ctx->psize = stbuf.st_size;
if ((ctx->addr = mmap(NULL, ctx->psize, PROT_READ,
MAP_SHARED, ctx->fd, 0)) == MAP_FAILED)
return 1;
return 0;
}
static void
ctx_fini(struct pmem_context *ctx)
{
munmap(ctx->addr, ctx->psize);
close(ctx->fd);
free(ctx->filename);
}
int
main(int ac, char *av[])
{
int opt;
int option_index;
int ret = 0;
	size_t len = 0;	/* required by getline(3) when line is NULL */
ssize_t read;
char *cmd_str;
char *args[20];
int nargs;
char *line;
struct command *cmdp = NULL;
while ((opt = getopt_long(ac, av, "h",
long_options, &option_index)) != -1) {
switch (opt) {
case 'h':
print_help(APPNAME);
return 0;
default:
print_usage(APPNAME);
return -1;
}
}
if (optind >= ac) {
fprintf(stderr, "ERROR: missing arguments\n");
print_usage(APPNAME);
return -1;
}
ctx_init(&ctx, av[optind]);
if (optind + 1 < ac) {
/* execute command as given on command line */
cmd_str = av[optind + 1];
cmdp = get_command(cmd_str);
if (cmdp != NULL) {
ret = cmdp->func(APPNAME, &ctx, ac - 2, av + 2);
}
} else {
/* interactive mode: read commands and execute them */
line = NULL;
printf("\n> ");
while ((read = getline(&line, &len, stdin)) != -1) {
if (line[read - 1] == '\n') {
line[read - 1] = '\0';
}
args[0] = strtok(line, " ");
cmdp = get_command(args[0]);
if (cmdp == NULL) {
printf("[%s]: command not supported\n",
args[0] ? args[0] : "NULL");
printf("\n> ");
continue;
}
			nargs = 1;
			while (nargs <
			    (int)(sizeof(args) / sizeof(args[0]))) {
				args[nargs] = strtok(NULL, " ");
				if (args[nargs] == NULL) {
					break;
				}
				nargs++;
			}
ret = cmdp->func(APPNAME, &ctx, nargs, args);
printf("\n> ");
}
if (line != NULL) {
free(line);
}
}
ctx_fini(&ctx);
return ret;
}
| 14,768 | 22.898058 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/arttree_structures.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_structures.h
*
* Description: known structures of the ART tree
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#ifndef _ARTTREE_STRUCTURES_H
#define _ARTTREE_STRUCTURES_H
#define MAX_PREFIX_LEN 10
/*
* pmem_context -- structure for pmempool file
*/
struct pmem_context {
char *filename;
size_t psize;
int fd;
char *addr;
uint64_t art_tree_root_offset;
};
struct _art_node_u; typedef struct _art_node_u art_node_u;
struct _art_node; typedef struct _art_node art_node;
struct _art_node4; typedef struct _art_node4 art_node4;
struct _art_node16; typedef struct _art_node16 art_node16;
struct _art_node48; typedef struct _art_node48 art_node48;
struct _art_node256; typedef struct _art_node256 art_node256;
struct _var_string; typedef struct _var_string var_string;
struct _art_leaf; typedef struct _art_leaf art_leaf;
struct _art_tree_root; typedef struct _art_tree_root art_tree_root;
typedef uint8_t art_tree_root_toid_type_num[65535];
typedef uint8_t _toid_art_node_u_toid_type_num[2];
typedef uint8_t _toid_art_node_toid_type_num[3];
typedef uint8_t _toid_art_node4_toid_type_num[4];
typedef uint8_t _toid_art_node16_toid_type_num[5];
typedef uint8_t _toid_art_node48_toid_type_num[6];
typedef uint8_t _toid_art_node256_toid_type_num[7];
typedef uint8_t _toid_art_leaf_toid_type_num[8];
typedef uint8_t _toid_var_string_toid_type_num[9];
typedef struct pmemoid {
uint64_t pool_uuid_lo;
uint64_t off;
} PMEMoid;
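/*
 * Note: a PMEMoid carries no virtual address. 'off' is the byte offset
 * of the object from the start of the pool, so an examiner that mapped
 * the pool file at 'addr' can resolve it with plain pointer arithmetic.
 * Sketch (assuming 'oid' was read from a valid location in the file):
 *
 *	art_tree_root *root = (art_tree_root *)(ctx->addr + oid.off);
 */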
union _toid_art_node_u_toid {
PMEMoid oid;
art_node_u *_type;
_toid_art_node_u_toid_type_num *_type_num;
};
union art_tree_root_toid {
PMEMoid oid;
struct art_tree_root *_type;
art_tree_root_toid_type_num *_type_num;
};
union _toid_art_node_toid {
PMEMoid oid;
art_node *_type;
_toid_art_node_toid_type_num *_type_num;
};
union _toid_art_node4_toid {
PMEMoid oid;
art_node4 *_type;
_toid_art_node4_toid_type_num *_type_num;
};
union _toid_art_node16_toid {
PMEMoid oid;
art_node16 *_type;
_toid_art_node16_toid_type_num *_type_num;
};
union _toid_art_node48_toid {
PMEMoid oid;
art_node48 *_type;
_toid_art_node48_toid_type_num *_type_num;
};
union _toid_art_node256_toid {
PMEMoid oid;
art_node256 *_type;
_toid_art_node256_toid_type_num *_type_num;
};
union _toid_var_string_toid {
PMEMoid oid;
var_string *_type;
_toid_var_string_toid_type_num *_type_num;
};
union _toid_art_leaf_toid {
PMEMoid oid;
art_leaf *_type;
_toid_art_leaf_toid_type_num *_type_num;
};
struct _art_tree_root {
int size;
union _toid_art_node_u_toid root;
};
struct _art_node {
uint8_t num_children;
uint32_t partial_len;
unsigned char partial[MAX_PREFIX_LEN];
};
struct _art_node4 {
art_node n;
unsigned char keys[4];
union _toid_art_node_u_toid children[4];
};
struct _art_node16 {
art_node n;
unsigned char keys[16];
union _toid_art_node_u_toid children[16];
};
struct _art_node48 {
art_node n;
unsigned char keys[256];
union _toid_art_node_u_toid children[48];
};
struct _art_node256 {
art_node n;
union _toid_art_node_u_toid children[256];
};
struct _var_string {
size_t len;
unsigned char s[];
};
struct _art_leaf {
union _toid_var_string_toid value;
union _toid_var_string_toid key;
};
struct _art_node_u {
uint8_t art_node_type;
uint8_t art_node_tag;
union {
union _toid_art_node4_toid an4;
union _toid_art_node16_toid an16;
union _toid_art_node48_toid an48;
union _toid_art_node256_toid an256;
union _toid_art_leaf_toid al;
} u;
};
typedef enum {
ART_NODE4 = 0,
ART_NODE16 = 1,
ART_NODE48 = 2,
ART_NODE256 = 3,
ART_LEAF = 4,
ART_NODE_U = 5,
ART_NODE = 6,
ART_TREE_ROOT = 7,
VAR_STRING = 8,
art_node_types = 9 /* number of different art_nodes */
} art_node_type;
#define VALID_NODE_TYPE(n) (((n) >= 0) && ((n) < art_node_types))
extern size_t art_node_sizes[];
extern char *art_node_names[];
#endif /* _ARTTREE_STRUCTURES_H */
| 5,923 | 25.927273 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/arttree.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree.c
*
* Description: implement ART tree using libpmemobj based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#ifdef __FreeBSD__
#define _WITH_GETLINE
#endif
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <stdbool.h>
#include <inttypes.h>
#include <fcntl.h>
#include <emmintrin.h>
#include <sys/types.h>
#include <sys/mman.h>
#include "libpmemobj.h"
#include "arttree.h"
/*
* dummy structure so far; this should correspond to the datastore
* structure as defined in examples/libpmemobj/tree_map/datastore
*/
struct datastore
{
void *priv;
};
/*
* context - main context of datastore
*/
struct ds_context
{
char *filename; /* name of pool file */
int mode; /* operation mode */
int insertions; /* number of insert operations to perform */
int newpool; /* complete new memory pool */
size_t psize; /* size of pool */
PMEMobjpool *pop; /* pmemobj handle */
bool fileio;
unsigned fmode;
int fd; /* file descriptor for file io mode */
char *addr; /* base mapping address for file io mode */
unsigned char *key; /* for SEARCH, INSERT and REMOVE */
uint32_t key_len;
unsigned char *value; /* for INSERT */
uint32_t val_len;
};
#define FILL (1 << 1)
#define DUMP (1 << 2)
#define GRAPH (1 << 3)
#define INSERT (1 << 4)
#define SEARCH (1 << 5)
#define REMOVE (1 << 6)
struct ds_context my_context;
extern TOID(var_string) null_var_string;
extern TOID(art_leaf) null_art_leaf;
extern TOID(art_node_u) null_art_node_u;
#define read_key(p) read_line(p)
#define read_value(p) read_line(p)
int initialize_context(struct ds_context *ctx, int ac, char *av[]);
int initialize_pool(struct ds_context *ctx);
int add_elements(struct ds_context *ctx);
int insert_element(struct ds_context *ctx);
int search_element(struct ds_context *ctx);
int delete_element(struct ds_context *ctx);
ssize_t read_line(unsigned char **line);
void exit_handler(struct ds_context *ctx);
int art_tree_map_init(struct datastore *ds, struct ds_context *ctx);
void pmemobj_ds_set_priv(struct datastore *ds, void *priv);
static int dump_art_leaf_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len);
static int dump_art_node_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len);
static void print_node_info(char *nodetype, uint64_t off, const art_node *an);
static int parse_keyval(struct ds_context *ctx, char *arg, int mode);
int
initialize_context(struct ds_context *ctx, int ac, char *av[])
{
int errors = 0;
int opt;
char mode;
if ((ctx == NULL) || (ac < 2)) {
errors++;
}
	if (!errors) {
		ctx->filename = NULL;
		ctx->psize = PMEMOBJ_MIN_POOL;
		ctx->newpool = 0;
		ctx->pop = NULL;
		ctx->fileio = false;
		ctx->fmode = 0666;
		ctx->mode = 0;
		ctx->insertions = 0;
		ctx->fd = -1;
	}
if (!errors) {
while ((opt = getopt(ac, av, "s:m:n:")) != -1) {
switch (opt) {
case 'm':
mode = optarg[0];
if (mode == 'f') {
ctx->mode |= FILL;
} else if (mode == 'd') {
ctx->mode |= DUMP;
} else if (mode == 'g') {
ctx->mode |= GRAPH;
} else if (mode == 'i') {
ctx->mode |= INSERT;
parse_keyval(ctx, av[optind], INSERT);
optind++;
} else if (mode == 's') {
ctx->mode |= SEARCH;
parse_keyval(ctx, av[optind], SEARCH);
optind++;
} else if (mode == 'r') {
ctx->mode |= REMOVE;
parse_keyval(ctx, av[optind], REMOVE);
optind++;
} else {
errors++;
}
break;
case 'n': {
long insertions;
insertions = strtol(optarg, NULL, 0);
if (insertions > 0 && insertions < LONG_MAX) {
ctx->insertions = insertions;
}
break;
}
case 's': {
unsigned long poolsize;
poolsize = strtoul(optarg, NULL, 0);
if (poolsize >= PMEMOBJ_MIN_POOL) {
ctx->psize = poolsize;
}
break;
}
default:
errors++;
break;
}
}
}
if (!errors) {
ctx->filename = strdup(av[optind]);
}
return errors;
}
static int
parse_keyval(struct ds_context *ctx, char *arg, int mode)
{
int errors = 0;
char *p;
p = strtok(arg, ":");
if (p == NULL) {
errors++;
}
if (!errors) {
if (ctx->mode & (SEARCH|REMOVE|INSERT)) {
ctx->key = (unsigned char *)strdup(p);
assert(ctx->key != NULL);
ctx->key_len = strlen(p) + 1;
}
if (ctx->mode & INSERT) {
p = strtok(NULL, ":");
assert(p != NULL);
ctx->value = (unsigned char *)strdup(p);
assert(ctx->value != NULL);
ctx->val_len = strlen(p) + 1;
}
}
return errors;
}
void
exit_handler(struct ds_context *ctx)
{
if (!ctx->fileio) {
if (ctx->pop) {
pmemobj_close(ctx->pop);
}
} else {
if (ctx->fd > (-1)) {
close(ctx->fd);
}
}
}
int
art_tree_map_init(struct datastore *ds, struct ds_context *ctx)
{
int errors = 0;
char *error_string;
/* calculate a required pool size */
if (ctx->psize < PMEMOBJ_MIN_POOL)
ctx->psize = PMEMOBJ_MIN_POOL;
if (!ctx->fileio) {
if (access(ctx->filename, F_OK) != 0) {
error_string = "pmemobj_create";
ctx->pop = pmemobj_create(ctx->filename,
POBJ_LAYOUT_NAME(arttree_tx),
ctx->psize, ctx->fmode);
ctx->newpool = 1;
} else {
error_string = "pmemobj_open";
ctx->pop = pmemobj_open(ctx->filename,
POBJ_LAYOUT_NAME(arttree_tx));
}
if (ctx->pop == NULL) {
perror(error_string);
errors++;
}
} else {
int flags = O_CREAT | O_RDWR | O_SYNC;
/* Create a file if it does not exist. */
if ((ctx->fd = open(ctx->filename, flags, ctx->fmode)) < 0) {
perror(ctx->filename);
errors++;
}
/* allocate the pmem */
if ((errno = posix_fallocate(ctx->fd, 0, ctx->psize)) != 0) {
perror("posix_fallocate");
errors++;
}
/* map file to memory */
if ((ctx->addr = mmap(NULL, ctx->psize, PROT_READ, MAP_SHARED,
ctx->fd, 0)) == MAP_FAILED) {
perror("mmap");
errors++;
}
}
if (!errors) {
pmemobj_ds_set_priv(ds, ctx);
} else {
if (ctx->fileio) {
if (ctx->addr != NULL) {
munmap(ctx->addr, ctx->psize);
}
if (ctx->fd >= 0) {
close(ctx->fd);
}
} else {
if (ctx->pop) {
pmemobj_close(ctx->pop);
}
}
}
return errors;
}
/*
* pmemobj_ds_set_priv -- set private structure of datastore
*/
void
pmemobj_ds_set_priv(struct datastore *ds, void *priv)
{
ds->priv = priv;
}
struct datastore myds;
static void
usage(char *progname)
{
printf("usage: %s -m [f|d|g] file\n", progname);
printf(" -m mode known modes are\n");
printf(" f fill create and fill art tree\n");
printf(" i insert insert an element into the art tree\n");
printf(" s search search for a key in the art tree\n");
printf(" r remove remove an element from the art tree\n");
printf(" d dump dump art tree\n");
printf(" g graph dump art tree as a graphviz dot graph\n");
printf(" -n <number> number of key-value pairs to insert"
" into the art tree\n");
printf(" -s <size> size in bytes of the memory pool"
" (minimum and default: 8 MB)");
printf("\nfilling an art tree is done by reading key-value pairs\n"
"from standard input.\n"
"Both keys and values are single line only.\n");
}
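/*
 * Typical invocations (sketch; the pool path below is just an example):
 *
 *	# create a pool and fill it with 2 key-value pairs from stdin
 *	printf "k1\nv1\nk2\nv2\n" | ./arttree -m f -n 2 /mnt/pmem/artpool
 *
 *	# insert one pair, then search for its key
 *	./arttree -m i k3:v3 /mnt/pmem/artpool
 *	./arttree -m s k3 /mnt/pmem/artpool
 */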
int
main(int argc, char *argv[])
{
if (initialize_context(&my_context, argc, argv) != 0) {
usage(argv[0]);
return 1;
}
if (art_tree_map_init(&myds, &my_context) != 0) {
fprintf(stderr, "failed to initialize memory pool file\n");
return 1;
}
if (my_context.pop == NULL) {
perror("pool initialization");
return 1;
}
if (art_tree_init(my_context.pop, &my_context.newpool)) {
perror("pool setup");
return 1;
}
if ((my_context.mode & FILL)) {
if (add_elements(&my_context)) {
perror("add elements");
return 1;
}
}
if ((my_context.mode & INSERT)) {
if (insert_element(&my_context)) {
perror("insert elements");
return 1;
}
}
if ((my_context.mode & SEARCH)) {
if (search_element(&my_context)) {
perror("search elements");
return 1;
}
}
if ((my_context.mode & REMOVE)) {
if (delete_element(&my_context)) {
perror("delete elements");
return 1;
}
}
if (my_context.mode & DUMP) {
art_iter(my_context.pop, dump_art_leaf_callback, NULL);
}
if (my_context.mode & GRAPH) {
printf("digraph g {\nrankdir=LR;\n");
art_iter(my_context.pop, dump_art_node_callback, NULL);
printf("}");
}
exit_handler(&my_context);
return 0;
}
int
add_elements(struct ds_context *ctx)
{
PMEMobjpool *pop;
int errors = 0;
int i;
int key_len;
int val_len;
unsigned char *key;
unsigned char *value;
if (ctx == NULL) {
errors++;
} else if (ctx->pop == NULL) {
errors++;
}
if (!errors) {
pop = ctx->pop;
for (i = 0; i < ctx->insertions; i++) {
key = NULL;
value = NULL;
key_len = read_key(&key);
val_len = read_value(&value);
art_insert(pop, key, key_len, value, val_len);
if (key != NULL)
free(key);
if (value != NULL)
free(value);
}
}
return errors;
}
int
insert_element(struct ds_context *ctx)
{
PMEMobjpool *pop;
int errors = 0;
if (ctx == NULL) {
errors++;
} else if (ctx->pop == NULL) {
errors++;
}
if (!errors) {
pop = ctx->pop;
art_insert(pop, ctx->key, ctx->key_len,
ctx->value, ctx->val_len);
}
return errors;
}
int
search_element(struct ds_context *ctx)
{
PMEMobjpool *pop;
TOID(var_string) value;
int errors = 0;
if (ctx == NULL) {
errors++;
} else if (ctx->pop == NULL) {
errors++;
}
if (!errors) {
pop = ctx->pop;
printf("search key [%s]: ", (char *)ctx->key);
value = art_search(pop, ctx->key, ctx->key_len);
if (TOID_IS_NULL(value)) {
printf("not found\n");
} else {
printf("value [%s]\n", D_RO(value)->s);
}
}
return errors;
}
int
delete_element(struct ds_context *ctx)
{
PMEMobjpool *pop;
int errors = 0;
if (ctx == NULL) {
errors++;
} else if (ctx->pop == NULL) {
errors++;
}
if (!errors) {
pop = ctx->pop;
art_delete(pop, ctx->key, ctx->key_len);
}
return errors;
}
ssize_t
read_line(unsigned char **line)
{
	size_t len = 0;	/* getline(3) expects 0 when *line is NULL */
	ssize_t read = -1;
	*line = NULL;
	if ((read = getline((char **)line, &len, stdin)) > 0) {
		/* strip the trailing newline, if present */
		if ((*line)[read - 1] == '\n')
			(*line)[read - 1] = '\0';
	}
return read;
}
static int
dump_art_leaf_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len)
{
cb_data *cbd;
if (data != NULL) {
cbd = (cb_data *)data;
printf("node type %d ", D_RO(cbd->node)->art_node_type);
if (D_RO(cbd->node)->art_node_type == art_leaf_t) {
printf("key len %" PRIu32 " = [%s], value len %" PRIu32
" = [%s]",
key_len,
key != NULL ? (char *)key : (char *)"NULL",
val_len,
val != NULL ? (char *)val : (char *)"NULL");
}
printf("\n");
} else {
printf("key len %" PRIu32 " = [%s], value len %" PRIu32
" = [%s]\n",
key_len,
key != NULL ? (char *)key : (char *)"NULL",
val_len,
val != NULL ? (char *)val : (char *)"NULL");
}
return 0;
}
static void
print_node_info(char *nodetype, uint64_t off, const art_node *an)
{
	int p_len, i;
	p_len = an->partial_len;
	/* no more than MAX_PREFIX_LEN bytes are stored in the node */
	if (p_len > MAX_PREFIX_LEN)
		p_len = MAX_PREFIX_LEN;
printf("N%" PRIx64 " [label=\"%s at\\n0x%" PRIx64 "\\n%d children",
off, nodetype, off, an->num_children);
if (p_len != 0) {
printf("\\nlen %d", p_len);
printf(": ");
for (i = 0; i < p_len; i++) {
printf("%c", an->partial[i]);
}
}
printf("\"];\n");
}
static int
dump_art_node_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len)
{
cb_data *cbd;
const art_node *an;
TOID(art_node4) an4;
TOID(art_node16) an16;
TOID(art_node48) an48;
TOID(art_node256) an256;
TOID(art_leaf) al;
TOID(art_node_u) child;
TOID(var_string) oid_key;
TOID(var_string) oid_value;
if (data != NULL) {
cbd = (cb_data *)data;
switch (D_RO(cbd->node)->art_node_type) {
case NODE4:
an4 = D_RO(cbd->node)->u.an4;
an = &(D_RO(an4)->n);
child = D_RO(an4)->children[cbd->child_idx];
if (!TOID_IS_NULL(child)) {
print_node_info("node4",
cbd->node.oid.off, an);
printf("N%" PRIx64 " -> N%" PRIx64
" [label=\"%c\"];\n",
cbd->node.oid.off,
child.oid.off,
D_RO(an4)->keys[cbd->child_idx]);
}
break;
case NODE16:
an16 = D_RO(cbd->node)->u.an16;
an = &(D_RO(an16)->n);
child = D_RO(an16)->children[cbd->child_idx];
if (!TOID_IS_NULL(child)) {
print_node_info("node16",
cbd->node.oid.off, an);
printf("N%" PRIx64 " -> N%" PRIx64
" [label=\"%c\"];\n",
cbd->node.oid.off,
child.oid.off,
D_RO(an16)->keys[cbd->child_idx]);
}
break;
case NODE48:
an48 = D_RO(cbd->node)->u.an48;
an = &(D_RO(an48)->n);
child = D_RO(an48)->children[cbd->child_idx];
if (!TOID_IS_NULL(child)) {
print_node_info("node48",
cbd->node.oid.off, an);
printf("N%" PRIx64 " -> N%" PRIx64
" [label=\"%c\"];\n",
cbd->node.oid.off,
child.oid.off,
D_RO(an48)->keys[cbd->child_idx]);
}
break;
case NODE256:
an256 = D_RO(cbd->node)->u.an256;
an = &(D_RO(an256)->n);
child = D_RO(an256)->children[cbd->child_idx];
if (!TOID_IS_NULL(child)) {
print_node_info("node256",
cbd->node.oid.off, an);
printf("N%" PRIx64 " -> N%" PRIx64
" [label=\"0x%x\"];\n",
cbd->node.oid.off,
child.oid.off,
				    (unsigned char)((cbd->child_idx) & 0xff));
}
break;
case art_leaf_t:
al = D_RO(cbd->node)->u.al;
oid_key = D_RO(al)->key;
oid_value = D_RO(al)->value;
printf("N%" PRIx64 " [shape=box,"
"label=\"leaf at\\n0x%" PRIx64 "\"];\n",
cbd->node.oid.off, cbd->node.oid.off);
printf("N%" PRIx64 " [shape=box,"
"label=\"key at 0x%" PRIx64 ": %s\"];\n",
oid_key.oid.off, oid_key.oid.off,
D_RO(oid_key)->s);
printf("N%" PRIx64 " [shape=box,"
"label=\"value at 0x%" PRIx64 ": %s\"];\n",
oid_value.oid.off, oid_value.oid.off,
D_RO(oid_value)->s);
printf("N%" PRIx64 " -> N%" PRIx64 ";\n",
cbd->node.oid.off, oid_key.oid.off);
printf("N%" PRIx64 " -> N%" PRIx64 ";\n",
cbd->node.oid.off, oid_value.oid.off);
break;
default:
break;
}
} else {
printf("leaf: key len %" PRIu32
" = [%s], value len %" PRIu32 " = [%s]\n",
key_len, key, val_len, val);
}
return 0;
}
| 16,439 | 22.688761 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/art.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2012, Armon Dadgar. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: art.h
*
* Description: header file for art tree on pmem implementation
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
/*
* based on https://github.com/armon/libart/src/art.h
*/
#ifndef _ART_H
#define _ART_H
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_PREFIX_LEN 10
typedef enum {
NODE4 = 0,
NODE16 = 1,
NODE48 = 2,
NODE256 = 3,
art_leaf_t = 4,
art_node_types = 5 /* number of different art_nodes */
} art_node_type;
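/*
 * ART is "adaptive" in that an inner node is replaced by the next
 * larger type once it overflows: a node4 receiving a 5th child becomes
 * a node16, then node48, then node256. Leaves (art_leaf) store the
 * complete key together with the value.
 */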
char *art_node_names[] = {
"art_node4",
"art_node16",
"art_node48",
"art_node256",
"art_leaf"
};
/*
* forward declarations; these are required when typedef shall be
* used instead of struct
*/
struct _art_node_u; typedef struct _art_node_u art_node_u;
struct _art_node; typedef struct _art_node art_node;
struct _art_node4; typedef struct _art_node4 art_node4;
struct _art_node16; typedef struct _art_node16 art_node16;
struct _art_node48; typedef struct _art_node48 art_node48;
struct _art_node256; typedef struct _art_node256 art_node256;
struct _art_leaf; typedef struct _art_leaf art_leaf;
struct _var_string; typedef struct _var_string var_string;
POBJ_LAYOUT_BEGIN(arttree_tx);
POBJ_LAYOUT_ROOT(arttree_tx, struct art_tree_root);
POBJ_LAYOUT_TOID(arttree_tx, art_node_u);
POBJ_LAYOUT_TOID(arttree_tx, art_node4);
POBJ_LAYOUT_TOID(arttree_tx, art_node16);
POBJ_LAYOUT_TOID(arttree_tx, art_node48);
POBJ_LAYOUT_TOID(arttree_tx, art_node256);
POBJ_LAYOUT_TOID(arttree_tx, art_leaf);
POBJ_LAYOUT_TOID(arttree_tx, var_string);
POBJ_LAYOUT_END(arttree_tx);
struct _var_string {
size_t len;
unsigned char s[];
};
/*
* This struct is included as part of all the various node sizes
*/
struct _art_node {
uint8_t num_children;
uint32_t partial_len;
unsigned char partial[MAX_PREFIX_LEN];
};
/*
* Small node with only 4 children
*/
struct _art_node4 {
art_node n;
unsigned char keys[4];
TOID(art_node_u) children[4];
};
/*
* Node with 16 children
*/
struct _art_node16 {
art_node n;
unsigned char keys[16];
TOID(art_node_u) children[16];
};
/*
* Node with 48 children, but a full 256 byte field.
*/
struct _art_node48 {
art_node n;
unsigned char keys[256];
TOID(art_node_u) children[48];
};
/*
* Full node with 256 children
*/
struct _art_node256 {
art_node n;
TOID(art_node_u) children[256];
};
/*
* Represents a leaf. These are of arbitrary size, as they include the key.
*/
struct _art_leaf {
TOID(var_string) value;
TOID(var_string) key;
};
struct _art_node_u {
uint8_t art_node_type;
uint8_t art_node_tag;
union {
TOID(art_node4) an4; /* starts with art_node */
TOID(art_node16) an16; /* starts with art_node */
TOID(art_node48) an48; /* starts with art_node */
TOID(art_node256) an256; /* starts with art_node */
TOID(art_leaf) al;
} u;
};
struct art_tree_root {
int size;
TOID(art_node_u) root;
};
typedef struct _cb_data {
TOID(art_node_u) node;
int child_idx;
} cb_data;
/*
* Macros to manipulate art_node tags
*/
#define IS_LEAF(x) (((x)->art_node_type == art_leaf_t))
#define SET_LEAF(x) (((x)->art_node_tag = art_leaf_t))
#define COPY_BLOB(_obj, _blob, _len) do {\
	D_RW(_obj)->len = _len;\
	TX_MEMCPY(D_RW(_obj)->s, _blob, _len);\
	D_RW(_obj)->s[(_len) - 1] = '\0';\
} while (0)
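/*
 * COPY_BLOB modifies pmem and therefore must run inside a transaction.
 * Sketch (assuming 'key' points to a buffer of 'key_len' bytes that
 * already includes the terminating NUL):
 *
 *	TX_BEGIN(pop) {
 *		TOID(var_string) s = TX_ALLOC(var_string,
 *		    sizeof(var_string) + key_len);
 *		COPY_BLOB(s, key, key_len);
 *	} TX_END
 */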
typedef int(*art_callback)(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *value, uint32_t val_len);
extern int art_tree_init(PMEMobjpool *pop, int *newpool);
extern uint64_t art_size(PMEMobjpool *pop);
extern int art_iter(PMEMobjpool *pop, art_callback cb, void *data);
extern TOID(var_string) art_insert(PMEMobjpool *pop,
const unsigned char *key, int key_len,
void *value, int val_len);
extern TOID(var_string) art_search(PMEMobjpool *pop,
const unsigned char *key, int key_len);
extern TOID(var_string) art_delete(PMEMobjpool *pop,
const unsigned char *key, int key_len);
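/*
 * Minimal usage sketch of the public interface (error handling
 * omitted; 'path' is a hypothetical pool file created with the
 * arttree_tx layout; key lengths include the terminating NUL, as
 * elsewhere in this example):
 *
 *	PMEMobjpool *pop = pmemobj_open(path,
 *	    POBJ_LAYOUT_NAME(arttree_tx));
 *	int newpool = 0;
 *	art_tree_init(pop, &newpool);
 *	art_insert(pop, (const unsigned char *)"key", 4, "value", 6);
 *	TOID(var_string) v = art_search(pop,
 *	    (const unsigned char *)"key", 4);
 *	if (!TOID_IS_NULL(v))
 *		printf("%s\n", D_RO(v)->s);
 */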
#ifdef __cplusplus
}
#endif
#endif /* _ART_H */
| 5,998 | 26.773148 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/arttree_search.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_search.c
*
* Description: implementation of search function for ART tree
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#include <stdio.h>
#include <inttypes.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <sys/mman.h>
#include "arttree_structures.h"
/*
* search context
*/
struct search_ctx {
struct pmem_context *pmem_ctx;
unsigned char *search_key;
int32_t hexdump;
};
static struct search_ctx *s_ctx = NULL;
struct search {
const char *name;
const char *brief;
char *(*func)(char *, struct search_ctx *);
void (*help)(char *);
};
/* local functions */
static int search_parse_args(char *appname, int ac, char *av[],
struct search_ctx *s_ctx);
static struct search *get_search(char *type_name);
static void print_usage(char *appname);
static void dump_PMEMoid(char *prefix, PMEMoid *oid);
static char *search_key(char *appname, struct search_ctx *ctx);
static int leaf_matches(struct search_ctx *ctx, art_leaf *n,
unsigned char *key, size_t key_len, int depth);
static int check_prefix(art_node *an,
unsigned char *key, int key_len, int depth);
static uint64_t find_child(art_node *n, int node_type, unsigned char key);
static void *get_node(struct search_ctx *ctx, int node_type, uint64_t off);
static uint64_t get_offset_an(art_node_u *au);
static void dump_PMEMoid(char *prefix, PMEMoid *oid);
static void dump_art_tree_root(char *prefix, uint64_t off, void *p);
/* global visible interface */
void arttree_search_help(char *appname);
int arttree_search_func(char *appname, struct pmem_context *ctx,
int ac, char *av[]);
static const char *arttree_search_help_str =
"Search for key in ART tree\n"
"Arguments: <key>\n"
" <key> key\n"
;
static const struct option long_options[] = {
{"hexdump", no_argument, NULL, 'x'},
{NULL, 0, NULL, 0 },
};
static struct search s_funcs[] = {
{
.name = "key",
.brief = "search for key",
.func = search_key,
.help = NULL,
}
};
/* return the smaller of two ints (simple inlined helper) */
static inline int
min(int a, int b)
{
	return (a < b) ? a : b;
}
/*
 * number of arttree search commands
*/
#define COMMANDS_NUMBER (sizeof(s_funcs) / sizeof(s_funcs[0]))
void
arttree_search_help(char *appname)
{
printf("%s %s\n", appname, arttree_search_help_str);
}
int
arttree_search_func(char *appname, struct pmem_context *ctx, int ac, char *av[])
{
int errors = 0;
struct search *s;
char *value;
value = NULL;
if (ctx == NULL) {
return -1;
}
if (s_ctx == NULL) {
s_ctx = (struct search_ctx *)malloc(sizeof(struct search_ctx));
if (s_ctx == NULL) {
return -1;
}
memset(s_ctx, 0, sizeof(struct search_ctx));
}
if (ctx->art_tree_root_offset == 0) {
fprintf(stderr, "search functions require knowledge"
" about the art_tree_root.\n");
fprintf(stderr, "Use \"set_root <offset>\""
" to define where the \nart_tree_root object"
" resides in the pmem file.\n");
errors++;
}
s_ctx->pmem_ctx = ctx;
if (search_parse_args(appname, ac, av, s_ctx) != 0) {
fprintf(stderr, "%s::%s: error parsing arguments\n",
appname, __FUNCTION__);
errors++;
}
if (!errors) {
s = get_search("key");
if (s != NULL) {
value = s->func(appname, s_ctx);
}
if (value != NULL) {
printf("key [%s] found, value [%s]\n",
s_ctx->search_key, value);
} else {
printf("key [%s] not found\n", s_ctx->search_key);
}
}
if (s_ctx->search_key != NULL) {
free(s_ctx->search_key);
}
free(s_ctx);
return errors;
}
static int
search_parse_args(char *appname, int ac, char *av[], struct search_ctx *s_ctx)
{
int ret = 0;
int opt;
optind = 0;
while ((opt = getopt_long(ac, av, "x", long_options, NULL)) != -1) {
switch (opt) {
case 'x':
s_ctx->hexdump = 1;
break;
default:
print_usage(appname);
ret = 1;
}
}
	if (ret == 0) {
		if (optind < ac) {
			s_ctx->search_key =
			    (unsigned char *)strdup(av[optind + 0]);
		} else {
			print_usage(appname);
			ret = 1;
		}
	}
return ret;
}
static void
print_usage(char *appname)
{
printf("%s: search <key>\n", appname);
}
/*
 * get_search -- returns search function for specified name
*/
static struct search *
get_search(char *type_name)
{
if (type_name == NULL) {
return NULL;
}
for (size_t i = 0; i < COMMANDS_NUMBER; i++) {
if (strcmp(type_name, s_funcs[i].name) == 0)
return &s_funcs[i];
}
return NULL;
}
static void *
get_node(struct search_ctx *ctx, int node_type, uint64_t off)
{
if (!VALID_NODE_TYPE(node_type))
return NULL;
printf("%s at off 0x%" PRIx64 "\n", art_node_names[node_type], off);
return ctx->pmem_ctx->addr + off;
}
static int
leaf_matches(struct search_ctx *ctx, art_leaf *n,
unsigned char *key, size_t key_len, int depth)
{
var_string *n_key;
(void) depth;
n_key = (var_string *)get_node(ctx, VAR_STRING, n->key.oid.off);
if (n_key == NULL)
return 1;
// HACK for stupid null-terminated strings....
// else if (n_key->len != key_len)
// ret = 1;
if (n_key->len != key_len + 1)
return 1;
return memcmp(n_key->s, key, key_len);
}
static int
check_prefix(art_node *n, unsigned char *key, int key_len, int depth)
{
int max_cmp = min(min(n->partial_len, MAX_PREFIX_LEN), key_len - depth);
int idx;
for (idx = 0; idx < max_cmp; idx++) {
if (n->partial[idx] != key[depth + idx])
return idx;
}
return idx;
}
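/*
 * Worked example (hypothetical node contents): with key = "house",
 * depth = 0 and a node whose stored prefix is "ho" (partial_len = 2),
 * the loop compares 'h' and 'o' against key[0] and key[1] and returns
 * 2, i.e. the complete stored prefix matches.
 */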
static uint64_t
find_child(art_node *n, int node_type, unsigned char c)
{
int i;
union {
art_node4 *p1;
art_node16 *p2;
art_node48 *p3;
art_node256 *p4;
} p;
printf("[%s] children %d search key %c [",
art_node_names[node_type], n->num_children, c);
switch (node_type) {
case ART_NODE4:
p.p1 = (art_node4 *)n;
for (i = 0; i < n->num_children; i++) {
printf("%c ", p.p1->keys[i]);
if (p.p1->keys[i] == c) {
printf("]\n");
return p.p1->children[i].oid.off;
}
}
break;
case ART_NODE16:
p.p2 = (art_node16 *)n;
for (i = 0; i < n->num_children; i++) {
printf("%c ", p.p2->keys[i]);
if (p.p2->keys[i] == c) {
printf("]\n");
return p.p2->children[i].oid.off;
}
}
break;
case ART_NODE48:
p.p3 = (art_node48 *)n;
i = p.p3->keys[c];
printf("%d ", p.p3->keys[c]);
if (i) {
printf("]\n");
return p.p3->children[i - 1].oid.off;
}
break;
case ART_NODE256:
p.p4 = (art_node256 *)n;
printf("0x%" PRIx64, p.p4->children[c].oid.off);
if (p.p4->children[c].oid.off != 0) {
printf("]\n");
return p.p4->children[c].oid.off;
}
break;
default:
abort();
}
printf("]\n");
return 0;
}
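/*
 * Note on the node48 case above: keys[c] holds a 1-based index into
 * children[], with 0 meaning "no child". For a hypothetical node48
 * where keys['h'] == 3, the child reached via 'h' is children[2].
 */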
static uint64_t
get_offset_an(art_node_u *au)
{
uint64_t offset = 0;
switch (au->art_node_type) {
case ART_NODE4:
offset = au->u.an4.oid.off;
break;
case ART_NODE16:
offset = au->u.an16.oid.off;
break;
case ART_NODE48:
offset = au->u.an48.oid.off;
break;
case ART_NODE256:
offset = au->u.an256.oid.off;
break;
case ART_LEAF:
offset = au->u.al.oid.off;
break;
default:
break;
}
return offset;
}
static char *
search_key(char *appname, struct search_ctx *ctx)
{
int errors = 0;
void *p; /* something */
off_t p_off;
art_node_u *p_au; /* art_node_u */
off_t p_au_off;
void *p_an; /* specific art node from art_node_u */
off_t p_an_off;
art_node *an; /* art node */
var_string *n_value;
char *value;
int prefix_len;
int depth = 0;
int key_len;
uint64_t child_off;
key_len = strlen((char *)(ctx->search_key));
value = NULL;
p_off = ctx->pmem_ctx->art_tree_root_offset;
p = get_node(ctx, ART_TREE_ROOT, p_off);
assert(p != NULL);
dump_art_tree_root("art_tree_root", p_off, p);
p_au_off = ((art_tree_root *)p)->root.oid.off;
p_au = (art_node_u *)get_node(ctx, ART_NODE_U, p_au_off);
if (p_au == NULL)
errors++;
if (!errors) {
while (p_au) {
p_an_off = get_offset_an(p_au);
p_an = get_node(ctx, p_au->art_node_type, p_an_off);
assert(p_an != NULL);
if (p_au->art_node_type == ART_LEAF) {
if (!leaf_matches(ctx, (art_leaf *)p_an,
ctx->search_key, key_len, depth)) {
n_value = (var_string *)
get_node(ctx, VAR_STRING,
((art_leaf *)p_an)->value.oid.off);
return (char *)(n_value->s);
}
}
an = (art_node *)p_an;
if (an->partial_len) {
prefix_len = check_prefix(an, ctx->search_key,
key_len, depth);
if (prefix_len !=
min(MAX_PREFIX_LEN, an->partial_len)) {
return NULL;
}
depth = depth + an->partial_len;
}
child_off = find_child(an, p_au->art_node_type,
ctx->search_key[depth]);
if (child_off != 0) {
p_au_off = child_off;
p_au = get_node(ctx, ART_NODE_U, p_au_off);
} else {
p_au = NULL;
}
depth++;
}
}
if (errors) {
return NULL;
} else {
return value;
}
}
static void
dump_art_tree_root(char *prefix, uint64_t off, void *p)
{
art_tree_root *tree_root;
tree_root = (art_tree_root *)p;
printf("at offset 0x%" PRIx64 ", art_tree_root {\n", off);
printf(" size %d\n", tree_root->size);
dump_PMEMoid(" art_node_u", (PMEMoid *)&(tree_root->root));
printf("\n};\n");
}
static void
dump_PMEMoid(char *prefix, PMEMoid *oid)
{
printf("%s { PMEMoid pool_uuid_lo %" PRIx64
" off 0x%" PRIx64 " = %" PRId64 " }\n",
prefix, oid->pool_uuid_lo, oid->off, oid->off);
}
| 11,265 | 22.717895 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/arttree.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree.h
*
* Description: header file for art tree on pmem implementation
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#ifndef _ARTTREE_H
#define _ARTTREE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "art.h"
#ifdef __cplusplus
}
#endif
#endif /* _ARTTREE_H */
| 2,337 | 34.969231 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/linkedlist/pmemobj_list.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* pmemobj_list.h -- macro definitions for persistent
* singly linked list and tail queue
*/
#ifndef PMEMOBJ_LISTS_H
#define PMEMOBJ_LISTS_H
#include <libpmemobj.h>
/*
* This file defines two types of persistent data structures:
* singly-linked lists and tail queue.
*
* All macros defined in this file must be used within libpmemobj
* transactional API. Following snippet presents example of usage:
*
* TX_BEGIN(pop) {
* POBJ_TAILQ_INIT(head);
* } TX_ONABORT {
* abort();
* } TX_END
*
* SLIST TAILQ
* _HEAD + +
* _ENTRY + +
* _INIT + +
* _EMPTY + +
* _FIRST + +
* _NEXT + +
* _PREV - +
* _LAST - +
* _FOREACH + +
* _FOREACH_REVERSE - +
* _INSERT_HEAD + +
* _INSERT_BEFORE - +
* _INSERT_AFTER + +
* _INSERT_TAIL - +
* _MOVE_ELEMENT_HEAD - +
* _MOVE_ELEMENT_TAIL - +
* _REMOVE_HEAD + -
* _REMOVE + +
* _REMOVE_FREE + +
* _SWAP_HEAD_TAIL - +
*/
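/*
 * A slightly fuller tail-queue sketch, modeled on the fifo example in
 * this directory (the node type and field names below are illustrative):
 *
 *	POBJ_TAILQ_HEAD(tqueuehead, struct tqnode);
 *	struct tqueuehead *tqhead = &D_RW(root)->head;
 *	TOID(struct tqnode) node;
 *
 *	TX_BEGIN(pop) {
 *		node = TX_NEW(struct tqnode);
 *		POBJ_TAILQ_INSERT_TAIL(tqhead, node, tnd);
 *	} TX_ONABORT {
 *		abort();
 *	} TX_END
 */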
/*
* Singly-linked List definitions.
*/
#define POBJ_SLIST_HEAD(name, type)\
struct name {\
TOID(type) pe_first;\
}
#define POBJ_SLIST_ENTRY(type)\
struct {\
TOID(type) pe_next;\
}
/*
* Singly-linked List access methods.
*/
#define POBJ_SLIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first))
#define POBJ_SLIST_FIRST(head) ((head)->pe_first)
#define POBJ_SLIST_NEXT(elm, field) (D_RO(elm)->field.pe_next)
/*
* Singly-linked List functions.
*/
#define POBJ_SLIST_INIT(head) do {\
TX_ADD_DIRECT(&(head)->pe_first);\
TOID_ASSIGN((head)->pe_first, OID_NULL);\
} while (0)
#define POBJ_SLIST_INSERT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field.pe_next);\
elm_ptr->field.pe_next = (head)->pe_first;\
TX_SET_DIRECT(head, pe_first, elm);\
} while (0)
#define POBJ_SLIST_INSERT_AFTER(slistelm, elm, field) do {\
TOID_TYPEOF(slistelm) *slistelm_ptr = D_RW(slistelm);\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field.pe_next);\
elm_ptr->field.pe_next = slistelm_ptr->field.pe_next;\
TX_ADD_DIRECT(&slistelm_ptr->field.pe_next);\
slistelm_ptr->field.pe_next = elm;\
} while (0)
#define POBJ_SLIST_REMOVE_HEAD(head, field) do {\
TX_ADD_DIRECT(&(head)->pe_first);\
(head)->pe_first = D_RO((head)->pe_first)->field.pe_next;\
} while (0)
#define POBJ_SLIST_REMOVE(head, elm, field) do {\
if (TOID_EQUALS((head)->pe_first, elm)) {\
POBJ_SLIST_REMOVE_HEAD(head, field);\
} else {\
TOID_TYPEOF(elm) *curelm_ptr = D_RW((head)->pe_first);\
while (!TOID_EQUALS(curelm_ptr->field.pe_next, elm))\
curelm_ptr = D_RW(curelm_ptr->field.pe_next);\
TX_ADD_DIRECT(&curelm_ptr->field.pe_next);\
curelm_ptr->field.pe_next = D_RO(elm)->field.pe_next;\
}\
} while (0)
#define POBJ_SLIST_REMOVE_FREE(head, elm, field) do {\
POBJ_SLIST_REMOVE(head, elm, field);\
TX_FREE(elm);\
} while (0)
#define POBJ_SLIST_FOREACH(var, head, field)\
for ((var) = POBJ_SLIST_FIRST(head);\
!TOID_IS_NULL(var);\
var = POBJ_SLIST_NEXT(var, field))
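/*
 * Minimal singly-linked list sketch (illustrative names; as with the
 * tail queue, every mutation must run inside a transaction):
 *
 *	TOID(struct node) n;
 *
 *	TX_BEGIN(pop) {
 *		n = TX_ZNEW(struct node);
 *		POBJ_SLIST_INSERT_HEAD(slhead, n, next);
 *	} TX_END
 *
 *	POBJ_SLIST_FOREACH(n, slhead, next)
 *		consume(D_RO(n));
 */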
/*
* Tail-queue definitions.
*/
#define POBJ_TAILQ_ENTRY(type)\
struct {\
TOID(type) pe_next;\
TOID(type) pe_prev;\
}
#define POBJ_TAILQ_HEAD(name, type)\
struct name {\
TOID(type) pe_first;\
TOID(type) pe_last;\
}
/*
* Tail-queue access methods.
*/
#define POBJ_TAILQ_FIRST(head) ((head)->pe_first)
#define POBJ_TAILQ_LAST(head) ((head)->pe_last)
#define POBJ_TAILQ_EMPTY(head) (TOID_IS_NULL((head)->pe_first))
#define POBJ_TAILQ_NEXT(elm, field) (D_RO(elm)->field.pe_next)
#define POBJ_TAILQ_PREV(elm, field) (D_RO(elm)->field.pe_prev)
/*
* Tail-queue List internal methods.
*/
#define _POBJ_SWAP_PTR(elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field);\
__typeof__(elm) temp = elm_ptr->field.pe_prev;\
elm_ptr->field.pe_prev = elm_ptr->field.pe_next;\
elm_ptr->field.pe_next = temp;\
} while (0)
/*
* Tail-queue functions.
*/
#define POBJ_TAILQ_SWAP_HEAD_TAIL(head, field) do {\
__typeof__((head)->pe_first) temp = (head)->pe_first;\
TX_ADD_DIRECT(head);\
(head)->pe_first = (head)->pe_last;\
(head)->pe_last = temp;\
} while (0)
#define POBJ_TAILQ_FOREACH(var, head, field)\
for ((var) = POBJ_TAILQ_FIRST(head);\
!TOID_IS_NULL(var);\
var = POBJ_TAILQ_NEXT(var, field))
#define POBJ_TAILQ_FOREACH_REVERSE(var, head, field)\
for ((var) = POBJ_TAILQ_LAST(head);\
!TOID_IS_NULL(var);\
var = POBJ_TAILQ_PREV(var, field))
#define POBJ_TAILQ_INIT(head) do {\
TX_ADD_FIELD_DIRECT(head, pe_first);\
TOID_ASSIGN((head)->pe_first, OID_NULL);\
TX_ADD_FIELD_DIRECT(head, pe_last);\
TOID_ASSIGN((head)->pe_last, OID_NULL);\
} while (0)
#define POBJ_TAILQ_INSERT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL((head)->pe_first)) {\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_first;\
elm_ptr->field.pe_next = (head)->pe_first;\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm;\
(head)->pe_last = elm;\
} else {\
TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_next = (head)->pe_first;\
elm_ptr->field.pe_prev = first->field.pe_prev;\
TX_ADD_DIRECT(&first->field.pe_prev);\
first->field.pe_prev = elm;\
TX_SET_DIRECT(head, pe_first, elm);\
}\
} while (0)
#define POBJ_TAILQ_INSERT_TAIL(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL((head)->pe_last)) {\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = (head)->pe_last;\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm;\
(head)->pe_last = elm;\
} else {\
TOID_TYPEOF(elm) *last = D_RW((head)->pe_last);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = last->field.pe_next;\
TX_ADD_DIRECT(&last->field.pe_next);\
last->field.pe_next = elm;\
TX_SET_DIRECT(head, pe_last, elm);\
}\
} while (0)
#define POBJ_TAILQ_INSERT_AFTER(head, listelm, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = listelm;\
elm_ptr->field.pe_next = listelm_ptr->field.pe_next;\
if (TOID_IS_NULL(listelm_ptr->field.pe_next)) {\
TX_SET_DIRECT(head, pe_last, elm);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(listelm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm;\
}\
TX_ADD_DIRECT(&listelm_ptr->field.pe_next);\
listelm_ptr->field.pe_next = elm;\
} while (0)
#define POBJ_TAILQ_INSERT_BEFORE(head, listelm, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_next = listelm;\
elm_ptr->field.pe_prev = listelm_ptr->field.pe_prev;\
if (TOID_IS_NULL(listelm_ptr->field.pe_prev)) {\
TX_SET_DIRECT(head, pe_first, elm);\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(listelm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm; \
}\
TX_ADD_DIRECT(&listelm_ptr->field.pe_prev);\
listelm_ptr->field.pe_prev = elm;\
} while (0)
#define POBJ_TAILQ_REMOVE(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL(elm_ptr->field.pe_prev) &&\
TOID_IS_NULL(elm_ptr->field.pe_next)) {\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm_ptr->field.pe_prev;\
(head)->pe_last = elm_ptr->field.pe_next;\
} else {\
if (TOID_IS_NULL(elm_ptr->field.pe_prev)) {\
TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
}\
if (TOID_IS_NULL(elm_ptr->field.pe_next)) {\
TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
}\
}\
} while (0)
#define POBJ_TAILQ_REMOVE_FREE(head, elm, field) do {\
POBJ_TAILQ_REMOVE(head, elm, field);\
TX_FREE(elm);\
} while (0)
/*
 * Two cases are handled: a list of exactly two elements (a pure
 * pointer swap), and the general case, including when elm is the
 * last element.
 */
#define POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_EQUALS((head)->pe_last, elm) &&\
TOID_EQUALS(D_RO((head)->pe_first)->field.pe_next, elm)) {\
_POBJ_SWAP_PTR(elm, field);\
_POBJ_SWAP_PTR((head)->pe_first, field);\
POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
if (TOID_EQUALS((head)->pe_last, elm)) {\
TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
}\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = D_RO((head)->pe_first)->field.pe_prev;\
elm_ptr->field.pe_next = (head)->pe_first;\
TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\
TX_ADD_DIRECT(&first->field.pe_prev);\
first->field.pe_prev = elm;\
TX_SET_DIRECT(head, pe_first, elm);\
}\
} while (0)
#define POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_EQUALS((head)->pe_first, elm) &&\
TOID_EQUALS(D_RO((head)->pe_last)->field.pe_prev, elm)) {\
_POBJ_SWAP_PTR(elm, field);\
_POBJ_SWAP_PTR((head)->pe_last, field);\
POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
if (TOID_EQUALS((head)->pe_first, elm)) {\
TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\
} else { \
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
}\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = D_RO((head)->pe_last)->field.pe_next;\
__typeof__(elm_ptr) last = D_RW((head)->pe_last);\
TX_ADD_DIRECT(&last->field.pe_next);\
last->field.pe_next = elm;\
TX_SET_DIRECT(head, pe_last, elm);\
} \
} while (0)
#endif /* PMEMOBJ_LISTS_H */
| 11,243 | 30.762712 | 66 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/linkedlist/fifo.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* fifo.c - example of tail queue usage
*/
#include <ex_common.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pmemobj_list.h"
POBJ_LAYOUT_BEGIN(list);
POBJ_LAYOUT_ROOT(list, struct fifo_root);
POBJ_LAYOUT_TOID(list, struct tqnode);
POBJ_LAYOUT_END(list);
POBJ_TAILQ_HEAD(tqueuehead, struct tqnode);
struct fifo_root {
struct tqueuehead head;
};
struct tqnode {
char data;
POBJ_TAILQ_ENTRY(struct tqnode) tnd;
};
static void
print_help(void)
{
printf("usage: fifo <pool> <option> [<type>]\n");
printf("\tAvailable options:\n");
printf("\tinsert, <character> Insert character into FIFO\n");
printf("\tremove, Remove element from FIFO\n");
printf("\tprint, Print all FIFO elements\n");
}
int
main(int argc, const char *argv[])
{
PMEMobjpool *pop;
const char *path;
if (argc < 3) {
print_help();
return 0;
}
path = argv[1];
if (file_exists(path) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(list),
PMEMOBJ_MIN_POOL, 0666)) == NULL) {
perror("failed to create pool\n");
return -1;
}
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(list))) == NULL) {
perror("failed to open pool\n");
return -1;
}
}
TOID(struct fifo_root) root = POBJ_ROOT(pop, struct fifo_root);
struct tqueuehead *tqhead = &D_RW(root)->head;
TOID(struct tqnode) node;
double totaltime = 0;
if (strcmp(argv[2], "insert") == 0) {
if (argc == 4) {
totaltime = 0;
for (int i = 0; i < 1000000; i++) {
clock_t start, end;
start = clock();
TX_BEGIN(pop) {
node = TX_NEW(struct tqnode);
D_RW(node)->data = *argv[3];
POBJ_TAILQ_INSERT_HEAD(tqhead, node, tnd);
} TX_ONABORT {
abort();
} TX_END
end = clock();
totaltime += ((double) (end - start)) / CLOCKS_PER_SEC;
}
printf("TX/s = %f %f\n",1000000/totaltime, totaltime);
printf("Added %c to FIFO\n", *argv[3]);
} else {
print_help();
}
} else if (strcmp(argv[2], "remove") == 0) {
totaltime = 0;
for (int i = 0; i < 1000000; i++) {
clock_t start, end;
start = clock();
if (POBJ_TAILQ_EMPTY(tqhead)) {
printf("FIFO is empty\n");
} else {
node = POBJ_TAILQ_LAST(tqhead);
TX_BEGIN(pop) {
POBJ_TAILQ_REMOVE_FREE(tqhead, node, tnd);
} TX_ONABORT {
abort();
} TX_END
printf("Removed element from FIFO\n");
}
end = clock();
totaltime += ((double) (end - start)) / CLOCKS_PER_SEC;
}
printf("TX/s = %f %f\n",1000000/totaltime, totaltime);
} else if (strcmp(argv[2], "print") == 0) {
printf("Elements in FIFO:\n");
POBJ_TAILQ_FOREACH(node, tqhead, tnd) {
printf("%c\t", D_RO(node)->data);
}
printf("\n");
} else {
print_help();
}
pmemobj_close(pop);
return 0;
}
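/*
 * Example session (pool path hypothetical):
 *
 *	$ ./fifo /mnt/pmem/fifo insert a
 *	$ ./fifo /mnt/pmem/fifo print
 *	$ ./fifo /mnt/pmem/fifo remove
 *
 * The insert and remove paths each run 1000000 transactions and report
 * the measured TX/s.
 */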
| 2,782 | 21.264 | 64 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_hashmap_tx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* map_hashmap_tx.c -- common interface for maps
*/
#include <map.h>
#include <hashmap_tx.h>
#include "map_hashmap_tx.h"
/*
* map_hm_tx_check -- wrapper for hm_tx_check
*/
static int
map_hm_tx_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_check(pop, hashmap_tx);
}
/*
* map_hm_tx_count -- wrapper for hm_tx_count
*/
static size_t
map_hm_tx_count(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_count(pop, hashmap_tx);
}
/*
* map_hm_tx_init -- wrapper for hm_tx_init
*/
static int
map_hm_tx_init(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_init(pop, hashmap_tx);
}
/*
* map_hm_tx_create -- wrapper for hm_tx_create
*/
static int
map_hm_tx_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct hashmap_tx) *hashmap_tx =
(TOID(struct hashmap_tx) *)map;
return hm_tx_create(pop, hashmap_tx, arg);
}
/*
* map_hm_tx_insert -- wrapper for hm_tx_insert
*/
static int
map_hm_tx_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_insert(pop, hashmap_tx, key, value);
}
/*
* map_hm_tx_remove -- wrapper for hm_tx_remove
*/
static PMEMoid
map_hm_tx_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_remove(pop, hashmap_tx, key);
}
/*
* map_hm_tx_get -- wrapper for hm_tx_get
*/
static PMEMoid
map_hm_tx_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_get(pop, hashmap_tx, key);
}
/*
* map_hm_tx_lookup -- wrapper for hm_tx_lookup
*/
static int
map_hm_tx_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_lookup(pop, hashmap_tx, key);
}
/*
* map_hm_tx_foreach -- wrapper for hm_tx_foreach
*/
static int
map_hm_tx_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_foreach(pop, hashmap_tx, cb, arg);
}
/*
* map_hm_tx_cmd -- wrapper for hm_tx_cmd
*/
static int
map_hm_tx_cmd(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_cmd(pop, hashmap_tx, cmd, arg);
}
struct map_ops hashmap_tx_ops = {
/* .check = */ map_hm_tx_check,
/* .create = */ map_hm_tx_create,
/* .delete = */ NULL,
/* .init = */ map_hm_tx_init,
/* .insert = */ map_hm_tx_insert,
/* .insert_new = */ NULL,
/* .remove = */ map_hm_tx_remove,
/* .remove_free = */ NULL,
/* .clear = */ NULL,
/* .get = */ map_hm_tx_get,
/* .lookup = */ map_hm_tx_lookup,
/* .foreach = */ map_hm_tx_foreach,
/* .is_empty = */ NULL,
/* .count = */ map_hm_tx_count,
/* .cmd = */ map_hm_tx_cmd,
};
| 3,316 | 20.966887 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/kv_server.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* kv_server.c -- persistent tcp key-value store server
*/
#include <uv.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "libpmemobj.h"
#include "map.h"
#include "map_ctree.h"
#include "map_btree.h"
#include "map_rtree.h"
#include "map_rbtree.h"
#include "map_hashmap_atomic.h"
#include "map_hashmap_tx.h"
#include "map_hashmap_rp.h"
#include "map_skiplist.h"
#include "kv_protocol.h"
#define COUNT_OF(x) (sizeof(x) / sizeof(0[x]))
#define COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1]))
int use_ndp_redo = 0;
POBJ_LAYOUT_BEGIN(kv_server);
POBJ_LAYOUT_ROOT(kv_server, struct root);
POBJ_LAYOUT_TOID(kv_server, struct map_value);
POBJ_LAYOUT_TOID(kv_server, uint64_t);
POBJ_LAYOUT_END(kv_server);
struct map_value {
uint64_t len;
char buf[];
};
struct root {
TOID(struct map) map;
};
static struct map_ctx *mapc;
static PMEMobjpool *pop;
static TOID(struct map) map;
static uv_tcp_t server;
static uv_loop_t *loop;
typedef int (*msg_handler)(uv_stream_t *client, const char *msg, size_t len);
struct write_req {
uv_write_t req;
uv_buf_t buf;
};
struct client_data {
char *buf; /* current message, always NULL terminated */
size_t buf_len; /* sizeof(buf) */
size_t len; /* actual length of the message (while parsing) */
};
/*
* djb2_hash -- string hashing function by Dan Bernstein
*/
static uint32_t
djb2_hash(const char *str)
{
uint32_t hash = 5381;
int c;
while ((c = *str++))
hash = ((hash << 5) + hash) + c;
return hash;
}
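/*
 * The recurrence is hash = hash * 33 + c, seeded with 5381. For the key
 * "ab": 5381 * 33 + 'a'(97) = 177670, then 177670 * 33 + 'b'(98) =
 * 5863208, truncated to 32 bits -- so keys hash case-sensitively.
 */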
/*
* write_done_cb -- callback after message write completes
*/
static void
write_done_cb(uv_write_t *req, int status)
{
struct write_req *wr = (struct write_req *)req;
free(wr);
if (status < 0) {
	printf("response write failed\n");
}
}
/*
* client_close_cb -- callback after client tcp connection closes
*/
static void
client_close_cb(uv_handle_t *handle)
{
struct client_data *d = handle->data;
free(d->buf);
free(handle->data);
free(handle);
}
/*
* response_write -- response writing helper
*/
static void
response_write(uv_stream_t *client, char *resp, size_t len)
{
struct write_req *wr = malloc(sizeof(struct write_req));
assert(wr != NULL);
wr->buf = uv_buf_init(resp, len);
uv_write(&wr->req, client, &wr->buf, 1, write_done_cb);
}
/*
* response_msg -- predefined message writing helper
*/
static void
response_msg(uv_stream_t *client, enum resp_messages msg)
{
response_write(client, (char *)resp_msg[msg], strlen(resp_msg[msg]));
}
/*
* cmsg_insert_handler -- handler of INSERT client message
*/
static int
cmsg_insert_handler(uv_stream_t *client, const char *msg, size_t len)
{
int result = 0;
TX_BEGIN(pop) {
/*
* For simplicity sake the length of the value buffer is just
* a length of the message.
*/
TOID(struct map_value) val = TX_ZALLOC(struct map_value,
sizeof(struct map_value) + len);
char key[MAX_KEY_LEN];
int ret = sscanf(msg, "INSERT %254s %s\n", key, D_RW(val)->buf);
assert(ret == 2);
D_RW(val)->len = len;
/* properly terminate the value */
D_RW(val)->buf[strlen(D_RO(val)->buf)] = '\n';
map_insert(mapc, map, djb2_hash(key), val.oid);
} TX_ONABORT {
result = 1;
} TX_END
response_msg(client, result);
return 0;
}
/*
* cmsg_remove_handler -- handler of REMOVE client message
*/
static int
cmsg_remove_handler(uv_stream_t *client, const char *msg, size_t len)
{
char key[MAX_KEY_LEN] = {0};
/* check if the constant used in sscanf() below has the correct value */
COMPILE_ERROR_ON(MAX_KEY_LEN - 1 != 254);
int ret = sscanf(msg, "REMOVE %254s\n", key);
assert(ret == 1);
int result = map_remove_free(mapc, map, djb2_hash(key));
response_msg(client, result);
return 0;
}
/*
* cmsg_get_handler -- handler of GET client message
*/
static int
cmsg_get_handler(uv_stream_t *client, const char *msg, size_t len)
{
char key[MAX_KEY_LEN];
/* check if the constant used in sscanf() below has the correct value */
COMPILE_ERROR_ON(MAX_KEY_LEN - 1 != 254);
int ret = sscanf(msg, "GET %254s\n", key);
assert(ret == 1);
TOID(struct map_value) value;
TOID_ASSIGN(value, map_get(mapc, map, djb2_hash(key)));
if (TOID_IS_NULL(value)) {
response_msg(client, RESP_MSG_NULL);
} else {
response_write(client, D_RW(value)->buf, D_RO(value)->len);
}
return 0;
}
/*
* cmsg_bye_handler -- handler of BYE client message
*/
static int
cmsg_bye_handler(uv_stream_t *client, const char *msg, size_t len)
{
uv_close((uv_handle_t *)client, client_close_cb);
return 0;
}
/*
* cmsg_bye_handler -- handler of KILL client message
*/
static int
cmsg_kill_handler(uv_stream_t *client, const char *msg, size_t len)
{
uv_close((uv_handle_t *)client, client_close_cb);
uv_close((uv_handle_t *)&server, NULL);
return 0;
}
/* kv protocol implementation */
static msg_handler protocol_impl[MAX_CMSG] = {
cmsg_insert_handler,
cmsg_remove_handler,
cmsg_get_handler,
cmsg_bye_handler,
cmsg_kill_handler
};
/*
* cmsg_handle -- handles current client message
*/
static int
cmsg_handle(uv_stream_t *client, struct client_data *data)
{
int ret = 0;
int i;
for (i = 0; i < MAX_CMSG; ++i)
if (strncmp(kv_cmsg_token[i], data->buf,
strlen(kv_cmsg_token[i])) == 0)
break;
if (i == MAX_CMSG) {
response_msg(client, RESP_MSG_UNKNOWN);
} else {
ret = protocol_impl[i](client, data->buf, data->len);
}
data->len = 0; /* reset the message length */
return ret;
}
/*
* cmsg_handle_stream -- handle incoming tcp stream from clients
*/
static int
cmsg_handle_stream(uv_stream_t *client, struct client_data *data,
const char *buf, ssize_t nread)
{
char *last;
int ret;
size_t len;
/*
* A single read operation can contain zero or more operations, so this
* has to be handled appropriately. Client messages are terminated by
* newline character.
*/
while ((last = memchr(buf, '\n', nread)) != NULL) {
len = last - buf + 1;
nread -= len;
assert(data->len + len <= data->buf_len);
memcpy(data->buf + data->len, buf, len);
data->len += len;
if ((ret = cmsg_handle(client, data)) != 0)
return ret;
buf = last + 1;
}
if (nread != 0) {
memcpy(data->buf + data->len, buf, nread);
data->len += nread;
}
return 0;
}
static uv_buf_t msg_buf = {0};
/*
* get_read_buf_cb -- returns buffer for incoming client message
*/
static void
get_read_buf_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf)
{
buf->base = msg_buf.base;
buf->len = msg_buf.len;
}
/*
* read_cb -- async tcp read from clients
*/
static void
read_cb(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf)
{
if (nread <= 0) {
printf("client connection closed\n");
uv_close((uv_handle_t *)client, client_close_cb);
return;
}
struct client_data *d = client->data;
if (d->buf_len < (d->len + nread + 1)) {
char *cbuf = realloc(d->buf, d->buf_len + nread + 1);
assert(cbuf != NULL);
/* zero only the new memory */
memset(cbuf + d->buf_len, 0, nread + 1);
d->buf_len += nread + 1;
d->buf = cbuf;
}
if (cmsg_handle_stream(client, client->data, buf->base, nread)) {
printf("client disconnect\n");
uv_close((uv_handle_t *)client, client_close_cb);
}
}
/*
* connection_cb -- async incoming client request
*/
static void
connection_cb(uv_stream_t *server, int status)
{
if (status != 0) {
printf("client connect error\n");
return;
}
printf("new client\n");
uv_tcp_t *client = malloc(sizeof(uv_tcp_t));
assert(client != NULL);
client->data = calloc(1, sizeof(struct client_data));
assert(client->data != NULL);
uv_tcp_init(loop, client);
if (uv_accept(server, (uv_stream_t *)client) == 0) {
uv_read_start((uv_stream_t *)client, get_read_buf_cb, read_cb);
} else {
uv_close((uv_handle_t *)client, client_close_cb);
}
}
static const struct {
struct map_ops *ops;
const char *name;
} maps[] = {
{MAP_HASHMAP_TX, "hashmap_tx"},
{MAP_HASHMAP_ATOMIC, "hashmap_atomic"},
{MAP_HASHMAP_RP, "hashmap_rp"},
{MAP_CTREE, "ctree"},
{MAP_BTREE, "btree"},
{MAP_RTREE, "rtree"},
{MAP_RBTREE, "rbtree"},
{MAP_SKIPLIST, "skiplist"}
};
/*
* get_map_ops_by_string -- parse the type string and return the associated ops
*/
static const struct map_ops *
get_map_ops_by_string(const char *type)
{
for (int i = 0; i < COUNT_OF(maps); ++i)
if (strcmp(maps[i].name, type) == 0)
return maps[i].ops;
return NULL;
}
#define KV_SIZE (PMEMOBJ_MIN_POOL)
#define MAX_READ_LEN (64 * 1024) /* 64 kilobytes */
int
main(int argc, char *argv[])
{
if (argc < 4) {
printf("usage: %s hashmap_tx|hashmap_atomic|hashmap_rp|"
"ctree|btree|rtree|rbtree|skiplist file-name port\n",
argv[0]);
return 1;
}
const char *path = argv[2];
const char *type = argv[1];
int port = atoi(argv[3]);
/* use only a single buffer for all incoming data */
void *read_buf = malloc(MAX_READ_LEN);
assert(read_buf != NULL);
msg_buf = uv_buf_init(read_buf, MAX_READ_LEN);
if (access(path, F_OK) != 0) {
pop = pmemobj_create(path, POBJ_LAYOUT_NAME(kv_server),
KV_SIZE, 0666);
if (pop == NULL) {
fprintf(stderr, "failed to create pool: %s\n",
pmemobj_errormsg());
return 1;
}
} else {
pop = pmemobj_open(path, POBJ_LAYOUT_NAME(kv_server));
if (pop == NULL) {
fprintf(stderr, "failed to open pool: %s\n",
pmemobj_errormsg());
return 1;
}
}
/* map context initialization */
mapc = map_ctx_init(get_map_ops_by_string(type), pop);
if (!mapc) {
pmemobj_close(pop);
fprintf(stderr, "map_ctx_init failed (wrong type?)\n");
return 1;
}
/* initialize the actual map */
TOID(struct root) root = POBJ_ROOT(pop, struct root);
if (TOID_IS_NULL(D_RO(root)->map)) {
/* create new if it doesn't exist (a fresh pool) */
map_create(mapc, &D_RW(root)->map, NULL);
}
map = D_RO(root)->map;
loop = uv_default_loop();
/* tcp server initialization */
uv_tcp_init(loop, &server);
struct sockaddr_in bind_addr;
uv_ip4_addr("0.0.0.0", port, &bind_addr);
int ret = uv_tcp_bind(&server, (const struct sockaddr *)&bind_addr, 0);
assert(ret == 0);
ret = uv_listen((uv_stream_t *)&server, SOMAXCONN, connection_cb);
assert(ret == 0);
ret = uv_run(loop, UV_RUN_DEFAULT);
assert(ret == 0);
/* no more events in the loop, release resources and quit */
uv_loop_delete(loop);
map_ctx_free(mapc);
pmemobj_close(pop);
free(read_buf);
return 0;
}
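/*
 * Example session against a running server (addresses and replies are
 * illustrative; the exact acknowledgement strings come from
 * kv_protocol.h):
 *
 *	$ ./kv_server hashmap_tx /mnt/pmem/kv 9100 &
 *	$ nc 127.0.0.1 9100
 *	INSERT foo bar
 *	GET foo
 *	bar
 *	BYE
 *
 * Every client message is newline-terminated, as required by
 * cmsg_handle_stream() above.
 */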
| 10,374 | 20.524896 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* map.h -- common interface for maps
*/
#ifndef MAP_H
#define MAP_H
#include <libpmemobj.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef MAP_TYPE_OFFSET
#define MAP_TYPE_OFFSET 1000
#endif
TOID_DECLARE(struct map, MAP_TYPE_OFFSET + 0);
struct map;
struct map_ctx;
struct map_ops {
int(*check)(PMEMobjpool *pop, TOID(struct map) map);
int(*create)(PMEMobjpool *pop, TOID(struct map) *map, void *arg);
int(*destroy)(PMEMobjpool *pop, TOID(struct map) *map);
int(*init)(PMEMobjpool *pop, TOID(struct map) map);
int(*insert)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value);
int(*insert_new)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid(*remove)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*remove_free)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*clear)(PMEMobjpool *pop, TOID(struct map) map);
PMEMoid(*get)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*lookup)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*foreach)(PMEMobjpool *pop, TOID(struct map) map,
int(*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg);
int(*is_empty)(PMEMobjpool *pop, TOID(struct map) map);
size_t(*count)(PMEMobjpool *pop, TOID(struct map) map);
int(*cmd)(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg);
};
struct map_ctx {
PMEMobjpool *pop;
const struct map_ops *ops;
};
struct map_ctx *map_ctx_init(const struct map_ops *ops, PMEMobjpool *pop);
void map_ctx_free(struct map_ctx *mapc);
int map_check(struct map_ctx *mapc, TOID(struct map) map);
int map_create(struct map_ctx *mapc, TOID(struct map) *map, void *arg);
int map_destroy(struct map_ctx *mapc, TOID(struct map) *map);
int map_init(struct map_ctx *mapc, TOID(struct map) map);
int map_insert(struct map_ctx *mapc, TOID(struct map) map,
uint64_t key, PMEMoid value);
int map_insert_new(struct map_ctx *mapc, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid map_remove(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_remove_free(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_clear(struct map_ctx *mapc, TOID(struct map) map);
PMEMoid map_get(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_lookup(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_foreach(struct map_ctx *mapc, TOID(struct map) map,
int(*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg);
int map_is_empty(struct map_ctx *mapc, TOID(struct map) map);
size_t map_count(struct map_ctx *mapc, TOID(struct map) map);
int map_cmd(struct map_ctx *mapc, TOID(struct map) map,
unsigned cmd, uint64_t arg);
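/*
 * Typical call sequence (a sketch; MAP_HASHMAP_TX comes from
 * map_hashmap_tx.h and the key/value arguments are placeholders):
 *
 *	struct map_ctx *mapc = map_ctx_init(MAP_HASHMAP_TX, pop);
 *	TOID(struct map) map;
 *	map_create(mapc, &map, NULL);
 *	map_insert(mapc, map, key, value_oid);
 *	PMEMoid v = map_get(mapc, map, key);
 *	map_ctx_free(mapc);
 */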
#ifdef __cplusplus
}
#endif
#endif /* MAP_H */
| 3,010 | 31.728261 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/data_store.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* data_store.c -- tree_map example usage
*/
#include <ex_common.h>
#include <stdio.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include "map.h"
#include "map_ctree.h"
#include "map_btree.h"
#include "map_rbtree.h"
#include "map_hashmap_atomic.h"
#include "map_hashmap_tx.h"
#include "map_hashmap_rp.h"
#include "map_skiplist.h"
POBJ_LAYOUT_BEGIN(data_store);
POBJ_LAYOUT_ROOT(data_store, struct store_root);
POBJ_LAYOUT_TOID(data_store, struct store_item);
POBJ_LAYOUT_END(data_store);
/////////////////Page fault handling/////////////////
#include <bits/types/sig_atomic_t.h>
#include <bits/types/sigset_t.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#ifndef SIGSTKSZ
#define SIGSTKSZ 8192
#endif
#ifndef SA_SIGINFO
#define SA_SIGINFO 4
#endif
#ifndef SA_ONSTACK
#define SA_ONSTACK 0x08000000 /* Use signal stack by using `sa_restorer'. */
#endif
#ifndef SA_RESTART
#define SA_RESTART 0x10000000 /* Restart syscall on signal return. */
#endif
#ifndef SA_NODEFER
#define SA_NODEFER 0x40000000 /* Don't automatically block the signal when delivered. */
#endif
stack_t _sigstk;
int updated_page_count = 0;
int all_updates = 0;
int start_timing = 0;
void * checkpoint_start;
void * page[50];
PMEMobjpool *pop;
void * device;
int tot_data_counter=0;
#define CHPSIZE 2048
void cmd_issue( uint32_t opcode,
uint32_t TXID,
uint32_t TID,
uint32_t OID,
uint64_t data_addr,
uint32_t data_size,
void * ptr){
//command with thread id encoded as first 8 bits of each word
uint32_t issue_cmd[7];
issue_cmd[0] = (TID<<24)|(opcode<<16)|(TXID<<8)|TID;
issue_cmd[1] = (TID<<24)|(OID<<16)|(data_addr>>48);
issue_cmd[2] = (TID<<24)|((data_addr & 0x0000FFFFFFFFFFFF)>>24);
issue_cmd[3] = (TID<<24)|(data_addr & 0x0000000000FFFFFF);
issue_cmd[4] = (TID<<24)|(data_size<<8);
issue_cmd[5] = (TID<<24)|(0X00FFFFFF>>16);
issue_cmd[6] = (TID<<24)|((0X00FFFFFF & 0x0000FFFF)<<8);
for(int i=0;i<7;i++){
// printf("%08x\n",issue_cmd[i]);
*((u_int32_t *) ptr) = issue_cmd[i];
}
}
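/*
 * Sketch of the resulting encoding (example values): for TID 1, opcode 2
 * (page checkpoint), TXID 0 and OID 0, word 0 is
 * (1<<24)|(2<<16)|(0<<8)|1 = 0x01020001; the top 16 bits of data_addr
 * land in word 1, bits 47..24 in word 2 and bits 23..0 in word 3, each
 * word tagged with the TID in bits 31..24. All seven words are written
 * to the same mapped BAR register, which the device presumably consumes
 * as a FIFO.
 */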
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
/// @brief Signal handler to trap SEGVs.
static void segvHandle(int signum, siginfo_t * siginfo, void * context) {
#define CPTIME
#ifdef CPTIME
uint64_t endCycles, startCycles,totalCycles;
startCycles = getCycle();
#endif
void * addr = siginfo->si_addr; // address of access
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
// Check if this was a SEGV that we are supposed to trap.
if (siginfo->si_code == SEGV_ACCERR) {
mprotect(pageStart, 4096, PROT_READ|PROT_WRITE);
if (all_updates >= 5 || updated_page_count == 50) {
	/* flush every recorded dirty page to the device */
	for (int i = 0; i < updated_page_count; i++) {
		//memcpy(checkpoint_start + 4096, pageStart,4096);
		//pmemobj_persist(pop, checkpoint_start + 4096,4096);
		cmd_issue(2, 0, 0, 0, (uint64_t)page[i], 4096, device);
		tot_data_counter++;
		page[i] = 0;
	}
	updated_page_count = 0;
	all_updates = 0;
}
all_updates ++;
//printf("te\n");
for(int i=0; i<updated_page_count; i++){
if(page[i] == pageStart){
#ifdef CPTIME
endCycles = getCycle();
totalCycles = endCycles - startCycles;
double totTime = ((double)totalCycles)/2000000000;
printf("cp %f\n", totTime);
#endif
return;}
}
page[updated_page_count] = pageStart;
//printf("test1 %lx %d %d\n",page[updated_page_count],updated_page_count,all_updates);
updated_page_count++;
#ifdef CPTIME
endCycles = getCycle();
totalCycles = endCycles - startCycles;
double totTime = ((double)totalCycles)/2000000000;
printf("cp %f\n", totTime);
#endif
//*((int *)checkpoint_start) = 10;
//test++;
//printf("test1 %lx %d\n",updated_page_count);
} else if (siginfo->si_code == SEGV_MAPERR) {
fprintf (stderr, "%d : map error with addr %p!\n", getpid(), addr);
abort();
} else {
fprintf (stderr, "%d : other access error with addr %p.\n", getpid(), addr);
abort();
}
}
static void __attribute__((constructor)) installSignalHandler(void) {
// Set up an alternate signal stack.
printf("page fault handler initialized!!\n");
_sigstk.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
_sigstk.ss_size = SIGSTKSZ;
_sigstk.ss_flags = 0;
sigaltstack(&_sigstk, (stack_t *) 0);
// Now set up a signal handler for SIGSEGV events.
struct sigaction siga;
sigemptyset(&siga.sa_mask);
// Set the following signals to a set
sigaddset(&siga.sa_mask, SIGSEGV);
sigaddset(&siga.sa_mask, SIGALRM);
sigprocmask(SIG_BLOCK, &siga.sa_mask, NULL);
// Point to the handler function.
siga.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART | SA_NODEFER;
siga.sa_sigaction = segvHandle;
if (sigaction(SIGSEGV, &siga, NULL) == -1) {
perror("sigaction(SIGSEGV)");
exit(-1);
}
sigprocmask(SIG_UNBLOCK, &siga.sa_mask, NULL);
return;
}
static void setpage(void * addr){
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
mprotect(pageStart, 4096, PROT_READ);
return;
}
static void resetpage(void * addr){
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
mprotect(pageStart, 4096, PROT_READ|PROT_WRITE);
return;
}
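/*
 * The dirty-page tracking cycle, in short: setpage() write-protects a
 * page, the first store to it then raises SIGSEGV, and segvHandle()
 * re-enables writes and records the page start; once enough updates
 * accumulate, the recorded pages are shipped to the device via
 * cmd_issue(). A sketch of one round (the object is hypothetical):
 *
 *	setpage(obj);       // arm: page becomes read-only
 *	obj->field = 1;     // faults once, handled in segvHandle()
 *	resetpage(obj);     // optional manual disarm
 */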
void* open_device(const char* pathname)
{
//int fd = os_open("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0",O_RDWR|O_SYNC);
int fd = open(pathname,O_RDWR|O_SYNC);
if(fd == -1)
{
printf("Couldnt opene file!!\n");
exit(0);
}
void * ptr = mmap(0,4096,PROT_READ|PROT_WRITE, MAP_SHARED,fd,0);
if(ptr == (void *)-1)
{
printf("Could not map memory!!\n");
exit(0);
}
printf("opened device without error!!\n");
return ptr;
}
///////////////////////////////////////////////////////////////
#define MAX_INSERTS 100000
int use_ndp_redo = 0;
static uint64_t nkeys;
static uint64_t keys[MAX_INSERTS];
//int page_skip_counter = 0;
TOID_DECLARE(struct page_checkpoint, 0);
struct page_checkpoint{
char page[50][4096];
};
struct store_item {
uint64_t item_data;
};
struct store_root {
TOID(struct map) map;
};
/*
* new_store_item -- transactionally creates and initializes new item
*/
static TOID(struct store_item)
new_store_item(void)
{
TOID(struct store_item) item = TX_NEW(struct store_item);
D_RW(item)->item_data = rand();
return item;
}
/*
* get_keys -- inserts the keys of the items by key order (sorted, descending)
*/
static int
get_keys(uint64_t key, PMEMoid value, void *arg)
{
keys[nkeys++] = key;
return 0;
}
/*
* dec_keys -- decrements the keys count for every item
*/
static int
dec_keys(uint64_t key, PMEMoid value, void *arg)
{
nkeys--;
return 0;
}
/*
* parse_map_type -- parse type of map
*/
static const struct map_ops *
parse_map_type(const char *type)
{
if (strcmp(type, "ctree") == 0)
return MAP_CTREE;
else if (strcmp(type, "btree") == 0)
return MAP_BTREE;
else if (strcmp(type, "rbtree") == 0)
return MAP_RBTREE;
else if (strcmp(type, "hashmap_atomic") == 0)
return MAP_HASHMAP_ATOMIC;
else if (strcmp(type, "hashmap_tx") == 0)
return MAP_HASHMAP_TX;
else if (strcmp(type, "hashmap_rp") == 0)
return MAP_HASHMAP_RP;
else if (strcmp(type, "skiplist") == 0)
return MAP_SKIPLIST;
return NULL;
}
int current_tx1;
int main(int argc, const char *argv[]) {
if (argc < 3) {
printf("usage: %s "
"<ctree|btree|rbtree|hashmap_atomic|hashmap_rp|"
"hashmap_tx|skiplist> file-name [nops]\n", argv[0]);
return 1;
}
const char *type = argv[1];
const char *path = argv[2];
const struct map_ops *map_ops = parse_map_type(type);
if (!map_ops) {
fprintf(stderr, "invalid container type -- '%s'\n", type);
return 1;
}
int nops = MAX_INSERTS;
if (argc > 3) {
nops = atoi(argv[3]);
if (nops <= 0 || nops > MAX_INSERTS) {
fprintf(stderr, "number of operations must be "
"in range 1..%d\n", MAX_INSERTS);
return 1;
}
}
//PMEMobjpool *pop;
srand((unsigned)time(NULL));
if (file_exists(path) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(data_store),
(1024*1024*512), 0666)) == NULL) {
perror("failed to create pool\n");
return 1;
}
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(data_store))) == NULL) {
perror("failed to open pool\n");
return 1;
}
}
device = open_device("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0");
//TOID(struct store_root) root = (TOID(struct store_root))pmemobj_root(pop, 1024*1024*512);
//struct queue *qu = pmemobj_direct(root);
//checkpoint_start = (void *)(qu + );
TOID(struct store_root) root = POBJ_ROOT(pop, struct store_root);
//checkpoint_start = D_RW(root) + (1024*1024*256);
TX_BEGIN(pop) {
checkpoint_start = D_RW(TX_NEW(struct page_checkpoint))->page;
} TX_END
struct map_ctx *mapc = map_ctx_init(map_ops, pop);
if (!mapc) {
perror("cannot allocate map context\n");
return 1;
}
/* delete the map if it exists */
if (!map_check(mapc, D_RW(root)->map))
map_destroy(mapc, &D_RW(root)->map);
/* insert random items in a transaction */
int aborted = 0;
uint64_t endCycles, startCycles,totalCycles;
TX_BEGIN(pop) {
map_create(mapc, &D_RW(root)->map, NULL);
} TX_END
//warmup database
/*for (int i = 0; i < 10000; ++i) {
TX_BEGIN(pop) {
int keyused = rand();
map_insert(mapc, D_RW(root)->map, keyused,
new_store_item().oid);
} TX_ONABORT {
perror("transaction aborted y\n");
map_ctx_free(mapc);
aborted = 1;
} TX_END
}*/
int keyread[10000];
startCycles = getCycle();
PMEMoid readval;
int readCount = 0; /* ops (of the first 10000) issued as reads instead of inserts */
for (int i = 0; i < nops; ++i) {
start_timing = 1;
TX_BEGIN(pop) {
if(i<(10000 -readCount)){
int keyused = rand();
keyread[i]= keyused;
map_insert(mapc, D_RW(root)->map, keyused,
new_store_item().oid);
}
else {
if(readCount == 7500)
readval = map_get(mapc, D_RW(root)->map, keyread[rand()%2500]);
else
readval = map_get(mapc, D_RW(root)->map, keyread[rand()%(10000 - readCount)]);
}
} TX_ONABORT {
perror("transaction aborted y\n");
map_ctx_free(mapc);
aborted = 1;
} TX_END
//updated_page_count = 0;
}
/* for (int i = 0; i < nops; ++i) {
current_tx1 = 1;
TX_BEGIN(pop) {
int keyused = rand();
map_insert(mapc, D_RW(root)->map, keyused,
new_store_item().oid);
} TX_ONABORT {
perror("transaction aborted y\n");
map_ctx_free(mapc);
aborted = 1;
} TX_END
//updated_page_count = 0;
}
*/
endCycles = getCycle();
totalCycles = endCycles - startCycles;
double totTime = ((double)totalCycles)/2000000000;
printf("TX/s %f\ntottime %f\n", nops/totTime, totTime);//RUN_COUNT/totTime, totTime);
map_ctx_free(mapc);
pmemobj_close(pop);
return 0;
}
| 11,362 | 23.38412 | 117 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_rtree.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* map_rtree.c -- common interface for maps
*/
#include <rtree_map.h>
#include "map_rtree.h"
/*
* map_rtree_check -- wrapper for rtree_map_check
*/
static int
map_rtree_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_check(pop, rtree_map);
}
/*
* map_rtree_create -- wrapper for rtree_map_new
*/
static int
map_rtree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct rtree_map) *rtree_map =
(TOID(struct rtree_map) *)map;
return rtree_map_create(pop, rtree_map, arg);
}
/*
* map_rtree_destroy -- wrapper for rtree_map_delete
*/
static int
map_rtree_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct rtree_map) *rtree_map =
(TOID(struct rtree_map) *)map;
return rtree_map_destroy(pop, rtree_map);
}
/*
* map_rtree_insert -- wrapper for rtree_map_insert
*/
static int
map_rtree_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_insert(pop, rtree_map,
(unsigned char *)&key, sizeof(key), value);
}
/*
* map_rtree_insert_new -- wrapper for rtree_map_insert_new
*/
static int
map_rtree_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_insert_new(pop, rtree_map,
(unsigned char *)&key, sizeof(key), size,
type_num, constructor, arg);
}
/*
* map_rtree_remove -- wrapper for rtree_map_remove
*/
static PMEMoid
map_rtree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_remove(pop, rtree_map,
(unsigned char *)&key, sizeof(key));
}
/*
* map_rtree_remove_free -- wrapper for rtree_map_remove_free
*/
static int
map_rtree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_remove_free(pop, rtree_map,
(unsigned char *)&key, sizeof(key));
}
/*
* map_rtree_clear -- wrapper for rtree_map_clear
*/
static int
map_rtree_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_clear(pop, rtree_map);
}
/*
* map_rtree_get -- wrapper for rtree_map_get
*/
static PMEMoid
map_rtree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_get(pop, rtree_map,
(unsigned char *)&key, sizeof(key));
}
/*
* map_rtree_lookup -- wrapper for rtree_map_lookup
*/
static int
map_rtree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_lookup(pop, rtree_map,
(unsigned char *)&key, sizeof(key));
}
struct cb_arg2 {
int (*cb)(uint64_t key, PMEMoid value, void *arg);
void *arg;
};
/*
* map_rtree_foreach_cb -- wrapper for callback
*/
static int
map_rtree_foreach_cb(const unsigned char *key,
uint64_t key_size, PMEMoid value, void *arg2)
{
const struct cb_arg2 *const a2 = (const struct cb_arg2 *)arg2;
const uint64_t *const k2 = (uint64_t *)key;
return a2->cb(*k2, value, a2->arg);
}
/*
* map_rtree_foreach -- wrapper for rtree_map_foreach
*/
static int
map_rtree_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
struct cb_arg2 arg2 = {cb, arg};
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_foreach(pop, rtree_map, map_rtree_foreach_cb, &arg2);
}
/*
* map_rtree_is_empty -- wrapper for rtree_map_is_empty
*/
static int
map_rtree_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_is_empty(pop, rtree_map);
}
struct map_ops rtree_map_ops = {
/* .check = */map_rtree_check,
/* .create = */map_rtree_create,
/* .destroy = */map_rtree_destroy,
/* .init = */NULL,
/* .insert = */map_rtree_insert,
/* .insert_new = */map_rtree_insert_new,
/* .remove = */map_rtree_remove,
/* .remove_free = */map_rtree_remove_free,
/* .clear = */map_rtree_clear,
/* .get = */map_rtree_get,
/* .lookup = */map_rtree_lookup,
/* .foreach = */map_rtree_foreach,
/* .is_empty = */map_rtree_is_empty,
/* .count = */NULL,
/* .cmd = */NULL,
};
| 4,700 | 21.710145 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_hashmap_rp.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* map_hashmap_rp.c -- common interface for maps
*/
#include <map.h>
#include <hashmap_rp.h>
#include "map_hashmap_rp.h"
/*
* map_hm_rp_check -- wrapper for hm_rp_check
*/
static int
map_hm_rp_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_check(pop, hashmap_rp);
}
/*
* map_hm_rp_count -- wrapper for hm_rp_count
*/
static size_t
map_hm_rp_count(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_count(pop, hashmap_rp);
}
/*
* map_hm_rp_init -- wrapper for hm_rp_init
*/
static int
map_hm_rp_init(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_init(pop, hashmap_rp);
}
/*
* map_hm_rp_create -- wrapper for hm_rp_create
*/
static int
map_hm_rp_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct hashmap_rp) *hashmap_rp =
(TOID(struct hashmap_rp) *)map;
return hm_rp_create(pop, hashmap_rp, arg);
}
/*
* map_hm_rp_insert -- wrapper for hm_rp_insert
*/
static int
map_hm_rp_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_insert(pop, hashmap_rp, key, value);
}
/*
* map_hm_rp_remove -- wrapper for hm_rp_remove
*/
static PMEMoid
map_hm_rp_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_remove(pop, hashmap_rp, key);
}
/*
* map_hm_rp_get -- wrapper for hm_rp_get
*/
static PMEMoid
map_hm_rp_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_get(pop, hashmap_rp, key);
}
/*
* map_hm_rp_lookup -- wrapper for hm_rp_lookup
*/
static int
map_hm_rp_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_lookup(pop, hashmap_rp, key);
}
/*
* map_hm_rp_foreach -- wrapper for hm_rp_foreach
*/
static int
map_hm_rp_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_foreach(pop, hashmap_rp, cb, arg);
}
/*
* map_hm_rp_cmd -- wrapper for hm_rp_cmd
*/
static int
map_hm_rp_cmd(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_cmd(pop, hashmap_rp, cmd, arg);
}
struct map_ops hashmap_rp_ops = {
/* .check = */ map_hm_rp_check,
/* .create = */ map_hm_rp_create,
/* .destroy = */ NULL,
/* .init = */ map_hm_rp_init,
/* .insert = */ map_hm_rp_insert,
/* .insert_new = */ NULL,
/* .remove = */ map_hm_rp_remove,
/* .remove_free = */ NULL,
/* .clear = */ NULL,
/* .get = */ map_hm_rp_get,
/* .lookup = */ map_hm_rp_lookup,
/* .foreach = */ map_hm_rp_foreach,
/* .is_empty = */ NULL,
/* .count = */ map_hm_rp_count,
/* .cmd = */ map_hm_rp_cmd,
};
| 3,315 | 20.532468 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/hashmap_tx.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/* integer hash set implementation which uses only transaction APIs */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "hashmap_tx.h"
#include "hashmap_internal.h"
/* layout definition */
TOID_DECLARE(struct buckets, HASHMAP_TX_TYPE_OFFSET + 1);
TOID_DECLARE(struct entry, HASHMAP_TX_TYPE_OFFSET + 2);
struct entry {
uint64_t key;
PMEMoid value;
/* next entry list pointer */
TOID(struct entry) next;
};
struct buckets {
/* number of buckets */
size_t nbuckets;
/* array of lists */
TOID(struct entry) bucket[];
};
struct hashmap_tx {
/* random number generator seed */
uint32_t seed;
/* hash function coefficients */
uint32_t hash_fun_a;
uint32_t hash_fun_b;
uint64_t hash_fun_p;
/* number of values inserted */
uint64_t count;
/* buckets */
TOID(struct buckets) buckets;
};
/*
* create_hashmap -- hashmap initializer
*/
static void
create_hashmap(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint32_t seed)
{
size_t len = INIT_BUCKETS_NUM;
size_t sz = sizeof(struct buckets) +
len * sizeof(TOID(struct entry));
TX_BEGIN(pop) {
TX_ADD(hashmap);
D_RW(hashmap)->seed = seed;
do {
D_RW(hashmap)->hash_fun_a = (uint32_t)rand();
} while (D_RW(hashmap)->hash_fun_a == 0);
D_RW(hashmap)->hash_fun_b = (uint32_t)rand();
D_RW(hashmap)->hash_fun_p = HASH_FUNC_COEFF_P;
D_RW(hashmap)->buckets = TX_ZALLOC(struct buckets, sz);
D_RW(D_RW(hashmap)->buckets)->nbuckets = len;
} TX_ONABORT {
fprintf(stderr, "%s: transaction aborted: %s\n", __func__,
pmemobj_errormsg());
abort();
} TX_END
}
/*
* hash -- the simplest hashing function,
* see https://en.wikipedia.org/wiki/Universal_hashing#Hashing_integers
*/
static uint64_t
hash(const TOID(struct hashmap_tx) *hashmap,
const TOID(struct buckets) *buckets, uint64_t value)
{
uint32_t a = D_RO(*hashmap)->hash_fun_a;
uint32_t b = D_RO(*hashmap)->hash_fun_b;
uint64_t p = D_RO(*hashmap)->hash_fun_p;
size_t len = D_RO(*buckets)->nbuckets;
return ((a * value + b) % p) % len;
}
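/*
 * Worked example with illustrative coefficients: for a = 3, b = 7 and
 * len = 8 buckets, key 42 maps to ((3 * 42 + 7) % p) % 8 = 133 % 8 = 5,
 * assuming p > 133 (HASH_FUNC_COEFF_P is a large prime).
 */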
/*
* hm_tx_rebuild -- rebuilds the hashmap with a new number of buckets
*/
static void
hm_tx_rebuild(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, size_t new_len)
{
TOID(struct buckets) buckets_old = D_RO(hashmap)->buckets;
if (new_len == 0)
new_len = D_RO(buckets_old)->nbuckets;
size_t sz_old = sizeof(struct buckets) +
D_RO(buckets_old)->nbuckets *
sizeof(TOID(struct entry));
size_t sz_new = sizeof(struct buckets) +
new_len * sizeof(TOID(struct entry));
TX_BEGIN(pop) {
TX_ADD_FIELD(hashmap, buckets);
TOID(struct buckets) buckets_new =
TX_ZALLOC(struct buckets, sz_new);
D_RW(buckets_new)->nbuckets = new_len;
pmemobj_tx_add_range(buckets_old.oid, 0, sz_old);
for (size_t i = 0; i < D_RO(buckets_old)->nbuckets; ++i) {
while (!TOID_IS_NULL(D_RO(buckets_old)->bucket[i])) {
TOID(struct entry) en =
D_RO(buckets_old)->bucket[i];
uint64_t h = hash(&hashmap, &buckets_new,
D_RO(en)->key);
D_RW(buckets_old)->bucket[i] = D_RO(en)->next;
TX_ADD_FIELD(en, next);
D_RW(en)->next = D_RO(buckets_new)->bucket[h];
D_RW(buckets_new)->bucket[h] = en;
}
}
D_RW(hashmap)->buckets = buckets_new;
TX_FREE(buckets_old);
} TX_ONABORT {
fprintf(stderr, "%s: transaction aborted: %s\n", __func__,
pmemobj_errormsg());
/*
* We don't need to do anything here, because everything is
* consistent. The only thing affected is performance.
*/
} TX_END
}
/*
* hm_tx_insert -- inserts specified value into the hashmap,
* returns:
* - 0 if successful,
* - 1 if value already existed,
* - -1 if something bad happened
*/
int
hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key, PMEMoid value)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
int num = 0;
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next) {
if (D_RO(var)->key == key)
return 1;
num++;
}
int ret = 0;
TX_BEGIN(pop) {
TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]);
TX_ADD_FIELD(hashmap, count);
TOID(struct entry) e = TX_NEW(struct entry);
D_RW(e)->key = key;
D_RW(e)->value = value;
D_RW(e)->next = D_RO(buckets)->bucket[h];
D_RW(buckets)->bucket[h] = e;
D_RW(hashmap)->count++;
num++;
} TX_ONABORT {
fprintf(stderr, "transaction aborted: %s\n",
pmemobj_errormsg());
ret = -1;
} TX_END
if (ret)
return ret;
if (num > MAX_HASHSET_THRESHOLD ||
(num > MIN_HASHSET_THRESHOLD &&
D_RO(hashmap)->count > 2 * D_RO(buckets)->nbuckets))
hm_tx_rebuild(pop, hashmap, D_RO(buckets)->nbuckets * 2);
return 0;
}
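/*
 * Growth policy illustrated: a rebuild to twice the bucket count is
 * triggered either by a single chain growing past MAX_HASHSET_THRESHOLD,
 * or by a chain past MIN_HASHSET_THRESHOLD combined with a load factor
 * above 2 (count > 2 * nbuckets); see hashmap_internal.h for the
 * threshold values.
 */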
/*
* hm_tx_remove -- removes specified value from the hashmap,
* returns:
* - key's value if successful,
* - OID_NULL if value didn't exist or if something bad happened
*/
PMEMoid
hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var, prev = TOID_NULL(struct entry);
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
prev = var, var = D_RO(var)->next) {
if (D_RO(var)->key == key)
break;
}
if (TOID_IS_NULL(var))
return OID_NULL;
int ret = 0;
PMEMoid retoid = D_RO(var)->value;
TX_BEGIN(pop) {
if (TOID_IS_NULL(prev))
TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]);
else
TX_ADD_FIELD(prev, next);
TX_ADD_FIELD(hashmap, count);
if (TOID_IS_NULL(prev))
D_RW(buckets)->bucket[h] = D_RO(var)->next;
else
D_RW(prev)->next = D_RO(var)->next;
D_RW(hashmap)->count--;
TX_FREE(var);
} TX_ONABORT {
fprintf(stderr, "transaction aborted: %s\n",
pmemobj_errormsg());
ret = -1;
} TX_END
if (ret)
return OID_NULL;
if (D_RO(hashmap)->count < D_RO(buckets)->nbuckets)
hm_tx_rebuild(pop, hashmap, D_RO(buckets)->nbuckets / 2);
return retoid;
}
/*
* hm_tx_foreach -- prints all values from the hashmap
*/
int
hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
int ret = 0;
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) {
if (TOID_IS_NULL(D_RO(buckets)->bucket[i]))
continue;
for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var);
var = D_RO(var)->next) {
ret = cb(D_RO(var)->key, D_RO(var)->value, arg);
if (ret)
break;
}
}
return ret;
}
/*
* hm_tx_debug -- prints complete hashmap state
*/
static void
hm_tx_debug(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, FILE *out)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
fprintf(out, "a: %u b: %u p: %" PRIu64 "\n", D_RO(hashmap)->hash_fun_a,
D_RO(hashmap)->hash_fun_b, D_RO(hashmap)->hash_fun_p);
fprintf(out, "count: %" PRIu64 ", buckets: %zu\n",
D_RO(hashmap)->count, D_RO(buckets)->nbuckets);
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) {
if (TOID_IS_NULL(D_RO(buckets)->bucket[i]))
continue;
int num = 0;
fprintf(out, "%zu: ", i);
for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var);
var = D_RO(var)->next) {
fprintf(out, "%" PRIu64 " ", D_RO(var)->key);
num++;
}
fprintf(out, "(%d)\n", num);
}
}
/*
* hm_tx_get -- checks whether specified value is in the hashmap
*/
PMEMoid
hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next)
if (D_RO(var)->key == key)
return D_RO(var)->value;
return OID_NULL;
}
/*
* hm_tx_lookup -- checks whether specified value exists
*/
int
hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next)
if (D_RO(var)->key == key)
return 1;
return 0;
}
/*
* hm_tx_count -- returns number of elements
*/
size_t
hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
return D_RO(hashmap)->count;
}
/*
* hm_tx_init -- recovers hashmap state, called after pmemobj_open
*/
int
hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
srand(D_RO(hashmap)->seed);
return 0;
}
/*
* hm_tx_create -- allocates new hashmap
*/
int
hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg)
{
struct hashmap_args *args = (struct hashmap_args *)arg;
int ret = 0;
TX_BEGIN(pop) {
TX_ADD_DIRECT(map);
*map = TX_ZNEW(struct hashmap_tx);
uint32_t seed = args ? args->seed : 0;
create_hashmap(pop, *map, seed);
} TX_ONABORT {
ret = -1;
} TX_END
return ret;
}
/*
* hm_tx_check -- checks if specified persistent object is an
* instance of hashmap
*/
int
hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap);
}
/*
* hm_tx_cmd -- execute cmd for hashmap
*/
int
hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
unsigned cmd, uint64_t arg)
{
switch (cmd) {
case HASHMAP_CMD_REBUILD:
hm_tx_rebuild(pop, hashmap, arg);
return 0;
case HASHMAP_CMD_DEBUG:
if (!arg)
return -EINVAL;
hm_tx_debug(pop, hashmap, (FILE *)arg);
return 0;
default:
return -EINVAL;
}
}
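/*
 * Usage sketch (not part of the original file): a minimal, hypothetical
 * driver for the hm_tx_* API above. The root-object layout and the seed
 * value are assumptions -- the real driver lives in the map examples.
 */
#ifdef HM_TX_USAGE_SKETCH
struct sketch_root {
	TOID(struct hashmap_tx) map;
};
TOID_DECLARE_ROOT(struct sketch_root);

static void
hm_tx_usage_sketch(PMEMobjpool *pop)
{
	TOID(struct sketch_root) root;
	TOID_ASSIGN(root, pmemobj_root(pop, sizeof(struct sketch_root)));

	struct hashmap_args args = { .seed = 42 }; /* hypothetical seed */
	if (TOID_IS_NULL(D_RO(root)->map))
		hm_tx_create(pop, &D_RW(root)->map, &args); /* first run */
	else
		hm_tx_init(pop, D_RO(root)->map); /* reopen: reseed rand() */

	PMEMoid val;
	if (pmemobj_alloc(pop, &val, 64, 0, NULL, NULL) == 0 &&
	    hm_tx_insert(pop, D_RO(root)->map, 1234, val) == 0)
		(void)hm_tx_get(pop, D_RO(root)->map, 1234);
}
#endif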
| 9,692 | 22.078571 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_skiplist.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* map_skiplist.c -- common interface for maps
*/
#include <map.h>
#include <skiplist_map.h>
#include "map_skiplist.h"
/*
* map_skiplist_check -- wrapper for skiplist_map_check
*/
static int
map_skiplist_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_check(pop, skiplist_map);
}
/*
 * map_skiplist_create -- wrapper for skiplist_map_create
*/
static int
map_skiplist_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct skiplist_map_node) *skiplist_map =
(TOID(struct skiplist_map_node) *)map;
return skiplist_map_create(pop, skiplist_map, arg);
}
/*
 * map_skiplist_destroy -- wrapper for skiplist_map_destroy
*/
static int
map_skiplist_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct skiplist_map_node) *skiplist_map =
(TOID(struct skiplist_map_node) *)map;
return skiplist_map_destroy(pop, skiplist_map);
}
/*
* map_skiplist_insert -- wrapper for skiplist_map_insert
*/
static int
map_skiplist_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_insert(pop, skiplist_map, key, value);
}
/*
* map_skiplist_insert_new -- wrapper for skiplist_map_insert_new
*/
static int
map_skiplist_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_insert_new(pop, skiplist_map, key, size,
type_num, constructor, arg);
}
/*
* map_skiplist_remove -- wrapper for skiplist_map_remove
*/
static PMEMoid
map_skiplist_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_remove(pop, skiplist_map, key);
}
/*
* map_skiplist_remove_free -- wrapper for skiplist_map_remove_free
*/
static int
map_skiplist_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_remove_free(pop, skiplist_map, key);
}
/*
* map_skiplist_clear -- wrapper for skiplist_map_clear
*/
static int
map_skiplist_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_clear(pop, skiplist_map);
}
/*
* map_skiplist_get -- wrapper for skiplist_map_get
*/
static PMEMoid
map_skiplist_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_get(pop, skiplist_map, key);
}
/*
* map_skiplist_lookup -- wrapper for skiplist_map_lookup
*/
static int
map_skiplist_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_lookup(pop, skiplist_map, key);
}
/*
* map_skiplist_foreach -- wrapper for skiplist_map_foreach
*/
static int
map_skiplist_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_foreach(pop, skiplist_map, cb, arg);
}
/*
* map_skiplist_is_empty -- wrapper for skiplist_map_is_empty
*/
static int
map_skiplist_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_is_empty(pop, skiplist_map);
}
struct map_ops skiplist_map_ops = {
/* .check = */ map_skiplist_check,
/* .create = */ map_skiplist_create,
/* .destroy = */ map_skiplist_destroy,
/* .init = */ NULL,
/* .insert = */ map_skiplist_insert,
/* .insert_new = */ map_skiplist_insert_new,
/* .remove = */ map_skiplist_remove,
/* .remove_free = */ map_skiplist_remove_free,
/* .clear = */ map_skiplist_clear,
/* .get = */ map_skiplist_get,
/* .lookup = */ map_skiplist_lookup,
/* .foreach = */ map_skiplist_foreach,
/* .is_empty = */ map_skiplist_is_empty,
/* .count = */ NULL,
/* .cmd = */ NULL,
};
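/*
 * Sketch (not part of the original file): how the generic map layer in
 * map.c is expected to consume an ops table like the one above -- one
 * indirect call per operation. The dispatcher shown here is hypothetical.
 */
#ifdef MAP_OPS_DISPATCH_SKETCH
static int
example_map_insert(PMEMobjpool *pop, const struct map_ops *ops,
	TOID(struct map) map, uint64_t key, PMEMoid value)
{
	/* every backend in this example fills .insert, so call it directly */
	return ops->insert(pop, map, key, value);
}
#endif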
| 4,488 | 23.664835 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_ctree.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* map_ctree.c -- common interface for maps
*/
#include <map.h>
#include <ctree_map.h>
#include "map_ctree.h"
/*
* map_ctree_check -- wrapper for ctree_map_check
*/
static int
map_ctree_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_check(pop, ctree_map);
}
/*
* map_ctree_create -- wrapper for ctree_map_create
*/
static int
map_ctree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct ctree_map) *ctree_map =
(TOID(struct ctree_map) *)map;
return ctree_map_create(pop, ctree_map, arg);
}
/*
* map_ctree_destroy -- wrapper for ctree_map_destroy
*/
static int
map_ctree_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct ctree_map) *ctree_map =
(TOID(struct ctree_map) *)map;
return ctree_map_destroy(pop, ctree_map);
}
/*
* map_ctree_insert -- wrapper for ctree_map_insert
*/
static int
map_ctree_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_insert(pop, ctree_map, key, value);
}
/*
* map_ctree_insert_new -- wrapper for ctree_map_insert_new
*/
static int
map_ctree_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_insert_new(pop, ctree_map, key, size,
type_num, constructor, arg);
}
/*
* map_ctree_remove -- wrapper for ctree_map_remove
*/
static PMEMoid
map_ctree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_remove(pop, ctree_map, key);
}
/*
* map_ctree_remove_free -- wrapper for ctree_map_remove_free
*/
static int
map_ctree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_remove_free(pop, ctree_map, key);
}
/*
* map_ctree_clear -- wrapper for ctree_map_clear
*/
static int
map_ctree_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_clear(pop, ctree_map);
}
/*
* map_ctree_get -- wrapper for ctree_map_get
*/
static PMEMoid
map_ctree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_get(pop, ctree_map, key);
}
/*
* map_ctree_lookup -- wrapper for ctree_map_lookup
*/
static int
map_ctree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_lookup(pop, ctree_map, key);
}
/*
* map_ctree_foreach -- wrapper for ctree_map_foreach
*/
static int
map_ctree_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_foreach(pop, ctree_map, cb, arg);
}
/*
* map_ctree_is_empty -- wrapper for ctree_map_is_empty
*/
static int
map_ctree_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_is_empty(pop, ctree_map);
}
struct map_ops ctree_map_ops = {
/* .check = */ map_ctree_check,
/* .create = */ map_ctree_create,
/* .destroy = */ map_ctree_destroy,
/* .init = */ NULL,
/* .insert = */ map_ctree_insert,
/* .insert_new = */ map_ctree_insert_new,
/* .remove = */ map_ctree_remove,
/* .remove_free = */ map_ctree_remove_free,
/* .clear = */ map_ctree_clear,
/* .get = */ map_ctree_get,
/* .lookup = */ map_ctree_lookup,
/* .foreach = */ map_ctree_foreach,
/* .is_empty = */ map_ctree_is_empty,
/* .count = */ NULL,
/* .cmd = */ NULL,
};
| 4,091 | 21.483516 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_btree.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* map_btree.c -- common interface for maps
*/
#include <map.h>
#include <btree_map.h>
#include "map_btree.h"
/*
* map_btree_check -- wrapper for btree_map_check
*/
static int
map_btree_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_check(pop, btree_map);
}
/*
* map_btree_create -- wrapper for btree_map_create
*/
static int
map_btree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct btree_map) *btree_map =
(TOID(struct btree_map) *)map;
return btree_map_create(pop, btree_map, arg);
}
/*
* map_btree_destroy -- wrapper for btree_map_destroy
*/
static int
map_btree_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct btree_map) *btree_map =
(TOID(struct btree_map) *)map;
return btree_map_destroy(pop, btree_map);
}
/*
* map_btree_insert -- wrapper for btree_map_insert
*/
static int
map_btree_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_insert(pop, btree_map, key, value);
}
/*
* map_btree_insert_new -- wrapper for btree_map_insert_new
*/
static int
map_btree_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_insert_new(pop, btree_map, key, size,
type_num, constructor, arg);
}
/*
* map_btree_remove -- wrapper for btree_map_remove
*/
static PMEMoid
map_btree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_remove(pop, btree_map, key);
}
/*
* map_btree_remove_free -- wrapper for btree_map_remove_free
*/
static int
map_btree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_remove_free(pop, btree_map, key);
}
/*
* map_btree_clear -- wrapper for btree_map_clear
*/
static int
map_btree_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_clear(pop, btree_map);
}
/*
* map_btree_get -- wrapper for btree_map_get
*/
static PMEMoid
map_btree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_get(pop, btree_map, key);
}
/*
* map_btree_lookup -- wrapper for btree_map_lookup
*/
static int
map_btree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_lookup(pop, btree_map, key);
}
/*
* map_btree_foreach -- wrapper for btree_map_foreach
*/
static int
map_btree_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_foreach(pop, btree_map, cb, arg);
}
/*
* map_btree_is_empty -- wrapper for btree_map_is_empty
*/
static int
map_btree_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_is_empty(pop, btree_map);
}
struct map_ops btree_map_ops = {
/* .check = */ map_btree_check,
/* .create = */ map_btree_create,
/* .destroy = */ map_btree_destroy,
/* .init = */ NULL,
/* .insert = */ map_btree_insert,
/* .insert_new = */ map_btree_insert_new,
/* .remove = */ map_btree_remove,
/* .remove_free = */ map_btree_remove_free,
/* .clear = */ map_btree_clear,
/* .get = */ map_btree_get,
/* .lookup = */ map_btree_lookup,
/* .foreach = */ map_btree_foreach,
/* .is_empty = */ map_btree_is_empty,
/* .count = */ NULL,
/* .cmd = */ NULL,
};
| 4,091 | 21.483516 | 75 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_hashmap_atomic.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* map_hashmap_atomic.c -- common interface for maps
*/
#include <map.h>
#include <hashmap_atomic.h>
#include "map_hashmap_atomic.h"
/*
* map_hm_atomic_check -- wrapper for hm_atomic_check
*/
static int
map_hm_atomic_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_check(pop, hashmap_atomic);
}
/*
* map_hm_atomic_count -- wrapper for hm_atomic_count
*/
static size_t
map_hm_atomic_count(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_count(pop, hashmap_atomic);
}
/*
* map_hm_atomic_init -- wrapper for hm_atomic_init
*/
static int
map_hm_atomic_init(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_init(pop, hashmap_atomic);
}
/*
 * map_hm_atomic_create -- wrapper for hm_atomic_create
*/
static int
map_hm_atomic_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct hashmap_atomic) *hashmap_atomic =
(TOID(struct hashmap_atomic) *)map;
return hm_atomic_create(pop, hashmap_atomic, arg);
}
/*
* map_hm_atomic_insert -- wrapper for hm_atomic_insert
*/
static int
map_hm_atomic_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_insert(pop, hashmap_atomic, key, value);
}
/*
* map_hm_atomic_remove -- wrapper for hm_atomic_remove
*/
static PMEMoid
map_hm_atomic_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_remove(pop, hashmap_atomic, key);
}
/*
* map_hm_atomic_get -- wrapper for hm_atomic_get
*/
static PMEMoid
map_hm_atomic_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_get(pop, hashmap_atomic, key);
}
/*
* map_hm_atomic_lookup -- wrapper for hm_atomic_lookup
*/
static int
map_hm_atomic_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_lookup(pop, hashmap_atomic, key);
}
/*
* map_hm_atomic_foreach -- wrapper for hm_atomic_foreach
*/
static int
map_hm_atomic_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_foreach(pop, hashmap_atomic, cb, arg);
}
/*
* map_hm_atomic_cmd -- wrapper for hm_atomic_cmd
*/
static int
map_hm_atomic_cmd(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_cmd(pop, hashmap_atomic, cmd, arg);
}
struct map_ops hashmap_atomic_ops = {
/* .check = */ map_hm_atomic_check,
/* .create = */ map_hm_atomic_create,
/* .destroy = */ NULL,
/* .init = */ map_hm_atomic_init,
/* .insert = */ map_hm_atomic_insert,
/* .insert_new = */ NULL,
/* .remove = */ map_hm_atomic_remove,
/* .remove_free = */ NULL,
/* .clear = */ NULL,
/* .get = */ map_hm_atomic_get,
/* .lookup = */ map_hm_atomic_lookup,
/* .foreach = */ map_hm_atomic_foreach,
/* .is_empty = */ NULL,
/* .count = */ map_hm_atomic_count,
/* .cmd = */ map_hm_atomic_cmd,
};
| 3,693 | 22.987013 | 74 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_rbtree.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* map_rbtree.c -- common interface for maps
*/
#include <map.h>
#include <rbtree_map.h>
#include "map_rbtree.h"
/*
* map_rbtree_check -- wrapper for rbtree_map_check
*/
static int
map_rbtree_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_check(pop, rbtree_map);
}
/*
 * map_rbtree_create -- wrapper for rbtree_map_create
*/
static int
map_rbtree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct rbtree_map) *rbtree_map =
(TOID(struct rbtree_map) *)map;
return rbtree_map_create(pop, rbtree_map, arg);
}
/*
 * map_rbtree_destroy -- wrapper for rbtree_map_destroy
*/
static int
map_rbtree_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct rbtree_map) *rbtree_map =
(TOID(struct rbtree_map) *)map;
return rbtree_map_destroy(pop, rbtree_map);
}
/*
* map_rbtree_insert -- wrapper for rbtree_map_insert
*/
static int
map_rbtree_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_insert(pop, rbtree_map, key, value);
}
/*
* map_rbtree_insert_new -- wrapper for rbtree_map_insert_new
*/
static int
map_rbtree_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_insert_new(pop, rbtree_map, key, size,
type_num, constructor, arg);
}
/*
* map_rbtree_remove -- wrapper for rbtree_map_remove
*/
static PMEMoid
map_rbtree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_remove(pop, rbtree_map, key);
}
/*
* map_rbtree_remove_free -- wrapper for rbtree_map_remove_free
*/
static int
map_rbtree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_remove_free(pop, rbtree_map, key);
}
/*
* map_rbtree_clear -- wrapper for rbtree_map_clear
*/
static int
map_rbtree_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_clear(pop, rbtree_map);
}
/*
* map_rbtree_get -- wrapper for rbtree_map_get
*/
static PMEMoid
map_rbtree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_get(pop, rbtree_map, key);
}
/*
* map_rbtree_lookup -- wrapper for rbtree_map_lookup
*/
static int
map_rbtree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_lookup(pop, rbtree_map, key);
}
/*
* map_rbtree_foreach -- wrapper for rbtree_map_foreach
*/
static int
map_rbtree_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_foreach(pop, rbtree_map, cb, arg);
}
/*
* map_rbtree_is_empty -- wrapper for rbtree_map_is_empty
*/
static int
map_rbtree_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_is_empty(pop, rbtree_map);
}
struct map_ops rbtree_map_ops = {
/* .check = */ map_rbtree_check,
/* .create = */ map_rbtree_create,
/* .destroy = */ map_rbtree_destroy,
/* .init = */ NULL,
/* .insert = */ map_rbtree_insert,
/* .insert_new = */ map_rbtree_insert_new,
/* .remove = */ map_rbtree_remove,
/* .remove_free = */ map_rbtree_remove_free,
/* .clear = */ map_rbtree_clear,
/* .get = */ map_rbtree_get,
/* .lookup = */ map_rbtree_lookup,
/* .foreach = */ map_rbtree_foreach,
/* .is_empty = */ map_rbtree_is_empty,
/* .count = */ NULL,
/* .cmd = */ NULL,
};
| 4,199 | 22.076923 | 76 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/ctree_map.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* ctree_map.c -- Crit-bit trie implementation
*/
#include <ex_common.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include "ctree_map.h"
#define BIT_IS_SET(n, i) (!!((n) & (1ULL << (i))))
#include <x86intrin.h>
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
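/* the perf counters below convert cycles to seconds assuming a 2 GHz TSC */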
TOID_DECLARE(struct tree_map_node, CTREE_MAP_TYPE_OFFSET + 1);
static void setpage(void * addr){
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
mprotect(pageStart, 4096, PROT_READ);
return;
}
struct tree_map_entry {
uint64_t key;
PMEMoid slot;
};
struct tree_map_node {
int diff; /* most significant differing bit */
struct tree_map_entry entries[2];
};
struct ctree_map {
struct tree_map_entry root;
};
/*
* find_crit_bit -- (internal) finds the most significant differing bit
*/
static int
find_crit_bit(uint64_t lhs, uint64_t rhs)
{
return find_last_set_64(lhs ^ rhs);
}
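/*
 * Example for find_crit_bit() above: lhs = 12 (0b1100) and rhs = 10 (0b1010)
 * give lhs ^ rhs = 6 (0b0110), so the most significant differing bit is 2.
 */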
/*
* ctree_map_create -- allocates a new crit-bit tree instance
*/
int
ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct ctree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_clear_node -- (internal) clears this node and its children
*/
static void
ctree_map_clear_node(PMEMoid p)
{
if (OID_IS_NULL(p))
return;
if (OID_INSTANCEOF(p, struct tree_map_node)) {
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, p);
ctree_map_clear_node(D_RW(node)->entries[0].slot);
ctree_map_clear_node(D_RW(node)->entries[1].slot);
}
pmemobj_tx_free(p);
}
/*
* ctree_map_clear -- removes all elements from the map
*/
int
ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
TX_BEGIN(pop) {
ctree_map_clear_node(D_RW(map)->root.slot);
TX_ADD_FIELD(map, root);
D_RW(map)->root.slot = OID_NULL;
} TX_END
return 0;
}
/*
* ctree_map_destroy -- cleanups and frees crit-bit tree instance
*/
int
ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
ctree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct ctree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_insert_leaf -- (internal) inserts a new leaf at the position
*/
static void
ctree_map_insert_leaf(struct tree_map_entry *p,
struct tree_map_entry e, int diff)
{
TOID(struct tree_map_node) new_node = TX_NEW(struct tree_map_node);
D_RW(new_node)->diff = diff;
int d = BIT_IS_SET(e.key, D_RO(new_node)->diff);
/* insert the leaf at the direction based on the critical bit */
D_RW(new_node)->entries[d] = e;
/* find the appropriate position in the tree to insert the node */
TOID(struct tree_map_node) node;
while (OID_INSTANCEOF(p->slot, struct tree_map_node)) {
TOID_ASSIGN(node, p->slot);
/* the critical bits have to be sorted */
if (D_RO(node)->diff < D_RO(new_node)->diff)
break;
p = &D_RW(node)->entries[BIT_IS_SET(e.key, D_RO(node)->diff)];
}
pmemobj_tx_add_range_direct(p, sizeof(*p));
/* insert the found destination in the other slot */
D_RW(new_node)->entries[!d] = *p;
p->key = 0;
p->slot = new_node.oid;
}
/*
* ctree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
ctree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_insert -- inserts a new key-value pair into the map
*/
#ifdef GET_NDP_BREAKDOWN
uint64_t ulogCycles;
uint64_t waitCycles;
uint64_t ulogcount;
#endif
int
ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, PMEMoid value)
{
int ret = 0;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
//ulogcount = 0;
//uint64_t maxulogcount=0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
struct tree_map_entry *p = &D_RW(map)->root;
/* descend the path until a best matching key is found */
TOID(struct tree_map_node) node;
while (!OID_IS_NULL(p->slot) &&
OID_INSTANCEOF(p->slot, struct tree_map_node)) {
TOID_ASSIGN(node, p->slot);
p = &D_RW(node)->entries[BIT_IS_SET(key, D_RW(node)->diff)];
}
struct tree_map_entry e = {key, value};
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
//uint64_t startCycles1,endCycles1;
TX_BEGIN(pop) {
if (p->key == 0 || p->key == key) {
pmemobj_tx_add_range_direct(p, sizeof(*p));
*p = e;
} else {
ctree_map_insert_leaf(&D_RW(map)->root, e,
find_crit_bit(p->key, key));
}
} TX_ONABORT {
ret = 1;
} TX_END
//if( maxulogcount < ulogcount)
// maxulogcount = ulogcount;
//ulogcount = 0;
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000));
printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000));
//printf("maxulogs = %ld\n", maxulogcount);
#endif
return ret;
}
/*
* ctree_map_get_leaf -- (internal) searches for a leaf of the key
*/
static struct tree_map_entry *
ctree_map_get_leaf(TOID(struct ctree_map) map, uint64_t key,
struct tree_map_entry **parent)
{
struct tree_map_entry *n = &D_RW(map)->root;
struct tree_map_entry *p = NULL;
TOID(struct tree_map_node) node;
while (!OID_IS_NULL(n->slot) &&
OID_INSTANCEOF(n->slot, struct tree_map_node)) {
TOID_ASSIGN(node, n->slot);
p = n;
n = &D_RW(node)->entries[BIT_IS_SET(key, D_RW(node)->diff)];
}
if (n->key == key) {
if (parent)
*parent = p;
return n;
}
return NULL;
}
/*
* ctree_map_remove_free -- removes and frees an object from the tree
*/
int
ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = ctree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_remove -- removes key-value pair from the map
*/
PMEMoid
ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key)
{
PMEMoid ret;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
startCycles = getCycle();
#endif
struct tree_map_entry *parent = NULL;
struct tree_map_entry *leaf = ctree_map_get_leaf(map, key, &parent);
if (leaf == NULL)
return OID_NULL;
ret = leaf->slot;
if (parent == NULL) { /* root */
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(leaf, sizeof(*leaf));
leaf->key = 0;
leaf->slot = OID_NULL;
} TX_END
} else {
/*
* In this situation:
* parent
* / \
* LEFT RIGHT
* there's no point in leaving the parent internal node
* so it's swapped with the remaining node and then also freed.
*/
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
struct tree_map_entry *dest = parent;
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, parent->slot);
pmemobj_tx_add_range_direct(dest, sizeof(*dest));
*dest = D_RW(node)->entries[
D_RO(node)->entries[0].key == leaf->key];
TX_FREE(node);
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("ctree ulog total time = %f\n", (((double)ulogCycles)/2000000000));
printf("ctree total wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
}
return ret;
}
/*
* ctree_map_get -- searches for a value of the key
*/
PMEMoid
ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key)
{
struct tree_map_entry *entry = ctree_map_get_leaf(map, key, NULL);
return entry ? entry->slot : OID_NULL;
}
/*
* ctree_map_lookup -- searches if a key exists
*/
int
ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key)
{
struct tree_map_entry *entry = ctree_map_get_leaf(map, key, NULL);
return entry != NULL;
}
/*
* ctree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
ctree_map_foreach_node(struct tree_map_entry e,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
int ret = 0;
if (OID_INSTANCEOF(e.slot, struct tree_map_node)) {
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, e.slot);
if (ctree_map_foreach_node(D_RO(node)->entries[0],
cb, arg) == 0)
ctree_map_foreach_node(D_RO(node)->entries[1], cb, arg);
} else { /* leaf */
ret = cb(e.key, e.slot, arg);
}
return ret;
}
/*
* ctree_map_foreach -- initiates recursive traversal
*/
int
ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
if (OID_IS_NULL(D_RO(map)->root.slot))
return 0;
return ctree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
* ctree_map_is_empty -- checks whether the tree map is empty
*/
int
ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
return D_RO(map)->root.key == 0;
}
/*
* ctree_map_check -- check if given persistent object is a tree map
*/
int
ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
| 10,423 | 22.011038 | 83 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/ctree_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* ctree_map.h -- TreeMap sorted collection implementation
*/
#ifndef CTREE_MAP_H
#define CTREE_MAP_H
#include <libpmemobj.h>
#ifndef CTREE_MAP_TYPE_OFFSET
#define CTREE_MAP_TYPE_OFFSET 1008
#endif
struct ctree_map;
TOID_DECLARE(struct ctree_map, CTREE_MAP_TYPE_OFFSET + 0);
int ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map);
int ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg);
int ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map);
int ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, PMEMoid value);
int ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map);
PMEMoid ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map);
#endif /* CTREE_MAP_H */
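/*
 * Usage sketch (not part of the original header). The map TOID must live
 * inside the pool (e.g. in a root object), since create/destroy add it to
 * a transaction. Assuming a hypothetical root object with a "ctree" field:
 *
 *	TOID(struct ctree_map) *map = &D_RW(root)->ctree;
 *	ctree_map_create(pop, map, NULL);
 *	ctree_map_insert(pop, *map, 42, value);
 *	if (ctree_map_lookup(pop, *map, 42))
 *		value = ctree_map_get(pop, *map, 42);
 *	ctree_map_destroy(pop, map);
 */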
| 1,523 | 34.44186 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/rtree_map.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rtree_map.c -- implementation of rtree
*/
#include <ex_common.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include "rtree_map.h"
#include <x86intrin.h>
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
TOID_DECLARE(struct tree_map_node, RTREE_MAP_TYPE_OFFSET + 1);
/* Good values: 0x10 and 0x100, but the implementation is bound to 0x100 */
#ifndef ALPHABET_SIZE
#define ALPHABET_SIZE 0x100
#endif
struct tree_map_node {
TOID(struct tree_map_node) slots[ALPHABET_SIZE];
unsigned has_value;
PMEMoid value;
uint64_t key_size;
unsigned char key[];
};
struct rtree_map {
TOID(struct tree_map_node) root;
};
/*
* rtree_map_create -- allocates a new rtree instance
*/
int
rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
TX_ADD_DIRECT(map);
*map = TX_ZNEW(struct rtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_clear_node -- (internal) removes all elements from the node
*/
static void
rtree_map_clear_node(TOID(struct tree_map_node) node)
{
for (unsigned i = 0; i < ALPHABET_SIZE; i++) {
rtree_map_clear_node(D_RO(node)->slots[i]);
}
pmemobj_tx_add_range(node.oid, 0,
sizeof(struct tree_map_node) + D_RO(node)->key_size);
TX_FREE(node);
}
/*
* rtree_map_clear -- removes all elements from the map
*/
int
rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
int ret = 0;
TX_BEGIN(pop) {
rtree_map_clear_node(D_RO(map)->root);
TX_ADD_FIELD(map, root);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_destroy -- cleanups and frees rtree instance
*/
int
rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
rtree_map_clear(pop, *map);
TX_ADD_DIRECT(map);
TX_FREE(*map);
*map = TOID_NULL(struct rtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
 * rtree_new_node -- (internal) allocates and initializes a new node
*/
static TOID(struct tree_map_node)
rtree_new_node(const unsigned char *key, uint64_t key_size,
PMEMoid value, unsigned has_value)
{
TOID(struct tree_map_node) node;
node = TX_ZALLOC(struct tree_map_node,
sizeof(struct tree_map_node) + key_size);
/*
* !!! Here should be: D_RO(node)->value
* ... because we don't change map
*/
D_RW(node)->value = value;
D_RW(node)->has_value = has_value;
D_RW(node)->key_size = key_size;
memcpy(D_RW(node)->key, key, key_size);
return node;
}
/*
* rtree_map_insert_empty -- (internal) inserts a node into an empty map
*/
static void
rtree_map_insert_empty(TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
TX_ADD_FIELD(map, root);
D_RW(map)->root = rtree_new_node(key, key_size, value, 1);
}
/*
 * key_comm_len -- (internal) calculates the length of the common key prefix
*/
static unsigned
key_comm_len(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
for (i = 0;
i < MIN(key_size, D_RO(node)->key_size) &&
key[i] == D_RO(node)->key[i];
i++)
;
return i;
}
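/*
 * Example for key_comm_len() above: node key "roman" vs. lookup key
 * "romulus" share the prefix "rom", so the returned length is 3.
 */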
/*
* rtree_map_insert_value -- (internal) inserts a pair into a tree
*/
static void
rtree_map_insert_value(TOID(struct tree_map_node) *node,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
unsigned i;
if (TOID_IS_NULL(*node)) {
TX_ADD_DIRECT(node);
*node = rtree_new_node(key, key_size, value, 1);
return;
}
i = key_comm_len(*node, key, key_size);
if (i != D_RO(*node)->key_size) {
/* Node does not exist. Let's add. */
TOID(struct tree_map_node) orig_node = *node;
TX_ADD_DIRECT(node);
if (i != key_size) {
*node = rtree_new_node(D_RO(orig_node)->key, i,
OID_NULL, 0);
} else {
*node = rtree_new_node(D_RO(orig_node)->key, i,
value, 1);
}
D_RW(*node)->slots[D_RO(orig_node)->key[i]] = orig_node;
TX_ADD_FIELD(orig_node, key_size);
D_RW(orig_node)->key_size -= i;
pmemobj_tx_add_range_direct(D_RW(orig_node)->key,
D_RO(orig_node)->key_size);
memmove(D_RW(orig_node)->key, D_RO(orig_node)->key + i,
D_RO(orig_node)->key_size);
if (i != key_size) {
D_RW(*node)->slots[key[i]] =
rtree_new_node(key + i, key_size - i, value, 1);
}
return;
}
if (i == key_size) {
if (OID_IS_NULL(D_RO(*node)->value) || D_RO(*node)->has_value) {
/* Just replace old value with new */
TX_ADD_FIELD(*node, value);
TX_ADD_FIELD(*node, has_value);
D_RW(*node)->value = value;
D_RW(*node)->has_value = 1;
} else {
/*
* Ignore. By the fact current value should be
* removed in advance, or handled in a different way.
*/
}
} else {
/* Recurse deeply */
return rtree_map_insert_value(&D_RW(*node)->slots[key[i]],
key + i, key_size - i, value);
}
}
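/*
 * Example for rtree_map_insert_value() above: with "romanus" already in the
 * tree, inserting "romulus" splits at the common prefix "rom" -- the prefix
 * becomes a value-less internal node, and the suffixes "anus" and "ulus"
 * become its two children, indexed by their first byte.
 */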
/*
* rtree_map_is_empty -- checks whether the tree map is empty
*/
int
rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
return TOID_IS_NULL(D_RO(map)->root);
}
/*
* rtree_map_insert -- inserts a new key-value pair into the map
*/
#ifdef GET_NDP_BREAKDOWN
uint64_t ulogCycles;
uint64_t waitCycles;
uint64_t resetCycles;
#endif
int
rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
int ret = 0;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
if (rtree_map_is_empty(pop, map)) {
rtree_map_insert_empty(map, key, key_size, value);
} else {
rtree_map_insert_value(&D_RW(map)->root,
key, key_size, value);
}
} TX_ONABORT {
ret = 1;
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000));
printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* rtree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size,
size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
rtree_map_insert(pop, map, key, key_size, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
 * is_leaf -- (internal) checks whether a node has no children
*/
static bool
is_leaf(TOID(struct tree_map_node) node)
{
unsigned j;
for (j = 0;
j < ALPHABET_SIZE &&
TOID_IS_NULL(D_RO(node)->slots[j]);
j++)
;
return (j == ALPHABET_SIZE);
}
/*
 * has_only_one_child -- (internal) checks whether a node has exactly one child
*/
static bool
has_only_one_child(TOID(struct tree_map_node) node, unsigned *child_idx)
{
unsigned j, child_qty;
for (j = 0, child_qty = 0; j < ALPHABET_SIZE; j++)
if (!TOID_IS_NULL(D_RO(node)->slots[j])) {
child_qty++;
*child_idx = j;
}
return (1 == child_qty);
}
/*
* remove_extra_node -- (internal) remove unneeded extra node
*/
static void
remove_extra_node(TOID(struct tree_map_node) *node)
{
unsigned child_idx = UINT_MAX;
TOID(struct tree_map_node) tmp, tmp_child;
/* Our node has child with only one child. */
tmp = *node;
has_only_one_child(tmp, &child_idx);
assert(child_idx != UINT_MAX);
tmp_child = D_RO(tmp)->slots[child_idx];
/*
	 * That child's incoming label is appended to our incoming label
* and the child is removed.
*/
uint64_t new_key_size = D_RO(tmp)->key_size + D_RO(tmp_child)->key_size;
unsigned char *new_key = (unsigned char *)malloc(new_key_size);
assert(new_key != NULL);
memcpy(new_key, D_RO(tmp)->key, D_RO(tmp)->key_size);
memcpy(new_key + D_RO(tmp)->key_size,
D_RO(tmp_child)->key,
D_RO(tmp_child)->key_size);
TX_ADD_DIRECT(node);
*node = rtree_new_node(new_key, new_key_size,
D_RO(tmp_child)->value, D_RO(tmp_child)->has_value);
free(new_key);
TX_FREE(tmp);
memcpy(D_RW(*node)->slots,
D_RO(tmp_child)->slots,
sizeof(D_RO(tmp_child)->slots));
TX_FREE(tmp_child);
}
/*
* rtree_map_remove_node -- (internal) removes node from tree
*/
static PMEMoid
rtree_map_remove_node(TOID(struct rtree_map) map,
TOID(struct tree_map_node) *node,
const unsigned char *key, uint64_t key_size,
bool *check_for_child)
{
bool c4c;
unsigned i, child_idx;
PMEMoid ret = OID_NULL;
*check_for_child = false;
if (TOID_IS_NULL(*node))
return OID_NULL;
i = key_comm_len(*node, key, key_size);
if (i != D_RO(*node)->key_size)
/* Node does not exist */
return OID_NULL;
if (i == key_size) {
if (0 == D_RO(*node)->has_value)
return OID_NULL;
/* Node is found */
ret = D_RO(*node)->value;
/* delete node from tree */
TX_ADD_FIELD((*node), value);
TX_ADD_FIELD((*node), has_value);
D_RW(*node)->value = OID_NULL;
D_RW(*node)->has_value = 0;
if (is_leaf(*node)) {
pmemobj_tx_add_range(node->oid, 0,
sizeof(*node) + D_RO(*node)->key_size);
TX_FREE(*node);
TX_ADD_DIRECT(node);
(*node) = TOID_NULL(struct tree_map_node);
}
return ret;
}
/* Recurse deeply */
ret = rtree_map_remove_node(map,
&D_RW(*node)->slots[key[i]],
key + i, key_size - i,
&c4c);
if (c4c) {
/* Our node has child with only one child. Remove. */
remove_extra_node(&D_RW(*node)->slots[key[i]]);
return ret;
}
if (has_only_one_child(*node, &child_idx) &&
(0 == D_RO(*node)->has_value)) {
*check_for_child = true;
}
return ret;
}
/*
* rtree_map_remove -- removes key-value pair from the map
*/
PMEMoid
rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
PMEMoid ret;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
ret = OID_NULL;
bool check_for_child;
if (TOID_IS_NULL(map))
return OID_NULL;
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
ret = rtree_map_remove_node(map,
&D_RW(map)->root, key, key_size,
&check_for_child);
if (check_for_child) {
/* Our root node has only one child. Remove. */
remove_extra_node(&D_RW(map)->root);
}
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000));
printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* rtree_map_remove_free -- removes and frees an object from the tree
*/
int
rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
int ret = 0;
if (TOID_IS_NULL(map))
return 1;
TX_BEGIN(pop) {
pmemobj_tx_free(rtree_map_remove(pop, map, key, key_size));
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_get_in_node -- (internal) searches for a value in the node
*/
static PMEMoid
rtree_map_get_in_node(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
if (TOID_IS_NULL(node))
return OID_NULL;
i = key_comm_len(node, key, key_size);
if (i != D_RO(node)->key_size)
/* Node does not exist */
return OID_NULL;
if (i == key_size) {
/* Node is found */
return D_RO(node)->value;
} else {
/* Recurse deeply */
return rtree_map_get_in_node(D_RO(node)->slots[key[i]],
key + i, key_size - i);
}
}
/*
* rtree_map_get -- searches for a value of the key
*/
PMEMoid
rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
if (TOID_IS_NULL(D_RO(map)->root))
return OID_NULL;
return rtree_map_get_in_node(D_RO(map)->root, key, key_size);
}
/*
* rtree_map_lookup_in_node -- (internal) searches for key if exists
*/
static int
rtree_map_lookup_in_node(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
if (TOID_IS_NULL(node))
return 0;
i = key_comm_len(node, key, key_size);
if (i != D_RO(node)->key_size)
/* Node does not exist */
return 0;
if (i == key_size) {
/* Node is found */
return 1;
}
/* Recurse deeply */
return rtree_map_lookup_in_node(D_RO(node)->slots[key[i]],
key + i, key_size - i);
}
/*
* rtree_map_lookup -- searches if key exists
*/
int
rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
if (TOID_IS_NULL(D_RO(map)->root))
return 0;
return rtree_map_lookup_in_node(D_RO(map)->root, key, key_size);
}
/*
* rtree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
rtree_map_foreach_node(const TOID(struct tree_map_node) node,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid, void *arg),
void *arg)
{
unsigned i;
if (TOID_IS_NULL(node))
return 0;
for (i = 0; i < ALPHABET_SIZE; i++) {
if (rtree_map_foreach_node(D_RO(node)->slots[i], cb, arg) != 0)
return 1;
}
if (NULL != cb) {
if (cb(D_RO(node)->key, D_RO(node)->key_size,
D_RO(node)->value, arg) != 0)
return 1;
}
return 0;
}
/*
* rtree_map_foreach -- initiates recursive traversal
*/
int
rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid value, void *arg),
void *arg)
{
return rtree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
 * rtree_map_check -- checks if the given persistent object is a tree map
*/
int
rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
| 14,705 | 21.081081 | 83 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/btree_map.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* btree_map.c -- textbook implementation of btree /w preemptive splitting
*/
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include "btree_map.h"
#ifdef GET_NDP_PERFORMENCE
#include <x86intrin.h>
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
#endif
static void setpage(void * addr){
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
mprotect(pageStart, 4096, PROT_READ);
return;
}
TOID_DECLARE(struct tree_map_node, BTREE_MAP_TYPE_OFFSET + 1);
#define BTREE_ORDER 8 /* can't be odd */
#define BTREE_MIN ((BTREE_ORDER / 2) - 1) /* min number of keys per node */
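/* with BTREE_ORDER == 8 a node holds at most 7 items and 8 child slots */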
struct tree_map_node_item {
uint64_t key;
PMEMoid value;
};
struct tree_map_node {
int n; /* number of occupied slots */
struct tree_map_node_item items[BTREE_ORDER - 1];
TOID(struct tree_map_node) slots[BTREE_ORDER];
};
struct btree_map {
TOID(struct tree_map_node) root;
};
/*
 * set_empty_item -- (internal) zeroes the item's key and value
*/
static void
set_empty_item(struct tree_map_node_item *item)
{
item->key = 0;
item->value = OID_NULL;
}
/*
* btree_map_create -- allocates a new btree instance
*/
int
btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct btree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_clear_node -- (internal) removes all elements from the node
*/
static void
btree_map_clear_node(TOID(struct tree_map_node) node)
{
for (int i = 0; i < D_RO(node)->n; ++i) {
btree_map_clear_node(D_RO(node)->slots[i]);
}
TX_FREE(node);
}
/*
* btree_map_clear -- removes all elements from the map
*/
int
btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map)
{
int ret = 0;
TX_BEGIN(pop) {
btree_map_clear_node(D_RO(map)->root);
TX_ADD_FIELD(map, root);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_destroy -- cleanups and frees btree instance
*/
int
btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
btree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct btree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_insert_item_at -- (internal) inserts an item at position
*/
static void
btree_map_insert_item_at(TOID(struct tree_map_node) node, int pos,
struct tree_map_node_item item)
{
D_RW(node)->items[pos] = item;
D_RW(node)->n += 1;
}
/*
* btree_map_insert_empty -- (internal) inserts an item into an empty node
*/
static void
btree_map_insert_empty(TOID(struct btree_map) map,
struct tree_map_node_item item)
{
TX_ADD_FIELD(map, root);
D_RW(map)->root = TX_ZNEW(struct tree_map_node);
btree_map_insert_item_at(D_RO(map)->root, 0, item);
}
/*
* btree_map_insert_node -- (internal) inserts and makes space for new node
*/
static void
btree_map_insert_node(TOID(struct tree_map_node) node, int p,
struct tree_map_node_item item,
TOID(struct tree_map_node) left, TOID(struct tree_map_node) right)
{
TX_ADD(node);
if (D_RO(node)->items[p].key != 0) { /* move all existing data */
memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p],
sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p)));
memmove(&D_RW(node)->slots[p + 1], &D_RW(node)->slots[p],
sizeof(TOID(struct tree_map_node)) * ((BTREE_ORDER - 1 - p)));
}
D_RW(node)->slots[p] = left;
D_RW(node)->slots[p + 1] = right;
btree_map_insert_item_at(node, p, item);
}
/*
* btree_map_create_split_node -- (internal) splits a node into two
*/
static TOID(struct tree_map_node)
btree_map_create_split_node(TOID(struct tree_map_node) node,
struct tree_map_node_item *m)
{
TOID(struct tree_map_node) right = TX_ZNEW(struct tree_map_node);
int c = (BTREE_ORDER / 2);
*m = D_RO(node)->items[c - 1]; /* select median item */
TX_ADD(node);
set_empty_item(&D_RW(node)->items[c - 1]);
/* move everything right side of median to the new node */
for (int i = c; i < BTREE_ORDER; ++i) {
if (i != BTREE_ORDER - 1) {
D_RW(right)->items[D_RW(right)->n++] =
D_RO(node)->items[i];
set_empty_item(&D_RW(node)->items[i]);
}
D_RW(right)->slots[i - c] = D_RO(node)->slots[i];
D_RW(node)->slots[i] = TOID_NULL(struct tree_map_node);
}
D_RW(node)->n = c - 1;
return right;
}
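/*
 * Example for btree_map_create_split_node() above: with BTREE_ORDER == 8 a
 * full node holds 7 items; items[3] becomes the median handed back via *m,
 * items[0..2] stay in the node and items[4..6] move to the new right
 * sibling, so each side keeps BTREE_MIN (3) items.
 */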
/*
* btree_map_find_dest_node -- (internal) finds a place to insert the new key at
*/
static TOID(struct tree_map_node)
btree_map_find_dest_node(TOID(struct btree_map) map,
TOID(struct tree_map_node) n, TOID(struct tree_map_node) parent,
uint64_t key, int *p)
{
if (D_RO(n)->n == BTREE_ORDER - 1) { /* node is full, perform a split */
struct tree_map_node_item m;
TOID(struct tree_map_node) right =
btree_map_create_split_node(n, &m);
if (!TOID_IS_NULL(parent)) {
btree_map_insert_node(parent, *p, m, n, right);
if (key > m.key) /* select node to continue search */
n = right;
} else { /* replacing root node, the tree grows in height */
TOID(struct tree_map_node) up =
TX_ZNEW(struct tree_map_node);
D_RW(up)->n = 1;
D_RW(up)->items[0] = m;
D_RW(up)->slots[0] = n;
D_RW(up)->slots[1] = right;
TX_ADD_FIELD(map, root);
D_RW(map)->root = up;
n = up;
}
}
int i;
for (i = 0; i < BTREE_ORDER - 1; ++i) {
*p = i;
/*
* The key either fits somewhere in the middle or at the
* right edge of the node.
*/
if (D_RO(n)->n == i || D_RO(n)->items[i].key > key) {
return TOID_IS_NULL(D_RO(n)->slots[i]) ? n :
btree_map_find_dest_node(map,
D_RO(n)->slots[i], n, key, p);
}
}
/*
* The key is bigger than the last node element, go one level deeper
* in the rightmost child.
*/
return btree_map_find_dest_node(map, D_RO(n)->slots[i], n, key, p);
}
/*
* btree_map_insert_item -- (internal) inserts and makes space for new item
*/
static void
btree_map_insert_item(TOID(struct tree_map_node) node, int p,
struct tree_map_node_item item)
{
TX_ADD(node);
if (D_RO(node)->items[p].key != 0) {
memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p],
sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p)));
}
btree_map_insert_item_at(node, p, item);
}
/*
* btree_map_is_empty -- checks whether the tree map is empty
*/
int
btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map)
{
return TOID_IS_NULL(D_RO(map)->root) || D_RO(D_RO(map)->root)->n == 0;
}
/*
* btree_map_insert -- inserts a new key-value pair into the map
*/
#ifdef GET_NDP_BREAKDOWN
uint64_t ulogCycles;
uint64_t waitCycles;
#endif
int
btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value)
{
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
struct tree_map_node_item item = {key, value};
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
if (btree_map_is_empty(pop, map)) {
btree_map_insert_empty(map, item);
} else {
int p; /* position at the dest node to insert */
TOID(struct tree_map_node) parent =
TOID_NULL(struct tree_map_node);
TOID(struct tree_map_node) dest =
btree_map_find_dest_node(map, D_RW(map)->root,
parent, key, &p);
btree_map_insert_item(dest, p, item);
}
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return 0;
}
/*
* btree_map_rotate_right -- (internal) takes one element from right sibling
*/
static void
btree_map_rotate_right(TOID(struct tree_map_node) rsb,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
/* move the separator from parent to the deficient node */
struct tree_map_node_item sep = D_RO(parent)->items[p];
btree_map_insert_item(node, D_RO(node)->n, sep);
/* the first element of the right sibling is the new separator */
TX_ADD_FIELD(parent, items[p]);
D_RW(parent)->items[p] = D_RO(rsb)->items[0];
/* the nodes are not necessarily leafs, so copy also the slot */
TX_ADD_FIELD(node, slots[D_RO(node)->n]);
D_RW(node)->slots[D_RO(node)->n] = D_RO(rsb)->slots[0];
TX_ADD(rsb);
D_RW(rsb)->n -= 1; /* it loses one element, but still > min */
/* move all existing elements back by one array slot */
memmove(D_RW(rsb)->items, D_RO(rsb)->items + 1,
sizeof(struct tree_map_node_item) * (D_RO(rsb)->n));
memmove(D_RW(rsb)->slots, D_RO(rsb)->slots + 1,
sizeof(TOID(struct tree_map_node)) * (D_RO(rsb)->n + 1));
}
/*
* btree_map_rotate_left -- (internal) takes one element from left sibling
*/
static void
btree_map_rotate_left(TOID(struct tree_map_node) lsb,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
/* move the separator from parent to the deficient node */
struct tree_map_node_item sep = D_RO(parent)->items[p - 1];
btree_map_insert_item(node, 0, sep);
/* the last element of the left sibling is the new separator */
TX_ADD_FIELD(parent, items[p - 1]);
D_RW(parent)->items[p - 1] = D_RO(lsb)->items[D_RO(lsb)->n - 1];
/* rotate the node children */
memmove(D_RW(node)->slots + 1, D_RO(node)->slots,
sizeof(TOID(struct tree_map_node)) * (D_RO(node)->n));
	/* the nodes are not necessarily leaves, so also copy the slot */
D_RW(node)->slots[0] = D_RO(lsb)->slots[D_RO(lsb)->n];
TX_ADD_FIELD(lsb, n);
D_RW(lsb)->n -= 1; /* it loses one element, but still > min */
}
/*
* btree_map_merge -- (internal) merges node and right sibling
*/
static void
btree_map_merge(TOID(struct btree_map) map, TOID(struct tree_map_node) rn,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
struct tree_map_node_item sep = D_RO(parent)->items[p];
TX_ADD(node);
/* add separator to the deficient node */
D_RW(node)->items[D_RW(node)->n++] = sep;
/* copy right sibling data to node */
memcpy(&D_RW(node)->items[D_RO(node)->n], D_RO(rn)->items,
sizeof(struct tree_map_node_item) * D_RO(rn)->n);
memcpy(&D_RW(node)->slots[D_RO(node)->n], D_RO(rn)->slots,
sizeof(TOID(struct tree_map_node)) * (D_RO(rn)->n + 1));
D_RW(node)->n += D_RO(rn)->n;
TX_FREE(rn); /* right node is now empty */
TX_ADD(parent);
D_RW(parent)->n -= 1;
/* move everything to the right of the separator by one array slot */
memmove(D_RW(parent)->items + p, D_RW(parent)->items + p + 1,
sizeof(struct tree_map_node_item) * (D_RO(parent)->n - p));
memmove(D_RW(parent)->slots + p + 1, D_RW(parent)->slots + p + 2,
sizeof(TOID(struct tree_map_node)) * (D_RO(parent)->n - p + 1));
/* if the parent is empty then the tree shrinks in height */
if (D_RO(parent)->n == 0 && TOID_EQUALS(parent, D_RO(map)->root)) {
TX_ADD(map);
TX_FREE(D_RO(map)->root);
D_RW(map)->root = node;
}
}
/*
* btree_map_rebalance -- (internal) performs tree rebalance
*/
static void
btree_map_rebalance(TOID(struct btree_map) map, TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
TOID(struct tree_map_node) rsb = p >= D_RO(parent)->n ?
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p + 1];
TOID(struct tree_map_node) lsb = p == 0 ?
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p - 1];
if (!TOID_IS_NULL(rsb) && D_RO(rsb)->n > BTREE_MIN)
btree_map_rotate_right(rsb, node, parent, p);
else if (!TOID_IS_NULL(lsb) && D_RO(lsb)->n > BTREE_MIN)
btree_map_rotate_left(lsb, node, parent, p);
else if (TOID_IS_NULL(rsb)) /* always merge with rightmost node */
btree_map_merge(map, node, lsb, parent, p - 1);
else
btree_map_merge(map, rsb, node, parent, p);
}
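/*
 * Note on the strategy above: rebalancing first tries to borrow an item
 * from a sibling that still holds more than BTREE_MIN items (a rotation);
 * only when neither sibling can spare one does it merge, preferring the
 * right sibling and, for the rightmost child, merging into the left one.
 */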
/*
* btree_map_get_leftmost_leaf -- (internal) searches for the successor
*/
static TOID(struct tree_map_node)
btree_map_get_leftmost_leaf(TOID(struct btree_map) map,
TOID(struct tree_map_node) n, TOID(struct tree_map_node) *p)
{
if (TOID_IS_NULL(D_RO(n)->slots[0]))
return n;
*p = n;
return btree_map_get_leftmost_leaf(map, D_RO(n)->slots[0], p);
}
/*
* btree_map_remove_from_node -- (internal) removes element from node
*/
static void
btree_map_remove_from_node(TOID(struct btree_map) map,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
if (TOID_IS_NULL(D_RO(node)->slots[0])) { /* leaf */
TX_ADD(node);
if (D_RO(node)->n == 1 || p == BTREE_ORDER - 2) {
set_empty_item(&D_RW(node)->items[p]);
} else if (D_RO(node)->n != 1) {
memmove(&D_RW(node)->items[p],
&D_RW(node)->items[p + 1],
sizeof(struct tree_map_node_item) *
(D_RO(node)->n - p));
}
D_RW(node)->n -= 1;
return;
}
/* can't delete from non-leaf nodes, remove successor */
TOID(struct tree_map_node) rchild = D_RW(node)->slots[p + 1];
TOID(struct tree_map_node) lp = node;
TOID(struct tree_map_node) lm =
btree_map_get_leftmost_leaf(map, rchild, &lp);
TX_ADD_FIELD(node, items[p]);
D_RW(node)->items[p] = D_RO(lm)->items[0];
btree_map_remove_from_node(map, lm, lp, 0);
if (D_RO(lm)->n < BTREE_MIN) /* right child can be deficient now */
btree_map_rebalance(map, lm, lp,
TOID_EQUALS(lp, node) ? p + 1 : 0);
}
#define NODE_CONTAINS_ITEM(_n, _i, _k)\
((_i) != D_RO(_n)->n && D_RO(_n)->items[_i].key == (_k))
#define NODE_CHILD_CAN_CONTAIN_ITEM(_n, _i, _k)\
((_i) == D_RO(_n)->n || D_RO(_n)->items[_i].key > (_k)) &&\
!TOID_IS_NULL(D_RO(_n)->slots[_i])
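/*
 * NODE_CONTAINS_ITEM is true when slot _i of node _n holds key _k;
 * NODE_CHILD_CAN_CONTAIN_ITEM is true when the key, if present at all,
 * can only reside in the _i-th child subtree of _n.
 */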
/*
* btree_map_remove_item -- (internal) removes item from node
*/
static PMEMoid
btree_map_remove_item(TOID(struct btree_map) map,
TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent,
uint64_t key, int p)
{
PMEMoid ret = OID_NULL;
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key)) {
ret = D_RO(node)->items[i].value;
btree_map_remove_from_node(map, node, parent, i);
break;
} else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key)) {
ret = btree_map_remove_item(map, D_RO(node)->slots[i],
node, key, i);
break;
}
}
/* check for deficient nodes walking up */
if (!TOID_IS_NULL(parent) && D_RO(node)->n < BTREE_MIN)
btree_map_rebalance(map, node, parent, p);
return ret;
}
/*
* btree_map_remove -- removes key-value pair from the map
*/
PMEMoid
btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
PMEMoid ret = OID_NULL;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
ret = btree_map_remove_item(map, D_RW(map)->root,
TOID_NULL(struct tree_map_node), key, 0);
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* btree_map_get_in_node -- (internal) searches for a value in the node
*/
static PMEMoid
btree_map_get_in_node(TOID(struct tree_map_node) node, uint64_t key)
{
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key))
return D_RO(node)->items[i].value;
else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key))
return btree_map_get_in_node(D_RO(node)->slots[i], key);
}
return OID_NULL;
}
/*
* btree_map_get -- searches for a value of the key
*/
PMEMoid
btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
if (TOID_IS_NULL(D_RO(map)->root))
return OID_NULL;
return btree_map_get_in_node(D_RO(map)->root, key);
}
/*
* btree_map_lookup_in_node -- (internal) searches for key if exists
*/
static int
btree_map_lookup_in_node(TOID(struct tree_map_node) node, uint64_t key)
{
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key))
return 1;
else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key))
return btree_map_lookup_in_node(
D_RO(node)->slots[i], key);
}
return 0;
}
/*
* btree_map_lookup -- searches if key exists
*/
int
btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
if (TOID_IS_NULL(D_RO(map)->root))
return 0;
return btree_map_lookup_in_node(D_RO(map)->root, key);
}
/*
* btree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
btree_map_foreach_node(const TOID(struct tree_map_node) p,
int (*cb)(uint64_t key, PMEMoid, void *arg), void *arg)
{
if (TOID_IS_NULL(p))
return 0;
for (int i = 0; i <= D_RO(p)->n; ++i) {
if (btree_map_foreach_node(D_RO(p)->slots[i], cb, arg) != 0)
return 1;
if (i != D_RO(p)->n && D_RO(p)->items[i].key != 0) {
if (cb(D_RO(p)->items[i].key, D_RO(p)->items[i].value,
arg) != 0)
return 1;
}
}
return 0;
}
/*
* btree_map_foreach -- initiates recursive traversal
*/
int
btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
return btree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
 * btree_map_check -- check if given persistent object is a tree map
*/
int
btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/*
* btree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
btree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_remove_free -- removes and frees an object from the tree
*/
int
btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = btree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
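/*
 * Illustrative usage sketch (not part of the original source): one
 * plausible way to drive the btree_map API above. The layout name, root
 * struct and the BTREE_MAP_USAGE_EXAMPLE guard are assumptions made for
 * this sketch only.
 */
#ifdef BTREE_MAP_USAGE_EXAMPLE
struct example_root {
	TOID(struct btree_map) map;
};
POBJ_LAYOUT_BEGIN(btree_example);
POBJ_LAYOUT_ROOT(btree_example, struct example_root);
POBJ_LAYOUT_END(btree_example);

static int
example_roundtrip(const char *path)
{
	PMEMobjpool *pop = pmemobj_create(path,
		POBJ_LAYOUT_NAME(btree_example), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;
	TOID(struct example_root) root =
		POBJ_ROOT(pop, struct example_root);
	int ret = 1;
	if (btree_map_create(pop, &D_RW(root)->map, NULL) == 0) {
		/* values are opaque PMEMoids; OID_NULL is a valid value */
		btree_map_insert(pop, D_RW(root)->map, 42, OID_NULL);
		ret = !btree_map_lookup(pop, D_RW(root)->map, 42);
		(void) btree_map_remove(pop, D_RW(root)->map, 42);
		btree_map_destroy(pop, &D_RW(root)->map);
	}
	pmemobj_close(pop);
	return ret;
}
#endif /* BTREE_MAP_USAGE_EXAMPLE */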
| 19,016 | 25.158184 | 83 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/rtree_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rtree_map.h -- Radix TreeMap collection implementation
*/
#ifndef RTREE_MAP_H
#define RTREE_MAP_H
#include <libpmemobj.h>
#ifndef RTREE_MAP_TYPE_OFFSET
#define RTREE_MAP_TYPE_OFFSET 1020
#endif
struct rtree_map;
TOID_DECLARE(struct rtree_map, RTREE_MAP_TYPE_OFFSET + 0);
int rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map);
int rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg);
int rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map);
int rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value);
int rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size,
size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map);
PMEMoid rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid value, void *arg),
void *arg);
int rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map);
#endif /* RTREE_MAP_H */
| 1,739 | 36.826087 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/rbtree_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* rbtree_map.h -- TreeMap sorted collection implementation
*/
#ifndef RBTREE_MAP_H
#define RBTREE_MAP_H
#include <libpmemobj.h>
#ifndef RBTREE_MAP_TYPE_OFFSET
#define RBTREE_MAP_TYPE_OFFSET 1016
#endif
struct rbtree_map;
TOID_DECLARE(struct rbtree_map, RBTREE_MAP_TYPE_OFFSET + 0);
int rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map);
int rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map,
void *arg);
int rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map);
int rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value);
int rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map);
PMEMoid rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map);
#endif /* RBTREE_MAP_H */
| 1,557 | 34.409091 | 73 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/btree_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* btree_map.h -- TreeMap sorted collection implementation
*/
#ifndef BTREE_MAP_H
#define BTREE_MAP_H
#include <libpmemobj.h>
#ifndef BTREE_MAP_TYPE_OFFSET
#define BTREE_MAP_TYPE_OFFSET 1012
#endif
struct btree_map;
TOID_DECLARE(struct btree_map, BTREE_MAP_TYPE_OFFSET + 0);
int btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map);
int btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg);
int btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map);
int btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value);
int btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map);
PMEMoid btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map);
#endif /* BTREE_MAP_H */
| 1,523 | 34.44186 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/rbtree_map.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
 * rbtree_map.c -- red-black tree implementation with sentinel nodes
*/
#include <assert.h>
#include <errno.h>
#include "rbtree_map.h"
#include <stdio.h>
#include <x86intrin.h>
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
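/*
 * The raw cycle counts returned by getCycle() are converted to seconds
 * further below by dividing by 2000000000, i.e. a fixed 2 GHz invariant
 * TSC is assumed.
 */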
TOID_DECLARE(struct tree_map_node, RBTREE_MAP_TYPE_OFFSET + 1);
#define NODE_P(_n)\
D_RW(_n)->parent
#define NODE_GRANDP(_n)\
NODE_P(NODE_P(_n))
#define NODE_PARENT_AT(_n, _rbc)\
D_RW(NODE_P(_n))->slots[_rbc]
#define NODE_PARENT_RIGHT(_n)\
NODE_PARENT_AT(_n, RB_RIGHT)
#define NODE_IS(_n, _rbc)\
TOID_EQUALS(_n, NODE_PARENT_AT(_n, _rbc))
#define NODE_IS_RIGHT(_n)\
TOID_EQUALS(_n, NODE_PARENT_RIGHT(_n))
#define NODE_LOCATION(_n)\
NODE_IS_RIGHT(_n)
#define RB_FIRST(_m)\
D_RW(D_RW(_m)->root)->slots[RB_LEFT]
#define NODE_IS_NULL(_n)\
TOID_EQUALS(_n, s)
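/*
 * A single sentinel node stands in for every NULL child/parent link.
 * The sentinel is created black (see rbtree_map_create() below), so the
 * rebalancing code can read a child's color without special-casing
 * empty subtrees.
 */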
enum rb_color {
COLOR_BLACK,
COLOR_RED,
MAX_COLOR
};
enum rb_children {
RB_LEFT,
RB_RIGHT,
MAX_RB
};
struct tree_map_node {
uint64_t key;
PMEMoid value;
enum rb_color color;
TOID(struct tree_map_node) parent;
TOID(struct tree_map_node) slots[MAX_RB];
};
struct rbtree_map {
TOID(struct tree_map_node) sentinel;
TOID(struct tree_map_node) root;
};
/*
* rbtree_map_create -- allocates a new red-black tree instance
*/
int
rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct rbtree_map);
TOID(struct tree_map_node) s = TX_ZNEW(struct tree_map_node);
D_RW(s)->color = COLOR_BLACK;
D_RW(s)->parent = s;
D_RW(s)->slots[RB_LEFT] = s;
D_RW(s)->slots[RB_RIGHT] = s;
TOID(struct tree_map_node) r = TX_ZNEW(struct tree_map_node);
D_RW(r)->color = COLOR_BLACK;
D_RW(r)->parent = s;
D_RW(r)->slots[RB_LEFT] = s;
D_RW(r)->slots[RB_RIGHT] = s;
D_RW(*map)->sentinel = s;
D_RW(*map)->root = r;
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_clear_node -- (internal) clears this node and its children
*/
static void
rbtree_map_clear_node(TOID(struct rbtree_map) map, TOID(struct tree_map_node) p)
{
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
if (!NODE_IS_NULL(D_RO(p)->slots[RB_LEFT]))
rbtree_map_clear_node(map, D_RO(p)->slots[RB_LEFT]);
if (!NODE_IS_NULL(D_RO(p)->slots[RB_RIGHT]))
rbtree_map_clear_node(map, D_RO(p)->slots[RB_RIGHT]);
TX_FREE(p);
}
/*
* rbtree_map_clear -- removes all elements from the map
*/
int
rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
TX_BEGIN(pop) {
rbtree_map_clear_node(map, D_RW(map)->root);
TX_ADD_FIELD(map, root);
TX_ADD_FIELD(map, sentinel);
TX_FREE(D_RW(map)->sentinel);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
D_RW(map)->sentinel = TOID_NULL(struct tree_map_node);
} TX_END
return 0;
}
/*
 * rbtree_map_destroy -- cleans up and frees the red-black tree instance
*/
int
rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
rbtree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct rbtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_rotate -- (internal) performs a left/right rotation around a node
*/
static void
rbtree_map_rotate(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) node, enum rb_children c)
{
TOID(struct tree_map_node) child = D_RO(node)->slots[!c];
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
TX_ADD(node);
TX_ADD(child);
D_RW(node)->slots[!c] = D_RO(child)->slots[c];
if (!TOID_EQUALS(D_RO(child)->slots[c], s))
TX_SET(D_RW(child)->slots[c], parent, node);
NODE_P(child) = NODE_P(node);
TX_SET(NODE_P(node), slots[NODE_LOCATION(node)], child);
D_RW(child)->slots[c] = node;
D_RW(node)->parent = child;
}
/*
* rbtree_map_insert_bst -- (internal) inserts a node in regular BST fashion
*/
static void
rbtree_map_insert_bst(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
TOID(struct tree_map_node) parent = D_RO(map)->root;
TOID(struct tree_map_node) *dst = &RB_FIRST(map);
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
D_RW(n)->slots[RB_LEFT] = s;
D_RW(n)->slots[RB_RIGHT] = s;
while (!NODE_IS_NULL(*dst)) {
parent = *dst;
dst = &D_RW(*dst)->slots[D_RO(n)->key > D_RO(*dst)->key];
}
TX_SET(n, parent, parent);
pmemobj_tx_add_range_direct(dst, sizeof(*dst));
*dst = n;
}
/*
* rbtree_map_recolor -- (internal) restores red-black tree properties
*/
static TOID(struct tree_map_node)
rbtree_map_recolor(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) n, enum rb_children c)
{
TOID(struct tree_map_node) uncle = D_RO(NODE_GRANDP(n))->slots[!c];
if (D_RO(uncle)->color == COLOR_RED) {
TX_SET(uncle, color, COLOR_BLACK);
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(NODE_GRANDP(n), color, COLOR_RED);
return NODE_GRANDP(n);
} else {
if (NODE_IS(n, !c)) {
n = NODE_P(n);
rbtree_map_rotate(map, n, c);
}
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(NODE_GRANDP(n), color, COLOR_RED);
rbtree_map_rotate(map, NODE_GRANDP(n), (enum rb_children)!c);
}
return n;
}
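/*
 * The two branches above are the classic insert-fixup cases: a red uncle
 * is handled by recoloring alone (pushing the possible violation up to
 * the grandparent), while a black uncle needs at most two rotations to
 * restore the red-black invariants.
 */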
/*
* rbtree_map_insert -- inserts a new key-value pair into the map
*/
#ifdef GET_NDP_BREAKDOWN
uint64_t ulogCycles;
uint64_t waitCycles;
uint64_t resetCycles;
#endif
int
rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value)
{
int ret = 0;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
TOID(struct tree_map_node) n = TX_ZNEW(struct tree_map_node);
D_RW(n)->key = key;
D_RW(n)->value = value;
rbtree_map_insert_bst(map, n);
D_RW(n)->color = COLOR_RED;
while (D_RO(NODE_P(n))->color == COLOR_RED)
n = rbtree_map_recolor(map, n, (enum rb_children)
NODE_LOCATION(NODE_P(n)));
TX_SET(RB_FIRST(map), color, COLOR_BLACK);
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* rbtree_map_successor -- (internal) returns the successor of a node
*/
static TOID(struct tree_map_node)
rbtree_map_successor(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
TOID(struct tree_map_node) dst = D_RO(n)->slots[RB_RIGHT];
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
if (!TOID_EQUALS(s, dst)) {
while (!NODE_IS_NULL(D_RO(dst)->slots[RB_LEFT]))
dst = D_RO(dst)->slots[RB_LEFT];
} else {
dst = D_RO(n)->parent;
while (TOID_EQUALS(n, D_RO(dst)->slots[RB_RIGHT])) {
n = dst;
dst = NODE_P(dst);
}
if (TOID_EQUALS(dst, D_RO(map)->root))
return s;
}
return dst;
}
/*
* rbtree_map_find_node -- (internal) returns the node that contains the key
*/
static TOID(struct tree_map_node)
rbtree_map_find_node(TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) dst = RB_FIRST(map);
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
while (!NODE_IS_NULL(dst)) {
if (D_RO(dst)->key == key)
return dst;
dst = D_RO(dst)->slots[key > D_RO(dst)->key];
}
return TOID_NULL(struct tree_map_node);
}
/*
* rbtree_map_repair_branch -- (internal) restores red-black tree in one branch
*/
static TOID(struct tree_map_node)
rbtree_map_repair_branch(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) n, enum rb_children c)
{
TOID(struct tree_map_node) sb = NODE_PARENT_AT(n, !c); /* sibling */
if (D_RO(sb)->color == COLOR_RED) {
TX_SET(sb, color, COLOR_BLACK);
TX_SET(NODE_P(n), color, COLOR_RED);
rbtree_map_rotate(map, NODE_P(n), c);
sb = NODE_PARENT_AT(n, !c);
}
if (D_RO(D_RO(sb)->slots[RB_RIGHT])->color == COLOR_BLACK &&
D_RO(D_RO(sb)->slots[RB_LEFT])->color == COLOR_BLACK) {
TX_SET(sb, color, COLOR_RED);
return D_RO(n)->parent;
} else {
if (D_RO(D_RO(sb)->slots[!c])->color == COLOR_BLACK) {
TX_SET(D_RW(sb)->slots[c], color, COLOR_BLACK);
TX_SET(sb, color, COLOR_RED);
rbtree_map_rotate(map, sb, (enum rb_children)!c);
sb = NODE_PARENT_AT(n, !c);
}
TX_SET(sb, color, D_RO(NODE_P(n))->color);
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(D_RW(sb)->slots[!c], color, COLOR_BLACK);
rbtree_map_rotate(map, NODE_P(n), c);
return RB_FIRST(map);
}
return n;
}
/*
* rbtree_map_repair -- (internal) restores red-black tree properties
* after remove
*/
static void
rbtree_map_repair(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
/* if left, repair right sibling, otherwise repair left sibling. */
while (!TOID_EQUALS(n, RB_FIRST(map)) && D_RO(n)->color == COLOR_BLACK)
n = rbtree_map_repair_branch(map, n, (enum rb_children)
NODE_LOCATION(n));
TX_SET(n, color, COLOR_BLACK);
}
/*
* rbtree_map_remove -- removes key-value pair from the map
*/
PMEMoid
rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
PMEMoid ret = OID_NULL;
#ifdef GET_NDP_BREAKDOWN
ulogCycles = 0;
waitCycles = 0;
#endif
#ifdef GET_NDP_PERFORMENCE
uint64_t btreetxCycles = 0;
uint64_t endCycles, startCycles;
for(int i=0;i<RUN_COUNT;i++){
#endif
TOID(struct tree_map_node) n = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(n))
return ret;
ret = D_RO(n)->value;
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
TOID(struct tree_map_node) r = D_RO(map)->root;
TOID(struct tree_map_node) y = (NODE_IS_NULL(D_RO(n)->slots[RB_LEFT]) ||
NODE_IS_NULL(D_RO(n)->slots[RB_RIGHT]))
? n : rbtree_map_successor(map, n);
TOID(struct tree_map_node) x = NODE_IS_NULL(D_RO(y)->slots[RB_LEFT]) ?
D_RO(y)->slots[RB_RIGHT] : D_RO(y)->slots[RB_LEFT];
#ifdef GET_NDP_PERFORMENCE
startCycles = getCycle();
#endif
TX_BEGIN(pop) {
TX_SET(x, parent, NODE_P(y));
if (TOID_EQUALS(NODE_P(x), r)) {
TX_SET(r, slots[RB_LEFT], x);
} else {
TX_SET(NODE_P(y), slots[NODE_LOCATION(y)], x);
}
if (D_RO(y)->color == COLOR_BLACK)
rbtree_map_repair(map, x);
if (!TOID_EQUALS(y, n)) {
TX_ADD(y);
D_RW(y)->slots[RB_LEFT] = D_RO(n)->slots[RB_LEFT];
D_RW(y)->slots[RB_RIGHT] = D_RO(n)->slots[RB_RIGHT];
D_RW(y)->parent = D_RO(n)->parent;
D_RW(y)->color = D_RO(n)->color;
TX_SET(D_RW(n)->slots[RB_LEFT], parent, y);
TX_SET(D_RW(n)->slots[RB_RIGHT], parent, y);
TX_SET(NODE_P(n), slots[NODE_LOCATION(n)], y);
}
TX_FREE(n);
} TX_END
#ifdef GET_NDP_PERFORMENCE
endCycles = getCycle();
btreetxCycles += endCycles - startCycles;
}
double totTime = ((double)btreetxCycles)/2000000000;
printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime);
#endif
#ifdef GET_NDP_BREAKDOWN
printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000));
printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000));
#endif
return ret;
}
/*
* rbtree_map_get -- searches for a value of the key
*/
PMEMoid
rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) node = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(node))
return OID_NULL;
return D_RO(node)->value;
}
/*
* rbtree_map_lookup -- searches if key exists
*/
int
rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) node = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(node))
return 0;
return 1;
}
/*
* rbtree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
rbtree_map_foreach_node(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) p,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
int ret = 0;
if (TOID_EQUALS(p, D_RO(map)->sentinel))
return 0;
if ((ret = rbtree_map_foreach_node(map,
D_RO(p)->slots[RB_LEFT], cb, arg)) == 0) {
if ((ret = cb(D_RO(p)->key, D_RO(p)->value, arg)) == 0)
rbtree_map_foreach_node(map,
D_RO(p)->slots[RB_RIGHT], cb, arg);
}
return ret;
}
/*
* rbtree_map_foreach -- initiates recursive traversal
*/
int
rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
return rbtree_map_foreach_node(map, RB_FIRST(map), cb, arg);
}
/*
* rbtree_map_is_empty -- checks whether the tree map is empty
*/
int
rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
return TOID_IS_NULL(RB_FIRST(map));
}
/*
* rbtree_map_check -- check if given persistent object is a tree map
*/
int
rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/*
* rbtree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
rbtree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_remove_free -- removes and frees an object from the tree
*/
int
rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = rbtree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
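/*
 * Illustrative usage sketch (not part of the original source): one way
 * to allocate and link a value in a single transaction through
 * rbtree_map_insert_new(). The item type, its constructor and the
 * RBTREE_MAP_USAGE_EXAMPLE guard are assumptions made for this sketch.
 */
#ifdef RBTREE_MAP_USAGE_EXAMPLE
struct example_item {
	uint64_t payload;
};

static void
example_item_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
	/* runs inside the allocating transaction started by insert_new */
	struct example_item *item = (struct example_item *)ptr;
	item->payload = *(uint64_t *)arg;
	/* explicit flush keeps the constructor usable outside transactions */
	pmemobj_persist(pop, item, sizeof(*item));
}

static int
example_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
	uint64_t payload = 2 * key;
	return rbtree_map_insert_new(pop, map, key,
		sizeof(struct example_item), 0,
		example_item_construct, &payload);
}
#endif /* RBTREE_MAP_USAGE_EXAMPLE */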
| 14,171 | 23.102041 | 83 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmempool/manpage.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* manpage.c -- simple example for the libpmempool man page
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <libpmempool.h>
#define PATH "./pmem-fs/myfile"
#define CHECK_FLAGS (PMEMPOOL_CHECK_FORMAT_STR|PMEMPOOL_CHECK_REPAIR|\
PMEMPOOL_CHECK_VERBOSE)
int
main(int argc, char *argv[])
{
PMEMpoolcheck *ppc;
struct pmempool_check_status *status;
enum pmempool_check_result ret;
/* arguments for check */
struct pmempool_check_args args = {
.path = PATH,
.backup_path = NULL,
.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
.flags = CHECK_FLAGS
};
/* initialize check context */
if ((ppc = pmempool_check_init(&args, sizeof(args))) == NULL) {
perror("pmempool_check_init");
exit(EXIT_FAILURE);
}
/* perform check and repair, answer 'yes' for each question */
while ((status = pmempool_check(ppc)) != NULL) {
switch (status->type) {
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
printf("%s\n", status->str.msg);
status->str.answer = "yes";
break;
default:
pmempool_check_end(ppc);
exit(EXIT_FAILURE);
}
}
/* finalize the check and get the result */
ret = pmempool_check_end(ppc);
switch (ret) {
case PMEMPOOL_CHECK_RESULT_CONSISTENT:
case PMEMPOOL_CHECK_RESULT_REPAIRED:
return 0;
default:
return 1;
}
}
| 1,555 | 21.882353 | 70 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_ssh.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_ssh.h -- rpmem ssh transport layer header file
*/
#ifndef RPMEM_SSH_H
#define RPMEM_SSH_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_ssh;
struct rpmem_ssh *rpmem_ssh_open(const struct rpmem_target_info *info);
struct rpmem_ssh *rpmem_ssh_exec(const struct rpmem_target_info *info, ...);
struct rpmem_ssh *rpmem_ssh_execv(const struct rpmem_target_info *info,
const char **argv);
int rpmem_ssh_close(struct rpmem_ssh *rps);
int rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len);
int rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len);
int rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock);
const char *rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno);
#ifdef __cplusplus
}
#endif
#endif
| 866 | 23.771429 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_fip.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip.h -- rpmem libfabric provider module header file
*/
#ifndef RPMEM_FIP_H
#define RPMEM_FIP_H
#include <stdint.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_fip;
struct rpmem_fip_attr {
enum rpmem_provider provider;
size_t max_wq_size;
enum rpmem_persist_method persist_method;
void *laddr;
size_t size;
size_t buff_size;
unsigned nlanes;
void *raddr;
uint64_t rkey;
};
struct rpmem_fip *rpmem_fip_init(const char *node, const char *service,
struct rpmem_fip_attr *attr, unsigned *nlanes);
void rpmem_fip_fini(struct rpmem_fip *fip);
int rpmem_fip_connect(struct rpmem_fip *fip);
int rpmem_fip_close(struct rpmem_fip *fip);
int rpmem_fip_process_start(struct rpmem_fip *fip);
int rpmem_fip_process_stop(struct rpmem_fip *fip);
int rpmem_fip_flush(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags);
int rpmem_fip_drain(struct rpmem_fip *fip, unsigned lane);
int rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags);
int rpmem_fip_read(struct rpmem_fip *fip, void *buff,
size_t len, size_t off, unsigned lane);
void rpmem_fip_probe_fork_safety(void);
size_t rpmem_fip_get_wq_size(struct rpmem_fip *fip);
#ifdef __cplusplus
}
#endif
#endif
| 1,427 | 22.032258 | 71 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem.c -- main source file for librpmem
*/
#include <stdlib.h>
#include <netdb.h>
#include <stdio.h>
#include <errno.h>
#include <limits.h>
#include <inttypes.h>
#include "librpmem.h"
#include "out.h"
#include "os.h"
#include "os_thread.h"
#include "util.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#include "rpmem_fip.h"
#include "rpmem_fip_common.h"
#include "rpmem_ssh.h"
#include "rpmem_proto.h"
#define RPMEM_REMOVE_FLAGS_ALL (\
RPMEM_REMOVE_FORCE | \
RPMEM_REMOVE_POOL_SET \
)
#define RPMEM_CHECK_FORK() do {\
if (Rpmem_fork_unsafe) {\
ERR("libfabric is initialized without fork() support");\
return NULL;\
}\
} while (0)
static os_once_t Rpmem_fork_unsafe_key_once = OS_ONCE_INIT;
/*
* rpmem_pool -- remote pool context
*/
struct rpmem_pool {
struct rpmem_obc *obc; /* out-of-band connection handle */
struct rpmem_fip *fip; /* fabric provider handle */
struct rpmem_target_info *info;
char fip_service[NI_MAXSERV];
enum rpmem_provider provider;
size_t max_wq_size; /* max WQ size supported by provider */
os_thread_t monitor;
int closing;
int no_headers;
/*
	 * Last error code; it needs to be volatile because it can
* be accessed by multiple threads.
*/
volatile int error;
};
/*
* env_get_bool -- parse value of specified environment variable as a bool
*
* Return values:
* 0 - defined, valp has value
* 1 - not defined
* -1 - parsing error
*/
static int
env_get_bool(const char *name, int *valp)
{
LOG(3, "name %s, valp %p", name, valp);
const char *env = os_getenv(name);
if (!env)
return 1;
char *endptr;
errno = 0;
long val = strtol(env, &endptr, 10);
if (*endptr != '\0' || errno)
goto err;
if (val < INT_MIN || val > INT_MAX)
goto err;
*valp = (int)val;
return 0;
err:
RPMEM_LOG(ERR, "!parsing '%s' environment variable failed", name);
return -1;
}
/*
 * rpmem_set_provider -- set provider based on node address and environment
*/
static int
rpmem_set_provider(RPMEMpool *rpp, const char *node)
{
LOG(3, "rpp %p, node %s", rpp, node);
struct rpmem_fip_probe probe;
enum rpmem_provider prov = RPMEM_PROV_UNKNOWN;
int ret = rpmem_fip_probe_get(node, &probe);
if (ret)
return -1;
/*
	 * The sockets provider can be used only if the specified environment
	 * variable is set to 1.
*/
if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_SOCKETS)) {
int enable;
ret = env_get_bool(RPMEM_PROV_SOCKET_ENV, &enable);
if (!ret && enable) {
prov = RPMEM_PROV_LIBFABRIC_SOCKETS;
}
}
/*
	 * The verbs provider is enabled by default. If the appropriate
	 * environment variable is set to 0, the verbs provider is disabled.
	 *
	 * The verbs provider has higher priority than the sockets provider.
*/
if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS)) {
int enable;
ret = env_get_bool(RPMEM_PROV_VERBS_ENV, &enable);
if (ret == 1 || (!ret && enable))
prov = RPMEM_PROV_LIBFABRIC_VERBS;
}
if (prov == RPMEM_PROV_UNKNOWN)
return -1;
RPMEM_ASSERT(prov < MAX_RPMEM_PROV);
rpp->max_wq_size = probe.max_wq_size[prov];
rpp->provider = prov;
return 0;
}
/*
* rpmem_monitor_thread -- connection monitor background thread
*/
static void *
rpmem_monitor_thread(void *arg)
{
LOG(3, "arg %p", arg);
RPMEMpool *rpp = arg;
int ret = rpmem_obc_monitor(rpp->obc, 0);
if (ret && !rpp->closing) {
RPMEM_LOG(ERR, "unexpected data received");
rpp->error = errno;
}
return NULL;
}
/*
* rpmem_common_init -- common routine for initialization
*/
static RPMEMpool *
rpmem_common_init(const char *target)
{
LOG(3, "target %s", target);
int ret;
RPMEMpool *rpp = calloc(1, sizeof(*rpp));
if (!rpp) {
ERR("!calloc");
goto err_malloc_rpmem;
}
rpp->info = rpmem_target_parse(target);
if (!rpp->info) {
ERR("!parsing target node address failed");
goto err_target_split;
}
ret = rpmem_set_provider(rpp, rpp->info->node);
if (ret) {
errno = ENOMEDIUM;
ERR("cannot find provider");
goto err_provider;
}
RPMEM_LOG(NOTICE, "provider: %s", rpmem_provider_to_str(rpp->provider));
if (rpp->provider == RPMEM_PROV_LIBFABRIC_SOCKETS) {
/* libfabric's sockets provider does not support IPv6 */
RPMEM_LOG(NOTICE, "forcing using IPv4");
rpp->info->flags |= RPMEM_FLAGS_USE_IPV4;
}
rpp->obc = rpmem_obc_init();
if (!rpp->obc) {
ERR("!out-of-band connection initialization failed");
goto err_obc_init;
}
RPMEM_LOG(INFO, "establishing out-of-band connection");
ret = rpmem_obc_connect(rpp->obc, rpp->info);
if (ret) {
ERR("!out-of-band connection failed");
goto err_obc_connect;
}
RPMEM_LOG(NOTICE, "out-of-band connection established");
return rpp;
err_obc_connect:
rpmem_obc_fini(rpp->obc);
err_obc_init:
err_provider:
rpmem_target_free(rpp->info);
err_target_split:
free(rpp);
err_malloc_rpmem:
return NULL;
}
/*
 * rpmem_common_fini -- common routine for deinitialization
*/
static void
rpmem_common_fini(RPMEMpool *rpp, int join)
{
LOG(3, "rpp %p, join %d", rpp, join);
rpmem_obc_disconnect(rpp->obc);
if (join) {
int ret = os_thread_join(&rpp->monitor, NULL);
if (ret) {
errno = ret;
ERR("joining monitor thread failed");
}
}
rpmem_obc_fini(rpp->obc);
rpmem_target_free(rpp->info);
free(rpp);
}
/*
* rpmem_common_fip_init -- common routine for initializing fabric provider
*/
static int
rpmem_common_fip_init(RPMEMpool *rpp, struct rpmem_req_attr *req,
struct rpmem_resp_attr *resp, void *pool_addr, size_t pool_size,
unsigned *nlanes, size_t buff_size)
{
LOG(3, "rpp %p, req %p, resp %p, pool_addr %p, pool_size %zu, nlanes "
"%p", rpp, req, resp, pool_addr, pool_size, nlanes);
int ret;
struct rpmem_fip_attr fip_attr = {
.provider = req->provider,
.max_wq_size = rpp->max_wq_size,
.persist_method = resp->persist_method,
.laddr = pool_addr,
.size = pool_size,
.buff_size = buff_size,
.nlanes = min(*nlanes, resp->nlanes),
.raddr = (void *)resp->raddr,
.rkey = resp->rkey,
};
ret = util_snprintf(rpp->fip_service, sizeof(rpp->fip_service),
"%u", resp->port);
if (ret < 0) {
ERR("!snprintf");
goto err_port;
}
rpp->fip = rpmem_fip_init(rpp->info->node, rpp->fip_service,
&fip_attr, nlanes);
if (!rpp->fip) {
ERR("!in-band connection initialization failed");
ret = -1;
goto err_fip_init;
}
RPMEM_LOG(NOTICE, "final nlanes: %u", *nlanes);
RPMEM_LOG(INFO, "establishing in-band connection");
ret = rpmem_fip_connect(rpp->fip);
if (ret) {
ERR("!establishing in-band connection failed");
goto err_fip_connect;
}
RPMEM_LOG(NOTICE, "in-band connection established");
return 0;
err_fip_connect:
rpmem_fip_fini(rpp->fip);
err_fip_init:
err_port:
return ret;
}
/*
* rpmem_common_fip_fini -- common routine for deinitializing fabric provider
*/
static void
rpmem_common_fip_fini(RPMEMpool *rpp)
{
LOG(3, "rpp %p", rpp);
RPMEM_LOG(INFO, "closing in-band connection");
rpmem_fip_fini(rpp->fip);
RPMEM_LOG(NOTICE, "in-band connection closed");
}
/*
* rpmem_log_args -- log input arguments for rpmem_create and rpmem_open
*/
static void
rpmem_log_args(const char *req, const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned nlanes)
{
LOG(3, "req %s, target %s, pool_set_name %s, pool_addr %p, pool_size "
"%zu, nlanes %d", req, target, pool_set_name, pool_addr,
pool_size, nlanes);
RPMEM_LOG(NOTICE, "%s request:", req);
RPMEM_LOG(NOTICE, "\ttarget: %s", target);
RPMEM_LOG(NOTICE, "\tpool set: %s", pool_set_name);
RPMEM_LOG(INFO, "\tpool addr: %p", pool_addr);
RPMEM_LOG(INFO, "\tpool size: %lu", pool_size);
RPMEM_LOG(NOTICE, "\tnlanes: %u", nlanes);
}
/*
* rpmem_log_resp -- log response attributes
*/
static void
rpmem_log_resp(const char *req, const struct rpmem_resp_attr *resp)
{
LOG(3, "req %s, resp %p", req, resp);
RPMEM_LOG(NOTICE, "%s request response:", req);
RPMEM_LOG(NOTICE, "\tnlanes: %u", resp->nlanes);
RPMEM_LOG(NOTICE, "\tport: %u", resp->port);
RPMEM_LOG(NOTICE, "\tpersist method: %s",
rpmem_persist_method_to_str(resp->persist_method));
RPMEM_LOG(NOTICE, "\tremote addr: 0x%" PRIx64, resp->raddr);
}
/*
* rpmem_check_args -- validate user's arguments
*/
static int
rpmem_check_args(void *pool_addr, size_t pool_size, unsigned *nlanes)
{
LOG(3, "pool_addr %p, pool_size %zu, nlanes %p", pool_addr, pool_size,
nlanes);
if (!pool_addr) {
errno = EINVAL;
ERR("invalid pool address");
return -1;
}
if (!IS_PAGE_ALIGNED((uintptr_t)pool_addr)) {
errno = EINVAL;
ERR("Pool address must be aligned to page size (%llu)",
Pagesize);
return -1;
}
if (!IS_PAGE_ALIGNED(pool_size)) {
errno = EINVAL;
ERR("Pool size must be aligned to page size (%llu)",
Pagesize);
return -1;
}
if (!pool_size) {
errno = EINVAL;
ERR("invalid pool size");
return -1;
}
if (!nlanes) {
errno = EINVAL;
ERR("lanes pointer cannot be NULL");
return -1;
}
if (!(*nlanes)) {
errno = EINVAL;
ERR("number of lanes must be positive");
return -1;
}
return 0;
}
/*
* rpmem_create -- create remote pool on target node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* pool_addr -- local pool memory address which will be replicated
* pool_size -- required pool size
* nlanes -- number of lanes
* create_attr -- pool attributes used for creating the pool on remote node
*/
RPMEMpool *
rpmem_create(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
const struct rpmem_pool_attr *create_attr)
{
LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, "
"nlanes %p, create_attr %p", target, pool_set_name,
pool_addr, pool_size, nlanes, create_attr);
os_once(&Rpmem_fork_unsafe_key_once, &rpmem_fip_probe_fork_safety);
RPMEM_CHECK_FORK();
rpmem_log_args("create", target, pool_set_name,
pool_addr, pool_size, *nlanes);
if (rpmem_check_args(pool_addr, pool_size, nlanes))
return NULL;
RPMEMpool *rpp = rpmem_common_init(target);
if (!rpp)
goto err_common_init;
size_t buff_size = RPMEM_DEF_BUFF_SIZE;
struct rpmem_req_attr req = {
.pool_size = pool_size,
.nlanes = min(*nlanes, Rpmem_max_nlanes),
.provider = rpp->provider,
.pool_desc = pool_set_name,
.buff_size = buff_size,
};
struct rpmem_resp_attr resp;
int ret = rpmem_obc_create(rpp->obc, &req, &resp, create_attr);
if (ret) {
RPMEM_LOG(ERR, "!create request failed");
goto err_obc_create;
}
if (create_attr == NULL ||
util_is_zeroed(create_attr, sizeof(*create_attr)))
rpp->no_headers = 1;
rpmem_log_resp("create", &resp);
ret = rpmem_common_fip_init(rpp, &req, &resp,
pool_addr, pool_size, nlanes, buff_size);
if (ret)
goto err_fip_init;
ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp);
if (ret) {
errno = ret;
ERR("!starting monitor thread");
goto err_monitor;
}
return rpp;
err_monitor:
rpmem_common_fip_fini(rpp);
err_fip_init:
rpmem_obc_close(rpp->obc, RPMEM_CLOSE_FLAGS_REMOVE);
err_obc_create:
rpmem_common_fini(rpp, 0);
err_common_init:
return NULL;
}
/*
* rpmem_open -- open remote pool on target node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* pool_addr -- local pool memory address which will be replicated
* pool_size -- required pool size
* nlanes -- number of lanes
* open_attr -- pool attributes, received from remote host
*/
RPMEMpool *
rpmem_open(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
struct rpmem_pool_attr *open_attr)
{
LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, "
"nlanes %p, create_attr %p", target, pool_set_name,
pool_addr, pool_size, nlanes, open_attr);
os_once(&Rpmem_fork_unsafe_key_once, &rpmem_fip_probe_fork_safety);
RPMEM_CHECK_FORK();
rpmem_log_args("open", target, pool_set_name,
pool_addr, pool_size, *nlanes);
if (rpmem_check_args(pool_addr, pool_size, nlanes))
return NULL;
RPMEMpool *rpp = rpmem_common_init(target);
if (!rpp)
goto err_common_init;
size_t buff_size = RPMEM_DEF_BUFF_SIZE;
struct rpmem_req_attr req = {
.pool_size = pool_size,
.nlanes = min(*nlanes, Rpmem_max_nlanes),
.provider = rpp->provider,
.pool_desc = pool_set_name,
.buff_size = buff_size,
};
struct rpmem_resp_attr resp;
int ret = rpmem_obc_open(rpp->obc, &req, &resp, open_attr);
if (ret) {
RPMEM_LOG(ERR, "!open request failed");
goto err_obc_create;
}
if (open_attr == NULL || util_is_zeroed(open_attr, sizeof(*open_attr)))
rpp->no_headers = 1;
rpmem_log_resp("open", &resp);
ret = rpmem_common_fip_init(rpp, &req, &resp,
pool_addr, pool_size, nlanes, buff_size);
if (ret)
goto err_fip_init;
ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp);
if (ret) {
errno = ret;
ERR("!starting monitor thread");
goto err_monitor;
}
return rpp;
err_monitor:
rpmem_common_fip_fini(rpp);
err_fip_init:
rpmem_obc_close(rpp->obc, 0);
err_obc_create:
rpmem_common_fini(rpp, 0);
err_common_init:
return NULL;
}
/*
* rpmem_close -- close remote pool on target node
*/
int
rpmem_close(RPMEMpool *rpp)
{
LOG(3, "rpp %p", rpp);
RPMEM_LOG(INFO, "closing out-of-band connection");
util_fetch_and_or32(&rpp->closing, 1);
rpmem_fip_close(rpp->fip);
int ret = rpmem_obc_close(rpp->obc, 0);
if (ret)
ERR("!close request failed");
RPMEM_LOG(NOTICE, "out-of-band connection closed");
rpmem_common_fip_fini(rpp);
rpmem_common_fini(rpp, 1);
return ret;
}
/*
* rpmem_flush -- flush to target node operation
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of flush operation
* lane -- lane number
* flags -- additional flags
*/
int
rpmem_flush(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d, flags 0x%x",
rpp, offset, length, lane, flags);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (flags & RPMEM_FLUSH_FLAGS_MASK) {
ERR("invalid flags (0x%x)", flags);
errno = EINVAL;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
/*
* By default use RDMA SEND flush mode which has atomicity
* guarantees. For relaxed flush use RDMA WRITE.
*/
unsigned mode = RPMEM_PERSIST_SEND;
if (flags & RPMEM_FLUSH_RELAXED)
mode = RPMEM_FLUSH_WRITE;
int ret = rpmem_fip_flush(rpp->fip, offset, length, lane, mode);
if (unlikely(ret)) {
LOG(2, "flush operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
/*
* rpmem_drain -- drain on target node operation
*
* rpp -- remote pool handle
* lane -- lane number
* flags -- additional flags
*/
int
rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags)
{
LOG(3, "rpp %p, lane %d, flags 0x%x", rpp, lane, flags);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (flags != 0) {
ERR("invalid flags (0x%x)", flags);
errno = EINVAL;
return -1;
}
int ret = rpmem_fip_drain(rpp->fip, lane);
if (unlikely(ret)) {
LOG(2, "drain operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
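/*
 * rpmem_flush() followed by rpmem_drain() on the same lane, e.g.:
 *
 *	rpmem_flush(rpp, offset, length, lane, 0);
 *	rpmem_drain(rpp, lane, 0);
 *
 * corresponds to a single rpmem_persist() call for the same range and
 * lane (see below).
 */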
/*
* rpmem_persist -- persist operation on target node
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of persist operation
* lane -- lane number
*/
int
rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d, flags 0x%x",
rpp, offset, length, lane, flags);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (flags & RPMEM_PERSIST_FLAGS_MASK) {
ERR("invalid flags (0x%x)", flags);
errno = EINVAL;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
/*
* By default use RDMA SEND persist mode which has atomicity
* guarantees. For relaxed persist use RDMA WRITE.
*/
unsigned mode = RPMEM_PERSIST_SEND;
if (flags & RPMEM_PERSIST_RELAXED)
mode = RPMEM_FLUSH_WRITE;
int ret = rpmem_fip_persist(rpp->fip, offset, length,
lane, mode);
if (unlikely(ret)) {
LOG(2, "persist operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
/*
* rpmem_deep_persist -- deep flush operation on target node
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of deep flush operation
* lane -- lane number
*/
int
rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d", rpp, offset, length,
lane);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
int ret = rpmem_fip_persist(rpp->fip, offset, length,
lane, RPMEM_DEEP_PERSIST);
if (unlikely(ret)) {
ERR("persist operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
/*
* rpmem_read -- read data from remote pool:
*
* rpp -- remote pool handle
* buff -- output buffer
* offset -- offset in pool
* length -- length of read operation
*/
int
rpmem_read(RPMEMpool *rpp, void *buff, size_t offset,
size_t length, unsigned lane)
{
LOG(3, "rpp %p, buff %p, offset %zu, length %zu, lane %d", rpp, buff,
offset, length, lane);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE)
LOG(1, "reading from pool at offset (%zu) less than %d bytes",
offset, RPMEM_HDR_SIZE);
int ret = rpmem_fip_read(rpp->fip, buff, length, offset, lane);
if (unlikely(ret)) {
errno = ret;
ERR("!read operation failed");
rpp->error = ret;
return -1;
}
return 0;
}
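/*
 * Illustrative usage sketch (not part of the original source): a minimal
 * replication flow over the public calls above. The target string, pool
 * set name, lane count and the RPMEM_USAGE_EXAMPLE guard are assumptions
 * made for this sketch; pool_addr and pool_size must be page aligned, as
 * enforced by rpmem_check_args().
 */
#ifdef RPMEM_USAGE_EXAMPLE
static int
example_replicate(void *pool_addr, size_t pool_size)
{
	unsigned nlanes = 4;
	/* NULL attributes create the pool with zeroed (no-header) attributes */
	RPMEMpool *rpp = rpmem_create("user@target", "pool.set",
			pool_addr, pool_size, &nlanes, NULL);
	if (rpp == NULL)
		return -1;
	/* make the first page past the header durable remotely, on lane 0 */
	int ret = rpmem_persist(rpp, RPMEM_HDR_SIZE, 4096, 0, 0);
	if (rpmem_close(rpp))
		ret = -1;
	return ret;
}
#endif /* RPMEM_USAGE_EXAMPLE */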
/*
* rpmem_set_attr -- overwrite pool attributes on the remote node
*
* rpp -- remote pool handle
* attr -- new pool attributes for the pool on remote node
*/
int
rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr)
{
LOG(3, "rpp %p, attr %p", rpp, attr);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
int ret = rpmem_obc_set_attr(rpp->obc, attr);
if (ret) {
RPMEM_LOG(ERR, "!set attributes request failed");
}
return ret;
}
/*
* rpmem_remove -- remove pool from remote node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* flags -- bitwise OR of one or more of the following flags:
* - RPMEM_REMOVE_FORCE
* - RPMEM_REMOVE_POOL_SET
*/
int
rpmem_remove(const char *target, const char *pool_set, int flags)
{
LOG(3, "target %s, pool_set %s, flags %d", target, pool_set, flags);
if (flags & ~(RPMEM_REMOVE_FLAGS_ALL)) {
ERR("invalid flags specified");
errno = EINVAL;
return -1;
}
struct rpmem_target_info *info = rpmem_target_parse(target);
if (!info) {
ERR("!parsing target node address failed");
goto err_target;
}
const char *argv[5];
argv[0] = "--remove";
argv[1] = pool_set;
const char **cur = &argv[2];
if (flags & RPMEM_REMOVE_FORCE)
*cur++ = "--force";
if (flags & RPMEM_REMOVE_POOL_SET)
*cur++ = "--pool-set";
*cur = NULL;
struct rpmem_ssh *ssh = rpmem_ssh_execv(info, argv);
if (!ssh) {
ERR("!executing ssh command failed");
goto err_ssh_exec;
}
int ret;
ret = rpmem_ssh_monitor(ssh, 0);
if (ret) {
ERR("!waiting for remote command failed");
goto err_ssh_monitor;
}
ret = rpmem_ssh_close(ssh);
if (ret) {
errno = ret;
ERR("remote command failed");
goto err_ssh_close;
}
rpmem_target_free(info);
return 0;
err_ssh_monitor:
rpmem_ssh_close(ssh);
err_ssh_close:
err_ssh_exec:
rpmem_target_free(info);
err_target:
return -1;
}
#if FAULT_INJECTION
void
rpmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
return core_inject_fault_at(type, nth, at);
}
int
rpmem_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
| 20,542 | 21.451366 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_util.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_util.h -- util functions for librpmem header file
*/
#ifndef RPMEM_UTIL_H
#define RPMEM_UTIL_H 1
#ifdef __cplusplus
extern "C" {
#endif
enum {
LERR = 1,
LWARN = 2,
LNOTICE = 3,
LINFO = 4,
_LDBG = 10,
};
#define RPMEM_LOG(level, fmt, args...) LOG(L##level, fmt, ## args)
#define RPMEM_DBG(fmt, args...) LOG(_LDBG, fmt, ## args)
#define RPMEM_FATAL(fmt, args...) FATAL(fmt, ## args)
#define RPMEM_ASSERT(cond) ASSERT(cond)
#define RPMEM_PERSIST_FLAGS_ALL RPMEM_PERSIST_RELAXED
#define RPMEM_PERSIST_FLAGS_MASK ((unsigned)(~RPMEM_PERSIST_FLAGS_ALL))
#define RPMEM_FLUSH_FLAGS_ALL RPMEM_FLUSH_RELAXED
#define RPMEM_FLUSH_FLAGS_MASK ((unsigned)(~RPMEM_FLUSH_FLAGS_ALL))
const char *rpmem_util_proto_errstr(enum rpmem_err err);
int rpmem_util_proto_errno(enum rpmem_err err);
void rpmem_util_cmds_init(void);
void rpmem_util_cmds_fini(void);
const char *rpmem_util_cmd_get(void);
void rpmem_util_get_env_max_nlanes(unsigned *max_nlanes);
void rpmem_util_get_env_wq_size(unsigned *wq_size);
#ifdef __cplusplus
}
#endif
#endif
| 1,137 | 22.708333 | 71 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_obc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc.h -- rpmem out-of-band connection client header file
*/
#ifndef RPMEM_OBC_H
#define RPMEM_OBC_H 1
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_obc;
struct rpmem_obc *rpmem_obc_init(void);
void rpmem_obc_fini(struct rpmem_obc *rpc);
int rpmem_obc_connect(struct rpmem_obc *rpc,
const struct rpmem_target_info *info);
int rpmem_obc_disconnect(struct rpmem_obc *rpc);
int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock);
int rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr);
int rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_close(struct rpmem_obc *rpc, int flags);
#ifdef __cplusplus
}
#endif
#endif
| 1,100 | 21.9375 | 65 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_obc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_obc.c -- rpmem out-of-band connection client source file
*/
#include <stdlib.h>
#include <netdb.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_obc.h"
#include "rpmem_proto.h"
#include "rpmem_util.h"
#include "rpmem_ssh.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
/*
* rpmem_obc -- rpmem out-of-band client connection handle
*/
struct rpmem_obc {
struct rpmem_ssh *ssh;
};
/*
* rpmem_obc_is_connected -- (internal) return non-zero value if client is
* connected
*/
static inline int
rpmem_obc_is_connected(struct rpmem_obc *rpc)
{
return rpc->ssh != NULL;
}
/*
* rpmem_obc_check_ibc_attr -- (internal) check in-band connection
* attributes
*/
static int
rpmem_obc_check_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
if (ibc->port == 0 || ibc->port > UINT16_MAX) {
ERR("invalid port number received -- %u", ibc->port);
errno = EPROTO;
return -1;
}
if (ibc->persist_method != RPMEM_PM_GPSPM &&
ibc->persist_method != RPMEM_PM_APM) {
ERR("invalid persistency method received -- %u",
ibc->persist_method);
errno = EPROTO;
return -1;
}
return 0;
}
/*
* rpmem_obc_check_port -- (internal) verify target node port number
*/
static int
rpmem_obc_check_port(const struct rpmem_target_info *info)
{
if (!(info->flags & RPMEM_HAS_SERVICE))
return 0;
if (*info->service == '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
errno = 0;
char *endptr;
long port = strtol(info->service, &endptr, 10);
if (errno || *endptr != '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
if (port < 1) {
ERR("port number must be positive -- '%s'", info->service);
goto err;
}
if (port > UINT16_MAX) {
ERR("port number too large -- '%s'", info->service);
goto err;
}
return 0;
err:
errno = EINVAL;
return -1;
}
/*
* rpmem_obc_close_conn -- (internal) close connection
*/
static void
rpmem_obc_close_conn(struct rpmem_obc *rpc)
{
rpmem_ssh_close(rpc->ssh);
(void) util_fetch_and_and64(&rpc->ssh, 0);
}
/*
 * rpmem_obc_set_msg_hdr -- (internal) set message header
*/
static void
rpmem_obc_set_msg_hdr(struct rpmem_msg_hdr *hdrp,
enum rpmem_msg_type type, size_t size)
{
hdrp->type = type;
hdrp->size = size;
}
/*
* rpmem_obc_set_pool_desc -- (internal) fill the pool descriptor field
*/
static void
rpmem_obc_set_pool_desc(struct rpmem_msg_pool_desc *pool_desc,
const char *desc, size_t size)
{
RPMEM_ASSERT(size <= UINT32_MAX);
RPMEM_ASSERT(size > 0);
pool_desc->size = (uint32_t)size;
memcpy(pool_desc->desc, desc, size);
pool_desc->desc[size - 1] = '\0';
}
/*
* rpmem_obc_alloc_create_msg -- (internal) allocate and fill create request
* message
*/
static struct rpmem_msg_create *
rpmem_obc_alloc_create_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
struct rpmem_msg_create *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate create request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_CREATE, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
if (pool_attr) {
pack_rpmem_pool_attr(pool_attr, &msg->pool_attr);
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg->pool_attr, 0, sizeof(msg->pool_attr));
}
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_req -- (internal) check request attributes
*/
static int
rpmem_obc_check_req(const struct rpmem_req_attr *req)
{
if (req->provider >= MAX_RPMEM_PROV) {
ERR("invalid provider specified -- %u", req->provider);
errno = EINVAL;
return -1;
}
return 0;
}
/*
* rpmem_obc_check_hdr_resp -- (internal) check response message header
*/
static int
rpmem_obc_check_hdr_resp(struct rpmem_msg_hdr_resp *resp,
enum rpmem_msg_type type, size_t size)
{
if (resp->type != type) {
ERR("invalid message type received -- %u", resp->type);
errno = EPROTO;
return -1;
}
if (resp->size != size) {
ERR("invalid message size received -- %lu", resp->size);
errno = EPROTO;
return -1;
}
if (resp->status >= MAX_RPMEM_ERR) {
ERR("invalid status received -- %u", resp->status);
errno = EPROTO;
return -1;
}
if (resp->status) {
enum rpmem_err status = (enum rpmem_err)resp->status;
ERR("%s", rpmem_util_proto_errstr(status));
errno = rpmem_util_proto_errno(status);
return -1;
}
return 0;
}
/*
* rpmem_obc_check_create_resp -- (internal) check create response message
*/
static int
rpmem_obc_check_create_resp(struct rpmem_msg_create_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CREATE_RESP,
sizeof(struct rpmem_msg_create_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_get_res -- (internal) read response attributes
*/
static void
rpmem_obc_get_res(struct rpmem_resp_attr *res,
struct rpmem_msg_ibc_attr *ibc)
{
res->port = (unsigned short)ibc->port;
res->rkey = ibc->rkey;
res->raddr = ibc->raddr;
res->persist_method =
(enum rpmem_persist_method)ibc->persist_method;
res->nlanes = ibc->nlanes;
}
/*
* rpmem_obc_alloc_open_msg -- (internal) allocate and fill open request message
*/
static struct rpmem_msg_open *
rpmem_obc_alloc_open_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
struct rpmem_msg_open *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate open request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_OPEN, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_open_resp -- (internal) check open response message
*/
static int
rpmem_obc_check_open_resp(struct rpmem_msg_open_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_OPEN_RESP,
sizeof(struct rpmem_msg_open_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_check_close_resp -- (internal) check close response message
*/
static int
rpmem_obc_check_close_resp(struct rpmem_msg_close_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CLOSE_RESP,
sizeof(struct rpmem_msg_close_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_check_set_attr_resp -- (internal) check set attributes response
* message
*/
static int
rpmem_obc_check_set_attr_resp(struct rpmem_msg_set_attr_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_SET_ATTR_RESP,
sizeof(struct rpmem_msg_set_attr_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_init -- initialize rpmem obc handle
*/
struct rpmem_obc *
rpmem_obc_init(void)
{
struct rpmem_obc *rpc = calloc(1, sizeof(*rpc));
if (!rpc) {
RPMEM_LOG(ERR, "!allocation of rpmem obc failed");
return NULL;
}
return rpc;
}
/*
* rpmem_obc_fini -- destroy rpmem obc handle
*
* This function must be called with the connection already closed - after
* calling rpmem_obc_disconnect or after receiving the relevant value from
* rpmem_obc_monitor.
*/
void
rpmem_obc_fini(struct rpmem_obc *rpc)
{
free(rpc);
}
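/*
 * Illustrative sketch (not part of the build): the expected life cycle of
 * an out-of-band connection handle. How the target info and attributes are
 * obtained is left out, and the error handling is minimal on purpose.
 */
#if 0
static int
rpmem_obc_lifecycle_example(const struct rpmem_target_info *info,
	const struct rpmem_req_attr *req, struct rpmem_resp_attr *res,
	const struct rpmem_pool_attr *pool_attr)
{
	struct rpmem_obc *rpc = rpmem_obc_init();
	if (!rpc)
		return -1;
	if (rpmem_obc_connect(rpc, info))
		goto err_fini;
	if (rpmem_obc_create(rpc, req, res, pool_attr))
		goto err_disconnect;
	/* ... in-band traffic to the created pool happens here ... */
	(void) rpmem_obc_close(rpc, 0);
err_disconnect:
	(void) rpmem_obc_disconnect(rpc); /* must precede rpmem_obc_fini */
err_fini:
	rpmem_obc_fini(rpc);
	return 0;
}
#endif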
/*
* rpmem_obc_connect -- connect to target node
*
* Connects to the target node. The target must be in the following format:
* <addr>[:<port>]. If the port number is not specified, the default
* ssh port is used. The <addr> part is translated into an IP address.
*
* Returns an error if connection is already established.
*/
int
rpmem_obc_connect(struct rpmem_obc *rpc, const struct rpmem_target_info *info)
{
if (rpmem_obc_is_connected(rpc)) {
errno = EALREADY;
goto err_notconnected;
}
if (rpmem_obc_check_port(info))
goto err_port;
rpc->ssh = rpmem_ssh_open(info);
if (!rpc->ssh)
goto err_ssh_open;
return 0;
err_ssh_open:
err_port:
err_notconnected:
return -1;
}
/*
* rpmem_obc_disconnect -- close the connection to target node
*
* Returns error if socket is not connected.
*/
int
rpmem_obc_disconnect(struct rpmem_obc *rpc)
{
if (rpmem_obc_is_connected(rpc)) {
rpmem_obc_close_conn(rpc);
return 0;
}
errno = ENOTCONN;
return -1;
}
/*
* rpmem_obc_monitor -- monitor connection with target node
*
* The nonblock variable indicates whether this function should return
* immediately (= 1) or may block (= 0).
*
* If the function detects that the socket was closed by the remote peer,
* it is closed on the local side as well, so there is no need to call the
* rpmem_obc_disconnect function. Please take a look at the functions'
* descriptions to see which of them cannot be used once the connection
* has been closed.
*
* This function expects there is no data pending on socket, if any data
* is pending this function returns an error and sets errno to EPROTO.
*
* Return values:
* 0 - not connected
* 1 - connected
* < 0 - error
*/
int
rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock)
{
if (!rpmem_obc_is_connected(rpc))
return 0;
return rpmem_ssh_monitor(rpc->ssh, nonblock);
}
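/*
 * Illustrative sketch (not part of the build): non-blocking polling of the
 * out-of-band connection, e.g. from a monitoring thread.
 */
#if 0
	switch (rpmem_obc_monitor(rpc, 1 /* nonblock */)) {
	case 1:		/* still connected -- nothing to do */
		break;
	case 0:		/* remote peer closed the connection; the local side
			 * is already closed, so skip rpmem_obc_disconnect()
			 * and call rpmem_obc_fini() only */
		break;
	default:	/* < 0 -- error, errno is set (EPROTO if unexpected
			 * data was pending on the socket) */
		break;
	}
#endif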
/*
* rpmem_obc_create -- perform create request operation
*
* Returns error if connection has not been established yet.
*/
int
rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_create *msg =
rpmem_obc_alloc_create_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending create request message");
rpmem_hton_msg_create(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending create request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "create request message sent");
RPMEM_LOG(INFO, "receiving create request response");
struct rpmem_msg_create_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving create request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "create request response received");
rpmem_ntoh_msg_create_resp(&resp);
if (rpmem_obc_check_create_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_open -- perform open request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_open *msg =
rpmem_obc_alloc_open_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending open request message");
rpmem_hton_msg_open(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending open request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "open request message sent");
RPMEM_LOG(INFO, "receiving open request response");
struct rpmem_msg_open_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) {
ERR("!receiving open request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "open request response received");
rpmem_ntoh_msg_open_resp(&resp);
if (rpmem_obc_check_open_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
if (pool_attr)
unpack_rpmem_pool_attr(&resp.pool_attr, pool_attr);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_set_attr -- perform set attributes request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
struct rpmem_msg_set_attr msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_SET_ATTR, sizeof(msg));
if (pool_attr) {
memcpy(&msg.pool_attr, pool_attr, sizeof(msg.pool_attr));
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg.pool_attr, 0, sizeof(msg.pool_attr));
}
RPMEM_LOG(INFO, "sending set attributes request message");
rpmem_hton_msg_set_attr(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
ERR("!sending set attributes request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "set attributes request message sent");
RPMEM_LOG(INFO, "receiving set attributes request response");
struct rpmem_msg_set_attr_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving set attributes request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "set attributes request response received");
rpmem_ntoh_msg_set_attr_resp(&resp);
if (rpmem_obc_check_set_attr_resp(&resp))
goto err_msg_resp;
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
err_notconnected:
return -1;
}
/*
* rpmem_obc_close -- perform close request operation
*
* Returns error if connection is not already established.
*
* NOTE: this function does not close the connection; it sends a close
* request message to the remote node and receives a response. The
* connection itself must be closed using the rpmem_obc_disconnect function.
*/
int
rpmem_obc_close(struct rpmem_obc *rpc, int flags)
{
if (!rpmem_obc_is_connected(rpc)) {
errno = ENOTCONN;
return -1;
}
struct rpmem_msg_close msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_CLOSE, sizeof(msg));
msg.flags = (uint32_t)flags;
RPMEM_LOG(INFO, "sending close request message");
rpmem_hton_msg_close(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
RPMEM_LOG(ERR, "!sending close request failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request message sent");
RPMEM_LOG(INFO, "receiving close request response");
struct rpmem_msg_close_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
RPMEM_LOG(ERR, "!receiving close request response failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request response received");
rpmem_ntoh_msg_close_resp(&resp);
if (rpmem_obc_check_close_resp(&resp))
return -1;
return 0;
}
| 15,410 | 21.730088 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemblk/blk.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* blk.h -- internal definitions for libpmem blk module
*/
#ifndef BLK_H
#define BLK_H 1
#include <stddef.h>
#include "ctl.h"
#include "os_thread.h"
#include "pool_hdr.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMBLK_LOG_PREFIX "libpmemblk"
#define PMEMBLK_LOG_LEVEL_VAR "PMEMBLK_LOG_LEVEL"
#define PMEMBLK_LOG_FILE_VAR "PMEMBLK_LOG_FILE"
/* attributes of the blk memory pool format for the pool header */
#define BLK_HDR_SIG "PMEMBLK" /* must be 8 bytes including '\0' */
#define BLK_FORMAT_MAJOR 1
#define BLK_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define BLK_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t blk_format_feat_default = BLK_FORMAT_FEAT_DEFAULT;
struct pmemblk {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint32_t bsize; /* block size */
/* flag indicating if the pool was zero-initialized */
int is_zeroed;
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
void *data; /* post-header data area */
size_t datasize; /* size of data area */
size_t nlba; /* number of LBAs in pool */
struct btt *bttp; /* btt handle */
unsigned nlane; /* number of lanes */
unsigned next_lane; /* used to rotate through lanes */
os_mutex_t *locks; /* one per lane */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
#ifdef DEBUG
/* held during read/write mprotected sections */
os_mutex_t write_lock;
#endif
};
/* data area starts at this alignment after the struct pmemblk above */
#define BLK_FORMAT_DATA_ALIGN ((uintptr_t)PMEM_PAGESIZE)
#if FAULT_INJECTION
void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemblk_fault_injection_enabled(void);
#else
static inline void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemblk_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 2,483 | 23.116505 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemblk/libpmemblk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* libpmemblk.c -- pmem entry points for libpmemblk
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmemblk.h"
#include "ctl_global.h"
#include "pmemcommon.h"
#include "blk.h"
/*
* The variable from which the config is directly loaded. The string
* cannot contain any comments or extraneous whitespace characters.
*/
#define BLK_CONFIG_ENV_VARIABLE "PMEMBLK_CONF"
/*
* The variable that points to a config file from which the config is loaded.
*/
#define BLK_CONFIG_FILE_ENV_VARIABLE "PMEMBLK_CONF_FILE"
/*
* blk_ctl_init_and_load -- (static) initializes CTL and loads configuration
* from env variable and file
*/
static int
blk_ctl_init_and_load(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
if (pbp != NULL && (pbp->ctl = ctl_new()) == NULL) {
LOG(2, "!ctl_new");
return -1;
}
char *env_config = os_getenv(BLK_CONFIG_ENV_VARIABLE);
if (env_config != NULL) {
if (ctl_load_config_from_string(pbp ? pbp->ctl : NULL,
pbp, env_config) != 0) {
LOG(2, "unable to parse config stored in %s "
"environment variable",
BLK_CONFIG_ENV_VARIABLE);
goto err;
}
}
char *env_config_file = os_getenv(BLK_CONFIG_FILE_ENV_VARIABLE);
if (env_config_file != NULL && env_config_file[0] != '\0') {
if (ctl_load_config_from_file(pbp ? pbp->ctl : NULL,
pbp, env_config_file) != 0) {
LOG(2, "unable to parse config stored in %s "
"file (from %s environment variable)",
env_config_file,
BLK_CONFIG_FILE_ENV_VARIABLE);
goto err;
}
}
return 0;
err:
if (pbp)
ctl_delete(pbp->ctl);
return -1;
}
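/*
 * Illustrative sketch (not part of the build): loading a ctl config through
 * the environment before the pool is created or opened. The query string
 * and the file path below are hypothetical examples.
 */
#if 0
	setenv(BLK_CONFIG_ENV_VARIABLE, "sds.at_create=0", 1);
	/* or point to a config file instead: */
	setenv(BLK_CONFIG_FILE_ENV_VARIABLE, "/etc/pmemblk.conf", 1);
#endif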
/*
* libpmemblk_init -- (internal) load-time initialization for blk
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemblk_init(void)
{
ctl_global_register();
if (blk_ctl_init_and_load(NULL))
FATAL("error: %s", pmemblk_errormsg());
common_init(PMEMBLK_LOG_PREFIX, PMEMBLK_LOG_LEVEL_VAR,
PMEMBLK_LOG_FILE_VAR, PMEMBLK_MAJOR_VERSION,
PMEMBLK_MINOR_VERSION);
LOG(3, NULL);
}
/*
* libpmemblk_fini -- libpmemblk cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemblk_fini(void)
{
LOG(3, NULL);
common_fini();
}
/*
* pmemblk_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMBLK_MAJOR_VERSION) {
ERR("libpmemblk major version mismatch (need %u, found %u)",
major_required, PMEMBLK_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMBLK_MINOR_VERSION) {
ERR("libpmemblk minor version mismatch (need %u, found %u)",
minor_required, PMEMBLK_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_check_version -- see if lib meets application version requirements
*/
const char *
pmemblk_check_version(unsigned major_required, unsigned minor_required)
{
return pmemblk_check_versionU(major_required, minor_required);
}
#else
/*
* pmemblk_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemblk_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemblk_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmemblk_set_funcs -- allow overriding libpmemblk's call to malloc, etc.
*/
void
pmemblk_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
/*
* pmemblk_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemblk_errormsg -- return last error message
*/
const char *
pmemblk_errormsg(void)
{
return pmemblk_errormsgU();
}
#else
/*
* pmemblk_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemblk_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
| 4,318 | 20.487562 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemblk/btt.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* btt.h -- btt module definitions
*/
#ifndef BTT_H
#define BTT_H 1
#ifdef __cplusplus
extern "C" {
#endif
/* callback functions passed to btt_init() */
struct ns_callback {
int (*nsread)(void *ns, unsigned lane,
void *buf, size_t count, uint64_t off);
int (*nswrite)(void *ns, unsigned lane,
const void *buf, size_t count, uint64_t off);
int (*nszero)(void *ns, unsigned lane, size_t count, uint64_t off);
ssize_t (*nsmap)(void *ns, unsigned lane, void **addrp,
size_t len, uint64_t off);
void (*nssync)(void *ns, unsigned lane, void *addr, size_t len);
int ns_is_zeroed;
};
struct btt_info;
struct btt *btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[],
unsigned maxlane, void *ns, const struct ns_callback *ns_cbp);
unsigned btt_nlane(struct btt *bttp);
size_t btt_nlba(struct btt *bttp);
int btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf);
int btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf);
int btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_check(struct btt *bttp);
void btt_fini(struct btt *bttp);
uint64_t btt_flog_size(uint32_t nfree);
uint64_t btt_map_size(uint32_t external_nlba);
uint64_t btt_arena_datasize(uint64_t arena_size, uint32_t nfree);
int btt_info_set(struct btt_info *info, uint32_t external_lbasize,
uint32_t nfree, uint64_t arena_size, uint64_t space_left);
struct btt_flog *btt_flog_get_valid(struct btt_flog *flog_pair, int *next);
int map_entry_is_initial(uint32_t map_entry);
void btt_info_convert2h(struct btt_info *infop);
void btt_info_convert2le(struct btt_info *infop);
void btt_flog_convert2h(struct btt_flog *flogp);
void btt_flog_convert2le(struct btt_flog *flogp);
#ifdef __cplusplus
}
#endif
#endif
| 1,908 | 30.816667 | 79 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemblk/btt_layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* btt_layout.h -- block translation table on-media layout definitions
*/
/*
* Layout of BTT info block. All integers are stored little-endian.
*/
#ifndef BTT_LAYOUT_H
#define BTT_LAYOUT_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define BTT_ALIGNMENT ((uintptr_t)4096) /* alignment of all BTT structures */
#define BTTINFO_SIG_LEN 16
#define BTTINFO_UUID_LEN 16
#define BTTINFO_UNUSED_LEN 3968
#define BTTINFO_SIG "BTT_ARENA_INFO\0"
struct btt_info {
char sig[BTTINFO_SIG_LEN]; /* must be "BTT_ARENA_INFO\0\0" */
uint8_t uuid[BTTINFO_UUID_LEN]; /* BTT UUID */
uint8_t parent_uuid[BTTINFO_UUID_LEN]; /* UUID of container */
uint32_t flags; /* see flag bits below */
uint16_t major; /* major version */
uint16_t minor; /* minor version */
uint32_t external_lbasize; /* advertised LBA size (bytes) */
uint32_t external_nlba; /* advertised LBAs in this arena */
uint32_t internal_lbasize; /* size of data area blocks (bytes) */
uint32_t internal_nlba; /* number of blocks in data area */
uint32_t nfree; /* number of free blocks */
uint32_t infosize; /* size of this info block */
/*
* The following offsets are relative to the beginning of
* the btt_info block.
*/
uint64_t nextoff; /* offset to next arena (or zero) */
uint64_t dataoff; /* offset to arena data area */
uint64_t mapoff; /* offset to area map */
uint64_t flogoff; /* offset to area flog */
uint64_t infooff; /* offset to backup info block */
char unused[BTTINFO_UNUSED_LEN]; /* must be zero */
uint64_t checksum; /* Fletcher64 of all fields */
};
/*
* Definitions for flags mask for btt_info structure above.
*/
#define BTTINFO_FLAG_ERROR 0x00000001 /* error state (read-only) */
#define BTTINFO_FLAG_ERROR_MASK 0x00000001 /* all error bits */
/*
* Current on-media format versions.
*/
#define BTTINFO_MAJOR_VERSION 1
#define BTTINFO_MINOR_VERSION 1
/*
* Layout of a BTT "flog" entry. All integers are stored little-endian.
*
* The "nfree" field in the BTT info block determines how many of these
* flog entries there are, and each entry consists of two of the following
* structs (entry updates alternate between the two structs), padded up
* to a cache line boundary to isolate adjacent updates.
*/
#define BTT_FLOG_PAIR_ALIGN ((uintptr_t)64)
struct btt_flog {
uint32_t lba; /* last pre-map LBA using this entry */
uint32_t old_map; /* old post-map LBA (the freed block) */
uint32_t new_map; /* new post-map LBA */
uint32_t seq; /* sequence number (01, 10, 11) */
};
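/*
 * Illustrative sketch (not part of the build): seq cycles through 01, 10,
 * 11 (zero means the flog entry was never used), so the member of a flog
 * pair whose seq is the successor of the other's is the most recent one.
 * One way to compute the successor in this cyclic order:
 */
#if 0
static const uint32_t Nseq[] = { 0, 2, 3, 1 };	/* 1 -> 2 -> 3 -> 1 */
#define	NSEQ(seq) (Nseq[(seq) & 3])
#endif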
/*
* Layout of a BTT "map" entry. 4-byte internal LBA offset, little-endian.
*/
#define BTT_MAP_ENTRY_SIZE 4
#define BTT_MAP_ENTRY_ERROR 0x40000000U
#define BTT_MAP_ENTRY_ZERO 0x80000000U
#define BTT_MAP_ENTRY_NORMAL 0xC0000000U
#define BTT_MAP_ENTRY_LBA_MASK 0x3fffffffU
#define BTT_MAP_LOCK_ALIGN ((uintptr_t)64)
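/*
 * Illustrative sketch (not part of the build): the two top bits of a map
 * entry carry the block state and the rest is the post-map LBA; both top
 * bits clear means the entry was never written, in which case the post-map
 * LBA equals the pre-map LBA. The helper names below are made up.
 */
#if 0
static inline uint32_t
map_entry_lba(uint32_t entry)
{
	return entry & BTT_MAP_ENTRY_LBA_MASK;
}
static inline int
map_entry_is_zeroed(uint32_t entry)
{
	return (entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ZERO;
}
#endif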
/*
* BTT layout properties...
*/
#define BTT_MIN_SIZE ((1u << 20) * 16)
#define BTT_MAX_ARENA (1ull << 39) /* 512GB per arena */
#define BTT_MIN_LBA_SIZE (size_t)512
#define BTT_INTERNAL_LBA_ALIGNMENT 256U
#define BTT_DEFAULT_NFREE 256
#ifdef __cplusplus
}
#endif
#endif
| 3,197 | 28.611111 | 77 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemblk/blk.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* blk.c -- block memory pool entry points for libpmem
*/
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <stdint.h>
#include <endian.h>
#include <stdbool.h>
#include "libpmem.h"
#include "libpmemblk.h"
#include "mmap.h"
#include "set.h"
#include "out.h"
#include "btt.h"
#include "blk.h"
#include "util.h"
#include "sys_util.h"
#include "util_pmem.h"
#include "valgrind_internal.h"
static const struct pool_attr Blk_create_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Blk_open_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
/*
* lane_enter -- (internal) acquire a unique lane number
*/
static void
lane_enter(PMEMblkpool *pbp, unsigned *lane)
{
unsigned mylane;
mylane = util_fetch_and_add32(&pbp->next_lane, 1) % pbp->nlane;
/* lane selected, grab the per-lane lock */
util_mutex_lock(&pbp->locks[mylane]);
*lane = mylane;
}
/*
* lane_exit -- (internal) drop lane lock
*/
static void
lane_exit(PMEMblkpool *pbp, unsigned mylane)
{
util_mutex_unlock(&pbp->locks[mylane]);
}
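/*
 * Illustrative sketch (not part of the build): every BTT operation below
 * (read, write, set_zero, set_error) brackets its btt_*() call with this
 * pair:
 */
#if 0
	unsigned lane;
	lane_enter(pbp, &lane);
	/* ... single btt_*() call on this lane ... */
	lane_exit(pbp, lane);
#endif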
/*
* nsread -- (internal) read data from the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nsread(void *ns, unsigned lane, void *buf, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
memcpy(buf, (char *)pbp->data + off, count);
return 0;
}
/*
* nswrite -- (internal) write data to the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nswrite(void *ns, unsigned lane, const void *buf, size_t count,
uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
#ifdef DEBUG
/* grab debug write lock */
util_mutex_lock(&pbp->write_lock);
#endif
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
if (pbp->is_pmem)
pmem_memcpy_nodrain(dest, buf, count);
else
memcpy(dest, buf, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
#ifdef DEBUG
/* release debug write lock */
util_mutex_unlock(&pbp->write_lock);
#endif
if (pbp->is_pmem)
pmem_drain();
else
pmem_msync(dest, count);
return 0;
}
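/*
 * Illustrative sketch (not part of the build): pmem_memcpy_nodrain() above
 * flushes the stores but defers the ordering fence, so several copies can
 * share a single pmem_drain(). The destinations below are hypothetical.
 */
#if 0
	pmem_memcpy_nodrain(dst1, src1, len1);	/* flushed, not drained */
	pmem_memcpy_nodrain(dst2, src2, len2);	/* flushed, not drained */
	pmem_drain();				/* one fence for both copies */
#endif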
/*
* nsmap -- (internal) allow direct access to a range of a namespace
*
* The caller requests a range to be "mapped" but the return value
* may indicate a smaller amount (in which case the caller is expected
* to call back later for another mapping).
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static ssize_t
nsmap(void *ns, unsigned lane, void **addrp, size_t len, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u len %zu off %" PRIu64, pbp, lane, len, off);
ASSERT(((ssize_t)len) >= 0);
if (off + len >= pbp->datasize) {
ERR("offset + len (%zu) past end of data area (%zu)",
(size_t)off + len, pbp->datasize - 1);
errno = EINVAL;
return -1;
}
/*
* Since the entire file is memory-mapped, this callback
* can always provide the entire length requested.
*/
*addrp = (char *)pbp->data + off;
LOG(12, "returning addr %p", *addrp);
return (ssize_t)len;
}
/*
* nssync -- (internal) flush changes made to a namespace range
*
* This is used in conjunction with the addresses handed out by
* nsmap() above. There's no need to sync things written via
* nswrite() since those changes are flushed each time nswrite()
* is called.
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static void
nssync(void *ns, unsigned lane, void *addr, size_t len)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u addr %p len %zu", pbp, lane, addr, len);
if (pbp->is_pmem)
pmem_persist(addr, len);
else
pmem_msync(addr, len);
}
/*
* nszero -- (internal) zero data in the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* zero the memory pool containing the BTT layout.
*/
static int
nszero(void *ns, unsigned lane, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
pmem_memset_persist(dest, 0, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
return 0;
}
/* callbacks for btt_init() */
static struct ns_callback ns_cb = {
.nsread = nsread,
.nswrite = nswrite,
.nszero = nszero,
.nsmap = nsmap,
.nssync = nssync,
.ns_is_zeroed = 0
};
/*
* blk_descr_create -- (internal) create block memory pool descriptor
*/
static void
blk_descr_create(PMEMblkpool *pbp, uint32_t bsize, int zeroed)
{
LOG(3, "pbp %p bsize %u zeroed %d", pbp, bsize, zeroed);
/* create the required metadata */
pbp->bsize = htole32(bsize);
util_persist(pbp->is_pmem, &pbp->bsize, sizeof(bsize));
pbp->is_zeroed = zeroed;
util_persist(pbp->is_pmem, &pbp->is_zeroed, sizeof(pbp->is_zeroed));
}
/*
* blk_descr_check -- (internal) validate block memory pool descriptor
*/
static int
blk_descr_check(PMEMblkpool *pbp, size_t *bsize)
{
LOG(3, "pbp %p bsize %zu", pbp, *bsize);
size_t hdr_bsize = le32toh(pbp->bsize);
if (*bsize && *bsize != hdr_bsize) {
ERR("wrong bsize (%zu), pool created with bsize %zu",
*bsize, hdr_bsize);
errno = EINVAL;
return -1;
}
*bsize = hdr_bsize;
LOG(3, "using block size from header: %zu", *bsize);
return 0;
}
/*
* blk_runtime_init -- (internal) initialize block memory pool runtime data
*/
static int
blk_runtime_init(PMEMblkpool *pbp, size_t bsize, int rdonly)
{
LOG(3, "pbp %p bsize %zu rdonly %d",
pbp, bsize, rdonly);
/* remove volatile part of header */
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
sizeof(struct pool_hdr) -
sizeof(pbp->bsize) -
sizeof(pbp->is_zeroed));
/*
* Use some of the memory pool area for run-time info. This
* run-time state is never loaded from the file; it is always
* created here, so there is no need to worry about byte order.
*/
pbp->rdonly = rdonly;
pbp->data = (char *)pbp->addr +
roundup(sizeof(*pbp), BLK_FORMAT_DATA_ALIGN);
ASSERT(((char *)pbp->addr + pbp->size) >= (char *)pbp->data);
pbp->datasize = (size_t)
(((char *)pbp->addr + pbp->size) - (char *)pbp->data);
LOG(4, "data area %p data size %zu bsize %zu",
pbp->data, pbp->datasize, bsize);
long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus < 1)
ncpus = 1;
ns_cb.ns_is_zeroed = pbp->is_zeroed;
/* things free by "goto err" if not NULL */
struct btt *bttp = NULL;
os_mutex_t *locks = NULL;
bttp = btt_init(pbp->datasize, (uint32_t)bsize, pbp->hdr.poolset_uuid,
(unsigned)ncpus * 2, pbp, &ns_cb);
if (bttp == NULL)
goto err; /* btt_init set errno, called LOG */
pbp->bttp = bttp;
pbp->nlane = btt_nlane(pbp->bttp);
pbp->next_lane = 0;
if ((locks = Malloc(pbp->nlane * sizeof(*locks))) == NULL) {
ERR("!Malloc for lane locks");
goto err;
}
for (unsigned i = 0; i < pbp->nlane; i++)
util_mutex_init(&locks[i]);
pbp->locks = locks;
#ifdef DEBUG
/* initialize debug lock */
util_mutex_init(&pbp->write_lock);
#endif
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
RANGE_NONE(pbp->addr, sizeof(struct pool_hdr), pbp->is_dev_dax);
/* the data area should be kept read-only for debug version */
RANGE_RO(pbp->data, pbp->datasize, pbp->is_dev_dax);
return 0;
err:
LOG(4, "error clean up");
int oerrno = errno;
if (bttp)
btt_fini(bttp);
errno = oerrno;
return -1;
}
/*
* pmemblk_createU -- create a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
LOG(3, "path %s bsize %zu poolsize %zu mode %o",
path, bsize, poolsize, mode);
/* check if bsize is valid */
if (bsize == 0) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
if (bsize > UINT32_MAX) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
struct pool_set *set;
struct pool_attr adj_pool_attr = Blk_create_attr;
/* force set SDS feature */
if (SDS_at_create)
adj_pool_attr.features.incompat |= POOL_FEAT_SDS;
else
adj_pool_attr.features.incompat &= ~POOL_FEAT_SDS;
if (util_pool_create(&set, path, poolsize, PMEMBLK_MIN_POOL,
PMEMBLK_MIN_PART, &adj_pool_attr, NULL,
REPLICAS_DISABLED) != 0) {
LOG(2, "cannot create pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
/* create pool descriptor */
blk_descr_create(pbp, (uint32_t)bsize, set->zeroed);
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, 0) != 0) {
ERR("pool initialization failed");
goto err;
}
if (util_poolset_chmod(set, mode))
goto err;
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DELETE_CREATED_PARTS);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_create -- create a block memory pool
*/
PMEMblkpool *
pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
return pmemblk_createU(path, bsize, poolsize, mode);
}
#else
/*
* pmemblk_createW -- create a block memory pool
*/
PMEMblkpool *
pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize,
mode_t mode)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_createU(upath, bsize, poolsize, mode);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* blk_open_common -- (internal) open a block memory pool
*
* This routine does all the work, but takes a flags argument (e.g.
* POOL_OPEN_COW) so internal calls can map a read-only pool if required.
*
* Passing in bsize == 0 means a valid pool header must exist (which
* will supply the block size).
*/
static PMEMblkpool *
blk_open_common(const char *path, size_t bsize, unsigned flags)
{
LOG(3, "path %s bsize %zu flags 0x%x", path, bsize, flags);
struct pool_set *set;
if (util_pool_open(&set, path, PMEMBLK_MIN_PART, &Blk_open_attr,
NULL, NULL, flags) != 0) {
LOG(2, "cannot open pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* validate pool descriptor */
if (blk_descr_check(pbp, &bsize) != 0) {
LOG(2, "descriptor check failed");
goto err;
}
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, set->rdonly) != 0) {
ERR("pool initialization failed");
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DO_NOT_DELETE_PARTS);
errno = oerrno;
return NULL;
}
/*
* pmemblk_openU -- open a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_openU(const char *path, size_t bsize)
{
LOG(3, "path %s bsize %zu", path, bsize);
return blk_open_common(path, bsize, COW_at_open ? POOL_OPEN_COW : 0);
}
#ifndef _WIN32
/*
* pmemblk_open -- open a block memory pool
*/
PMEMblkpool *
pmemblk_open(const char *path, size_t bsize)
{
return pmemblk_openU(path, bsize);
}
#else
/*
* pmemblk_openW -- open a block memory pool
*/
PMEMblkpool *
pmemblk_openW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_openU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_close -- close a block memory pool
*/
void
pmemblk_close(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
btt_fini(pbp->bttp);
if (pbp->locks) {
for (unsigned i = 0; i < pbp->nlane; i++)
util_mutex_destroy(&pbp->locks[i]);
Free((void *)pbp->locks);
}
#ifdef DEBUG
/* destroy debug lock */
util_mutex_destroy(&pbp->write_lock);
#endif
util_poolset_close(pbp->set, DO_NOT_DELETE_PARTS);
}
/*
* pmemblk_bsize -- return size of block for specified pool
*/
size_t
pmemblk_bsize(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return le32toh(pbp->bsize);
}
/*
* pmemblk_nblock -- return number of usable blocks in a block memory pool
*/
size_t
pmemblk_nblock(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return btt_nlba(pbp->bttp);
}
/*
* pmemblk_read -- read a block in a block memory pool
*/
int
pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_read(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_write -- write a block (atomically) in a block memory pool
*/
int
pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_write(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
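/*
 * Illustrative sketch (not part of the build): typical atomic block I/O.
 * The path and sizes are hypothetical; block 7 assumes the pool holds at
 * least 8 blocks.
 */
#if 0
	PMEMblkpool *pbp = pmemblk_create("/pmem/blkpool", 512,
			PMEMBLK_MIN_POOL, 0666);
	if (pbp != NULL) {
		unsigned char buf[512] = {0};
		if (pmemblk_write(pbp, buf, 7) == 0)
			(void) pmemblk_read(pbp, buf, 7);
		pmemblk_close(pbp);
	}
#endif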
/*
* pmemblk_set_zero -- zero a block in a block memory pool
*/
int
pmemblk_set_zero(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_zero(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_set_error -- set the error state on a block in a block memory pool
*/
int
pmemblk_set_error(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_error(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_checkU -- block memory pool consistency check
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_checkU(const char *path, size_t bsize)
{
LOG(3, "path \"%s\" bsize %zu", path, bsize);
/* map the pool read-only */
PMEMblkpool *pbp = blk_open_common(path, bsize, POOL_OPEN_COW);
if (pbp == NULL)
return -1; /* errno set by blk_open_common() */
int retval = btt_check(pbp->bttp);
int oerrno = errno;
pmemblk_close(pbp);
errno = oerrno;
return retval;
}
#ifndef _WIN32
/*
* pmemblk_check -- block memory pool consistency check
*/
int
pmemblk_check(const char *path, size_t bsize)
{
return pmemblk_checkU(path, bsize);
}
#else
/*
* pmemblk_checkW -- block memory pool consistency check
*/
int
pmemblk_checkW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return -1;
int ret = pmemblk_checkU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_ctl_getU -- programmatically executes a read ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
/*
* pmemblk_ctl_setU -- programmatically executes a write ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
/*
* pmemblk_ctl_execU -- programmatically executes a runnable ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
#ifndef _WIN32
/*
* pmemblk_ctl_get -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_getU(pbp, name, arg);
}
/*
* pmemblk_ctl_set -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_setU(pbp, name, arg);
}
/*
* pmemblk_ctl_exec -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_execU(pbp, name, arg);
}
#else
/*
* pmemblk_ctl_getW -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_getU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_setW -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_setU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_execW -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_execU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
#endif
#if FAULT_INJECTION
void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
core_inject_fault_at(type, nth, at);
}
int
pmemblk_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
| 20,218 | 20.305585 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/container_ravl.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* container_ravl.c -- implementation of ravl-based block container
*/
#include "container_ravl.h"
#include "ravl.h"
#include "out.h"
#include "sys_util.h"
struct block_container_ravl {
struct block_container super;
struct ravl *tree;
};
/*
* container_compare_memblocks -- (internal) compares two memory blocks
*/
static int
container_compare_memblocks(const void *lhs, const void *rhs)
{
const struct memory_block *l = lhs;
const struct memory_block *r = rhs;
int64_t diff = (int64_t)l->size_idx - (int64_t)r->size_idx;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->block_off - (int64_t)r->block_off;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
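/*
 * Illustrative sketch (not part of the build): the comparator orders blocks
 * lexicographically by (size_idx, zone_id, chunk_id, block_off), so among
 * equally sized blocks the one with the lowest address sorts first:
 */
#if 0
	struct memory_block a = { .chunk_id = 1, .zone_id = 0, .size_idx = 4 };
	struct memory_block b = { .chunk_id = 2, .zone_id = 0, .size_idx = 4 };
	ASSERT(container_compare_memblocks(&a, &b) < 0);
#endif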
/*
* container_ravl_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_ravl_insert_block(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct memory_block *e = m->m_ops->get_user_data(m);
VALGRIND_DO_MAKE_MEM_DEFINED(e, sizeof(*e));
VALGRIND_ADD_TO_TX(e, sizeof(*e));
*e = *m;
VALGRIND_SET_CLEAN(e, sizeof(*e));
VALGRIND_REMOVE_FROM_TX(e, sizeof(*e));
return ravl_insert(c->tree, e);
}
/*
* container_ravl_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_ravl_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL)
return ENOMEM;
struct memory_block *e = ravl_data(n);
*m = *e;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_get_rm_block_exact --
* (internal) removes exact match memory block
*/
static int
container_ravl_get_rm_block_exact(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m, RAVL_PREDICATE_EQUAL);
if (n == NULL)
return ENOMEM;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_is_empty -- (internal) checks whether the container is empty
*/
static int
container_ravl_is_empty(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
return ravl_empty(c->tree);
}
/*
* container_ravl_rm_all -- (internal) removes all elements from the tree
*/
static void
container_ravl_rm_all(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_clear(c->tree);
}
/*
* container_ravl_destroy -- (internal) destroys the container
*/
static void
container_ravl_destroy(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_delete(c->tree);
Free(bc);
}
/*
* Tree-based block container used to provide best-fit functionality to the
* bucket. The time complexity for this particular container is O(k) where k is
* the length of the key.
*
* The get methods also guarantee that the block with lowest possible address
* that best matches the requirements is provided.
*/
static const struct block_container_ops container_ravl_ops = {
.insert = container_ravl_insert_block,
.get_rm_exact = container_ravl_get_rm_block_exact,
.get_rm_bestfit = container_ravl_get_rm_block_bestfit,
.is_empty = container_ravl_is_empty,
.rm_all = container_ravl_rm_all,
.destroy = container_ravl_destroy,
};
/*
* container_new_ravl -- allocates and initializes a ravl container
*/
struct block_container *
container_new_ravl(struct palloc_heap *heap)
{
struct block_container_ravl *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_ravl_ops;
bc->tree = ravl_new(container_compare_memblocks);
if (bc->tree == NULL)
goto error_ravl_new;
return (struct block_container *)&bc->super;
error_ravl_new:
Free(bc);
error_container_malloc:
return NULL;
}
| 4,333 | 21.931217 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/heap_layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* heap_layout.h -- internal definitions for heap layout
*/
#ifndef LIBPMEMOBJ_HEAP_LAYOUT_H
#define LIBPMEMOBJ_HEAP_LAYOUT_H 1
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_MAJOR 1
#define HEAP_MINOR 0
#define MAX_CHUNK (UINT16_MAX - 7) /* has to be multiple of 8 */
#define CHUNK_BASE_ALIGNMENT 1024
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define MAX_MEMORY_BLOCK_SIZE (MAX_CHUNK * CHUNKSIZE)
#define HEAP_SIGNATURE_LEN 16
#define HEAP_SIGNATURE "MEMORY_HEAP_HDR\0"
#define ZONE_HEADER_MAGIC 0xC3F0A2D2
#define ZONE_MIN_SIZE (sizeof(struct zone) + sizeof(struct chunk))
#define ZONE_MAX_SIZE (sizeof(struct zone) + sizeof(struct chunk) * MAX_CHUNK)
#define HEAP_MIN_SIZE (sizeof(struct heap_layout) + ZONE_MIN_SIZE)
/* Base bitmap values, relevant for both normal and flexible bitmaps */
#define RUN_BITS_PER_VALUE 64U
#define RUN_BASE_METADATA_VALUES\
((unsigned)(sizeof(struct chunk_run_header) / sizeof(uint64_t)))
#define RUN_BASE_METADATA_SIZE (sizeof(struct chunk_run_header))
#define RUN_CONTENT_SIZE (CHUNKSIZE - RUN_BASE_METADATA_SIZE)
/*
* Calculates the size in bytes of a single run instance, including bitmap
*/
#define RUN_CONTENT_SIZE_BYTES(size_idx)\
(RUN_CONTENT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
/* Default bitmap values, specific for old, non-flexible, bitmaps */
#define RUN_DEFAULT_METADATA_VALUES 40 /* in 8 byte words, 320 bytes total */
#define RUN_DEFAULT_BITMAP_VALUES \
(RUN_DEFAULT_METADATA_VALUES - RUN_BASE_METADATA_VALUES)
#define RUN_DEFAULT_BITMAP_SIZE (sizeof(uint64_t) * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_BITMAP_NBITS\
(RUN_BITS_PER_VALUE * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_SIZE \
(CHUNKSIZE - RUN_BASE_METADATA_SIZE - RUN_DEFAULT_BITMAP_SIZE)
/*
* Calculates the size in bytes of a single run instance, without bitmap,
* but only for the default fixed-bitmap algorithm
*/
#define RUN_DEFAULT_SIZE_BYTES(size_idx)\
(RUN_DEFAULT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
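/*
 * Worked example for the defaults above: the run metadata is 40 eight-byte
 * words -- 2 for struct chunk_run_header and 38 for the bitmap -- so
 * RUN_DEFAULT_BITMAP_SIZE is 304 bytes, the bitmap covers 38 * 64 = 2432
 * bits, and a single-chunk run has RUN_DEFAULT_SIZE =
 * 262144 - 16 - 304 = 261824 usable bytes.
 */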
#define CHUNK_MASK ((CHUNKSIZE) - 1)
#define CHUNK_ALIGN_UP(value) ((((value) + CHUNK_MASK) & ~CHUNK_MASK))
enum chunk_flags {
CHUNK_FLAG_COMPACT_HEADER = 0x0001,
CHUNK_FLAG_HEADER_NONE = 0x0002,
CHUNK_FLAG_ALIGNED = 0x0004,
CHUNK_FLAG_FLEX_BITMAP = 0x0008,
};
#define CHUNK_FLAGS_ALL_VALID (\
CHUNK_FLAG_COMPACT_HEADER |\
CHUNK_FLAG_HEADER_NONE |\
CHUNK_FLAG_ALIGNED |\
CHUNK_FLAG_FLEX_BITMAP\
)
enum chunk_type {
CHUNK_TYPE_UNKNOWN,
CHUNK_TYPE_FOOTER, /* not actual chunk type */
CHUNK_TYPE_FREE,
CHUNK_TYPE_USED,
CHUNK_TYPE_RUN,
CHUNK_TYPE_RUN_DATA,
MAX_CHUNK_TYPE
};
struct chunk {
uint8_t data[CHUNKSIZE];
};
struct chunk_run_header {
uint64_t block_size;
uint64_t alignment; /* valid only /w CHUNK_FLAG_ALIGNED */
};
struct chunk_run {
struct chunk_run_header hdr;
uint8_t content[RUN_CONTENT_SIZE]; /* bitmap + data */
};
struct chunk_header {
uint16_t type;
uint16_t flags;
uint32_t size_idx;
};
struct zone_header {
uint32_t magic;
uint32_t size_idx;
uint8_t reserved[56];
};
struct zone {
struct zone_header header;
struct chunk_header chunk_headers[MAX_CHUNK];
struct chunk chunks[];
};
struct heap_header {
char signature[HEAP_SIGNATURE_LEN];
uint64_t major;
uint64_t minor;
uint64_t unused; /* might be garbage */
uint64_t chunksize;
uint64_t chunks_per_zone;
uint8_t reserved[960];
uint64_t checksum;
};
struct heap_layout {
struct heap_header header;
struct zone zone0; /* first element of zones array */
};
#define ALLOC_HDR_SIZE_SHIFT (48ULL)
#define ALLOC_HDR_FLAGS_MASK (((1ULL) << ALLOC_HDR_SIZE_SHIFT) - 1)
struct allocation_header_legacy {
uint8_t unused[8];
uint64_t size;
uint8_t unused2[32];
uint64_t root_size;
uint64_t type_num;
};
#define ALLOC_HDR_COMPACT_SIZE sizeof(struct allocation_header_compact)
struct allocation_header_compact {
uint64_t size;
uint64_t extra;
};
enum header_type {
HEADER_LEGACY,
HEADER_COMPACT,
HEADER_NONE,
MAX_HEADER_TYPES
};
static const size_t header_type_to_size[MAX_HEADER_TYPES] = {
sizeof(struct allocation_header_legacy),
sizeof(struct allocation_header_compact),
0
};
static const enum chunk_flags header_type_to_flag[MAX_HEADER_TYPES] = {
(enum chunk_flags)0,
CHUNK_FLAG_COMPACT_HEADER,
CHUNK_FLAG_HEADER_NONE
};
static inline struct zone *
ZID_TO_ZONE(struct heap_layout *layout, size_t zone_id)
{
return (struct zone *)
((uintptr_t)&layout->zone0 + ZONE_MAX_SIZE * zone_id);
}
static inline struct chunk_header *
GET_CHUNK_HDR(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunk_headers[chunk_id];
}
static inline struct chunk *
GET_CHUNK(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunks[chunk_id];
}
static inline struct chunk_run *
GET_CHUNK_RUN(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return (struct chunk_run *)GET_CHUNK(layout, zone_id, chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
| 5,105 | 23.666667 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/alloc_class.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* alloc_class.h -- internal definitions for allocation classes
*/
#ifndef LIBPMEMOBJ_ALLOC_CLASS_H
#define LIBPMEMOBJ_ALLOC_CLASS_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "heap_layout.h"
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_ALLOCATION_CLASSES (UINT8_MAX)
#define DEFAULT_ALLOC_CLASS_ID (0)
#define RUN_UNIT_MAX RUN_BITS_PER_VALUE
struct alloc_class_collection;
enum alloc_class_type {
CLASS_UNKNOWN,
CLASS_HUGE,
CLASS_RUN,
MAX_ALLOC_CLASS_TYPES
};
struct alloc_class {
uint8_t id;
uint16_t flags;
size_t unit_size;
enum header_type header_type;
enum alloc_class_type type;
/* run-specific data */
struct run_descriptor rdsc;
};
struct alloc_class_collection *alloc_class_collection_new(void);
void alloc_class_collection_delete(struct alloc_class_collection *ac);
struct alloc_class *alloc_class_by_run(
struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx);
struct alloc_class *alloc_class_by_alloc_size(
struct alloc_class_collection *ac, size_t size);
struct alloc_class *alloc_class_by_id(
struct alloc_class_collection *ac, uint8_t id);
int alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id);
int alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot);
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size);
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx);
void alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c);
#ifdef __cplusplus
}
#endif
#endif
| 1,815 | 21.7 | 71 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/recycler.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* recycler.c -- implementation of run recycler
*/
#include "heap.h"
#include "recycler.h"
#include "vec.h"
#include "out.h"
#include "util.h"
#include "sys_util.h"
#include "ravl.h"
#include "valgrind_internal.h"
#define THRESHOLD_MUL 4
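/*
 * Worked example (hypothetical numbers): with a peak of 4 arenas and runs
 * holding up to 1024 allocations, recycler_recalc() rescores runs only
 * once about THRESHOLD_MUL * 4 * 1024 = 16384 units might be unaccounted
 * for.
 */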
/*
* recycler_element_cmp -- compares two recycler elements
*/
static int
recycler_element_cmp(const void *lhs, const void *rhs)
{
const struct recycler_element *l = lhs;
const struct recycler_element *r = rhs;
int64_t diff = (int64_t)l->max_free_block - (int64_t)r->max_free_block;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->free_space - (int64_t)r->free_space;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
struct recycler {
struct ravl *runs;
struct palloc_heap *heap;
/*
* How many unaccounted units there *might* be inside of the memory
* blocks stored in the recycler.
* The value is not meant to be accurate, but rather a rough measure on
* how often should the memory block scores be recalculated.
*
* Per-chunk unaccounted units are shared for all zones, which might
* lead to some unnecessary recalculations.
*/
size_t unaccounted_units[MAX_CHUNK];
size_t unaccounted_total;
size_t nallocs;
size_t *peak_arenas;
VEC(, struct recycler_element) recalc;
os_mutex_t lock;
};
/*
* recycler_new -- creates new recycler instance
*/
struct recycler *
recycler_new(struct palloc_heap *heap, size_t nallocs, size_t *peak_arenas)
{
struct recycler *r = Malloc(sizeof(struct recycler));
if (r == NULL)
goto error_alloc_recycler;
r->runs = ravl_new_sized(recycler_element_cmp,
sizeof(struct recycler_element));
if (r->runs == NULL)
goto error_alloc_tree;
r->heap = heap;
r->nallocs = nallocs;
r->peak_arenas = peak_arenas;
r->unaccounted_total = 0;
memset(&r->unaccounted_units, 0, sizeof(r->unaccounted_units));
VEC_INIT(&r->recalc);
util_mutex_init(&r->lock);
return r;
error_alloc_tree:
Free(r);
error_alloc_recycler:
return NULL;
}
/*
* recycler_delete -- deletes recycler instance
*/
void
recycler_delete(struct recycler *r)
{
VEC_DELETE(&r->recalc);
util_mutex_destroy(&r->lock);
ravl_delete(r->runs);
Free(r);
}
/*
 * recycler_element_new -- calculates how many free bytes a run has and the
 * largest request that the run can handle, and returns that as a
 * recycler element struct
*/
struct recycler_element
recycler_element_new(struct palloc_heap *heap, const struct memory_block *m)
{
/*
* Counting of the clear bits can race with a concurrent deallocation
* that operates on the same run. This race is benign and has absolutely
* no effect on the correctness of this algorithm. Ideally, we would
* avoid grabbing the lock, but helgrind gets very confused if we
* try to disable reporting for this function.
*/
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
struct recycler_element e = {
.free_space = 0,
.max_free_block = 0,
.chunk_id = m->chunk_id,
.zone_id = m->zone_id,
};
m->m_ops->calc_free(m, &e.free_space, &e.max_free_block);
util_mutex_unlock(lock);
return e;
}
/*
* recycler_put -- inserts new run into the recycler
*/
int
recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element)
{
int ret = 0;
util_mutex_lock(&r->lock);
ret = ravl_emplace_copy(r->runs, &element);
util_mutex_unlock(&r->lock);
return ret;
}
/*
* recycler_get -- retrieves a chunk from the recycler
*/
int
recycler_get(struct recycler *r, struct memory_block *m)
{
int ret = 0;
util_mutex_lock(&r->lock);
struct recycler_element e = { .max_free_block = m->size_idx, 0, 0, 0};
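	/* only max_free_block matters for the >= lookup; other fields stay 0 */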
struct ravl_node *n = ravl_find(r->runs, &e,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL) {
ret = ENOMEM;
goto out;
}
struct recycler_element *ne = ravl_data(n);
m->chunk_id = ne->chunk_id;
m->zone_id = ne->zone_id;
ravl_remove(r->runs, n);
struct chunk_header *hdr = heap_get_chunk_hdr(r->heap, m);
m->size_idx = hdr->size_idx;
memblock_rebuild_state(r->heap, m);
out:
util_mutex_unlock(&r->lock);
return ret;
}
/*
* recycler_recalc -- recalculates the scores of runs in the recycler to match
* the updated persistent state
*/
struct empty_runs
recycler_recalc(struct recycler *r, int force)
{
struct empty_runs runs;
VEC_INIT(&runs);
uint64_t units = r->unaccounted_total;
size_t peak_arenas;
util_atomic_load64(r->peak_arenas, &peak_arenas);
uint64_t recalc_threshold =
THRESHOLD_MUL * peak_arenas * r->nallocs;
if (!force && units < recalc_threshold)
return runs;
if (util_mutex_trylock(&r->lock) != 0)
return runs;
/* If the search is forced, recalculate everything */
uint64_t search_limit = force ? UINT64_MAX : units;
uint64_t found_units = 0;
struct memory_block nm = MEMORY_BLOCK_NONE;
struct ravl_node *n;
struct recycler_element next = {0, 0, 0, 0};
enum ravl_predicate p = RAVL_PREDICATE_GREATER_EQUAL;
do {
if ((n = ravl_find(r->runs, &next, p)) == NULL)
break;
p = RAVL_PREDICATE_GREATER;
struct recycler_element *ne = ravl_data(n);
next = *ne;
uint64_t chunk_units = r->unaccounted_units[ne->chunk_id];
if (!force && chunk_units == 0)
continue;
uint32_t existing_free_space = ne->free_space;
nm.chunk_id = ne->chunk_id;
nm.zone_id = ne->zone_id;
memblock_rebuild_state(r->heap, &nm);
struct recycler_element e = recycler_element_new(r->heap, &nm);
ASSERT(e.free_space >= existing_free_space);
uint64_t free_space_diff = e.free_space - existing_free_space;
found_units += free_space_diff;
if (free_space_diff == 0)
continue;
/*
* Decrease the per chunk_id counter by the number of nallocs
* found, increased by the blocks potentially freed in the
* active memory block. Cap the sub value to prevent overflow.
*/
util_fetch_and_sub64(&r->unaccounted_units[nm.chunk_id],
MIN(chunk_units, free_space_diff + r->nallocs));
ravl_remove(r->runs, n);
if (e.free_space == r->nallocs) {
memblock_rebuild_state(r->heap, &nm);
if (VEC_PUSH_BACK(&runs, nm) != 0)
ASSERT(0); /* XXX: fix after refactoring */
} else {
VEC_PUSH_BACK(&r->recalc, e);
}
} while (found_units < search_limit);
struct recycler_element *e;
VEC_FOREACH_BY_PTR(e, &r->recalc) {
ravl_emplace_copy(r->runs, e);
}
VEC_CLEAR(&r->recalc);
util_mutex_unlock(&r->lock);
util_fetch_and_sub64(&r->unaccounted_total, units);
return runs;
}
/*
* recycler_inc_unaccounted -- increases the number of unaccounted units in the
* recycler
*/
void
recycler_inc_unaccounted(struct recycler *r, const struct memory_block *m)
{
util_fetch_and_add64(&r->unaccounted_total, m->size_idx);
util_fetch_and_add64(&r->unaccounted_units[m->chunk_id],
m->size_idx);
}
| 6,997 | 22.019737 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/alloc_class.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* alloc_class.c -- implementation of allocation classes
*/
#include <float.h>
#include <string.h>
#include "alloc_class.h"
#include "heap_layout.h"
#include "util.h"
#include "out.h"
#include "bucket.h"
#include "critnib.h"
#define RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s)\
((uint64_t)(map_idx_s) << 32 |\
(uint64_t)(flags_s) << 16 |\
(uint64_t)(size_idx_s))
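/*
 * As the shifts above imply, the packed key places the size-to-class map
 * index in bits 32..63, the run flags in bits 16..31 and the size index in
 * bits 0..15, so a single 64-bit key identifies a run allocation class.
 */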
/*
* Value used to mark a reserved spot in the bucket array.
*/
#define ACLASS_RESERVED ((void *)0xFFFFFFFFULL)
/*
* The last size that is handled by runs.
*/
#define MAX_RUN_SIZE (CHUNKSIZE * 10)
/*
* Maximum number of bytes the allocation class generation algorithm can decide
* to waste in a single run chunk.
*/
#define MAX_RUN_WASTED_BYTES 1024
/*
 * Allocation categories are used for allocation class generation. Each one
 * defines the biggest handled size (in bytes) and the step percentage used
 * by the generation process. The step percentage defines the maximum allowed
 * external fragmentation for the category.
*/
#define MAX_ALLOC_CATEGORIES 9
/*
 * The first size (in bytes) which is actually used in the allocation
* class generation algorithm. All smaller sizes use the first predefined bucket
* with the smallest run unit size.
*/
#define FIRST_GENERATED_CLASS_SIZE 128
/*
* The granularity of the allocation class generation algorithm.
*/
#define ALLOC_BLOCK_SIZE_GEN 64
/*
* The first predefined allocation class size
*/
#define MIN_UNIT_SIZE 128
static const struct {
size_t size;
float step;
} categories[MAX_ALLOC_CATEGORIES] = {
/* dummy category - the first allocation class is predefined */
{FIRST_GENERATED_CLASS_SIZE, 0.05f},
{1024, 0.05f},
{2048, 0.05f},
{4096, 0.05f},
{8192, 0.05f},
{16384, 0.05f},
{32768, 0.05f},
{131072, 0.05f},
{393216, 0.05f},
};
#define RUN_UNIT_MAX_ALLOC 8U
/*
* Every allocation has to be a multiple of at least 8 because we need to
* ensure proper alignment of every pmem structure.
*/
#define ALLOC_BLOCK_SIZE 16
/*
* Converts size (in bytes) to number of allocation blocks.
*/
#define SIZE_TO_CLASS_MAP_INDEX(_s, _g) (1 + (((_s) - 1) / (_g)))
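/*
 * For example, with the default 16-byte granularity a 100-byte size maps to
 * index 1 + (99 / 16) == 7, i.e. the allocation block count rounded up.
 */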
/*
* Target number of allocations per run instance.
*/
#define RUN_MIN_NALLOCS 200
/*
* Hard limit of chunks per single run.
*/
#define RUN_SIZE_IDX_CAP (16)
#define ALLOC_CLASS_DEFAULT_FLAGS CHUNK_FLAG_FLEX_BITMAP
struct alloc_class_collection {
size_t granularity;
struct alloc_class *aclasses[MAX_ALLOCATION_CLASSES];
/*
* The last size (in bytes) that is handled by runs, everything bigger
* uses the default class.
*/
size_t last_run_max_size;
/* maps allocation classes to allocation sizes, excluding the header! */
uint8_t *class_map_by_alloc_size;
/* maps allocation classes to run unit sizes */
struct critnib *class_map_by_unit_size;
int fail_on_missing_class;
int autogenerate_on_missing_class;
};
/*
* alloc_class_find_first_free_slot -- searches for the
* first available allocation class slot
*
* This function must be thread-safe because allocation classes can be created
* at runtime.
*/
int
alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot)
{
LOG(10, NULL);
for (int n = 0; n < MAX_ALLOCATION_CLASSES; ++n) {
if (util_bool_compare_and_swap64(&ac->aclasses[n],
NULL, ACLASS_RESERVED)) {
*slot = (uint8_t)n;
return 0;
}
}
return -1;
}
/*
* alloc_class_reserve -- reserve the specified class id
*/
int
alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id)
{
LOG(10, NULL);
return util_bool_compare_and_swap64(&ac->aclasses[id],
NULL, ACLASS_RESERVED) ? 0 : -1;
}
/*
* alloc_class_reservation_clear -- removes the reservation on class id
*/
static void
alloc_class_reservation_clear(struct alloc_class_collection *ac, int id)
{
LOG(10, NULL);
int ret = util_bool_compare_and_swap64(&ac->aclasses[id],
ACLASS_RESERVED, NULL);
ASSERT(ret);
}
/*
* alloc_class_new -- creates a new allocation class
*/
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx)
{
LOG(10, NULL);
struct alloc_class *c = Malloc(sizeof(*c));
if (c == NULL)
goto error_class_alloc;
c->unit_size = unit_size;
c->header_type = htype;
c->type = type;
c->flags = (uint16_t)
(header_type_to_flag[c->header_type] |
(alignment ? CHUNK_FLAG_ALIGNED : 0)) |
ALLOC_CLASS_DEFAULT_FLAGS;
switch (type) {
case CLASS_HUGE:
id = DEFAULT_ALLOC_CLASS_ID;
break;
case CLASS_RUN:
c->rdsc.alignment = alignment;
memblock_run_bitmap(&size_idx, c->flags, unit_size,
alignment, NULL, &c->rdsc.bitmap);
c->rdsc.nallocs = c->rdsc.bitmap.nbits;
c->rdsc.size_idx = size_idx;
/* these two fields are duplicated from class */
c->rdsc.unit_size = c->unit_size;
c->rdsc.flags = c->flags;
uint8_t slot = (uint8_t)id;
if (id < 0 && alloc_class_find_first_free_slot(ac,
&slot) != 0)
goto error_class_alloc;
id = slot;
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(c->unit_size,
ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)c->flags;
uint64_t k = RUN_CLASS_KEY_PACK(map_idx_s,
flags_s, size_idx_s);
if (critnib_insert(ac->class_map_by_unit_size,
k, c) != 0) {
ERR("unable to register allocation class");
goto error_map_insert;
}
break;
default:
ASSERT(0);
}
c->id = (uint8_t)id;
ac->aclasses[c->id] = c;
return c;
error_map_insert:
Free(c);
error_class_alloc:
if (id >= 0)
alloc_class_reservation_clear(ac, id);
return NULL;
}
/*
* alloc_class_delete -- (internal) deletes an allocation class
*/
void
alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c)
{
LOG(10, NULL);
ac->aclasses[c->id] = NULL;
Free(c);
}
/*
* alloc_class_find_or_create -- (internal) searches for the
 * biggest allocation class whose unit_size evenly divides n.
* If no such class exists, create one.
*/
static struct alloc_class *
alloc_class_find_or_create(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
COMPILE_ERROR_ON(MAX_ALLOCATION_CLASSES > UINT8_MAX);
uint64_t required_size_bytes = n * RUN_MIN_NALLOCS;
uint32_t required_size_idx = 1;
if (required_size_bytes > RUN_DEFAULT_SIZE) {
required_size_bytes -= RUN_DEFAULT_SIZE;
required_size_idx +=
CALC_SIZE_IDX(CHUNKSIZE, required_size_bytes);
if (required_size_idx > RUN_SIZE_IDX_CAP)
required_size_idx = RUN_SIZE_IDX_CAP;
}
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE ||
c->rdsc.size_idx < required_size_idx)
continue;
if (n % c->unit_size == 0 &&
n / c->unit_size <= RUN_UNIT_MAX_ALLOC)
return c;
}
/*
* In order to minimize the wasted space at the end of the run the
* run data size must be divisible by the allocation class unit size
* with the smallest possible remainder, preferably 0.
*/
struct run_bitmap b;
size_t runsize_bytes = 0;
do {
if (runsize_bytes != 0) /* don't increase on first iteration */
n += ALLOC_BLOCK_SIZE_GEN;
uint32_t size_idx = required_size_idx;
memblock_run_bitmap(&size_idx, ALLOC_CLASS_DEFAULT_FLAGS, n, 0,
NULL, &b);
runsize_bytes = RUN_CONTENT_SIZE_BYTES(size_idx) - b.size;
} while ((runsize_bytes % n) > MAX_RUN_WASTED_BYTES);
/*
* Now that the desired unit size is found the existing classes need
* to be searched for possible duplicates. If a class that can handle
* the calculated size already exists, simply return that.
*/
for (int i = 1; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE)
continue;
if (n / c->unit_size <= RUN_UNIT_MAX_ALLOC &&
n % c->unit_size == 0)
return c;
if (c->unit_size == n)
return c;
}
return alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT, n, 0,
required_size_idx);
}
/*
* alloc_class_find_min_frag -- searches for an existing allocation
* class that will provide the smallest internal fragmentation for the given
* size.
*/
static struct alloc_class *
alloc_class_find_min_frag(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
struct alloc_class *best_c = NULL;
size_t lowest_waste = SIZE_MAX;
ASSERTne(n, 0);
/*
* Start from the largest buckets in order to minimize unit size of
* allocated memory blocks.
*/
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
/* can't use alloc classes /w no headers by default */
if (c == NULL || c->header_type == HEADER_NONE)
continue;
size_t real_size = n + header_type_to_size[c->header_type];
size_t units = CALC_SIZE_IDX(c->unit_size, real_size);
/* can't exceed the maximum allowed run unit max */
if (c->type == CLASS_RUN && units > RUN_UNIT_MAX_ALLOC)
continue;
if (c->unit_size * units == real_size)
return c;
size_t waste = (c->unit_size * units) - real_size;
/*
* If we assume that the allocation class is only ever going to
* be used with exactly one size, the effective internal
* fragmentation would be increased by the leftover
* memory at the end of the run.
*/
if (c->type == CLASS_RUN) {
size_t wasted_units = c->rdsc.nallocs % units;
size_t wasted_bytes = wasted_units * c->unit_size;
size_t waste_avg_per_unit = wasted_bytes /
c->rdsc.nallocs;
waste += waste_avg_per_unit;
}
if (best_c == NULL || lowest_waste > waste) {
best_c = c;
lowest_waste = waste;
}
}
ASSERTne(best_c, NULL);
return best_c;
}
/*
* alloc_class_collection_new -- creates a new collection of allocation classes
*/
struct alloc_class_collection *
alloc_class_collection_new()
{
LOG(10, NULL);
struct alloc_class_collection *ac = Zalloc(sizeof(*ac));
if (ac == NULL)
return NULL;
ac->granularity = ALLOC_BLOCK_SIZE;
ac->last_run_max_size = MAX_RUN_SIZE;
ac->fail_on_missing_class = 0;
ac->autogenerate_on_missing_class = 1;
size_t maps_size = (MAX_RUN_SIZE / ac->granularity) + 1;
if ((ac->class_map_by_alloc_size = Malloc(maps_size)) == NULL)
goto error;
if ((ac->class_map_by_unit_size = critnib_new()) == NULL)
goto error;
memset(ac->class_map_by_alloc_size, 0xFF, maps_size);
if (alloc_class_new(-1, ac, CLASS_HUGE, HEADER_COMPACT,
CHUNKSIZE, 0, 1) == NULL)
goto error;
struct alloc_class *predefined_class =
alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT,
MIN_UNIT_SIZE, 0, 1);
if (predefined_class == NULL)
goto error;
for (size_t i = 0; i < FIRST_GENERATED_CLASS_SIZE / ac->granularity;
++i) {
ac->class_map_by_alloc_size[i] = predefined_class->id;
}
/*
* Based on the defined categories, a set of allocation classes is
 * created. The unit size of those classes depends on the category's
* initial size and step.
*/
size_t granularity_mask = ALLOC_BLOCK_SIZE_GEN - 1;
for (int c = 1; c < MAX_ALLOC_CATEGORIES; ++c) {
size_t n = categories[c - 1].size + ALLOC_BLOCK_SIZE_GEN;
do {
if (alloc_class_find_or_create(ac, n) == NULL)
goto error;
float stepf = (float)n * categories[c].step;
size_t stepi = (size_t)stepf;
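			/* round stepf up to a whole step, tolerating FLT_EPSILON of error */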
stepi = (stepf - (float)stepi < FLT_EPSILON) ?
stepi : stepi + 1;
n += (stepi + (granularity_mask)) & ~granularity_mask;
} while (n <= categories[c].size);
}
/*
 * Find the largest alloc class and use its unit size as the run allocation
* threshold.
*/
uint8_t largest_aclass_slot;
for (largest_aclass_slot = MAX_ALLOCATION_CLASSES - 1;
largest_aclass_slot > 0 &&
ac->aclasses[largest_aclass_slot] == NULL;
--largest_aclass_slot) {
/* intentional NOP */
}
struct alloc_class *c = ac->aclasses[largest_aclass_slot];
/*
* The actual run might contain less unit blocks than the theoretical
* unit max variable. This may be the case for very large unit sizes.
*/
size_t real_unit_max = c->rdsc.nallocs < RUN_UNIT_MAX_ALLOC ?
c->rdsc.nallocs : RUN_UNIT_MAX_ALLOC;
size_t theoretical_run_max_size = c->unit_size * real_unit_max;
ac->last_run_max_size = MAX_RUN_SIZE > theoretical_run_max_size ?
theoretical_run_max_size : MAX_RUN_SIZE;
#ifdef DEBUG
/*
* Verify that each bucket's unit size points back to the bucket by the
* bucket map. This must be true for the default allocation classes,
* otherwise duplicate buckets will be created.
*/
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL && c->type == CLASS_RUN) {
ASSERTeq(i, c->id);
ASSERTeq(alloc_class_by_run(ac, c->unit_size,
c->flags, c->rdsc.size_idx), c);
}
}
#endif
return ac;
error:
alloc_class_collection_delete(ac);
return NULL;
}
/*
* alloc_class_collection_delete -- deletes the allocation class collection and
* all of the classes within it
*/
void
alloc_class_collection_delete(struct alloc_class_collection *ac)
{
LOG(10, NULL);
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL) {
alloc_class_delete(ac, c);
}
}
if (ac->class_map_by_unit_size)
critnib_delete(ac->class_map_by_unit_size);
Free(ac->class_map_by_alloc_size);
Free(ac);
}
/*
* alloc_class_assign_by_size -- (internal) chooses the allocation class that
* best approximates the provided size
*/
static struct alloc_class *
alloc_class_assign_by_size(struct alloc_class_collection *ac,
size_t size)
{
LOG(10, NULL);
size_t class_map_index = SIZE_TO_CLASS_MAP_INDEX(size,
ac->granularity);
struct alloc_class *c = alloc_class_find_min_frag(ac,
class_map_index * ac->granularity);
ASSERTne(c, NULL);
/*
* We don't lock this array because locking this section here and then
	 * bailing out if someone else was faster would still be slower than
* just calculating the class and failing to assign the variable.
* We are using a compare and swap so that helgrind/drd don't complain.
*/
util_bool_compare_and_swap64(
&ac->class_map_by_alloc_size[class_map_index],
MAX_ALLOCATION_CLASSES, c->id);
return c;
}
/*
* alloc_class_by_alloc_size -- returns allocation class that is assigned
* to handle an allocation of the provided size
*/
struct alloc_class *
alloc_class_by_alloc_size(struct alloc_class_collection *ac, size_t size)
{
if (size < ac->last_run_max_size) {
uint8_t class_id = ac->class_map_by_alloc_size[
SIZE_TO_CLASS_MAP_INDEX(size, ac->granularity)];
if (class_id == MAX_ALLOCATION_CLASSES) {
if (ac->fail_on_missing_class)
return NULL;
else if (ac->autogenerate_on_missing_class)
return alloc_class_assign_by_size(ac, size);
else
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
return ac->aclasses[class_id];
} else {
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
}
/*
* alloc_class_by_run -- returns the allocation class that has the given
* unit size
*/
struct alloc_class *
alloc_class_by_run(struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx)
{
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(unit_size, ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
ASSERT(size_idx <= UINT16_MAX);
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)flags;
return critnib_get(ac->class_map_by_unit_size,
RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s));
}
/*
* alloc_class_by_id -- returns the allocation class with an id
*/
struct alloc_class *
alloc_class_by_id(struct alloc_class_collection *ac, uint8_t id)
{
return ac->aclasses[id];
}
/*
* alloc_class_calc_size_idx -- calculates how many units does the size require
*/
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size)
{
uint32_t size_idx = CALC_SIZE_IDX(c->unit_size,
size + header_type_to_size[c->header_type]);
if (c->type == CLASS_RUN) {
if (c->header_type == HEADER_NONE && size_idx != 1)
return -1;
else if (size_idx > RUN_UNIT_MAX)
return -1;
else if (size_idx > c->rdsc.nallocs)
return -1;
}
return size_idx;
}
| 16,240 | 24.496075 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/obj.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* obj.h -- internal definitions for obj module
*/
#ifndef LIBPMEMOBJ_OBJ_H
#define LIBPMEMOBJ_OBJ_H 1
#include <stddef.h>
#include <stdint.h>
#include "lane.h"
#include "pool_hdr.h"
#include "pmalloc.h"
#include "ctl.h"
#include "sync.h"
#include "stats.h"
#include "ctl_debug.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMOBJ_LOG_PREFIX "libpmemobj"
#define PMEMOBJ_LOG_LEVEL_VAR "PMEMOBJ_LOG_LEVEL"
#define PMEMOBJ_LOG_FILE_VAR "PMEMOBJ_LOG_FILE"
/* attributes of the obj memory pool format for the pool header */
#define OBJ_HDR_SIG "PMEMOBJ" /* must be 8 bytes including '\0' */
#define OBJ_FORMAT_MAJOR 6
#define OBJ_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define OBJ_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t obj_format_feat_default = OBJ_FORMAT_FEAT_CHECK;
/* size of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_SIZE 2048
/* size of unused part of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_UNUSED (OBJ_DSC_P_SIZE - PMEMOBJ_MAX_LAYOUT - 40)
#define OBJ_LANES_OFFSET (sizeof(struct pmemobjpool)) /* lanes offset */
#define OBJ_NLANES 1024 /* number of lanes */
#define OBJ_OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off)))
#define OBJ_PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop))
#define OBJ_OID_IS_NULL(oid) ((oid).off == 0)
#define OBJ_LIST_EMPTY(head) OBJ_OID_IS_NULL((head)->pe_first)
#define OBJ_OFF_FROM_HEAP(pop, off)\
((off) >= (pop)->heap_offset &&\
(off) < (pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_FROM_LANES(pop, off)\
((off) >= (pop)->lanes_offset &&\
(off) < (pop)->lanes_offset +\
(pop)->nlanes * sizeof(struct lane_layout))
#define OBJ_PTR_FROM_POOL(pop, ptr)\
((uintptr_t)(ptr) >= (uintptr_t)(pop) &&\
(uintptr_t)(ptr) < (uintptr_t)(pop) +\
(pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_IS_VALID(pop, off)\
(OBJ_OFF_FROM_HEAP(pop, off) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_offset) == (off)) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_size) == (off)) ||\
(OBJ_OFF_FROM_LANES(pop, off)))
#define OBJ_PTR_IS_VALID(pop, ptr)\
OBJ_OFF_IS_VALID(pop, OBJ_PTR_TO_OFF(pop, ptr))
typedef void (*persist_local_fn)(const void *, size_t);
typedef void (*flush_local_fn)(const void *, size_t);
typedef void (*drain_local_fn)(void);
typedef void *(*memcpy_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_local_fn)(void *dest, int c, size_t len, unsigned flags);
typedef int (*persist_remote_fn)(PMEMobjpool *pop, const void *addr,
size_t len, unsigned lane, unsigned flags);
typedef uint64_t type_num_t;
#define CONVERSION_FLAG_OLD_SET_CACHE ((1ULL) << 0)
/* PMEM_OBJ_POOL_HEAD_SIZE: size without the unused and unused2 arrays */
#define PMEM_OBJ_POOL_HEAD_SIZE 2196
#define PMEM_OBJ_POOL_UNUSED2_SIZE (PMEM_PAGESIZE \
- OBJ_DSC_P_UNUSED\
- PMEM_OBJ_POOL_HEAD_SIZE)
/*
//NEW
//#define _GNU_SOURCE
//#include <sys/types.h>
//#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
//int __real_open(const char *__path, int __oflag);
//int __wrap_open(const char *__path, int __oflag);
void* open_device(void);
//END NEW
*/
struct pmemobjpool {
struct pool_hdr hdr; /* memory pool header */
/* persistent part of PMEMOBJ pool descriptor (2kB) */
char layout[PMEMOBJ_MAX_LAYOUT];
uint64_t lanes_offset;
uint64_t nlanes;
uint64_t heap_offset;
uint64_t unused3;
unsigned char unused[OBJ_DSC_P_UNUSED]; /* must be zero */
uint64_t checksum; /* checksum of above fields */
uint64_t root_offset;
/* unique runID for this program run - persistent but not checksummed */
uint64_t run_id;
uint64_t root_size;
/*
* These flags can be set from a conversion tool and are set only for
* the first recovery of the pool.
*/
uint64_t conversion_flags;
uint64_t heap_size;
struct stats_persistent stats_persistent;
char pmem_reserved[496]; /* must be zeroed */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
struct palloc_heap heap;
struct lane_descriptor lanes_desc;
uint64_t uuid_lo;
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct stats *stats;
struct pool_set *set; /* pool set info */
struct pmemobjpool *replica; /* next replica */
/* per-replica functions: pmem or non-pmem */
persist_local_fn persist_local; /* persist function */
flush_local_fn flush_local; /* flush function */
drain_local_fn drain_local; /* drain function */
memcpy_local_fn memcpy_local; /* persistent memcpy function */
memmove_local_fn memmove_local; /* persistent memmove function */
memset_local_fn memset_local; /* persistent memset function */
/* for 'master' replica: with or without data replication */
struct pmem_ops p_ops;
PMEMmutex rootlock; /* root object lock */
int is_master_replica;
int has_remote_replicas;
/* remote replica section */
void *rpp; /* RPMEMpool opaque handle if it is a remote replica */
uintptr_t remote_base; /* beginning of the remote pool */
char *node_addr; /* address of a remote node */
char *pool_desc; /* descriptor of a poolset */
persist_remote_fn persist_remote; /* remote persist function */
int vg_boot;
int tx_debug_skip_expensive_checks;
struct tx_parameters *tx_params;
/*
* Locks are dynamically allocated on FreeBSD. Keep track so
* we can free them on pmemobj_close.
*/
PMEMmutex_internal *mutex_head;
PMEMrwlock_internal *rwlock_head;
PMEMcond_internal *cond_head;
struct {
struct ravl *map;
os_mutex_t lock;
int verify;
} ulog_user_buffers;
void *user_data;
//New
//void *device;
/* padding to align size of this structure to page boundary */
/* sizeof(unused2) == 8192 - offsetof(struct pmemobjpool, unused2) */
	char unused2[PMEM_OBJ_POOL_UNUSED2_SIZE - 28];
};
/*
* Stored in the 'size' field of oobh header, determines whether the object
* is internal or not. Internal objects are skipped in pmemobj iteration
* functions.
*/
#define OBJ_INTERNAL_OBJECT_MASK ((1ULL) << 15)
#define CLASS_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 48))
#define ARENA_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 32))
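/*
 * As the shifts above imply, allocation flags carry the class id in bits
 * 48..63 and the arena id in bits 32..47 of the 64-bit flags word.
 */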
/*
* pmemobj_get_uuid_lo -- (internal) evaluates XOR sum of least significant
* 8 bytes with most significant 8 bytes.
*/
static inline uint64_t
pmemobj_get_uuid_lo(PMEMobjpool *pop)
{
uint64_t uuid_lo = 0;
for (int i = 0; i < 8; i++) {
uuid_lo = (uuid_lo << 8) |
(pop->hdr.poolset_uuid[i] ^
pop->hdr.poolset_uuid[8 + i]);
}
return uuid_lo;
}
/*
* OBJ_OID_IS_VALID -- (internal) checks if 'oid' is valid
*/
static inline int
OBJ_OID_IS_VALID(PMEMobjpool *pop, PMEMoid oid)
{
return OBJ_OID_IS_NULL(oid) ||
(oid.pool_uuid_lo == pop->uuid_lo &&
oid.off >= pop->heap_offset &&
oid.off < pop->heap_offset + pop->heap_size);
}
static inline int
OBJ_OFF_IS_VALID_FROM_CTX(void *ctx, uint64_t offset)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
return OBJ_OFF_IS_VALID(pop, offset);
}
void obj_init(void);
void obj_fini(void);
int obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
/*
* (debug helper macro) logs notice message if used inside a transaction
*/
#ifdef DEBUG
#define _POBJ_DEBUG_NOTICE_IN_TX()\
_pobj_debug_notice(__func__, NULL, 0)
#else
#define _POBJ_DEBUG_NOTICE_IN_TX() do {} while (0)
#endif
#if FAULT_INJECTION
void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemobj_fault_injection_enabled(void);
#else
static inline void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemobj_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 8,196 | 25.441935 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/list.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* list.h -- internal definitions for persistent atomic lists module
*/
#ifndef LIBPMEMOBJ_LIST_H
#define LIBPMEMOBJ_LIST_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "lane.h"
#include "pmalloc.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
struct list_entry {
PMEMoid pe_next;
PMEMoid pe_prev;
};
struct list_head {
PMEMoid pe_first;
PMEMmutex lock;
};
int list_insert_new_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, palloc_constr constructor, void *arg,
PMEMoid *oidp);
int list_insert(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head, PMEMoid dest, int before,
PMEMoid oid);
int list_remove_free_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head,
PMEMoid *oidp);
int list_remove(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid oid);
int list_move(PMEMobjpool *pop,
size_t pe_offset_old, struct list_head *head_old,
size_t pe_offset_new, struct list_head *head_new,
PMEMoid dest, int before, PMEMoid oid);
void list_move_oob(PMEMobjpool *pop,
struct list_head *head_old, struct list_head *head_new,
PMEMoid oid);
#ifdef __cplusplus
}
#endif
#endif
| 1,376 | 20.184615 | 73 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/memops.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* memops.c -- aggregated memory operations helper implementation
*
* The operation collects all of the required memory modifications that
* need to happen in an atomic way (all of them or none), and abstracts
* away the storage type (transient/persistent) and the underlying
* implementation of how it's actually performed - in some cases using
* the redo log is unnecessary and the allocation process can be sped up
* a bit by completely omitting that whole machinery.
*
* The modifications are not visible until the context is processed.
*/
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "ravl.h"
#include "valgrind_internal.h"
#include "vecq.h"
#include "sys_util.h"
#include <x86intrin.h>
#define ULOG_BASE_SIZE 1024
#define OP_MERGE_SEARCH 64
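/*
 * getCycle -- reads the CPU timestamp counter via RDTSCP, which waits for
 * all prior instructions to execute before reading the counter; used below
 * for the GET_NDP_BREAKDOWN cycle accounting of ulog operations.
 */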
static inline uint64_t getCycle(){
uint32_t cycles_high, cycles_low, pid;
asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"mov %%ecx, %2\n\t"
:"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars
:// no input
:"%eax", "%edx", "%ecx" // clobbered by rdtscp
);
return((uint64_t)cycles_high << 32) | cycles_low;
}
enum operation_state {
OPERATION_IDLE,
OPERATION_IN_PROGRESS,
OPERATION_CLEANUP,
};
struct operation_log {
size_t capacity; /* capacity of the ulog log */
size_t offset; /* data offset inside of the log */
struct ulog *ulog; /* DRAM allocated log of modifications */
};
/*
* operation_context -- context of an ongoing palloc operation
*/
struct operation_context {
enum log_type type;
ulog_extend_fn extend; /* function to allocate next ulog */
ulog_free_fn ulog_free; /* function to free next ulogs */
const struct pmem_ops *p_ops;
struct pmem_ops t_ops; /* used for transient data processing */
struct pmem_ops s_ops; /* used for shadow copy data processing */
size_t ulog_curr_offset; /* offset in the log for buffer stores */
size_t ulog_curr_capacity; /* capacity of the current log */
size_t ulog_curr_gen_num; /* transaction counter in the current log */
struct ulog *ulog_curr; /* current persistent log */
size_t total_logged; /* total amount of buffer stores in the logs */
struct ulog *ulog; /* pointer to the persistent ulog log */
size_t ulog_base_nbytes; /* available bytes in initial ulog log */
size_t ulog_capacity; /* sum of capacity, incl all next ulog logs */
	int ulog_auto_reserve; /* allow or forbid automatic ulog reservation */
int ulog_any_user_buffer; /* set if any user buffer is added */
struct ulog_next next; /* vector of 'next' fields of persistent ulog */
enum operation_state state; /* operation sanity check */
struct operation_log pshadow_ops; /* shadow copy of persistent ulog */
struct operation_log transient_ops; /* log of transient changes */
/* collection used to look for potential merge candidates */
VECQ(, struct ulog_entry_val *) merge_entries;
};
/*
* operation_log_transient_init -- (internal) initialize operation log
* containing transient memory resident changes
*/
static int
operation_log_transient_init(struct operation_log *log)
{
log->capacity = ULOG_BASE_SIZE;
log->offset = 0;
struct ulog *src = Zalloc(sizeof(struct ulog) +
ULOG_BASE_SIZE);
if (src == NULL) {
ERR("!Zalloc");
return -1;
}
/* initialize underlying redo log structure */
src->capacity = ULOG_BASE_SIZE;
log->ulog = src;
return 0;
}
/*
* operation_log_persistent_init -- (internal) initialize operation log
* containing persistent memory resident changes
*/
static int
operation_log_persistent_init(struct operation_log *log,
size_t ulog_base_nbytes)
{
log->capacity = ULOG_BASE_SIZE;
log->offset = 0;
struct ulog *src = Zalloc(sizeof(struct ulog) +
ULOG_BASE_SIZE);
if (src == NULL) {
ERR("!Zalloc");
return -1;
}
/* initialize underlying redo log structure */
src->capacity = ulog_base_nbytes;
memset(src->unused, 0, sizeof(src->unused));
log->ulog = src;
return 0;
}
/*
* operation_transient_clean -- cleans pmemcheck address state
*/
static int
operation_transient_clean(void *base, const void *addr, size_t len,
unsigned flags)
{
VALGRIND_SET_CLEAN(addr, len);
return 0;
}
/*
* operation_transient_drain -- noop
*/
static void
operation_transient_drain(void *base)
{
}
/*
* operation_transient_memcpy -- transient memcpy wrapper
*/
static void *
operation_transient_memcpy(void *base, void *dest, const void *src, size_t len,
unsigned flags)
{
return memcpy(dest, src, len);
}
/*
* operation_new -- creates new operation context
*/
struct operation_context *
operation_new(struct ulog *ulog, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type)
{
struct operation_context *ctx = Zalloc(sizeof(*ctx));
if (ctx == NULL) {
ERR("!Zalloc");
goto error_ctx_alloc;
}
ctx->ulog = ulog;
ctx->ulog_base_nbytes = ulog_base_nbytes;
ctx->ulog_capacity = ulog_capacity(ulog,
ulog_base_nbytes, p_ops);
ctx->extend = extend;
ctx->ulog_free = ulog_free;
ctx->state = OPERATION_IDLE;
VEC_INIT(&ctx->next);
ulog_rebuild_next_vec(ulog, &ctx->next, p_ops);
ctx->p_ops = p_ops;
ctx->type = type;
ctx->ulog_any_user_buffer = 0;
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = 0;
ctx->ulog_curr = NULL;
ctx->t_ops.base = NULL;
ctx->t_ops.flush = operation_transient_clean;
ctx->t_ops.memcpy = operation_transient_memcpy;
ctx->t_ops.drain = operation_transient_drain;
ctx->s_ops.base = p_ops->base;
ctx->s_ops.flush = operation_transient_clean;
ctx->s_ops.memcpy = operation_transient_memcpy;
ctx->s_ops.drain = operation_transient_drain;
VECQ_INIT(&ctx->merge_entries);
if (operation_log_transient_init(&ctx->transient_ops) != 0)
goto error_ulog_alloc;
if (operation_log_persistent_init(&ctx->pshadow_ops,
ulog_base_nbytes) != 0)
goto error_ulog_alloc;
return ctx;
error_ulog_alloc:
operation_delete(ctx);
error_ctx_alloc:
return NULL;
}
/*
* operation_delete -- deletes operation context
*/
void
operation_delete(struct operation_context *ctx)
{
VECQ_DELETE(&ctx->merge_entries);
VEC_DELETE(&ctx->next);
Free(ctx->pshadow_ops.ulog);
Free(ctx->transient_ops.ulog);
Free(ctx);
}
/*
* operation_user_buffer_remove -- removes range from the tree and returns 0
*/
static int
operation_user_buffer_remove(void *base, void *addr)
{
PMEMobjpool *pop = base;
if (!pop->ulog_user_buffers.verify)
return 0;
util_mutex_lock(&pop->ulog_user_buffers.lock);
struct ravl *ravl = pop->ulog_user_buffers.map;
enum ravl_predicate predict = RAVL_PREDICATE_EQUAL;
struct user_buffer_def range;
range.addr = addr;
range.size = 0;
struct ravl_node *n = ravl_find(ravl, &range, predict);
ASSERTne(n, NULL);
ravl_remove(ravl, n);
util_mutex_unlock(&pop->ulog_user_buffers.lock);
return 0;
}
/*
* operation_free_logs -- free all logs except first
*/
void
operation_free_logs(struct operation_context *ctx, uint64_t flags)
{
int freed = ulog_free_next(ctx->ulog, ctx->p_ops, ctx->ulog_free,
operation_user_buffer_remove, flags);
if (freed) {
ctx->ulog_capacity = ulog_capacity(ctx->ulog,
ctx->ulog_base_nbytes, ctx->p_ops);
VEC_CLEAR(&ctx->next);
ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops);
}
ASSERTeq(VEC_SIZE(&ctx->next), 0);
}
/*
* operation_merge -- (internal) performs operation on a field
*/
static inline void
operation_merge(struct ulog_entry_base *entry, uint64_t value,
ulog_operation_type type)
{
struct ulog_entry_val *e = (struct ulog_entry_val *)entry;
switch (type) {
case ULOG_OPERATION_AND:
e->value &= value;
break;
case ULOG_OPERATION_OR:
e->value |= value;
break;
case ULOG_OPERATION_SET:
e->value = value;
break;
default:
ASSERT(0); /* unreachable */
}
}
/*
* operation_try_merge_entry -- tries to merge the incoming log entry with
* existing entries
*
* Because this requires a reverse foreach, it cannot be implemented using
 * the on-media ulog log structure since there's no way to find
 * the previous entry in the log. Instead, the last N entries are stored
* in a collection and traversed backwards.
*/
static int
operation_try_merge_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type)
{
int ret = 0;
uint64_t offset = OBJ_PTR_TO_OFF(ctx->p_ops->base, ptr);
struct ulog_entry_val *e;
VECQ_FOREACH_REVERSE(e, &ctx->merge_entries) {
if (ulog_entry_offset(&e->base) == offset) {
if (ulog_entry_type(&e->base) == type) {
operation_merge(&e->base, value, type);
return 1;
} else {
break;
}
}
}
return ret;
}
/*
* operation_merge_entry_add -- adds a new entry to the merge collection,
* keeps capacity at OP_MERGE_SEARCH. Removes old entries in FIFO fashion.
*/
static void
operation_merge_entry_add(struct operation_context *ctx,
struct ulog_entry_val *entry)
{
if (VECQ_SIZE(&ctx->merge_entries) == OP_MERGE_SEARCH)
(void) VECQ_DEQUEUE(&ctx->merge_entries);
if (VECQ_ENQUEUE(&ctx->merge_entries, entry) != 0) {
/* this is fine, only runtime perf will get slower */
LOG(2, "out of memory - unable to track entries");
}
}
/*
 * operation_add_typed_entry -- adds a new entry to the current operation; if the
* same ptr address already exists and the operation type is set,
* the new value is not added and the function has no effect.
*/
int
operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type)
{
struct operation_log *oplog = log_type == LOG_PERSISTENT ?
&ctx->pshadow_ops : &ctx->transient_ops;
/*
* Always make sure to have one extra spare cacheline so that the
* ulog log entry creation has enough room for zeroing.
*/
if (oplog->offset + CACHELINE_SIZE == oplog->capacity) {
size_t ncapacity = oplog->capacity + ULOG_BASE_SIZE;
struct ulog *ulog = Realloc(oplog->ulog,
SIZEOF_ULOG(ncapacity));
if (ulog == NULL)
return -1;
oplog->capacity += ULOG_BASE_SIZE;
oplog->ulog = ulog;
oplog->ulog->capacity = oplog->capacity;
/*
* Realloc invalidated the ulog entries that are inside of this
* vector, need to clear it to avoid use after free.
*/
VECQ_CLEAR(&ctx->merge_entries);
}
if (log_type == LOG_PERSISTENT &&
operation_try_merge_entry(ctx, ptr, value, type) != 0)
return 0;
struct ulog_entry_val *entry = ulog_entry_val_create(
oplog->ulog, oplog->offset, ptr, value, type,
log_type == LOG_TRANSIENT ? &ctx->t_ops : &ctx->s_ops);
if (log_type == LOG_PERSISTENT)
operation_merge_entry_add(ctx, entry);
oplog->offset += ulog_entry_size(&entry->base);
return 0;
}
/*
 * operation_add_entry -- adds a new entry to the current operation with
* entry type autodetected based on the memory location
*/
int
operation_add_entry(struct operation_context *ctx, void *ptr, uint64_t value,
ulog_operation_type type)
{
const struct pmem_ops *p_ops = ctx->p_ops;
PMEMobjpool *pop = (PMEMobjpool *)p_ops->base;
int from_pool = OBJ_OFF_IS_VALID(pop,
(uintptr_t)ptr - (uintptr_t)p_ops->base);
return operation_add_typed_entry(ctx, ptr, value, type,
from_pool ? LOG_PERSISTENT : LOG_TRANSIENT);
}
/*
* operation_add_buffer -- adds a buffer operation to the log
*/
int
operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type)
{
size_t real_size = size + sizeof(struct ulog_entry_buf);
/* if there's no space left in the log, reserve some more */
if (ctx->ulog_curr_capacity == 0) {
ctx->ulog_curr_gen_num = ctx->ulog->gen_num;
if (operation_reserve(ctx, ctx->total_logged + real_size) != 0)
return -1;
ctx->ulog_curr = ctx->ulog_curr == NULL ? ctx->ulog :
ulog_next(ctx->ulog_curr, ctx->p_ops);
ASSERTne(ctx->ulog_curr, NULL);
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = ctx->ulog_curr->capacity;
}
size_t curr_size = MIN(real_size, ctx->ulog_curr_capacity);
size_t data_size = curr_size - sizeof(struct ulog_entry_buf);
size_t entry_size = ALIGN_UP(curr_size, CACHELINE_SIZE);
/*
	 * To make sure that the log is consistent and contiguous, we need to
* make sure that the header of the entry that would be located
* immediately after this one is zeroed.
*/
struct ulog_entry_base *next_entry = NULL;
if (entry_size == ctx->ulog_curr_capacity) {
struct ulog *u = ulog_next(ctx->ulog_curr, ctx->p_ops);
if (u != NULL)
next_entry = (struct ulog_entry_base *)u->data;
} else {
size_t next_entry_offset = ctx->ulog_curr_offset + entry_size;
next_entry = (struct ulog_entry_base *)(ctx->ulog_curr->data +
next_entry_offset);
}
#ifdef USE_NDP_CLOBBER
int clear_next_header = 0;
if (next_entry != NULL){
clear_next_header = 1;
}
#else
if (next_entry != NULL){
ulog_clobber_entry(next_entry, ctx->p_ops);
}
#endif
#ifdef GET_NDP_BREAKDOWN
uint64_t startCycles = getCycle();
#endif
//ulogcount++;
#ifdef USE_NDP_CLOBBER
ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops,
clear_next_header);
#else
ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops);
#endif
#ifdef GET_NDP_BREAKDOWN
uint64_t endCycles = getCycle();
ulogCycles += endCycles - startCycles;
#endif
/* create a persistent log entry */
/* struct ulog_entry_buf *e = ulog_entry_buf_create(ctx->ulog_curr,
ctx->ulog_curr_offset,
ctx->ulog_curr_gen_num,
dest, src, data_size,
type, ctx->p_ops);
*/
// ASSERT(entry_size == ulog_entry_size(&e->base));
// ASSERT(entry_size <= ctx->ulog_curr_capacity);
ctx->total_logged += entry_size;
ctx->ulog_curr_offset += entry_size;
ctx->ulog_curr_capacity -= entry_size;
/*
* Recursively add the data to the log until the entire buffer is
* processed.
*/
return size - data_size == 0 ? 0 : operation_add_buffer(ctx,
(char *)dest + data_size,
(char *)src + data_size,
size - data_size, type);
}
/*
* operation_user_buffer_range_cmp -- compares addresses of
* user buffers
*/
int
operation_user_buffer_range_cmp(const void *lhs, const void *rhs)
{
const struct user_buffer_def *l = lhs;
const struct user_buffer_def *r = rhs;
if (l->addr > r->addr)
return 1;
else if (l->addr < r->addr)
return -1;
return 0;
}
/*
* operation_user_buffer_try_insert -- adds a user buffer range to the tree,
* if the buffer already exists in the tree function returns -1, otherwise
* it returns 0
*/
static int
operation_user_buffer_try_insert(PMEMobjpool *pop,
struct user_buffer_def *userbuf)
{
int ret = 0;
if (!pop->ulog_user_buffers.verify)
return ret;
util_mutex_lock(&pop->ulog_user_buffers.lock);
void *addr_end = (char *)userbuf->addr + userbuf->size;
struct user_buffer_def search;
search.addr = addr_end;
struct ravl_node *n = ravl_find(pop->ulog_user_buffers.map,
&search, RAVL_PREDICATE_LESS_EQUAL);
if (n != NULL) {
struct user_buffer_def *r = ravl_data(n);
void *r_end = (char *)r->addr + r->size;
if (r_end > userbuf->addr && r->addr < addr_end) {
/* what was found overlaps with what is being added */
ret = -1;
goto out;
}
}
if (ravl_emplace_copy(pop->ulog_user_buffers.map, userbuf) == -1) {
ASSERTne(errno, EEXIST);
ret = -1;
}
out:
util_mutex_unlock(&pop->ulog_user_buffers.lock);
return ret;
}
/*
* operation_user_buffer_verify_align -- verify if the provided buffer can be
* used as a transaction log, and if so - perform necessary alignments
*/
int
operation_user_buffer_verify_align(struct operation_context *ctx,
struct user_buffer_def *userbuf)
{
/*
* Address of the buffer has to be aligned up, and the size
* has to be aligned down, taking into account the number of bytes
* the address was incremented by. The remaining size has to be large
* enough to contain the header and at least one ulog entry.
*/
uint64_t buffer_offset = OBJ_PTR_TO_OFF(ctx->p_ops->base,
userbuf->addr);
ptrdiff_t size_diff = (intptr_t)ulog_by_offset(buffer_offset,
ctx->p_ops) - (intptr_t)userbuf->addr;
ssize_t capacity_unaligned = (ssize_t)userbuf->size - size_diff
- (ssize_t)sizeof(struct ulog);
if (capacity_unaligned < (ssize_t)CACHELINE_SIZE) {
ERR("Capacity insufficient");
return -1;
}
size_t capacity_aligned = ALIGN_DOWN((size_t)capacity_unaligned,
CACHELINE_SIZE);
userbuf->addr = ulog_by_offset(buffer_offset, ctx->p_ops);
userbuf->size = capacity_aligned + sizeof(struct ulog);
if (operation_user_buffer_try_insert(ctx->p_ops->base, userbuf)) {
ERR("Buffer currently used");
return -1;
}
return 0;
}
/*
* operation_add_user_buffer -- add user buffer to the ulog
*/
void
operation_add_user_buffer(struct operation_context *ctx,
struct user_buffer_def *userbuf)
{
uint64_t buffer_offset = OBJ_PTR_TO_OFF(ctx->p_ops->base,
userbuf->addr);
size_t capacity = userbuf->size - sizeof(struct ulog);
ulog_construct(buffer_offset, capacity, ctx->ulog->gen_num,
1, ULOG_USER_OWNED, ctx->p_ops);
struct ulog *last_log;
/* if there is only one log */
if (!VEC_SIZE(&ctx->next))
last_log = ctx->ulog;
else /* get last element from vector */
last_log = ulog_by_offset(VEC_BACK(&ctx->next), ctx->p_ops);
ASSERTne(last_log, NULL);
size_t next_size = sizeof(last_log->next);
VALGRIND_ADD_TO_TX(&last_log->next, next_size);
last_log->next = buffer_offset;
pmemops_persist(ctx->p_ops, &last_log->next, next_size);
VEC_PUSH_BACK(&ctx->next, buffer_offset);
ctx->ulog_capacity += capacity;
operation_set_any_user_buffer(ctx, 1);
}
/*
* operation_set_auto_reserve -- set auto reserve value for context
*/
void
operation_set_auto_reserve(struct operation_context *ctx, int auto_reserve)
{
ctx->ulog_auto_reserve = auto_reserve;
}
/*
* operation_set_any_user_buffer -- set ulog_any_user_buffer value for context
*/
void
operation_set_any_user_buffer(struct operation_context *ctx,
int any_user_buffer)
{
ctx->ulog_any_user_buffer = any_user_buffer;
}
/*
* operation_get_any_user_buffer -- get ulog_any_user_buffer value from context
*/
int
operation_get_any_user_buffer(struct operation_context *ctx)
{
return ctx->ulog_any_user_buffer;
}
/*
* operation_process_persistent_redo -- (internal) process using ulog
*/
static void
operation_process_persistent_redo(struct operation_context *ctx)
{
ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);
ulog_store(ctx->ulog, ctx->pshadow_ops.ulog,
ctx->pshadow_ops.offset, ctx->ulog_base_nbytes,
ctx->ulog_capacity,
&ctx->next, ctx->p_ops);
#ifdef USE_NDP_REDO
if(!use_ndp_redo){
#endif
ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
ctx->p_ops);
//ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX,
// ctx->p_ops);
#ifdef USE_NDP_REDO
}
else {
//ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
// ctx->p_ops);
//while(1){}
ulog_process_ndp(ctx->ulog, ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX,
ctx->p_ops);
//while(1){}
}
#endif
// while(((*((uint32_t*)(ctx->p_ops->device)+254)) & 2) != 2){
//asm volatile ("clflush (%0)" :: "r"((uint32_t*)(tx->pop->p_ops.device)+254));
//printf("waiting %x %x\n",*((uint32_t*)(tx->pop->p_ops.device)+11),*((uint32_t*)(tx->pop->p_ops.device)+254));
//printf("waiting!!\n");
// }
ulog_clobber(ctx->ulog, &ctx->next, ctx->p_ops);
}
/*
* operation_process_persistent_undo -- (internal) process using ulog
*/
static void
operation_process_persistent_undo(struct operation_context *ctx)
{
ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0);
ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX, ctx->p_ops);
}
/*
* operation_reserve -- (internal) reserves new capacity in persistent ulog log
*/
int
operation_reserve(struct operation_context *ctx, size_t new_capacity)
{
if (new_capacity > ctx->ulog_capacity) {
if (ctx->extend == NULL) {
ERR("no extend function present");
return -1;
}
if (ulog_reserve(ctx->ulog,
ctx->ulog_base_nbytes,
ctx->ulog_curr_gen_num,
ctx->ulog_auto_reserve,
&new_capacity, ctx->extend,
&ctx->next, ctx->p_ops) != 0)
return -1;
ctx->ulog_capacity = new_capacity;
}
return 0;
}
/*
* operation_init -- initializes runtime state of an operation
*/
void
operation_init(struct operation_context *ctx)
{
struct operation_log *plog = &ctx->pshadow_ops;
struct operation_log *tlog = &ctx->transient_ops;
VALGRIND_ANNOTATE_NEW_MEMORY(ctx, sizeof(*ctx));
VALGRIND_ANNOTATE_NEW_MEMORY(tlog->ulog, sizeof(struct ulog) +
tlog->capacity);
VALGRIND_ANNOTATE_NEW_MEMORY(plog->ulog, sizeof(struct ulog) +
plog->capacity);
tlog->offset = 0;
plog->offset = 0;
VECQ_REINIT(&ctx->merge_entries);
ctx->ulog_curr_offset = 0;
ctx->ulog_curr_capacity = 0;
ctx->ulog_curr_gen_num = 0;
ctx->ulog_curr = NULL;
ctx->total_logged = 0;
ctx->ulog_auto_reserve = 1;
ctx->ulog_any_user_buffer = 0;
}
/*
* operation_start -- initializes and starts a new operation
*/
void
operation_start(struct operation_context *ctx)
{
operation_init(ctx);
ASSERTeq(ctx->state, OPERATION_IDLE);
ctx->state = OPERATION_IN_PROGRESS;
}
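/*
 * operation_resume -- resumes an operation on an already-populated ulog,
 * counting the bytes already present in the base log as logged
 */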
void
operation_resume(struct operation_context *ctx)
{
operation_start(ctx);
ctx->total_logged = ulog_base_nbytes(ctx->ulog);
}
/*
* operation_cancel -- cancels a running operation
*/
void
operation_cancel(struct operation_context *ctx)
{
ASSERTeq(ctx->state, OPERATION_IN_PROGRESS);
ctx->state = OPERATION_IDLE;
}
/*
* operation_process -- processes registered operations
*
* The order of processing is important: persistent, transient.
* This is because the transient entries that reside on persistent memory might
 * require a write to a location that is currently occupied by a valid
 * persistent state but becomes a transient state after the operation is
 * processed.
*/
void
operation_process(struct operation_context *ctx)
{
/*
* If there's exactly one persistent entry there's no need to involve
	 * the redo log. We can simply assign the value; the operation will be
* atomic.
*/
int redo_process = ctx->type == LOG_TYPE_REDO &&
ctx->pshadow_ops.offset != 0;
if (redo_process &&
ctx->pshadow_ops.offset == sizeof(struct ulog_entry_val)) {
struct ulog_entry_base *e = (struct ulog_entry_base *)
ctx->pshadow_ops.ulog->data;
ulog_operation_type t = ulog_entry_type(e);
if (t == ULOG_OPERATION_SET || t == ULOG_OPERATION_AND ||
t == ULOG_OPERATION_OR) {
			ulog_entry_apply(e, 1, ctx->p_ops); // could not be effective in NDP
redo_process = 0;
}
}
if (redo_process) {
operation_process_persistent_redo(ctx); //ndp
ctx->state = OPERATION_CLEANUP;
} else if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0) {
operation_process_persistent_undo(ctx);
ctx->state = OPERATION_CLEANUP;
}
/* process transient entries with transient memory ops */
if (ctx->transient_ops.offset != 0)
ulog_process(ctx->transient_ops.ulog, NULL, &ctx->t_ops); //where is this used?
}
/*
* operation_finish -- finalizes the operation
*/
void
operation_finish(struct operation_context *ctx, unsigned flags)
{
ASSERTne(ctx->state, OPERATION_IDLE);
if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0)
ctx->state = OPERATION_CLEANUP;
if (ctx->ulog_any_user_buffer) {
flags |= ULOG_ANY_USER_BUFFER;
ctx->state = OPERATION_CLEANUP;
}
if (ctx->state != OPERATION_CLEANUP)
goto out;
if (ctx->type == LOG_TYPE_UNDO) {
int ret = ulog_clobber_data(ctx->ulog,
ctx->total_logged, ctx->ulog_base_nbytes,
&ctx->next, ctx->ulog_free,
operation_user_buffer_remove,
ctx->p_ops, flags);
if (ret == 0)
goto out;
} else if (ctx->type == LOG_TYPE_REDO) {
int ret = ulog_free_next(ctx->ulog, ctx->p_ops,
ctx->ulog_free, operation_user_buffer_remove,
flags);
if (ret == 0)
goto out;
}
/* clobbering shrunk the ulog */
ctx->ulog_capacity = ulog_capacity(ctx->ulog,
ctx->ulog_base_nbytes, ctx->p_ops);
VEC_CLEAR(&ctx->next);
ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops);
out:
ctx->state = OPERATION_IDLE;
}
| 24,116 | 25.589857 | 113 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/stats.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* stats.c -- implementation of statistics
*/
#include "obj.h"
#include "stats.h"
STATS_CTL_HANDLER(persistent, curr_allocated, heap_curr_allocated);
STATS_CTL_HANDLER(transient, run_allocated, heap_run_allocated);
STATS_CTL_HANDLER(transient, run_active, heap_run_active);
static const struct ctl_node CTL_NODE(heap)[] = {
STATS_CTL_LEAF(persistent, curr_allocated),
STATS_CTL_LEAF(transient, run_allocated),
STATS_CTL_LEAF(transient, run_active),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(enabled) -- returns whether or not statistics are enabled
*/
static int
CTL_READ_HANDLER(enabled)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
enum pobj_stats_enabled *arg_out = arg;
*arg_out = pop->stats->enabled;
return 0;
}
/*
* stats_enabled_parser -- parses the stats enabled type
*/
static int
stats_enabled_parser(const void *arg, void *dest, size_t dest_size)
{
const char *vstr = arg;
enum pobj_stats_enabled *enabled = dest;
ASSERTeq(dest_size, sizeof(enum pobj_stats_enabled));
int bool_out;
if (ctl_arg_boolean(arg, &bool_out, sizeof(bool_out)) == 0) {
*enabled = bool_out ?
POBJ_STATS_ENABLED_BOTH : POBJ_STATS_DISABLED;
return 0;
}
if (strcmp(vstr, "disabled") == 0) {
*enabled = POBJ_STATS_DISABLED;
} else if (strcmp(vstr, "both") == 0) {
*enabled = POBJ_STATS_ENABLED_BOTH;
} else if (strcmp(vstr, "persistent") == 0) {
*enabled = POBJ_STATS_ENABLED_PERSISTENT;
} else if (strcmp(vstr, "transient") == 0) {
*enabled = POBJ_STATS_ENABLED_TRANSIENT;
} else {
ERR("invalid enable type");
errno = EINVAL;
return -1;
}
return 0;
}
/*
* CTL_WRITE_HANDLER(enabled) -- enables or disables statistics counting
*/
static int
CTL_WRITE_HANDLER(enabled)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
pop->stats->enabled = *(enum pobj_stats_enabled *)arg;
return 0;
}
static const struct ctl_argument CTL_ARG(enabled) = {
.dest_size = sizeof(enum pobj_stats_enabled),
.parsers = {
CTL_ARG_PARSER(sizeof(enum pobj_stats_enabled),
stats_enabled_parser),
CTL_ARG_PARSER_END
}
};
static const struct ctl_node CTL_NODE(stats)[] = {
CTL_CHILD(heap),
CTL_LEAF_RW(enabled),
CTL_NODE_END
};
/*
* stats_new -- allocates and initializes statistics instance
*/
struct stats *
stats_new(PMEMobjpool *pop)
{
struct stats *s = Malloc(sizeof(*s));
if (s == NULL) {
ERR("!Malloc");
return NULL;
}
s->enabled = POBJ_STATS_ENABLED_TRANSIENT;
s->persistent = &pop->stats_persistent;
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(s->persistent, sizeof(*s->persistent));
s->transient = Zalloc(sizeof(struct stats_transient));
if (s->transient == NULL)
goto error_transient_alloc;
return s;
error_transient_alloc:
Free(s);
return NULL;
}
/*
* stats_delete -- deletes statistics instance
*/
void
stats_delete(PMEMobjpool *pop, struct stats *s)
{
pmemops_persist(&pop->p_ops, s->persistent,
sizeof(struct stats_persistent));
Free(s->transient);
Free(s);
}
/*
* stats_ctl_register -- registers ctl nodes for statistics
*/
void
stats_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, stats);
}
| 3,293 | 20.671053 | 77 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/heap.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* heap.h -- internal definitions for heap
*/
#ifndef LIBPMEMOBJ_HEAP_H
#define LIBPMEMOBJ_HEAP_H 1
#include <stddef.h>
#include <stdint.h>
#include "bucket.h"
#include "memblock.h"
#include "memops.h"
#include "palloc.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_OFF_TO_PTR(heap, off) ((void *)((char *)((heap)->base) + (off)))
#define HEAP_PTR_TO_OFF(heap, ptr)\
((uintptr_t)(ptr) - (uintptr_t)((heap)->base))
#define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i))))
#define HEAP_ARENA_PER_THREAD (0)
int heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void heap_cleanup(struct palloc_heap *heap);
int heap_check(void *heap_start, uint64_t heap_size);
int heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
int heap_buckets_init(struct palloc_heap *heap);
int heap_create_alloc_class_buckets(struct palloc_heap *heap,
struct alloc_class *c);
int heap_extend(struct palloc_heap *heap, struct bucket *defb, size_t size);
struct alloc_class *
heap_get_best_class(struct palloc_heap *heap, size_t size);
struct bucket *
heap_bucket_acquire(struct palloc_heap *heap, uint8_t class_id,
uint16_t arena_id);
void
heap_bucket_release(struct palloc_heap *heap, struct bucket *b);
int heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m);
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m);
os_mutex_t *heap_get_run_lock(struct palloc_heap *heap,
uint32_t chunk_id);
void
heap_force_recycle(struct palloc_heap *heap);
void
heap_discard_run(struct palloc_heap *heap, struct memory_block *m);
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m);
int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket, struct memory_block *m);
void heap_foreach_object(struct palloc_heap *heap, object_callback cb,
void *arg, struct memory_block start);
struct alloc_class_collection *heap_alloc_classes(struct palloc_heap *heap);
void *heap_end(struct palloc_heap *heap);
unsigned heap_get_narenas_total(struct palloc_heap *heap);
unsigned heap_get_narenas_max(struct palloc_heap *heap);
int heap_set_narenas_max(struct palloc_heap *heap, unsigned size);
unsigned heap_get_narenas_auto(struct palloc_heap *heap);
unsigned heap_get_thread_arena_id(struct palloc_heap *heap);
int heap_arena_create(struct palloc_heap *heap);
struct bucket **
heap_get_arena_buckets(struct palloc_heap *heap, unsigned arena_id);
int heap_get_arena_auto(struct palloc_heap *heap, unsigned arena_id);
int heap_set_arena_auto(struct palloc_heap *heap, unsigned arena_id,
int automatic);
void heap_set_arena_thread(struct palloc_heap *heap, unsigned arena_id);
void heap_vg_open(struct palloc_heap *heap, object_callback cb,
void *arg, int objects);
static inline struct chunk_header *
heap_get_chunk_hdr(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_HDR(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk *
heap_get_chunk(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk_run *
heap_get_chunk_run(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_RUN(heap->layout, m->zone_id, m->chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
| 3,719 | 26.969925 | 78 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/list.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* list.c -- implementation of persistent atomic lists module
*/
#include <inttypes.h>
#include "list.h"
#include "obj.h"
#include "os_thread.h"
#include "out.h"
#include "sync.h"
#include "valgrind_internal.h"
#include "memops.h"
#define PREV_OFF (offsetof(struct list_entry, pe_prev) + offsetof(PMEMoid, off))
#define NEXT_OFF (offsetof(struct list_entry, pe_next) + offsetof(PMEMoid, off))
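/*
 * Example (illustrative): for an element at offset obj_doffset whose
 * list_entry lives pe_offset bytes into the user data, the pool-relative
 * location of the next-pointer's offset field is:
 *
 *	obj_doffset + pe_offset + NEXT_OFF
 *
 * which is exactly how list_fill_entry_redo_log() computes its redo-log
 * targets.
 */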
/*
* list_args_common -- common arguments for operations on list
*
* pe_offset - offset to list entry relative to user data
* obj_doffset - offset to element's data relative to pmemobj pool
* entry_ptr - list entry structure of element
*/
struct list_args_common {
ssize_t pe_offset;
uint64_t obj_doffset;
struct list_entry *entry_ptr;
};
/*
* list_args_insert -- arguments for inserting element to list
*
* head - list head
* dest - destination element OID
* dest_entry_ptr - list entry of destination element
* before - insert before or after destination element
*/
struct list_args_insert {
struct list_head *head;
PMEMoid dest;
struct list_entry *dest_entry_ptr;
int before;
};
/*
* list_args_reinsert -- arguments for reinserting element on list
*
* head - list head
* entry_ptr - list entry of old element
* obj_doffset - offset to element's data relative to pmemobj pool
*/
struct list_args_reinsert {
struct list_head *head;
struct list_entry *entry_ptr;
uint64_t obj_doffset;
};
/*
* list_args_remove -- arguments for removing element from list
*
* pe_offset - offset to list entry relative to user data
* obj_doffset - offset to element's data relative to pmemobj pool
* head - list head
* entry_ptr - list entry structure of element
*/
struct list_args_remove {
ssize_t pe_offset;
uint64_t obj_doffset;
struct list_head *head;
struct list_entry *entry_ptr;
};
/*
* list_mutexes_lock -- (internal) grab one or two locks in ascending
* address order
*/
static inline int
list_mutexes_lock(PMEMobjpool *pop,
struct list_head *head1, struct list_head *head2)
{
ASSERTne(head1, NULL);
if (!head2 || head1 == head2)
return pmemobj_mutex_lock(pop, &head1->lock);
PMEMmutex *lock1;
PMEMmutex *lock2;
if ((uintptr_t)&head1->lock < (uintptr_t)&head2->lock) {
lock1 = &head1->lock;
lock2 = &head2->lock;
} else {
lock1 = &head2->lock;
lock2 = &head1->lock;
}
int ret;
if ((ret = pmemobj_mutex_lock(pop, lock1)))
goto err;
if ((ret = pmemobj_mutex_lock(pop, lock2)))
goto err_unlock;
return 0;
err_unlock:
pmemobj_mutex_unlock(pop, lock1);
err:
return ret;
}
/*
* list_mutexes_unlock -- (internal) release one or two locks
*/
static inline void
list_mutexes_unlock(PMEMobjpool *pop,
struct list_head *head1, struct list_head *head2)
{
ASSERTne(head1, NULL);
if (!head2 || head1 == head2) {
pmemobj_mutex_unlock_nofail(pop, &head1->lock);
return;
}
pmemobj_mutex_unlock_nofail(pop, &head1->lock);
pmemobj_mutex_unlock_nofail(pop, &head2->lock);
}
/*
* list_get_dest -- (internal) return destination object ID
*
* If the input dest is not OID_NULL returns dest.
* If the input dest is OID_NULL and before is set returns first element.
 * If the input dest is OID_NULL and before is not set returns the last
 * element.
*/
static inline PMEMoid
list_get_dest(PMEMobjpool *pop, struct list_head *head, PMEMoid dest,
ssize_t pe_offset, int before)
{
if (dest.off)
return dest;
if (head->pe_first.off == 0 || !!before == POBJ_LIST_DEST_HEAD)
return head->pe_first;
struct list_entry *first_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)head->pe_first.off + pe_offset));
return first_ptr->pe_prev;
}
/*
* list_set_oid_redo_log -- (internal) set PMEMoid value using redo log
*/
static size_t
list_set_oid_redo_log(PMEMobjpool *pop,
struct operation_context *ctx,
PMEMoid *oidp, uint64_t obj_doffset, int oidp_inited)
{
ASSERT(OBJ_PTR_IS_VALID(pop, oidp));
if (!oidp_inited || oidp->pool_uuid_lo != pop->uuid_lo) {
if (oidp_inited)
ASSERTeq(oidp->pool_uuid_lo, 0);
operation_add_entry(ctx, &oidp->pool_uuid_lo, pop->uuid_lo,
ULOG_OPERATION_SET);
}
operation_add_entry(ctx, &oidp->off, obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_update_head -- (internal) update pe_first entry in list head
*/
static size_t
list_update_head(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_head *head, uint64_t first_offset)
{
LOG(15, NULL);
operation_add_entry(ctx, &head->pe_first.off, first_offset,
ULOG_OPERATION_SET);
if (head->pe_first.pool_uuid_lo == 0) {
operation_add_entry(ctx, &head->pe_first.pool_uuid_lo,
pop->uuid_lo, ULOG_OPERATION_SET);
}
return 0;
}
/*
* u64_add_offset -- (internal) add signed offset to unsigned integer and check
* for overflows
*/
static void
u64_add_offset(uint64_t *value, ssize_t off)
{
uint64_t prev = *value;
if (off >= 0) {
*value += (size_t)off;
ASSERT(*value >= prev); /* detect overflow */
} else {
*value -= (size_t)-off;
ASSERT(*value < prev);
}
}
/*
* list_fill_entry_persist -- (internal) fill new entry using persist function
*
* Used for newly allocated objects.
*/
static void
list_fill_entry_persist(PMEMobjpool *pop, struct list_entry *entry_ptr,
uint64_t next_offset, uint64_t prev_offset)
{
LOG(15, NULL);
VALGRIND_ADD_TO_TX(entry_ptr, sizeof(*entry_ptr));
entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
entry_ptr->pe_next.off = next_offset;
entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
entry_ptr->pe_prev.off = prev_offset;
VALGRIND_REMOVE_FROM_TX(entry_ptr, sizeof(*entry_ptr));
pmemops_persist(&pop->p_ops, entry_ptr, sizeof(*entry_ptr));
}
/*
* list_fill_entry_redo_log -- (internal) fill new entry using redo log
*
* Used to update entry in existing object.
*/
static size_t
list_fill_entry_redo_log(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_common *args,
uint64_t next_offset, uint64_t prev_offset, int set_uuid)
{
LOG(15, NULL);
struct pmem_ops *ops = &pop->p_ops;
ASSERTne(args->entry_ptr, NULL);
ASSERTne(args->obj_doffset, 0);
if (set_uuid) {
VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_next.pool_uuid_lo),
sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_prev.pool_uuid_lo),
sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
/* don't need to fill pool uuid using redo log */
args->entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo;
args->entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo;
VALGRIND_REMOVE_FROM_TX(
&(args->entry_ptr->pe_next.pool_uuid_lo),
sizeof(args->entry_ptr->pe_next.pool_uuid_lo));
VALGRIND_REMOVE_FROM_TX(
&(args->entry_ptr->pe_prev.pool_uuid_lo),
sizeof(args->entry_ptr->pe_prev.pool_uuid_lo));
pmemops_persist(ops, args->entry_ptr, sizeof(*args->entry_ptr));
} else {
ASSERTeq(args->entry_ptr->pe_next.pool_uuid_lo, pop->uuid_lo);
ASSERTeq(args->entry_ptr->pe_prev.pool_uuid_lo, pop->uuid_lo);
}
/* set current->next and current->prev using redo log */
uint64_t next_off_off = args->obj_doffset + NEXT_OFF;
uint64_t prev_off_off = args->obj_doffset + PREV_OFF;
u64_add_offset(&next_off_off, args->pe_offset);
u64_add_offset(&prev_off_off, args->pe_offset);
void *next_ptr = (char *)pop + next_off_off;
void *prev_ptr = (char *)pop + prev_off_off;
operation_add_entry(ctx, next_ptr, next_offset, ULOG_OPERATION_SET);
operation_add_entry(ctx, prev_ptr, prev_offset, ULOG_OPERATION_SET);
return 0;
}
/*
* list_remove_single -- (internal) remove element from single list
*/
static size_t
list_remove_single(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_remove *args)
{
LOG(15, NULL);
if (args->entry_ptr->pe_next.off == args->obj_doffset) {
/* only one element on list */
ASSERTeq(args->head->pe_first.off, args->obj_doffset);
ASSERTeq(args->entry_ptr->pe_prev.off, args->obj_doffset);
return list_update_head(pop, ctx, args->head, 0);
} else {
/* set next->prev = prev and prev->next = next */
uint64_t next_off = args->entry_ptr->pe_next.off;
uint64_t next_prev_off = next_off + PREV_OFF;
u64_add_offset(&next_prev_off, args->pe_offset);
uint64_t prev_off = args->entry_ptr->pe_prev.off;
uint64_t prev_next_off = prev_off + NEXT_OFF;
u64_add_offset(&prev_next_off, args->pe_offset);
void *prev_ptr = (char *)pop + next_prev_off;
void *next_ptr = (char *)pop + prev_next_off;
operation_add_entry(ctx, prev_ptr, prev_off,
ULOG_OPERATION_SET);
operation_add_entry(ctx, next_ptr, next_off,
ULOG_OPERATION_SET);
if (args->head->pe_first.off == args->obj_doffset) {
/* removing element is the first one */
return list_update_head(pop, ctx,
args->head, next_off);
} else {
return 0;
}
}
}
/*
* list_insert_before -- (internal) insert element at offset before an element
*/
static size_t
list_insert_before(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
/* current->next = dest and current->prev = dest->prev */
*next_offset = args->dest.off;
*prev_offset = args->dest_entry_ptr->pe_prev.off;
/* dest->prev = current and dest->prev->next = current */
uint64_t dest_prev_off = args->dest.off + PREV_OFF;
u64_add_offset(&dest_prev_off, args_common->pe_offset);
uint64_t dest_prev_next_off = args->dest_entry_ptr->pe_prev.off +
NEXT_OFF;
u64_add_offset(&dest_prev_next_off, args_common->pe_offset);
void *dest_prev_ptr = (char *)pop + dest_prev_off;
void *dest_prev_next_ptr = (char *)pop + dest_prev_next_off;
operation_add_entry(ctx, dest_prev_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
operation_add_entry(ctx, dest_prev_next_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_insert_after -- (internal) insert element at offset after an element
*/
static size_t
list_insert_after(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
/* current->next = dest->next and current->prev = dest */
*next_offset = args->dest_entry_ptr->pe_next.off;
*prev_offset = args->dest.off;
/* dest->next = current and dest->next->prev = current */
uint64_t dest_next_off = args->dest.off + NEXT_OFF;
u64_add_offset(&dest_next_off, args_common->pe_offset);
uint64_t dest_next_prev_off = args->dest_entry_ptr->pe_next.off +
PREV_OFF;
u64_add_offset(&dest_next_prev_off, args_common->pe_offset);
void *dest_next_ptr = (char *)pop + dest_next_off;
void *dest_next_prev_ptr = (char *)pop + dest_next_prev_off;
operation_add_entry(ctx, dest_next_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
operation_add_entry(ctx, dest_next_prev_ptr, args_common->obj_doffset,
ULOG_OPERATION_SET);
return 0;
}
/*
* list_insert_user -- (internal) insert element at offset to a user list
*/
static size_t
list_insert_user(PMEMobjpool *pop,
struct operation_context *ctx,
struct list_args_insert *args, struct list_args_common *args_common,
uint64_t *next_offset, uint64_t *prev_offset)
{
LOG(15, NULL);
if (args->dest.off == 0) {
/* inserting the first element on list */
ASSERTeq(args->head->pe_first.off, 0);
/* set loop on current element */
*next_offset = args_common->obj_doffset;
*prev_offset = args_common->obj_doffset;
/* update head */
list_update_head(pop, ctx, args->head,
args_common->obj_doffset);
} else {
if (args->before) {
/* inserting before dest */
list_insert_before(pop, ctx, args, args_common,
next_offset, prev_offset);
if (args->dest.off == args->head->pe_first.off) {
/* current element at first position */
list_update_head(pop, ctx, args->head,
args_common->obj_doffset);
}
} else {
/* inserting after dest */
list_insert_after(pop, ctx, args, args_common,
next_offset, prev_offset);
}
}
return 0;
}
/*
 * list_insert_new -- (internal) allocate and insert an element into the list
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head, must be locked
 * dest - destination on user list
 * before - insert before/after destination on user list
 * size - size of allocation
 * type_num - type number of the new object
 * constructor - object's constructor
 * arg - argument for object's constructor
 * oidp - pointer to target object ID
*/
static int
list_insert_new(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr,
size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
LOG(3, NULL);
ASSERT(user_head != NULL);
int ret;
#ifdef DEBUG
int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
ASSERTeq(r, 0);
#endif
struct lane *lane;
lane_hold(pop, &lane);
struct pobj_action reserved;
if (palloc_reserve(&pop->heap, size, constructor, arg,
type_num, 0, 0, 0, &reserved) != 0) {
ERR("!palloc_reserve");
ret = -1;
goto err_pmalloc;
}
uint64_t obj_doffset = reserved.heap.offset;
struct operation_context *ctx = lane->external;
operation_start(ctx);
ASSERT((ssize_t)pe_offset >= 0);
dest = list_get_dest(pop, user_head, dest,
(ssize_t)pe_offset, before);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
obj_doffset + pe_offset);
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
dest.off + pe_offset);
struct list_args_insert args = {
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.head = user_head,
.before = before,
};
struct list_args_common args_common = {
.obj_doffset = obj_doffset,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
uint64_t next_offset;
uint64_t prev_offset;
/* insert element to user list */
list_insert_user(pop,
ctx, &args, &args_common,
&next_offset, &prev_offset);
/* don't need to use redo log for filling new element */
list_fill_entry_persist(pop, entry_ptr,
next_offset, prev_offset);
if (oidp != NULL) {
if (OBJ_PTR_IS_VALID(pop, oidp)) {
list_set_oid_redo_log(pop, ctx,
oidp, obj_doffset, 0);
} else {
oidp->off = obj_doffset;
oidp->pool_uuid_lo = pop->uuid_lo;
}
}
palloc_publish(&pop->heap, &reserved, 1, ctx);
ret = 0;
err_pmalloc:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
 * list_insert_new_user -- allocate and insert an element into the list
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head
 * dest - destination on user list
 * before - insert before/after destination on user list
 * size - size of allocation
 * type_num - type number of the new object
 * constructor - object's constructor
 * arg - argument for object's constructor
 * oidp - pointer to target object ID
*/
int
list_insert_new_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr,
size_t usable_size, void *arg), void *arg, PMEMoid *oidp)
{
int ret;
if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
return -1;
}
ret = list_insert_new(pop, pe_offset, user_head,
dest, before, size, type_num, constructor, arg, oidp);
pmemobj_mutex_unlock_nofail(pop, &user_head->lock);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
* list_insert -- insert object to a single list
*
* pop - pmemobj handle
* pe_offset - offset to list entry on user list relative to user data
* head - list head
* dest - destination object ID
* before - before/after destination
* oid - target object ID
*/
int
list_insert(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid dest, int before,
PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head, NULL);
struct lane *lane;
lane_hold(pop, &lane);
int ret;
if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
dest = list_get_dest(pop, head, dest, pe_offset, before);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)oid.off + pe_offset));
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
(uintptr_t)((ssize_t)dest.off + pe_offset));
struct list_args_insert args = {
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.head = head,
.before = before,
};
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
uint64_t next_offset;
uint64_t prev_offset;
/* insert element to user list */
list_insert_user(pop, ctx,
&args, &args_common, &next_offset, &prev_offset);
/* fill entry of existing element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, next_offset, prev_offset, 1);
operation_process(ctx);
operation_finish(ctx, 0);
pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
 * list_remove_free -- (internal) remove an object from the list and free it
 *
 * pop - pmemobj pool handle
 * pe_offset - offset to list entry on user list relative to user data
 * user_head - user list head, *must* be locked
* oidp - pointer to target object ID
*/
static void
list_remove_free(PMEMobjpool *pop, size_t pe_offset,
struct list_head *user_head, PMEMoid *oidp)
{
LOG(3, NULL);
ASSERT(user_head != NULL);
#ifdef DEBUG
int r = pmemobj_mutex_assert_locked(pop, &user_head->lock);
ASSERTeq(r, 0);
#endif
struct lane *lane;
lane_hold(pop, &lane);
struct operation_context *ctx = lane->external;
operation_start(ctx);
struct pobj_action deferred;
palloc_defer_free(&pop->heap, oidp->off, &deferred);
uint64_t obj_doffset = oidp->off;
ASSERT((ssize_t)pe_offset >= 0);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
obj_doffset + pe_offset);
struct list_args_remove args = {
.pe_offset = (ssize_t)pe_offset,
.head = user_head,
.entry_ptr = entry_ptr,
.obj_doffset = obj_doffset
};
/* remove from user list */
list_remove_single(pop, ctx, &args);
/* clear the oid */
if (OBJ_PTR_IS_VALID(pop, oidp))
list_set_oid_redo_log(pop, ctx, oidp, 0, 1);
else
oidp->off = 0;
palloc_publish(&pop->heap, &deferred, 1, ctx);
lane_release(pop);
}
/*
 * list_remove_free_user -- remove an object from the list and free it
 *
 * pop - pmemobj pool handle
* pe_offset - offset to list entry on user list relative to user data
* user_head - user list head
* oidp - pointer to target object ID
*/
int
list_remove_free_user(PMEMobjpool *pop, size_t pe_offset,
struct list_head *user_head, PMEMoid *oidp)
{
LOG(3, NULL);
int ret;
if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
return -1;
}
list_remove_free(pop, pe_offset, user_head, oidp);
pmemobj_mutex_unlock_nofail(pop, &user_head->lock);
return 0;
}
/*
* list_remove -- remove object from list
*
* pop - pmemobj handle
* pe_offset - offset to list entry on user list relative to user data
* head - list head
* oid - target object ID
*/
int
list_remove(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head, NULL);
int ret;
struct lane *lane;
lane_hold(pop, &lane);
if ((ret = pmemobj_mutex_lock(pop, &head->lock))) {
errno = ret;
LOG(2, "pmemobj_mutex_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
struct list_entry *entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + (size_t)pe_offset);
struct list_args_remove args = {
.pe_offset = (ssize_t)pe_offset,
.head = head,
.entry_ptr = entry_ptr,
.obj_doffset = oid.off,
};
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr,
.pe_offset = (ssize_t)pe_offset,
};
/* remove element from user list */
list_remove_single(pop, ctx, &args);
/* clear next and prev offsets in removing element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, 0, 0, 0);
operation_process(ctx);
operation_finish(ctx, 0);
pmemobj_mutex_unlock_nofail(pop, &head->lock);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
/*
* list_move -- move object between two lists
*
* pop - pmemobj handle
* pe_offset_old - offset to old list entry relative to user data
* head_old - old list head
* pe_offset_new - offset to new list entry relative to user data
* head_new - new list head
* dest - destination object ID
* before - before/after destination
* oid - target object ID
*/
int
list_move(PMEMobjpool *pop,
size_t pe_offset_old, struct list_head *head_old,
size_t pe_offset_new, struct list_head *head_new,
PMEMoid dest, int before, PMEMoid oid)
{
LOG(3, NULL);
ASSERTne(head_old, NULL);
ASSERTne(head_new, NULL);
int ret;
struct lane *lane;
lane_hold(pop, &lane);
/*
	 * Grab locks in the specified order to avoid deadlocks.
*
* XXX performance improvement: initialize oob locks at pool opening
*/
if ((ret = list_mutexes_lock(pop, head_new, head_old))) {
errno = ret;
LOG(2, "list_mutexes_lock failed");
ret = -1;
goto err;
}
struct operation_context *ctx = lane->external;
operation_start(ctx);
dest = list_get_dest(pop, head_new, dest,
(ssize_t)pe_offset_new, before);
struct list_entry *entry_ptr_old =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + pe_offset_old);
struct list_entry *entry_ptr_new =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
oid.off + pe_offset_new);
struct list_entry *dest_entry_ptr =
(struct list_entry *)OBJ_OFF_TO_PTR(pop,
dest.off + pe_offset_new);
if (head_old == head_new) {
/* moving within the same list */
if (dest.off == oid.off)
goto unlock;
if (before && dest_entry_ptr->pe_prev.off == oid.off) {
if (head_old->pe_first.off != dest.off)
goto unlock;
list_update_head(pop, ctx,
head_old, oid.off);
goto redo_last;
}
if (!before && dest_entry_ptr->pe_next.off == oid.off) {
if (head_old->pe_first.off != oid.off)
goto unlock;
list_update_head(pop, ctx,
head_old, entry_ptr_old->pe_next.off);
goto redo_last;
}
}
ASSERT((ssize_t)pe_offset_old >= 0);
struct list_args_remove args_remove = {
.pe_offset = (ssize_t)pe_offset_old,
.head = head_old,
.entry_ptr = entry_ptr_old,
.obj_doffset = oid.off,
};
struct list_args_insert args_insert = {
.head = head_new,
.dest = dest,
.dest_entry_ptr = dest_entry_ptr,
.before = before,
};
ASSERT((ssize_t)pe_offset_new >= 0);
struct list_args_common args_common = {
.obj_doffset = oid.off,
.entry_ptr = entry_ptr_new,
.pe_offset = (ssize_t)pe_offset_new,
};
uint64_t next_offset;
uint64_t prev_offset;
/* remove element from user list */
list_remove_single(pop, ctx, &args_remove);
/* insert element to user list */
list_insert_user(pop, ctx, &args_insert,
&args_common, &next_offset, &prev_offset);
/* offsets differ, move is between different list entries - set uuid */
int set_uuid = pe_offset_new != pe_offset_old ? 1 : 0;
/* fill next and prev offsets of moving element using redo log */
list_fill_entry_redo_log(pop, ctx,
&args_common, next_offset, prev_offset, set_uuid);
redo_last:
unlock:
operation_process(ctx);
operation_finish(ctx, 0);
list_mutexes_unlock(pop, head_new, head_old);
err:
lane_release(pop);
ASSERT(ret == 0 || ret == -1);
return ret;
}
| 24,297 | 24.848936 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/memops.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* memops.h -- aggregated memory operations helper definitions
*/
#ifndef LIBPMEMOBJ_MEMOPS_H
#define LIBPMEMOBJ_MEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "vec.h"
#include "pmemops.h"
#include "ulog.h"
#include "lane.h"
#ifdef __cplusplus
extern "C" {
#endif
enum operation_log_type {
LOG_PERSISTENT, /* log of persistent modifications */
LOG_TRANSIENT, /* log of transient memory modifications */
MAX_OPERATION_LOG_TYPE
};
enum log_type {
LOG_TYPE_UNDO,
LOG_TYPE_REDO,
MAX_LOG_TYPE,
};
struct user_buffer_def {
void *addr;
size_t size;
};
#ifdef GET_NDP_BREAKDOWN
extern uint64_t ulogCycles;
#endif
#ifdef USE_NDP_REDO
extern int use_ndp_redo;
#endif
struct operation_context;
struct operation_context *
operation_new(struct ulog *redo, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type);
void operation_init(struct operation_context *ctx);
void operation_start(struct operation_context *ctx);
void operation_resume(struct operation_context *ctx);
void operation_delete(struct operation_context *ctx);
void operation_free_logs(struct operation_context *ctx, uint64_t flags);
int operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type);
int operation_add_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type);
int operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type);
int operation_user_buffer_verify_align(struct operation_context *ctx,
struct user_buffer_def *userbuf);
void operation_add_user_buffer(struct operation_context *ctx,
struct user_buffer_def *userbuf);
void operation_set_auto_reserve(struct operation_context *ctx,
int auto_reserve);
void operation_set_any_user_buffer(struct operation_context *ctx,
int any_user_buffer);
int operation_get_any_user_buffer(struct operation_context *ctx);
int operation_user_buffer_range_cmp(const void *lhs, const void *rhs);
int operation_reserve(struct operation_context *ctx, size_t new_capacity);
void operation_process(struct operation_context *ctx);
void operation_finish(struct operation_context *ctx, unsigned flags);
void operation_cancel(struct operation_context *ctx);
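/*
 * Typical call sequence (illustrative sketch, mirroring the usage in
 * list.c): start the context, append entries, process and finish:
 *
 *	operation_start(ctx);
 *	operation_add_entry(ctx, ptr, value, ULOG_OPERATION_SET);
 *	operation_process(ctx);
 *	operation_finish(ctx, 0);
 */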
#ifdef __cplusplus
}
#endif
#endif
| 2,467 | 26.422222 | 74 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/pmalloc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* pmalloc.h -- internal definitions for persistent malloc
*/
#ifndef LIBPMEMOBJ_PMALLOC_H
#define LIBPMEMOBJ_PMALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
/* single operations done in the internal context of the lane */
int pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
int pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id);
int prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
void pfree(PMEMobjpool *pop, uint64_t *off);
/* external operation to be used together with context-aware palloc funcs */
struct operation_context *pmalloc_operation_hold(PMEMobjpool *pop);
struct operation_context *pmalloc_operation_hold_no_start(PMEMobjpool *pop);
void pmalloc_operation_release(PMEMobjpool *pop);
void pmalloc_ctl_register(PMEMobjpool *pop);
int pmalloc_cleanup(PMEMobjpool *pop);
int pmalloc_boot(PMEMobjpool *pop);
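/*
 * Illustrative sketch: a single persistent allocation and free through the
 * internal lane context:
 *
 *	uint64_t off = 0;
 *	if (pmalloc(pop, &off, size, 0, 0) == 0)
 *		pfree(pop, &off);
 */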
#ifdef __cplusplus
}
#endif
#endif
| 1,291 | 24.333333 | 76 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/recycler.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* recycler.h -- internal definitions of run recycler
*
* This is a container that stores runs that are currently not used by any of
* the buckets.
*/
#ifndef LIBPMEMOBJ_RECYCLER_H
#define LIBPMEMOBJ_RECYCLER_H 1
#include "memblock.h"
#include "vec.h"
#ifdef __cplusplus
extern "C" {
#endif
struct recycler;
VEC(empty_runs, struct memory_block);
struct recycler_element {
uint32_t max_free_block;
uint32_t free_space;
uint32_t chunk_id;
uint32_t zone_id;
};
struct recycler *recycler_new(struct palloc_heap *layout,
size_t nallocs, size_t *peak_arenas);
void recycler_delete(struct recycler *r);
struct recycler_element recycler_element_new(struct palloc_heap *heap,
const struct memory_block *m);
int recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element);
int recycler_get(struct recycler *r, struct memory_block *m);
struct empty_runs recycler_recalc(struct recycler *r, int force);
void recycler_inc_unaccounted(struct recycler *r,
const struct memory_block *m);
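/*
 * Illustrative sketch: a run that is no longer used by any bucket can be
 * handed over to the recycler and later retrieved for reuse:
 *
 *	struct recycler_element e = recycler_element_new(heap, &m);
 *	(void) recycler_put(r, &m, e);
 *	...
 *	if (recycler_get(r, &m) == 0)
 *		... reuse the run described by m ...
 */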
#ifdef __cplusplus
}
#endif
#endif
| 1,158 | 20.867925 | 77 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/palloc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* palloc.h -- internal definitions for persistent allocator
*/
#ifndef LIBPMEMOBJ_PALLOC_H
#define LIBPMEMOBJ_PALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "ulog.h"
#include "valgrind_internal.h"
#include "stats.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PALLOC_CTL_DEBUG_NO_PATTERN (-1)
struct palloc_heap {
struct pmem_ops p_ops;
struct heap_layout *layout;
struct heap_rt *rt;
uint64_t *sizep;
uint64_t growsize;
struct stats *stats;
struct pool_set *set;
void *base;
int alloc_pattern;
};
struct memory_block;
typedef int (*palloc_constr)(void *base, void *ptr,
size_t usable_size, void *arg);
int palloc_operation(struct palloc_heap *heap, uint64_t off, uint64_t *dest_off,
size_t size, palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct operation_context *ctx);
int
palloc_reserve(struct palloc_heap *heap, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct pobj_action *act);
void
palloc_defer_free(struct palloc_heap *heap, uint64_t off,
struct pobj_action *act);
void
palloc_cancel(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt);
void
palloc_publish(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt,
struct operation_context *ctx);
void
palloc_set_value(struct palloc_heap *heap, struct pobj_action *act,
uint64_t *ptr, uint64_t value);
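/*
 * Illustrative sketch: an allocation can be split into a reservation and a
 * later atomic publish, as the atomic-lists code does (constructor may be
 * NULL):
 *
 *	struct pobj_action act;
 *	if (palloc_reserve(heap, size, NULL, NULL, 0, 0, 0, 0, &act) == 0)
 *		palloc_publish(heap, &act, 1, ctx);
 */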
uint64_t palloc_first(struct palloc_heap *heap);
uint64_t palloc_next(struct palloc_heap *heap, uint64_t off);
size_t palloc_usable_size(struct palloc_heap *heap, uint64_t off);
uint64_t palloc_extra(struct palloc_heap *heap, uint64_t off);
uint16_t palloc_flags(struct palloc_heap *heap, uint64_t off);
int palloc_boot(struct palloc_heap *heap, void *heap_start,
uint64_t heap_size, uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int palloc_buckets_init(struct palloc_heap *heap);
int palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void *palloc_heap_end(struct palloc_heap *h);
int palloc_heap_check(void *heap_start, uint64_t heap_size);
int palloc_heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
void palloc_heap_cleanup(struct palloc_heap *heap);
size_t palloc_heap(void *heap_start);
int palloc_defrag(struct palloc_heap *heap, uint64_t **objv, size_t objcnt,
struct operation_context *ctx, struct pobj_defrag_result *result);
/* foreach callback, terminates iteration if return value is non-zero */
typedef int (*object_callback)(const struct memory_block *m, void *arg);
#if VG_MEMCHECK_ENABLED
void palloc_heap_vg_open(struct palloc_heap *heap, int objects);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,006 | 25.377193 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/container.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* container.h -- internal definitions for block containers
*/
#ifndef LIBPMEMOBJ_CONTAINER_H
#define LIBPMEMOBJ_CONTAINER_H 1
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container {
const struct block_container_ops *c_ops;
struct palloc_heap *heap;
};
struct block_container_ops {
/* inserts a new memory block into the container */
int (*insert)(struct block_container *c, const struct memory_block *m);
/* removes exact match memory block */
int (*get_rm_exact)(struct block_container *c,
const struct memory_block *m);
/* removes and returns the best-fit memory block for size */
int (*get_rm_bestfit)(struct block_container *c,
struct memory_block *m);
/* checks whether the container is empty */
int (*is_empty)(struct block_container *c);
/* removes all elements from the container */
void (*rm_all)(struct block_container *c);
/* deletes the container */
void (*destroy)(struct block_container *c);
};
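/*
 * Illustrative sketch: callers never touch a container implementation
 * directly; every operation is dispatched through the c_ops vtable, e.g. a
 * best-fit lookup followed by an emptiness check:
 *
 *	if (c->c_ops->get_rm_bestfit(c, &m) != 0)
 *		return ENOMEM;
 *	if (c->c_ops->is_empty(c))
 *		...
 */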
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_H */
| 1,125 | 21.979592 | 72 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/stats.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* stats.h -- definitions of statistics
*/
#ifndef LIBPMEMOBJ_STATS_H
#define LIBPMEMOBJ_STATS_H 1
#include "ctl.h"
#include "libpmemobj/ctl.h"
#ifdef __cplusplus
extern "C" {
#endif
struct stats_transient {
uint64_t heap_run_allocated;
uint64_t heap_run_active;
};
struct stats_persistent {
uint64_t heap_curr_allocated;
};
struct stats {
enum pobj_stats_enabled enabled;
struct stats_transient *transient;
struct stats_persistent *persistent;
};
#define STATS_INC(stats, type, name, value) do {\
STATS_INC_##type(stats, name, value);\
} while (0)
#define STATS_INC_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_add64((&(stats)->transient->name), (value));\
} while (0)
#define STATS_INC_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_add64((&(stats)->persistent->name), (value));\
} while (0)
#define STATS_SUB(stats, type, name, value) do {\
STATS_SUB_##type(stats, name, value);\
} while (0)
#define STATS_SUB_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_sub64((&(stats)->transient->name), (value));\
} while (0)
#define STATS_SUB_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_sub64((&(stats)->persistent->name), (value));\
} while (0)
#define STATS_SET(stats, type, name, value) do {\
STATS_SET_##type(stats, name, value);\
} while (0)
#define STATS_SET_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_atomic_store_explicit64((&(stats)->transient->name),\
(value), memory_order_release);\
} while (0)
#define STATS_SET_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_atomic_store_explicit64((&(stats)->persistent->name),\
(value), memory_order_release);\
} while (0)
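/*
 * Usage sketch (illustrative, not part of this header): call sites pick the
 * counter type at compile time through the generic macros, e.g.:
 *
 *	STATS_INC(heap->stats, persistent, heap_curr_allocated, size);
 *	STATS_SUB(heap->stats, transient, heap_run_active, size);
 */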
#define STATS_CTL_LEAF(type, name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(type##_##name), NULL, NULL},\
NULL, NULL}
#define STATS_CTL_HANDLER(type, name, varname)\
static int CTL_READ_HANDLER(type##_##name)(void *ctx,\
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)\
{\
PMEMobjpool *pop = ctx;\
uint64_t *argv = arg;\
util_atomic_load_explicit64(&pop->stats->type->varname,\
argv, memory_order_acquire);\
return 0;\
}
void stats_ctl_register(PMEMobjpool *pop);
struct stats *stats_new(PMEMobjpool *pop);
void stats_delete(PMEMobjpool *pop, struct stats *stats);
#ifdef __cplusplus
}
#endif
#endif
| 2,990 | 26.440367 | 71 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/bucket.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* bucket.c -- bucket implementation
*
* Buckets manage volatile state of the heap. They are the abstraction layer
* between the heap-managed chunks/runs and memory allocations.
*
* Each bucket instance can have a different underlying container that is
* responsible for selecting blocks - which means that whether the allocator
* serves memory blocks in best/first/next -fit manner is decided during bucket
* creation.
*/
#include "alloc_class.h"
#include "bucket.h"
#include "heap.h"
#include "out.h"
#include "sys_util.h"
#include "valgrind_internal.h"
/*
* bucket_new -- creates a new bucket instance
*/
struct bucket *
bucket_new(struct block_container *c, struct alloc_class *aclass)
{
if (c == NULL)
return NULL;
struct bucket *b = Malloc(sizeof(*b));
if (b == NULL)
return NULL;
b->container = c;
b->c_ops = c->c_ops;
util_mutex_init(&b->lock);
b->is_active = 0;
b->active_memory_block = NULL;
if (aclass && aclass->type == CLASS_RUN) {
b->active_memory_block =
Zalloc(sizeof(struct memory_block_reserved));
if (b->active_memory_block == NULL)
goto error_active_alloc;
}
b->aclass = aclass;
return b;
error_active_alloc:
util_mutex_destroy(&b->lock);
Free(b);
return NULL;
}
/*
* bucket_insert_block -- inserts a block into the bucket
*/
int
bucket_insert_block(struct bucket *b, const struct memory_block *m)
{
#if VG_MEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_DRD_ENABLED
if (On_memcheck || On_drd_or_hg) {
size_t size = m->m_ops->get_real_size(m);
void *data = m->m_ops->get_real_data(m);
VALGRIND_DO_MAKE_MEM_NOACCESS(data, size);
VALGRIND_ANNOTATE_NEW_MEMORY(data, size);
}
#endif
return b->c_ops->insert(b->container, m);
}
/*
* bucket_delete -- cleanups and deallocates bucket instance
*/
void
bucket_delete(struct bucket *b)
{
if (b->active_memory_block)
Free(b->active_memory_block);
util_mutex_destroy(&b->lock);
b->c_ops->destroy(b->container);
Free(b);
}
/*
* bucket_current_resvp -- returns the pointer to the current reservation count
*/
int *
bucket_current_resvp(struct bucket *b)
{
return b->active_memory_block ? &b->active_memory_block->nresv : NULL;
}
| 2,251 | 21.52 | 79 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/container_seglists.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* container_seglists.c -- implementation of segregated lists block container
*
* This container is constructed from N (up to 64) intrusive lists and a
* single 8 byte bitmap that stores the information whether a given list is
* empty or not.
*/
#include "container_seglists.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
#include "valgrind_internal.h"
#include "vecq.h"
#define SEGLIST_BLOCK_LISTS 64U
struct block_container_seglists {
struct block_container super;
struct memory_block m;
VECQ(, uint32_t) blocks[SEGLIST_BLOCK_LISTS];
uint64_t nonempty_lists;
};
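/*
 * Worked example (illustrative): inserting a block with size_idx == 3
 * enqueues its offset into blocks[2] and sets bit 2 of nonempty_lists.
 * A best-fit query for size_idx == 2 then masks away the too-small lists
 * and takes the lowest set bit that remains:
 *
 *	size_mask = (1ULL << (2 - 1)) - 1;	== 0x1
 *	v = nonempty_lists & ~size_mask;	bit 2 still set
 *	i = util_lssb_index64(v);		== 2, serves the block
 */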
/*
* container_seglists_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_seglists_insert_block(struct block_container *bc,
const struct memory_block *m)
{
ASSERT(m->chunk_id < MAX_CHUNK);
ASSERT(m->zone_id < UINT16_MAX);
ASSERTne(m->size_idx, 0);
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
if (c->nonempty_lists == 0)
c->m = *m;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
ASSERT(m->chunk_id == c->m.chunk_id);
ASSERT(m->zone_id == c->m.zone_id);
if (VECQ_ENQUEUE(&c->blocks[m->size_idx - 1], m->block_off) != 0)
return -1;
/* marks the list as nonempty */
c->nonempty_lists |= 1ULL << (m->size_idx - 1);
return 0;
}
/*
* container_seglists_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_seglists_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
uint32_t i = 0;
/* applicable lists */
uint64_t size_mask = (1ULL << (m->size_idx - 1)) - 1;
uint64_t v = c->nonempty_lists & ~size_mask;
if (v == 0)
return ENOMEM;
/* finds the list that serves the smallest applicable size */
i = util_lssb_index64(v);
uint32_t block_offset = VECQ_DEQUEUE(&c->blocks[i]);
if (VECQ_SIZE(&c->blocks[i]) == 0) /* marks the list as empty */
c->nonempty_lists &= ~(1ULL << (i));
*m = c->m;
m->block_off = block_offset;
m->size_idx = i + 1;
return 0;
}
/*
* container_seglists_is_empty -- (internal) checks whether the container is
* empty
*/
static int
container_seglists_is_empty(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
return c->nonempty_lists == 0;
}
/*
 * container_seglists_rm_all -- (internal) removes all elements from the
 * container
*/
static void
container_seglists_rm_all(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_CLEAR(&c->blocks[i]);
c->nonempty_lists = 0;
}
/*
 * container_seglists_destroy -- (internal) destroys the container
*/
static void
container_seglists_destroy(struct block_container *bc)
{
struct block_container_seglists *c =
(struct block_container_seglists *)bc;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_DELETE(&c->blocks[i]);
Free(c);
}
/*
* This container does not support retrieval of exact memory blocks, but other
* than provides best-fit in O(1) time for unit sizes that do not exceed 64.
*/
static const struct block_container_ops container_seglists_ops = {
.insert = container_seglists_insert_block,
.get_rm_exact = NULL,
.get_rm_bestfit = container_seglists_get_rm_block_bestfit,
.is_empty = container_seglists_is_empty,
.rm_all = container_seglists_rm_all,
.destroy = container_seglists_destroy,
};
/*
* container_new_seglists -- allocates and initializes a seglists container
*/
struct block_container *
container_new_seglists(struct palloc_heap *heap)
{
struct block_container_seglists *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_seglists_ops;
for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i)
VECQ_INIT(&bc->blocks[i]);
bc->nonempty_lists = 0;
return (struct block_container *)&bc->super;
error_container_malloc:
return NULL;
}
| 4,215 | 23.511628 | 80 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/tx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* tx.h -- internal definitions for transactions
*/
#ifndef LIBPMEMOBJ_INTERNAL_TX_H
#define LIBPMEMOBJ_INTERNAL_TX_H 1
#include <stdint.h>
#include "obj.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
#define TX_DEFAULT_RANGE_CACHE_SIZE (1 << 15)
#define TX_DEFAULT_RANGE_CACHE_THRESHOLD (1 << 12)
#define TX_RANGE_MASK (8ULL - 1)
#define TX_RANGE_MASK_LEGACY (32ULL - 1)
#define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask))
#define TX_SNAPSHOT_LOG_ENTRY_ALIGNMENT CACHELINE_SIZE
#define TX_SNAPSHOT_LOG_BUFFER_OVERHEAD sizeof(struct ulog)
#define TX_SNAPSHOT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_buf)
#define TX_INTENT_LOG_BUFFER_ALIGNMENT CACHELINE_SIZE
#define TX_INTENT_LOG_BUFFER_OVERHEAD sizeof(struct ulog)
#define TX_INTENT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_val)
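/*
 * Example (illustrative): TX_ALIGN_SIZE rounds a size up to the mask's
 * alignment, e.g. TX_ALIGN_SIZE(13, TX_RANGE_MASK) == 16 and
 * TX_ALIGN_SIZE(13, TX_RANGE_MASK_LEGACY) == 32.
 */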
struct tx_parameters {
size_t cache_size;
};
/*
* Returns the current transaction's pool handle, NULL if not within
* a transaction.
*/
PMEMobjpool *tx_get_pop(void);
void tx_ctl_register(PMEMobjpool *pop);
struct tx_parameters *tx_params_new(void);
void tx_params_delete(struct tx_parameters *tx_params);
#ifdef __cplusplus
}
#endif
#endif
| 1,258 | 22.314815 | 68 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/critnib.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* critnib.c -- implementation of critnib tree
*
* It offers identity lookup (like a hashmap) and <= lookup (like a search
* tree). Unlike some hashing algorithms (cuckoo hash, perfect hashing) the
* complexity isn't constant, but for data sizes we expect it's several
* times as fast as cuckoo, and has no "stop the world" cases that would
* cause latency (ie, better worst case behaviour).
*/
/*
* STRUCTURE DESCRIPTION
*
* Critnib is a hybrid between a radix tree and DJ Bernstein's critbit:
* it skips nodes for uninteresting radix nodes (ie, ones that would have
* exactly one child), this requires adding to every node a field that
* describes the slice (4-bit in our case) that this radix level is for.
*
* This implementation also stores each node's path (ie, bits that are
* common to every key in that subtree) -- this doesn't help with lookups
* at all (unused in == match, could be reconstructed at no cost in <=
* after first dive) but simplifies inserts and removes. If we ever want
* that piece of memory it's easy to trim it down.
*/
/*
* CONCURRENCY ISSUES
*
* Reads are completely lock-free sync-free, but only almost wait-free:
* if for some reason a read thread gets pathologically stalled, it will
* notice the data being stale and restart the work. In usual cases,
* the structure having been modified does _not_ cause a restart.
*
* Writes could be easily made lock-free as well (with only a cmpxchg
* sync), but this leads to problems with removes. A possible solution
 * would be doing removes by overwriting with NULL w/o freeing -- yet this
* would lead to the structure growing without bounds. Complex per-node
* locks would increase concurrency but they slow down individual writes
* enough that in practice a simple global write lock works faster.
*
* Removes are the only operation that can break reads. The structure
* can do local RCU well -- the problem being knowing when it's safe to
* free. Any synchronization with reads would kill their speed, thus
* instead we have a remove count. The grace period is DELETED_LIFE,
* after which any read will notice staleness and restart its work.
*/
#include <errno.h>
#include <stdbool.h>
#include "alloc.h"
#include "critnib.h"
#include "out.h"
#include "sys_util.h"
#include "valgrind_internal.h"
/*
* A node that has been deleted is left untouched for this many delete
* cycles. Reads have guaranteed correctness if they took no longer than
* DELETED_LIFE concurrent deletes, otherwise they notice something is
* wrong and restart. The memory of deleted nodes is never freed to
* malloc nor their pointers lead anywhere wrong, thus a stale read will
* (temporarily) get a wrong answer but won't crash.
*
* There's no need to count writes as they never interfere with reads.
*
* Allowing stale reads (of arbitrarily old writes or of deletes less than
* DELETED_LIFE old) might sound counterintuitive, but it doesn't affect
* semantics in any way: the thread could have been stalled just after
* returning from our code. Thus, the guarantee is: the result of get() or
* find_le() is a value that was current at any point between the call
* start and end.
*/
#define DELETED_LIFE 16
#define SLICE 4
#define NIB ((1ULL << SLICE) - 1)
#define SLNODES (1 << SLICE)
typedef unsigned char sh_t;
struct critnib_node {
/*
* path is the part of a tree that's already traversed (be it through
* explicit nodes or collapsed links) -- ie, any subtree below has all
* those bits set to this value.
*
* nib is a 4-bit slice that's an index into the node's children.
*
* shift is the length (in bits) of the part of the key below this node.
*
* nib
* |XXXXXXXXXX|?|*****|
* path ^
* +-----+
* shift
*/
struct critnib_node *child[SLNODES];
uint64_t path;
sh_t shift;
};
struct critnib_leaf {
uint64_t key;
void *value;
};
struct critnib {
struct critnib_node *root;
/* pool of freed nodes: singly linked list, next at child[0] */
struct critnib_node *deleted_node;
struct critnib_leaf *deleted_leaf;
/* nodes removed but not yet eligible for reuse */
struct critnib_node *pending_del_nodes[DELETED_LIFE];
struct critnib_leaf *pending_del_leaves[DELETED_LIFE];
uint64_t remove_count;
os_mutex_t mutex; /* writes/removes */
};
/*
* atomic load
*/
static void
load(void *src, void *dst)
{
util_atomic_load_explicit64((uint64_t *)src, (uint64_t *)dst,
memory_order_acquire);
}
/*
* atomic store
*/
static void
store(void *dst, void *src)
{
util_atomic_store_explicit64((uint64_t *)dst, (uint64_t)src,
memory_order_release);
}
/*
* internal: is_leaf -- check tagged pointer for leafness
*/
static inline bool
is_leaf(struct critnib_node *n)
{
return (uint64_t)n & 1;
}
/*
* internal: to_leaf -- untag a leaf pointer
*/
static inline struct critnib_leaf *
to_leaf(struct critnib_node *n)
{
return (void *)((uint64_t)n & ~1ULL);
}
/*
* internal: path_mask -- return bit mask of a path above a subtree [shift]
* bits tall
*/
static inline uint64_t
path_mask(sh_t shift)
{
return ~NIB << shift;
}
/*
* internal: slice_index -- return index of child at the given nib
*/
static inline unsigned
slice_index(uint64_t key, sh_t shift)
{
return (unsigned)((key >> shift) & NIB);
}
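/*
 * Example (illustrative): with SLICE == 4, slice_index(0x1234, 8) extracts
 * the third nibble from the bottom:
 *
 *	(0x1234 >> 8) & NIB == 0x2
 */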
/*
* critnib_new -- allocates a new critnib structure
*/
struct critnib *
critnib_new(void)
{
struct critnib *c = Zalloc(sizeof(struct critnib));
if (!c)
return NULL;
util_mutex_init(&c->mutex);
VALGRIND_HG_DRD_DISABLE_CHECKING(&c->root, sizeof(c->root));
VALGRIND_HG_DRD_DISABLE_CHECKING(&c->remove_count,
sizeof(c->remove_count));
return c;
}
/*
* internal: delete_node -- recursively free (to malloc) a subtree
*/
static void
delete_node(struct critnib_node *__restrict n)
{
if (!is_leaf(n)) {
for (int i = 0; i < SLNODES; i++) {
if (n->child[i])
delete_node(n->child[i]);
}
Free(n);
} else {
Free(to_leaf(n));
}
}
/*
* critnib_delete -- destroy and free a critnib struct
*/
void
critnib_delete(struct critnib *c)
{
if (c->root)
delete_node(c->root);
util_mutex_destroy(&c->mutex);
for (struct critnib_node *m = c->deleted_node; m; ) {
struct critnib_node *mm = m->child[0];
Free(m);
m = mm;
}
for (struct critnib_leaf *k = c->deleted_leaf; k; ) {
struct critnib_leaf *kk = k->value;
Free(k);
k = kk;
}
for (int i = 0; i < DELETED_LIFE; i++) {
Free(c->pending_del_nodes[i]);
Free(c->pending_del_leaves[i]);
}
Free(c);
}
/*
* internal: free_node -- free (to internal pool, not malloc) a node.
*
* We cannot free them to malloc as a stalled reader thread may still walk
* through such nodes; it will notice the result being bogus but only after
* completing the walk, thus we need to ensure any freed nodes still point
* to within the critnib structure.
*/
static void
free_node(struct critnib *__restrict c, struct critnib_node *__restrict n)
{
if (!n)
return;
ASSERT(!is_leaf(n));
n->child[0] = c->deleted_node;
c->deleted_node = n;
}
/*
* internal: alloc_node -- allocate a node from our pool or from malloc
*/
static struct critnib_node *
alloc_node(struct critnib *__restrict c)
{
if (!c->deleted_node) {
struct critnib_node *n = Malloc(sizeof(struct critnib_node));
if (n == NULL)
ERR("!Malloc");
return n;
}
struct critnib_node *n = c->deleted_node;
c->deleted_node = n->child[0];
VALGRIND_ANNOTATE_NEW_MEMORY(n, sizeof(*n));
return n;
}
/*
* internal: free_leaf -- free (to internal pool, not malloc) a leaf.
*
* See free_node().
*/
static void
free_leaf(struct critnib *__restrict c, struct critnib_leaf *__restrict k)
{
if (!k)
return;
k->value = c->deleted_leaf;
c->deleted_leaf = k;
}
/*
* internal: alloc_leaf -- allocate a leaf from our pool or from malloc
*/
static struct critnib_leaf *
alloc_leaf(struct critnib *__restrict c)
{
if (!c->deleted_leaf) {
struct critnib_leaf *k = Malloc(sizeof(struct critnib_leaf));
if (k == NULL)
ERR("!Malloc");
return k;
}
struct critnib_leaf *k = c->deleted_leaf;
c->deleted_leaf = k->value;
VALGRIND_ANNOTATE_NEW_MEMORY(k, sizeof(*k));
return k;
}
/*
* crinib_insert -- write a key:value pair to the critnib structure
*
* Returns:
* • 0 on success
* • EEXIST if such a key already exists
* • ENOMEM if we're out of memory
*
* Takes a global write lock but doesn't stall any readers.
*/
int
critnib_insert(struct critnib *c, uint64_t key, void *value)
{
util_mutex_lock(&c->mutex);
struct critnib_leaf *k = alloc_leaf(c);
if (!k) {
util_mutex_unlock(&c->mutex);
return ENOMEM;
}
VALGRIND_HG_DRD_DISABLE_CHECKING(k, sizeof(struct critnib_leaf));
k->key = key;
k->value = value;
struct critnib_node *kn = (void *)((uint64_t)k | 1);
struct critnib_node *n = c->root;
if (!n) {
c->root = kn;
util_mutex_unlock(&c->mutex);
return 0;
}
struct critnib_node **parent = &c->root;
struct critnib_node *prev = c->root;
while (n && !is_leaf(n) && (key & path_mask(n->shift)) == n->path) {
prev = n;
parent = &n->child[slice_index(key, n->shift)];
n = *parent;
}
if (!n) {
n = prev;
store(&n->child[slice_index(key, n->shift)], kn);
util_mutex_unlock(&c->mutex);
return 0;
}
uint64_t path = is_leaf(n) ? to_leaf(n)->key : n->path;
/* Find where the path differs from our key. */
uint64_t at = path ^ key;
if (!at) {
ASSERT(is_leaf(n));
free_leaf(c, to_leaf(kn));
/* fail instead of replacing */
util_mutex_unlock(&c->mutex);
return EEXIST;
}
/* and convert that to an index. */
sh_t sh = util_mssb_index64(at) & (sh_t)~(SLICE - 1);
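	/*
	 * Illustrative example: if the keys first differ at bit 13,
	 * util_mssb_index64(at) returns 13, and masking with ~(SLICE - 1)
	 * rounds it down to 12 -- the lower edge of the differing nibble,
	 * which becomes the new node's shift.
	 */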
struct critnib_node *m = alloc_node(c);
if (!m) {
free_leaf(c, to_leaf(kn));
util_mutex_unlock(&c->mutex);
return ENOMEM;
}
VALGRIND_HG_DRD_DISABLE_CHECKING(m, sizeof(struct critnib_node));
for (int i = 0; i < SLNODES; i++)
m->child[i] = NULL;
m->child[slice_index(key, sh)] = kn;
m->child[slice_index(path, sh)] = n;
m->shift = sh;
m->path = key & path_mask(sh);
store(parent, m);
util_mutex_unlock(&c->mutex);
return 0;
}
/*
* critnib_remove -- delete a key from the critnib structure, return its value
*/
void *
critnib_remove(struct critnib *c, uint64_t key)
{
struct critnib_leaf *k;
void *value = NULL;
util_mutex_lock(&c->mutex);
struct critnib_node *n = c->root;
if (!n)
goto not_found;
uint64_t del = util_fetch_and_add64(&c->remove_count, 1) % DELETED_LIFE;
free_node(c, c->pending_del_nodes[del]);
free_leaf(c, c->pending_del_leaves[del]);
c->pending_del_nodes[del] = NULL;
c->pending_del_leaves[del] = NULL;
if (is_leaf(n)) {
k = to_leaf(n);
if (k->key == key) {
store(&c->root, NULL);
goto del_leaf;
}
goto not_found;
}
	/*
	 * n and kn are a parent:child pair (after the first iteration); kn is
	 * the leaf that holds the key we're deleting.
	 */
struct critnib_node **k_parent = &c->root;
struct critnib_node **n_parent = &c->root;
struct critnib_node *kn = n;
while (!is_leaf(kn)) {
n_parent = k_parent;
n = kn;
k_parent = &kn->child[slice_index(key, kn->shift)];
kn = *k_parent;
if (!kn)
goto not_found;
}
k = to_leaf(kn);
if (k->key != key)
goto not_found;
store(&n->child[slice_index(key, n->shift)], NULL);
/* Remove the node if there's only one remaining child. */
int ochild = -1;
for (int i = 0; i < SLNODES; i++) {
if (n->child[i]) {
if (ochild != -1)
goto del_leaf;
ochild = i;
}
}
ASSERTne(ochild, -1);
store(n_parent, n->child[ochild]);
c->pending_del_nodes[del] = n;
del_leaf:
value = k->value;
c->pending_del_leaves[del] = k;
not_found:
util_mutex_unlock(&c->mutex);
return value;
}
/*
* critnib_get -- query for a key ("==" match), returns value or NULL
*
 * Doesn't need a lock, but if many deletes happened while our thread was
 * somehow stalled, the query is restarted (as freed nodes remain unused only
 * for a grace period).
 *
 * Counterintuitively, it's pointless to return the most current answer;
 * we need only one that was valid at any point after the call started.
*/
void *
critnib_get(struct critnib *c, uint64_t key)
{
uint64_t wrs1, wrs2;
void *res;
do {
struct critnib_node *n;
load(&c->remove_count, &wrs1);
load(&c->root, &n);
/*
* critbit algorithm: dive into the tree, looking at nothing but
* each node's critical bit^H^H^Hnibble. This means we risk
		 * going the wrong way if our path is missing, but that's ok...
*/
while (n && !is_leaf(n))
load(&n->child[slice_index(key, n->shift)], &n);
/* ... as we check it at the end. */
struct critnib_leaf *k = to_leaf(n);
res = (n && k->key == key) ? k->value : NULL;
load(&c->remove_count, &wrs2);
} while (wrs1 + DELETED_LIFE <= wrs2);
return res;
}
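#ifdef CRITNIB_PATTERN_SKETCH
/*
 * Hedged sketch (hypothetical guard and names): the retry loop above,
 * reduced to its essence. Any lock-free read against this structure
 * samples the removal generation counter before and after the
 * traversal and retries if a full grace period (DELETED_LIFE removals)
 * elapsed in between, because only then could a node freed mid-read
 * have been reused.
 */
static void *
generation_protected_read(struct critnib *c,
	void *(*traverse)(struct critnib *))
{
	uint64_t gen1, gen2;
	void *res;
	do {
		load(&c->remove_count, &gen1);
		res = traverse(c);
		load(&c->remove_count, &gen2);
	} while (gen1 + DELETED_LIFE <= gen2);
	return res;
}
#endif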
/*
 * internal: find_successor -- return the value of the rightmost leaf in a
 * subtree
*/
static void *
find_successor(struct critnib_node *__restrict n)
{
while (1) {
int nib;
for (nib = NIB; nib >= 0; nib--)
if (n->child[nib])
break;
if (nib < 0)
return NULL;
n = n->child[nib];
if (is_leaf(n))
return to_leaf(n)->value;
}
}
/*
* internal: find_le -- recursively search <= in a subtree
*/
static void *
find_le(struct critnib_node *__restrict n, uint64_t key)
{
if (!n)
return NULL;
if (is_leaf(n)) {
struct critnib_leaf *k = to_leaf(n);
return (k->key <= key) ? k->value : NULL;
}
/*
* is our key outside the subtree we're in?
*
* If we're inside, all bits above the nib will be identical; note
* that shift points at the nib's lower rather than upper edge, so it
* needs to be masked away as well.
*/
if ((key ^ n->path) >> (n->shift) & ~NIB) {
/*
* subtree is too far to the left?
* -> its rightmost value is good
*/
if (n->path < key)
return find_successor(n);
/*
* subtree is too far to the right?
* -> it has nothing of interest to us
*/
return NULL;
}
unsigned nib = slice_index(key, n->shift);
/* recursive call: follow the path */
{
struct critnib_node *m;
load(&n->child[nib], &m);
void *value = find_le(m, key);
if (value)
return value;
}
/*
* nothing in that subtree? We strayed from the path at this point,
	 * so we need to search every subtree to our left in this node. No
* need to dive into any but the first non-null, though.
*/
for (; nib > 0; nib--) {
struct critnib_node *m;
load(&n->child[nib - 1], &m);
if (m) {
n = m;
if (is_leaf(n))
return to_leaf(n)->value;
return find_successor(n);
}
}
return NULL;
}
/*
* critnib_find_le -- query for a key ("<=" match), returns value or NULL
*
* Same guarantees as critnib_get().
*/
void *
critnib_find_le(struct critnib *c, uint64_t key)
{
uint64_t wrs1, wrs2;
void *res;
do {
load(&c->remove_count, &wrs1);
struct critnib_node *n; /* avoid a subtle TOCTOU */
load(&c->root, &n);
res = n ? find_le(n, key) : NULL;
load(&c->remove_count, &wrs2);
} while (wrs1 + DELETED_LIFE <= wrs2);
return res;
}
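#ifdef CRITNIB_FIND_LE_EXAMPLE
/*
 * Hedged usage sketch (hypothetical guard): illustrates the "<="
 * contract of critnib_find_le() with the public API defined above.
 */
static void
find_le_example(struct critnib *c, int *v10, int *v20)
{
	critnib_insert(c, 10, v10);
	critnib_insert(c, 20, v20);
	ASSERTeq(critnib_find_le(c, 15), v10); /* greatest key <= 15 is 10 */
	ASSERTeq(critnib_find_le(c, 5), NULL); /* no key at or below 5 */
}
#endif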
| 15,052 | 22.087423 | 78 | c |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/memblock.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* memblock.h -- internal definitions for memory block
*/
#ifndef LIBPMEMOBJ_MEMBLOCK_H
#define LIBPMEMOBJ_MEMBLOCK_H 1
#include <stddef.h>
#include <stdint.h>
#include "os_thread.h"
#include "heap_layout.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MEMORY_BLOCK_NONE \
(struct memory_block)\
{0, 0, 0, 0, NULL, NULL, MAX_HEADER_TYPES, MAX_MEMORY_BLOCK, NULL}
#define MEMORY_BLOCK_IS_NONE(_m)\
((_m).heap == NULL)
#define MEMORY_BLOCK_EQUALS(lhs, rhs)\
((lhs).zone_id == (rhs).zone_id && (lhs).chunk_id == (rhs).chunk_id &&\
(lhs).block_off == (rhs).block_off && (lhs).heap == (rhs).heap)
enum memory_block_type {
/*
* Huge memory blocks are directly backed by memory chunks. A single
* huge block can consist of several chunks.
* The persistent representation of huge memory blocks can be thought
* of as a doubly linked list with variable length elements.
* That list is stored in the chunk headers array where one element
* directly corresponds to one chunk.
*
* U - used, F - free, R - footer, . - empty
* |U| represents a used chunk with a size index of 1, with type
* information (CHUNK_TYPE_USED) stored in the corresponding header
* array element - chunk_headers[chunk_id].
*
* |F...R| represents a free chunk with size index of 5. The empty
* chunk headers have undefined values and shouldn't be used. All
* chunks with size larger than 1 must have a footer in the last
	 * corresponding header array - chunk_headers[chunk_id + size_idx - 1].
*
* The above representation of chunks will be used to describe the
* way fail-safety is achieved during heap operations.
*
* Allocation of huge memory block with size index 5:
* Initial heap state: |U| <> |F..R| <> |U| <> |F......R|
*
* The only block that matches that size is at very end of the chunks
* list: |F......R|
*
	 * As the request was for a memory block of size 5, and this one's size
	 * is 7, there's a need to first split the chunk in two.
* 1) The last chunk header of the new allocation is marked as footer
* and the block after that one is marked as free: |F...RF.R|
* This is allowed and has no impact on the heap because this
	 * modification is to a chunk header that is otherwise unused; in
	 * other words, the linked list didn't change.
*
* 2) The size index of the first header is changed from previous value
* of 7 to 5: |F...R||F.R|
* This is a single fail-safe atomic operation and this is the
* first change that is noticeable by the heap operations.
* A single linked list element is split into two new ones.
*
* 3) The allocation process either uses redo log or changes directly
* the chunk header type from free to used: |U...R| <> |F.R|
*
* In a similar fashion the reverse operation, free, is performed:
* Initial heap state: |U| <> |F..R| <> |F| <> |U...R| <> |F.R|
*
* This is the heap after the previous example with the single chunk
* in between changed from used to free.
*
* 1) Determine the neighbors of the memory block which is being
* freed.
*
	 * 2) Update the footer information (if needed) of the last chunk,
	 * which is either the memory block being freed or its neighbor to
	 * the right.
* |F| <> |U...R| <> |F.R << this one|
*
* 3) Update the size index and type of the left-most chunk header.
* And so this: |F << this one| <> |U...R| <> |F.R|
* becomes this: |F.......R|
* The entire chunk header can be updated in a single fail-safe
	 * atomic operation because its size is only 64 bits.
*/
MEMORY_BLOCK_HUGE,
/*
* Run memory blocks are chunks with CHUNK_TYPE_RUN and size index of 1.
* The entire chunk is subdivided into smaller blocks and has an
* additional metadata attached in the form of a bitmap - each bit
* corresponds to a single block.
* In this case there's no need to perform any coalescing or splitting
* on the persistent metadata.
* The bitmap is stored on a variable number of 64 bit values and
	 * because of the requirement of allocation fail-safe atomicity, the
	 * maximum size index of a memory block from a run is 64 - since that's
	 * the limit of the atomic write guarantee.
*
* The allocation/deallocation process is a single 8 byte write that
* sets/clears the corresponding bits. Depending on the user choice
* it can either be made atomically or using redo-log when grouped with
* other operations.
	 * It's also important to note that in the case of a realloc it might
	 * happen that a single 8 byte bitmap value has its bits both set and
	 * cleared - that's why the run memory block metadata changes operate
	 * on AND'ing or OR'ing a bitmask instead of directly setting the value
	 * (a minimal sketch of this follows after this enum).
*/
MEMORY_BLOCK_RUN,
MAX_MEMORY_BLOCK
};
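#ifdef MEMBLOCK_BITMAP_SKETCH
/*
 * Hedged sketch (hypothetical guard, not part of this header): the run
 * bitmap updates described above reduce to OR'ing a mask to mark units
 * as used and AND'ing its complement to mark them free, so one 8-byte
 * store can flip any mix of bits within a single bitmap word.
 */
static inline void
run_bitmap_set(uint64_t *word, uint64_t mask)
{
	*word |= mask; /* allocate: set the bits covered by the mask */
}

static inline void
run_bitmap_clear(uint64_t *word, uint64_t mask)
{
	*word &= ~mask; /* free: clear the bits covered by the mask */
}
#endif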
enum memblock_state {
MEMBLOCK_STATE_UNKNOWN,
MEMBLOCK_ALLOCATED,
MEMBLOCK_FREE,
MAX_MEMBLOCK_STATE,
};
/* runtime bitmap information for a run */
struct run_bitmap {
unsigned nvalues; /* number of 8 byte values - size of values array */
unsigned nbits; /* number of valid bits */
size_t size; /* total size of the bitmap in bytes */
uint64_t *values; /* pointer to the bitmap's values array */
};
/* runtime information necessary to create a run */
struct run_descriptor {
uint16_t flags; /* chunk flags for the run */
size_t unit_size; /* the size of a single unit in a run */
uint32_t size_idx; /* size index of a single run instance */
size_t alignment; /* required alignment of objects */
unsigned nallocs; /* number of allocs per run */
struct run_bitmap bitmap;
};
struct memory_block_ops {
/* returns memory block size */
size_t (*block_size)(const struct memory_block *m);
/* prepares header modification operation */
void (*prep_hdr)(const struct memory_block *m,
enum memblock_state dest_state, struct operation_context *ctx);
/* returns lock associated with memory block */
os_mutex_t *(*get_lock)(const struct memory_block *m);
/* returns whether a block is allocated or not */
enum memblock_state (*get_state)(const struct memory_block *m);
/* returns pointer to the data of a block */
void *(*get_user_data)(const struct memory_block *m);
/*
* Returns the size of a memory block without overhead.
* This is the size of a data block that can be used.
*/
size_t (*get_user_size)(const struct memory_block *m);
/* returns pointer to the beginning of data of a run block */
void *(*get_real_data)(const struct memory_block *m);
/* returns the size of a memory block, including headers */
size_t (*get_real_size)(const struct memory_block *m);
/* writes a header of an allocation */
void (*write_header)(const struct memory_block *m,
uint64_t extra_field, uint16_t flags);
void (*invalidate)(const struct memory_block *m);
/*
	 * Checks that the header type of a chunk matches the expected type and
* modifies it if necessary. This is fail-safe atomic.
*/
void (*ensure_header_type)(const struct memory_block *m,
enum header_type t);
/*
* Reinitializes a block after a heap restart.
* This is called for EVERY allocation, but *only* under Valgrind.
*/
void (*reinit_header)(const struct memory_block *m);
/* returns the extra field of an allocation */
uint64_t (*get_extra)(const struct memory_block *m);
/* returns the flags of an allocation */
uint16_t (*get_flags)(const struct memory_block *m);
/* initializes memblock in valgrind */
void (*vg_init)(const struct memory_block *m, int objects,
object_callback cb, void *arg);
/* iterates over every free block */
int (*iterate_free)(const struct memory_block *m,
object_callback cb, void *arg);
/* iterates over every used block */
int (*iterate_used)(const struct memory_block *m,
object_callback cb, void *arg);
/* calculates number of free units, valid only for runs */
void (*calc_free)(const struct memory_block *m,
uint32_t *free_space, uint32_t *max_free_block);
/* this is called exactly once for every existing chunk */
void (*reinit_chunk)(const struct memory_block *m);
/*
* Initializes bitmap data for a run.
* Do *not* use this function unless absolutely necessary, it breaks
* the abstraction layer by exposing implementation details.
*/
void (*get_bitmap)(const struct memory_block *m, struct run_bitmap *b);
/* calculates the ratio between occupied and unoccupied space */
unsigned (*fill_pct)(const struct memory_block *m);
};
struct memory_block {
uint32_t chunk_id; /* index of the memory block in its zone */
uint32_t zone_id; /* index of this block zone in the heap */
/*
	 * Size index of the memory block, represented either as a multiple of
	 * CHUNKSIZE in the case of a huge chunk or as a multiple of the run's
	 * block size.
*/
uint32_t size_idx;
/*
* Used only for run chunks, must be zeroed for huge.
	 * Number of preceding blocks in the chunk. In other words, the
	 * position of this memory block in the run bitmap.
*/
uint32_t block_off;
/*
* The variables below are associated with the memory block and are
* stored here for convenience. Those fields are filled by either the
* memblock_from_offset or memblock_rebuild_state, and they should not
* be modified manually.
*/
const struct memory_block_ops *m_ops;
struct palloc_heap *heap;
enum header_type header_type;
enum memory_block_type type;
struct run_bitmap *cached_bitmap;
};
/*
* This is a representation of a run memory block that is active in a bucket or
* is on a pending list in the recycler.
* This structure should never be passed around by value because the address of
 * the nresv variable can be stored in reservations made through
 * palloc_reserve(). Only when the number of reservations equals 0 can the
 * structure be moved/freed.
*/
struct memory_block_reserved {
struct memory_block m;
struct bucket *bucket;
/*
* Number of reservations made from this run, the pointer to this value
* is stored in a user facing pobj_action structure. Decremented once
* the reservation is published or canceled.
*/
int nresv;
};
struct memory_block memblock_from_offset(struct palloc_heap *heap,
uint64_t off);
struct memory_block memblock_from_offset_opt(struct palloc_heap *heap,
uint64_t off, int size);
void memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m);
struct memory_block memblock_huge_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx);
struct memory_block memblock_run_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, struct run_descriptor *rdsc);
void memblock_run_bitmap(uint32_t *size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment, void *content,
struct run_bitmap *b);
#ifdef __cplusplus
}
#endif
#endif
| 10,750 | 34.019544 | 80 | h |
null | NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/pmalloc.c | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmalloc.c -- implementation of pmalloc POSIX-like API
*
* This is the front-end part of the persistent memory allocator. It uses both
* transient and persistent representation of the heap to provide memory blocks
* in a reasonable time and with an acceptable common-case fragmentation.
*/
#include <inttypes.h>
#include "valgrind_internal.h"
#include "heap.h"
#include "lane.h"
#include "memblock.h"
#include "memops.h"
#include "obj.h"
#include "out.h"
#include "palloc.h"
#include "pmalloc.h"
#include "alloc_class.h"
#include "set.h"
#include "mmap.h"
enum pmalloc_operation_type {
OPERATION_INTERNAL, /* used only for single, one-off operations */
OPERATION_EXTERNAL, /* used for everything else, incl. large redos */
MAX_OPERATION_TYPE,
};
struct lane_alloc_runtime {
struct operation_context *ctx[MAX_OPERATION_TYPE];
};
/*
* pmalloc_operation_hold_type -- acquires allocator lane section and returns a
* pointer to its operation context
*/
static struct operation_context *
pmalloc_operation_hold_type(PMEMobjpool *pop, enum pmalloc_operation_type type,
int start)
{
struct lane *lane;
lane_hold(pop, &lane);
struct operation_context *ctx = type == OPERATION_INTERNAL ?
lane->internal : lane->external;
if (start)
operation_start(ctx);
return ctx;
}
/*
 * pmalloc_operation_hold_no_start -- acquires allocator lane section and
 * returns a pointer to its operation context without starting it
*/
struct operation_context *
pmalloc_operation_hold_no_start(PMEMobjpool *pop)
{
return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 0);
}
/*
* pmalloc_operation_hold -- acquires allocator lane section and returns a
 * pointer to its operation context
*/
struct operation_context *
pmalloc_operation_hold(PMEMobjpool *pop)
{
return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 1);
}
/*
* pmalloc_operation_release -- releases allocator lane section
*/
void
pmalloc_operation_release(PMEMobjpool *pop)
{
lane_release(pop);
}
/*
* pmalloc -- allocates a new block of memory
*
* The pool offset is written persistently into the off variable.
*
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
*/
int
pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, 0, off, size, NULL, NULL,
extra_field, object_flags, 0, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pmalloc_construct -- allocates a new block of memory with a constructor
*
* The block offset is written persistently into the off variable, but only
* after the constructor function has been called.
*
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
*/
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, 0, off, size, constructor, arg,
extra_field, object_flags, class_id, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* prealloc -- resizes in-place a previously allocated memory block
*
* The block offset is written persistently into the off variable.
*
 * If successful, the function returns zero. Otherwise an error number is
 * returned.
*/
int
prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, *off, off, size, NULL, NULL,
extra_field, object_flags, 0, 0, ctx);
pmalloc_operation_release(pop);
return ret;
}
/*
* pfree -- deallocates a memory block previously allocated by pmalloc
*
* A zero value is written persistently into the off variable.
*
 * The function returns no value; an internal allocator failure is asserted.
*/
void
pfree(PMEMobjpool *pop, uint64_t *off)
{
struct operation_context *ctx =
pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1);
int ret = palloc_operation(&pop->heap, *off, off, 0, NULL, NULL,
0, 0, 0, 0, ctx);
ASSERTeq(ret, 0);
pmalloc_operation_release(pop);
}
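#ifdef PMALLOC_USAGE_EXAMPLE
/*
 * Hedged usage sketch (hypothetical guard): pairs pmalloc() with
 * pfree() using the contracts described above. In real code the off
 * variable would itself live in persistent memory, so that the
 * allocation is reachable after a restart.
 */
static int
pmalloc_example(PMEMobjpool *pop, uint64_t *off)
{
	int err = pmalloc(pop, off, 128, 0 /* extra_field */,
			0 /* object_flags */);
	if (err)
		return err; /* errno-style code, e.g. ENOMEM */
	pfree(pop, off); /* persistently zeroes *off as well */
	return 0;
}
#endif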
/*
* pmalloc_boot -- global runtime init routine of allocator section
*/
int
pmalloc_boot(PMEMobjpool *pop)
{
int ret = palloc_boot(&pop->heap, (char *)pop + pop->heap_offset,
pop->set->poolsize - pop->heap_offset, &pop->heap_size,
pop, &pop->p_ops,
pop->stats, pop->set);
if (ret)
return ret;
#if VG_MEMCHECK_ENABLED
if (On_memcheck)
palloc_heap_vg_open(&pop->heap, pop->vg_boot);
#endif
ret = palloc_buckets_init(&pop->heap);
if (ret)
palloc_heap_cleanup(&pop->heap);
return ret;
}
/*
* pmalloc_cleanup -- global cleanup routine of allocator section
*/
int
pmalloc_cleanup(PMEMobjpool *pop)
{
palloc_heap_cleanup(&pop->heap);
return 0;
}
/*
* CTL_WRITE_HANDLER(desc) -- creates a new allocation class
*/
static int
CTL_WRITE_HANDLER(desc)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
uint8_t id;
struct alloc_class_collection *ac = heap_alloc_classes(&pop->heap);
struct pobj_alloc_class_desc *p = arg;
if (p->unit_size <= 0 || p->unit_size > PMEMOBJ_MAX_ALLOC_SIZE ||
p->units_per_block <= 0) {
errno = EINVAL;
return -1;
}
if (p->alignment != 0 && p->unit_size % p->alignment != 0) {
ERR("unit size must be evenly divisible by alignment");
errno = EINVAL;
return -1;
}
if (p->alignment > (MEGABYTE * 2)) {
ERR("alignment cannot be larger than 2 megabytes");
errno = EINVAL;
return -1;
}
enum header_type lib_htype = MAX_HEADER_TYPES;
switch (p->header_type) {
case POBJ_HEADER_LEGACY:
lib_htype = HEADER_LEGACY;
break;
case POBJ_HEADER_COMPACT:
lib_htype = HEADER_COMPACT;
break;
case POBJ_HEADER_NONE:
lib_htype = HEADER_NONE;
break;
case MAX_POBJ_HEADER_TYPES:
default:
ERR("invalid header type");
errno = EINVAL;
return -1;
}
if (PMDK_SLIST_EMPTY(indexes)) {
if (alloc_class_find_first_free_slot(ac, &id) != 0) {
ERR("no available free allocation class identifier");
errno = EINVAL;
return -1;
}
} else {
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "class_id"), 0);
if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
ERR("class id outside of the allowed range");
errno = ERANGE;
return -1;
}
id = (uint8_t)idx->value;
if (alloc_class_reserve(ac, id) != 0) {
ERR("attempted to overwrite an allocation class");
errno = EEXIST;
return -1;
}
}
size_t runsize_bytes =
CHUNK_ALIGN_UP((p->units_per_block * p->unit_size) +
RUN_BASE_METADATA_SIZE);
	/* aligning the buffer might require up to 'alignment' extra bytes */
if (p->alignment != 0)
runsize_bytes += p->alignment;
uint32_t size_idx = (uint32_t)(runsize_bytes / CHUNKSIZE);
if (size_idx > UINT16_MAX)
size_idx = UINT16_MAX;
struct alloc_class *c = alloc_class_new(id,
heap_alloc_classes(&pop->heap), CLASS_RUN,
lib_htype, p->unit_size, p->alignment, size_idx);
if (c == NULL) {
errno = EINVAL;
return -1;
}
if (heap_create_alloc_class_buckets(&pop->heap, c) != 0) {
alloc_class_delete(ac, c);
return -1;
}
p->class_id = c->id;
p->units_per_block = c->rdsc.nallocs;
return 0;
}
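#ifdef PMALLOC_CTL_EXAMPLE
/*
 * Hedged sketch (hypothetical guard; assumes <libpmemobj.h> is
 * visible): drives the handler above through the public ctl interface.
 * "heap.alloc_class.new.desc" picks the first free class id and writes
 * it back into desc.class_id on success.
 */
static int
alloc_class_ctl_example(PMEMobjpool *pop)
{
	struct pobj_alloc_class_desc desc = {
		.unit_size = 256,
		.alignment = 0,
		.units_per_block = 1024,
		.header_type = POBJ_HEADER_COMPACT,
	};
	return pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc);
}
#endif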
/*
* pmalloc_header_type_parser -- parses the alloc header type argument
*/
static int
pmalloc_header_type_parser(const void *arg, void *dest, size_t dest_size)
{
const char *vstr = arg;
enum pobj_header_type *htype = dest;
ASSERTeq(dest_size, sizeof(enum pobj_header_type));
if (strcmp(vstr, "none") == 0) {
*htype = POBJ_HEADER_NONE;
} else if (strcmp(vstr, "compact") == 0) {
*htype = POBJ_HEADER_COMPACT;
} else if (strcmp(vstr, "legacy") == 0) {
*htype = POBJ_HEADER_LEGACY;
} else {
ERR("invalid header type");
errno = EINVAL;
return -1;
}
return 0;
}
/*
 * CTL_READ_HANDLER(desc) -- reads the information about an allocation class
*/
static int
CTL_READ_HANDLER(desc)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
uint8_t id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "class_id"), 0);
if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) {
ERR("class id outside of the allowed range");
errno = ERANGE;
return -1;
}
id = (uint8_t)idx->value;
struct alloc_class *c = alloc_class_by_id(
heap_alloc_classes(&pop->heap), id);
if (c == NULL) {
ERR("class with the given id does not exist");
errno = ENOENT;
return -1;
}
enum pobj_header_type user_htype = MAX_POBJ_HEADER_TYPES;
switch (c->header_type) {
case HEADER_LEGACY:
user_htype = POBJ_HEADER_LEGACY;
break;
case HEADER_COMPACT:
user_htype = POBJ_HEADER_COMPACT;
break;
case HEADER_NONE:
user_htype = POBJ_HEADER_NONE;
break;
default:
ASSERT(0); /* unreachable */
break;
}
struct pobj_alloc_class_desc *p = arg;
p->units_per_block = c->type == CLASS_HUGE ? 0 : c->rdsc.nallocs;
p->header_type = user_htype;
p->unit_size = c->unit_size;
p->class_id = c->id;
p->alignment = c->flags & CHUNK_FLAG_ALIGNED ? c->rdsc.alignment : 0;
return 0;
}
static const struct ctl_argument CTL_ARG(desc) = {
.dest_size = sizeof(struct pobj_alloc_class_desc),
.parsers = {
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
unit_size, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
alignment, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
units_per_block, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc,
header_type, pmalloc_header_type_parser),
CTL_ARG_PARSER_END
}
};
static const struct ctl_node CTL_NODE(class_id)[] = {
CTL_LEAF_RW(desc),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(new)[] = {
CTL_LEAF_WO(desc),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(alloc_class)[] = {
CTL_INDEXED(class_id),
CTL_INDEXED(new),
CTL_NODE_END
};
/*
* CTL_RUNNABLE_HANDLER(extend) -- extends the pool by the given size
*/
static int
CTL_RUNNABLE_HANDLER(extend)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(ssize_t *)arg;
if (arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
ERR("incorrect size for extend, must be larger than %" PRIu64,
PMEMOBJ_MIN_PART);
return -1;
}
struct palloc_heap *heap = &pop->heap;
struct bucket *defb = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);
int ret = heap_extend(heap, defb, (size_t)arg_in) < 0 ? -1 : 0;
heap_bucket_release(heap, defb);
return ret;
}
/*
* CTL_READ_HANDLER(granularity) -- reads the current heap grow size
*/
static int
CTL_READ_HANDLER(granularity)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t *arg_out = arg;
*arg_out = (ssize_t)pop->heap.growsize;
return 0;
}
/*
* CTL_WRITE_HANDLER(granularity) -- changes the heap grow size
*/
static int
CTL_WRITE_HANDLER(granularity)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
	ssize_t arg_in = *(ssize_t *)arg;
if (arg_in != 0 && arg_in < (ssize_t)PMEMOBJ_MIN_PART) {
ERR("incorrect grow size, must be 0 or larger than %" PRIu64,
PMEMOBJ_MIN_PART);
return -1;
}
pop->heap.growsize = (size_t)arg_in;
return 0;
}
static const struct ctl_argument CTL_ARG(granularity) = CTL_ARG_LONG_LONG;
/*
 * CTL_READ_HANDLER(total) -- reads the total number of arenas
*/
static int
CTL_READ_HANDLER(total)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *narenas = arg;
*narenas = heap_get_narenas_total(&pop->heap);
return 0;
}
/*
 * CTL_READ_HANDLER(max) -- reads the maximum number of arenas
*/
static int
CTL_READ_HANDLER(max)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *max = arg;
*max = heap_get_narenas_max(&pop->heap);
return 0;
}
/*
 * CTL_WRITE_HANDLER(max) -- writes the maximum number of arenas
*/
static int
CTL_WRITE_HANDLER(max)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned size = *(unsigned *)arg;
int ret = heap_set_narenas_max(&pop->heap, size);
if (ret) {
LOG(1, "cannot change max arena number");
return -1;
}
return 0;
}
static const struct ctl_argument CTL_ARG(max) = CTL_ARG_LONG_LONG;
/*
 * CTL_READ_HANDLER(automatic) -- reads the number of automatic arenas
*/
static int
CTL_READ_HANDLER(automatic, narenas)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *narenas = arg;
*narenas = heap_get_narenas_auto(&pop->heap);
return 0;
}
/*
* CTL_READ_HANDLER(arena_id) -- reads the id of the arena
* assigned to the calling thread
*/
static int
CTL_READ_HANDLER(arena_id)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *arena_id = arg;
*arena_id = heap_get_thread_arena_id(&pop->heap);
return 0;
}
/*
* CTL_WRITE_HANDLER(arena_id) -- assigns the arena to the calling thread
*/
static int
CTL_WRITE_HANDLER(arena_id)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned arena_id = *(unsigned *)arg;
unsigned narenas = heap_get_narenas_total(&pop->heap);
	/*
	 * check that the arena id is within the allowed range <1, narenas>
	 */
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
heap_set_arena_thread(&pop->heap, arena_id);
return 0;
}
static const struct ctl_argument CTL_ARG(arena_id) = CTL_ARG_LONG_LONG;
/*
* CTL_WRITE_HANDLER(automatic) -- updates automatic status of the arena
*/
static int
CTL_WRITE_HANDLER(automatic)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
unsigned arena_id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
arena_id = (unsigned)idx->value;
unsigned narenas = heap_get_narenas_total(&pop->heap);
	/*
	 * check that the arena id is within the allowed range <1, narenas>
	 */
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
if (arg_in != 0 && arg_in != 1) {
LOG(1, "incorrect arena state, must be 0 or 1");
return -1;
}
return heap_set_arena_auto(&pop->heap, arena_id, arg_in);
}
/*
* CTL_READ_HANDLER(automatic) -- reads automatic status of the arena
*/
static int
CTL_READ_HANDLER(automatic)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
unsigned arena_id;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
arena_id = (unsigned)idx->value;
unsigned narenas = heap_get_narenas_total(&pop->heap);
	/*
	 * check that the arena id is within the allowed range <1, narenas>
	 */
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
*arg_out = heap_get_arena_auto(&pop->heap, arena_id);
return 0;
}
static struct ctl_argument CTL_ARG(automatic) = CTL_ARG_BOOLEAN;
static const struct ctl_node CTL_NODE(size)[] = {
CTL_LEAF_RW(granularity),
CTL_LEAF_RUNNABLE(extend),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(size) -- reads usable size of specified arena
*/
static int
CTL_READ_HANDLER(size)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned arena_id;
unsigned narenas;
size_t *arena_size = arg;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
ASSERTeq(strcmp(idx->name, "arena_id"), 0);
	/* get the arena index */
arena_id = (unsigned)idx->value;
	/* get the total number of arenas */
narenas = heap_get_narenas_total(&pop->heap);
	/*
	 * check that the arena id is within the allowed range <1, narenas>
	 */
if (arena_id < 1 || arena_id > narenas) {
LOG(1, "arena id outside of the allowed range: <1,%u>",
narenas);
errno = ERANGE;
return -1;
}
	/* get the buckets for this arena */
struct bucket **buckets;
buckets = heap_get_arena_buckets(&pop->heap, arena_id);
	/* calculate the arena's reserved space using its buckets */
unsigned size = 0;
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
if (buckets[i] != NULL && buckets[i]->is_active)
size += buckets[i]->active_memory_block->m.size_idx;
}
*arena_size = size * CHUNKSIZE;
return 0;
}
/*
 * CTL_RUNNABLE_HANDLER(create) -- creates a new arena in the heap
*/
static int
CTL_RUNNABLE_HANDLER(create)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
unsigned *arena_id = arg;
struct palloc_heap *heap = &pop->heap;
int ret = heap_arena_create(heap);
if (ret < 0)
return -1;
*arena_id = (unsigned)ret;
return 0;
}
static const struct ctl_node CTL_NODE(arena_id)[] = {
CTL_LEAF_RO(size),
CTL_LEAF_RW(automatic),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(arena)[] = {
CTL_INDEXED(arena_id),
CTL_LEAF_RUNNABLE(create),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(narenas)[] = {
CTL_LEAF_RO(automatic, narenas),
CTL_LEAF_RO(total),
CTL_LEAF_RW(max),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(thread)[] = {
CTL_LEAF_RW(arena_id),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(heap)[] = {
CTL_CHILD(alloc_class),
CTL_CHILD(arena),
CTL_CHILD(size),
CTL_CHILD(thread),
CTL_CHILD(narenas),
CTL_NODE_END
};
/*
* pmalloc_ctl_register -- registers ctl nodes for "heap" module
*/
void
pmalloc_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, heap);
}
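#ifdef PMALLOC_ARENA_CTL_EXAMPLE
/*
 * Hedged sketch (hypothetical guard; assumes <libpmemobj.h> is
 * visible): exercises the arena nodes registered above through the
 * public ctl interface -- create a fresh arena, then pin the calling
 * thread to it so its allocations come from that arena.
 */
static int
arena_ctl_example(PMEMobjpool *pop)
{
	unsigned arena_id;
	if (pmemobj_ctl_exec(pop, "heap.arena.create", &arena_id) != 0)
		return -1;
	return pmemobj_ctl_set(pop, "heap.thread.arena_id", &arena_id);
}
#endif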
| 18,444 | 22.114035 | 79 | c |