repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
null | ceph-main/src/test/osd/scrubber_test_datasets.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/// \file data-sets used by the scrubber unit tests
#include "./scrubber_generators.h"
namespace ScrubDatasets {
/*
* Two objects with some clones. No inconsitencies.
*/
extern ScrubGenerator::RealObjsConf minimal_snaps_configuration;
// and a part of this configuration, one that we will corrupt in a test:
extern hobject_t hobj_ms1_snp30;
// a manipulation set used in TestTScrubberBe_data_2:
extern ScrubGenerator::CorruptFuncList crpt_funcs_set1;
} // namespace ScrubDatasets
| 600 | 26.318182 | 72 | h |
null | ceph-main/src/test/osdc/FakeWriteback.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_OSDC_FAKEWRITEBACK_H
#define CEPH_TEST_OSDC_FAKEWRITEBACK_H
#include "include/Context.h"
#include "include/types.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <atomic>
class Finisher;
class FakeWriteback : public WritebackHandler {
public:
FakeWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns);
~FakeWriteback() override;
void read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc, uint64_t off, uint64_t len,
snapid_t snapid, bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace,
Context *onfinish) override;
ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc, const bufferlist &bl,
ceph::real_time mtime, uint64_t trunc_size,
__u32 trunc_seq, ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit) override;
using WritebackHandler::write;
bool may_copy_on_write(const object_t&, uint64_t, uint64_t,
snapid_t) override;
private:
CephContext *m_cct;
ceph::mutex *m_lock;
uint64_t m_delay_ns;
std::atomic<unsigned> m_tid = { 0 };
Finisher *m_finisher;
};
#endif
| 1,424 | 28.6875 | 72 | h |
null | ceph-main/src/test/osdc/MemWriteback.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_OSDC_MEMWRITEBACK_H
#define CEPH_TEST_OSDC_MEMWRITEBACK_H
#include "include/Context.h"
#include "include/types.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <atomic>
class Finisher;
class MemWriteback : public WritebackHandler {
public:
MemWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns);
~MemWriteback() override;
void read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc, uint64_t off, uint64_t len,
snapid_t snapid, bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace,
Context *onfinish) override;
ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc, const bufferlist &bl,
ceph::real_time mtime, uint64_t trunc_size,
__u32 trunc_seq, ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit) override;
using WritebackHandler::write;
bool may_copy_on_write(const object_t&, uint64_t, uint64_t,
snapid_t) override;
void write_object_data(const object_t& oid, uint64_t off, uint64_t len,
const bufferlist& data_bl);
int read_object_data(const object_t& oid, uint64_t off, uint64_t len,
bufferlist *data_bl);
private:
std::map<object_t, bufferlist> object_data;
CephContext *m_cct;
ceph::mutex *m_lock;
uint64_t m_delay_ns;
std::atomic<unsigned> m_tid = { 0 };
Finisher *m_finisher;
};
#endif
| 1,688 | 30.867925 | 73 | h |
null | ceph-main/src/test/rbd_mirror/test_fixture.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H
#define CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include <gtest/gtest.h>
#include <memory>
#include <set>
namespace librbd {
class ImageCtx;
class RBD;
}
namespace rbd {
namespace mirror {
template <typename> class Threads;
class TestFixture : public ::testing::Test {
public:
TestFixture();
static void SetUpTestCase();
static void TearDownTestCase();
void SetUp() override;
void TearDown() override;
librados::IoCtx m_local_io_ctx;
librados::IoCtx m_remote_io_ctx;
std::string m_image_name;
uint64_t m_image_size = 1 << 24;
std::set<librbd::ImageCtx *> m_image_ctxs;
Threads<librbd::ImageCtx> *m_threads = nullptr;
int create_image(librbd::RBD &rbd, librados::IoCtx &ioctx,
const std::string &name, uint64_t size);
int open_image(librados::IoCtx &io_ctx, const std::string &image_name,
librbd::ImageCtx **image_ctx);
int create_snap(librbd::ImageCtx *image_ctx, const char* snap_name,
librados::snap_t *snap_id = nullptr);
static std::string get_temp_image_name();
static int create_image_data_pool(std::string &data_pool);
static std::string _local_pool_name;
static std::string _remote_pool_name;
static std::shared_ptr<librados::Rados> _rados;
static uint64_t _image_number;
static std::string _data_pool;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H
| 1,623 | 23.606061 | 72 | h |
null | ceph-main/src/test/rbd_mirror/test_mock_fixture.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H
#define CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H
#include "test/rbd_mirror/test_fixture.h"
#include "test/librados_test_stub/LibradosTestStub.h"
#include "common/WorkQueue.h"
#include "librbd/asio/ContextWQ.h"
#include <boost/shared_ptr.hpp>
#include <gmock/gmock.h>
#include "include/ceph_assert.h"
namespace librados {
class TestRadosClient;
class MockTestMemCluster;
class MockTestMemIoCtxImpl;
class MockTestMemRadosClient;
}
namespace librbd {
class MockImageCtx;
}
ACTION_P(CopyInBufferlist, str) {
arg0->append(str);
}
ACTION_P(CompleteContext, r) {
arg0->complete(r);
}
ACTION_P2(CompleteContext, wq, r) {
auto context_wq = reinterpret_cast<librbd::asio::ContextWQ *>(wq);
context_wq->queue(arg0, r);
}
ACTION_P(GetReference, ref_object) {
ref_object->get();
}
MATCHER_P(ContentsEqual, bl, "") {
// TODO fix const-correctness of bufferlist
return const_cast<bufferlist &>(arg).contents_equal(
const_cast<bufferlist &>(bl));
}
namespace rbd {
namespace mirror {
class TestMockFixture : public TestFixture {
public:
typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
static void SetUpTestCase();
static void TearDownTestCase();
void TearDown() override;
void expect_test_features(librbd::MockImageCtx &mock_image_ctx);
librados::MockTestMemCluster& get_mock_cluster();
private:
static TestClusterRef s_test_cluster;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H
| 1,628 | 21.315068 | 70 | h |
null | ceph-main/src/test/rbd_mirror/mock/image_sync/MockSyncPointHandler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MOCK_IMAGE_SYNC_SYNC_POINT_HANDLER_H
#define CEPH_MOCK_IMAGE_SYNC_SYNC_POINT_HANDLER_H
#include "tools/rbd_mirror/image_sync/Types.h"
#include <gmock/gmock.h>
struct Context;
namespace rbd {
namespace mirror {
namespace image_sync {
struct MockSyncPointHandler : public SyncPointHandler{
MOCK_CONST_METHOD0(get_sync_points, SyncPoints());
MOCK_CONST_METHOD0(get_snap_seqs, librbd::SnapSeqs());
MOCK_METHOD4(update_sync_points, void(const librbd::SnapSeqs&,
const SyncPoints&,
bool, Context*));
};
} // namespace image_sync
} // namespace mirror
} // namespace rbd
#endif // CEPH_MOCK_IMAGE_SYNC_SYNC_POINT_HANDLER_H
| 821 | 26.4 | 70 | h |
null | ceph-main/src/test/rgw/amqp_mock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
namespace amqp_mock {
void set_valid_port(int port);
void set_valid_host(const std::string& host);
void set_valid_vhost(const std::string& vhost);
void set_valid_user(const std::string& user, const std::string& password);
void set_multiple(unsigned tag);
void reset_multiple();
extern bool FAIL_NEXT_WRITE; // default "false"
extern bool FAIL_NEXT_READ; // default "false"
extern bool REPLY_ACK; // default "true"
}
| 579 | 28 | 74 | h |
null | ceph-main/src/test/rgw/amqp_url.c | /*
* ***** BEGIN LICENSE BLOCK *****
* Version: MIT
*
* Portions created by Alan Antonuk are Copyright (c) 2012-2013
* Alan Antonuk. All Rights Reserved.
*
* Portions created by VMware are Copyright (c) 2007-2012 VMware, Inc.
* All Rights Reserved.
*
* Portions created by Tony Garnock-Jones are Copyright (c) 2009-2010
* VMware, Inc. and Tony Garnock-Jones. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* ***** END LICENSE BLOCK *****
*/
// this version of the file is slightly modified from the original one
// as it is only used to mock amqp libraries
#ifdef _MSC_VER
#define _CRT_SECURE_NO_WARNINGS
#endif
#include "amqp.h"
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void amqp_default_connection_info(struct amqp_connection_info *ci) {
/* Apply defaults */
ci->user = "guest";
ci->password = "guest";
ci->host = "localhost";
ci->port = 5672;
ci->vhost = "/";
ci->ssl = 0;
}
/* Scan for the next delimiter, handling percent-encodings on the way. */
static char find_delim(char **pp, int colon_and_at_sign_are_delims) {
char *from = *pp;
char *to = from;
for (;;) {
char ch = *from++;
switch (ch) {
case ':':
case '@':
if (!colon_and_at_sign_are_delims) {
*to++ = ch;
break;
}
/* fall through */
case 0:
case '/':
case '?':
case '#':
case '[':
case ']':
*to = 0;
*pp = from;
return ch;
case '%': {
unsigned int val;
int chars;
int res = sscanf(from, "%2x%n", &val, &chars);
if (res == EOF || res < 1 || chars != 2 || val > CHAR_MAX)
/* Return a surprising delimiter to
force an error. */
{
return '%';
}
*to++ = (char)val;
from += 2;
break;
}
default:
*to++ = ch;
break;
}
}
}
/* Parse an AMQP URL into its component parts. */
int amqp_parse_url(char *url, struct amqp_connection_info *parsed) {
int res = AMQP_STATUS_BAD_URL;
char delim;
char *start;
char *host;
char *port = NULL;
amqp_default_connection_info(parsed);
parsed->port = 5672;
parsed->ssl = 0;
/* check the prefix */
if (!strncmp(url, "amqp://", 7)) {
/* do nothing */
} else if (!strncmp(url, "amqps://", 8)) {
parsed->port = 5671;
parsed->ssl = 1;
} else {
goto out;
}
host = start = url += (parsed->ssl ? 8 : 7);
delim = find_delim(&url, 1);
if (delim == ':') {
/* The colon could be introducing the port or the
password part of the userinfo. We don't know yet,
so stash the preceding component. */
port = start = url;
delim = find_delim(&url, 1);
}
if (delim == '@') {
/* What might have been the host and port were in fact
the username and password */
parsed->user = host;
if (port) {
parsed->password = port;
}
port = NULL;
host = start = url;
delim = find_delim(&url, 1);
}
if (delim == '[') {
/* IPv6 address. The bracket should be the first
character in the host. */
if (host != start || *host != 0) {
goto out;
}
start = url;
delim = find_delim(&url, 0);
if (delim != ']') {
goto out;
}
parsed->host = start;
start = url;
delim = find_delim(&url, 1);
/* Closing bracket should be the last character in the
host. */
if (*start != 0) {
goto out;
}
} else {
/* If we haven't seen the host yet, this is it. */
if (*host != 0) {
parsed->host = host;
}
}
if (delim == ':') {
port = start = url;
delim = find_delim(&url, 1);
}
if (port) {
char *end;
long portnum = strtol(port, &end, 10);
if (port == end || *end != 0 || portnum < 0 || portnum > 65535) {
goto out;
}
parsed->port = portnum;
}
if (delim == '/') {
start = url;
delim = find_delim(&url, 1);
if (delim != 0) {
goto out;
}
parsed->vhost = start;
res = AMQP_STATUS_OK;
} else if (delim == 0) {
res = AMQP_STATUS_OK;
}
/* Any other delimiter is bad, and we will return AMQP_STATUS_BAD_AMQP_URL. */
out:
return res;
}
| 5,304 | 22.896396 | 78 | c |
null | ceph-main/src/test/system/cross_process_sem.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
struct cross_process_sem_data_t;
class CrossProcessSem
{
public:
static int create(int initial_val, CrossProcessSem** ret);
~CrossProcessSem();
/* Initialize the semaphore. Must be called before any operations */
int init();
/* Semaphore wait */
void wait();
/* Semaphore post */
void post();
/* Reinitialize the semaphore to the desired value.
* NOT thread-safe if it is in use at the time!
*/
int reinit(int dval);
private:
explicit CrossProcessSem(struct cross_process_sem_data_t *data);
struct cross_process_sem_data_t *m_data;
};
| 971 | 22.707317 | 70 | h |
null | ceph-main/src/test/system/st_rados_create_pool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_CREATE_POOL_H
#define TEST_SYSTEM_ST_RADOS_CREATE_POOL_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_create_pool
*
* Waits, then posts to setup_sem.
* Creates a pool and populates it with some objects.
* Then, calls pool_setup_sem->post()
*/
class StRadosCreatePool : public SysTestRunnable
{
public:
static std::string get_random_buf(int sz);
StRadosCreatePool(int argc, const char **argv,
CrossProcessSem *setup_sem,
CrossProcessSem *pool_setup_sem,
CrossProcessSem *close_create_pool_sem,
const std::string &pool_name,
int num_objects,
const std::string &suffix);
~StRadosCreatePool() override;
int run() override;
private:
CrossProcessSem *m_setup_sem;
CrossProcessSem *m_pool_setup_sem;
CrossProcessSem *m_close_create_pool;
std::string m_pool_name;
int m_num_objects;
std::string m_suffix;
};
std::string get_temp_pool_name(const char* prefix);
#endif
| 1,376 | 24.5 | 70 | h |
null | ceph-main/src/test/system/st_rados_delete_objs.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_DELETE_OBJS_H
#define TEST_SYSTEM_ST_RADOS_DELETE_OBJS_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_delete_objs
*
* Waits on setup_sem, posts to it,
* deletes num_objs objects from the pool,
* and posts to deleted_sem.
*/
class StRadosDeleteObjs : public SysTestRunnable
{
public:
StRadosDeleteObjs(int argc, const char **argv,
CrossProcessSem *setup_sem,
CrossProcessSem *deleted_sem,
int num_objs,
const std::string &pool_name,
const std::string &suffix);
~StRadosDeleteObjs() override;
int run() override;
private:
CrossProcessSem *m_setup_sem;
CrossProcessSem *m_deleted_sem;
int m_num_objs;
std::string m_pool_name;
std::string m_suffix;
};
#endif
| 1,161 | 22.714286 | 70 | h |
null | ceph-main/src/test/system/st_rados_delete_pool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_DELETE_POOL_H
#define TEST_SYSTEM_ST_RADOS_DELETE_POOL_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_delete_pool
*
* Waits on pool_setup_sem, posts to it,
* deletes a pool, and posts to delete_pool_sem.
*/
class StRadosDeletePool : public SysTestRunnable
{
public:
StRadosDeletePool(int argc, const char **argv,
CrossProcessSem *pool_setup_sem,
CrossProcessSem *delete_pool_sem,
const std::string &pool_name);
~StRadosDeletePool() override;
int run() override;
private:
CrossProcessSem *m_pool_setup_sem;
CrossProcessSem *m_delete_pool_sem;
std::string m_pool_name;
};
#endif
| 1,066 | 23.25 | 70 | h |
null | ceph-main/src/test/system/st_rados_list_objects.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_LIST_OBJECTS_H
#define TEST_SYSTEM_ST_RADOS_LIST_OBJECTS_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_list_objects
*
* 1. calls pool_setup_sem->wait()
* 2. calls pool_setup_sem->post()
* 3. list some objects
* 4. modify_sem->wait()
* 5. list some objects
*/
class StRadosListObjects : public SysTestRunnable
{
public:
static std::string get_random_buf(int sz);
StRadosListObjects(int argc, const char **argv,
const std::string &pool_name,
bool accept_list_errors,
int midway_cnt,
CrossProcessSem *pool_setup_sem,
CrossProcessSem *midway_sem_wait,
CrossProcessSem *midway_sem_post);
~StRadosListObjects() override;
int run() override;
private:
std::string m_pool_name;
bool m_accept_list_errors;
int m_midway_cnt;
CrossProcessSem *m_pool_setup_sem;
CrossProcessSem *m_midway_sem_wait;
CrossProcessSem *m_midway_sem_post;
};
#endif
| 1,356 | 24.12963 | 70 | h |
null | ceph-main/src/test/system/st_rados_notify.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_NOTIFY_H
#define TEST_SYSTEM_ST_RADOS_NOTIFY_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_notify
*
* 1. waits on and then posts to setup_sem
* 2. connects and opens the pool
* 3. waits on and then posts to notify_sem
* 4. notifies on the object
* 5. posts to notified_sem
*/
class StRadosNotify : public SysTestRunnable
{
public:
StRadosNotify(int argc, const char **argv,
CrossProcessSem *setup_sem,
CrossProcessSem *notify_sem,
CrossProcessSem *notified_sem,
int notify_retcode,
const std::string &pool_name,
const std::string &obj_name);
~StRadosNotify() override;
int run() override;
private:
CrossProcessSem *m_setup_sem;
CrossProcessSem *m_notify_sem;
CrossProcessSem *m_notified_sem;
int m_notify_retcode;
std::string m_pool_name;
std::string m_obj_name;
};
#endif
| 1,266 | 22.90566 | 70 | h |
null | ceph-main/src/test/system/st_rados_watch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEST_SYSTEM_ST_RADOS_WATCH_H
#define TEST_SYSTEM_ST_RADOS_WATCH_H
#include "systest_runnable.h"
class CrossProcessSem;
/*
* st_rados_watch
*
* 1. waits on setup_sem
* 2. posts to setup_sem
* 3. watches an object
* 4. posts to watch_sem
* 5. waits on notify_sem
* 6. posts to notify_sem
* 7. checks that the correct number of notifies were received
*/
class StRadosWatch : public SysTestRunnable
{
public:
StRadosWatch(int argc, const char **argv,
CrossProcessSem *setup_sem,
CrossProcessSem *watch_sem,
CrossProcessSem *notify_sem,
int num_notifies,
int watch_retcode,
const std::string &pool_name,
const std::string &obj_name);
~StRadosWatch() override;
int run() override;
private:
CrossProcessSem *m_setup_sem;
CrossProcessSem *m_watch_sem;
CrossProcessSem *m_notify_sem;
int m_num_notifies;
int m_watch_retcode;
std::string m_pool_name;
std::string m_obj_name;
};
#endif
| 1,372 | 23.087719 | 70 | h |
null | ceph-main/src/test/system/systest_runnable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SYSTEM_TEST_H
#define CEPH_SYSTEM_TEST_H
#include <pthread.h>
#include <stdio.h>
#include <string>
#include <vector>
#ifndef _WIN32
#include "common/Preforker.h"
#endif
#define RETURN1_IF_NOT_VAL(expected, expr) \
do {\
int _rinv_ret = expr;\
if (_rinv_ret != expected) {\
printf("%s: file %s, line %d: expected %d, got %d\n",\
get_id_str(), __FILE__, __LINE__, expected, _rinv_ret);\
return 1; \
}\
} while(0);
#define RETURN1_IF_NONZERO(expr) \
RETURN1_IF_NOT_VAL(0, expr)
extern void* systest_runnable_pthread_helper(void *arg);
std::string get_temp_pool_name(const char* prefix);
/* Represents a single test thread / process.
*
* Inherit from this class and implement the test body in run().
*/
class SysTestRunnable
{
public:
static const int ID_STR_SZ = 196;
SysTestRunnable(int argc, const char **argv);
virtual ~SysTestRunnable();
/* Returns 0 on success; error code otherwise. */
virtual int run() = 0;
/* Return a string identifying the runnable. */
const char* get_id_str(void) const;
/* Start the Runnable */
int start();
/* Wait until the Runnable is finished. Returns an error string on failure. */
std::string join();
/* Starts a bunch of SystemTestRunnables and waits until they're done.
*
* Returns an error string on failure. */
static std::string run_until_finished(std::vector < SysTestRunnable * >&
runnables);
protected:
int m_argc;
const char **m_argv;
private:
explicit SysTestRunnable(const SysTestRunnable &rhs);
SysTestRunnable& operator=(const SysTestRunnable &rhs);
void update_id_str(bool started);
void set_argv(int argc, const char **argv);
friend void* systest_runnable_pthread_helper(void *arg);
#ifndef _WIN32
Preforker preforker;
#endif
const char **m_argv_orig;
bool m_started;
int m_id;
pthread_t m_pthread;
char m_id_str[ID_STR_SZ];
};
#endif
| 2,326 | 23.494737 | 80 | h |
null | ceph-main/src/test/system/systest_settings.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SYSTEM_TEST_SETTINGS_H
#define CEPH_SYSTEM_TEST_SETTINGS_H
#include <string>
/* Singleton with settings grabbed from environment variables */
class SysTestSettings
{
public:
static SysTestSettings& inst();
bool use_threads() const;
std::string get_log_name(const std::string &suffix) const;
private:
static SysTestSettings* m_inst;
SysTestSettings();
~SysTestSettings();
bool m_use_threads;
std::string m_log_file_base;
};
#endif
| 872 | 22.594595 | 70 | h |
null | ceph-main/src/tools/RadosDump.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef RADOS_DUMP_H_
#define RADOS_DUMP_H_
#include <stdint.h>
#include "include/buffer.h"
#include "include/encoding.h"
#include "osd/osd_types.h"
#include "osd/OSDMap.h"
typedef uint8_t sectiontype_t;
typedef uint32_t mymagic_t;
typedef int64_t mysize_t;
enum {
TYPE_NONE = 0,
TYPE_PG_BEGIN,
TYPE_PG_END,
TYPE_OBJECT_BEGIN,
TYPE_OBJECT_END,
TYPE_DATA,
TYPE_ATTRS,
TYPE_OMAP_HDR,
TYPE_OMAP,
TYPE_PG_METADATA,
TYPE_POOL_BEGIN,
TYPE_POOL_END,
END_OF_TYPES, //Keep at the end
};
const uint16_t shortmagic = 0xffce; //goes into stream as "ceff"
//endmagic goes into stream as "ceff ffec"
const mymagic_t endmagic = (0xecff << 16) | shortmagic;
//The first FIXED_LENGTH bytes are a fixed
//portion of the export output. This includes the overall
//version number, and size of header and footer.
//THIS STRUCTURE CAN ONLY BE APPENDED TO. If it needs to expand,
//the version can be bumped and then anything
//can be added to the export format.
struct super_header {
static const uint32_t super_magic = (shortmagic << 16) | shortmagic;
// ver = 1, Initial version
// ver = 2, Add OSDSuperblock to pg_begin
static const uint32_t super_ver = 2;
static const uint32_t FIXED_LENGTH = 16;
uint32_t magic;
uint32_t version;
uint32_t header_size;
uint32_t footer_size;
super_header() : magic(0), version(0), header_size(0), footer_size(0) { }
void encode(bufferlist& bl) const {
using ceph::encode;
encode(magic, bl);
encode(version, bl);
encode(header_size, bl);
encode(footer_size, bl);
}
void decode(bufferlist::const_iterator& bl) {
using ceph::decode;
decode(magic, bl);
decode(version, bl);
decode(header_size, bl);
decode(footer_size, bl);
}
};
struct header {
sectiontype_t type;
mysize_t size;
header(sectiontype_t type, mysize_t size) :
type(type), size(size) { }
header(): type(0), size(0) { }
void encode(bufferlist& bl) const {
uint32_t debug_type = (type << 24) | (type << 16) | shortmagic;
ENCODE_START(1, 1, bl);
encode(debug_type, bl);
encode(size, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
uint32_t debug_type;
DECODE_START(1, bl);
decode(debug_type, bl);
type = debug_type >> 24;
decode(size, bl);
DECODE_FINISH(bl);
}
};
struct footer {
mymagic_t magic;
footer() : magic(endmagic) { }
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(magic, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(magic, bl);
DECODE_FINISH(bl);
}
};
struct pg_begin {
spg_t pgid;
OSDSuperblock superblock;
pg_begin(spg_t pg, const OSDSuperblock& sb):
pgid(pg), superblock(sb) { }
pg_begin() { }
void encode(bufferlist& bl) const {
// If superblock doesn't include CEPH_FS_FEATURE_INCOMPAT_SHARDS then
// shard will be NO_SHARD for a replicated pool. This means
// that we allow the decode by struct_v 2.
ENCODE_START(3, 2, bl);
encode(pgid.pgid, bl);
encode(superblock, bl);
encode(pgid.shard, bl);
ENCODE_FINISH(bl);
}
// NOTE: New super_ver prevents decode from ver 1
void decode(bufferlist::const_iterator& bl) {
DECODE_START(3, bl);
decode(pgid.pgid, bl);
if (struct_v > 1) {
decode(superblock, bl);
}
if (struct_v > 2) {
decode(pgid.shard, bl);
} else {
pgid.shard = shard_id_t::NO_SHARD;
}
DECODE_FINISH(bl);
}
};
struct object_begin {
ghobject_t hoid;
// Duplicate what is in the OI_ATTR so we have it at the start
// of object processing.
object_info_t oi;
explicit object_begin(const ghobject_t &hoid): hoid(hoid) { }
object_begin() { }
// If superblock doesn't include CEPH_FS_FEATURE_INCOMPAT_SHARDS then
// generation will be NO_GEN, shard_id will be NO_SHARD for a replicated
// pool. This means we will allow the decode by struct_v 1.
void encode(bufferlist& bl) const {
ENCODE_START(3, 1, bl);
encode(hoid.hobj, bl);
encode(hoid.generation, bl);
encode(hoid.shard_id, bl);
encode(oi, bl, -1); /* FIXME: we always encode with full features */
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(3, bl);
decode(hoid.hobj, bl);
if (struct_v > 1) {
decode(hoid.generation, bl);
decode(hoid.shard_id, bl);
} else {
hoid.generation = ghobject_t::NO_GEN;
hoid.shard_id = shard_id_t::NO_SHARD;
}
if (struct_v > 2) {
decode(oi, bl);
}
DECODE_FINISH(bl);
}
};
struct data_section {
uint64_t offset;
uint64_t len;
bufferlist databl;
data_section(uint64_t offset, uint64_t len, bufferlist bl):
offset(offset), len(len), databl(bl) { }
data_section(): offset(0), len(0) { }
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(offset, bl);
encode(len, bl);
encode(databl, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(offset, bl);
decode(len, bl);
decode(databl, bl);
DECODE_FINISH(bl);
}
};
struct attr_section {
using data_t = std::map<std::string,bufferlist,std::less<>>;
data_t data;
explicit attr_section(const data_t &data) : data(data) { }
explicit attr_section(std::map<std::string, bufferptr, std::less<>> &data_)
{
for (auto& [k, v] : data_) {
bufferlist bl;
bl.push_back(v);
data.emplace(k, std::move(bl));
}
}
attr_section() { }
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(data, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(data, bl);
DECODE_FINISH(bl);
}
};
struct omap_hdr_section {
bufferlist hdr;
explicit omap_hdr_section(bufferlist hdr) : hdr(hdr) { }
omap_hdr_section() { }
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(hdr, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(hdr, bl);
DECODE_FINISH(bl);
}
};
struct omap_section {
std::map<std::string, bufferlist> omap;
explicit omap_section(const std::map<std::string, bufferlist> &omap) :
omap(omap) { }
omap_section() { }
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(omap, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(omap, bl);
DECODE_FINISH(bl);
}
};
// Export dump section describing a PG: info, log, past intervals, the OSDMap
// of the export epoch, divergent priors and the missing set.
// NOTE: `struct_ver` below is the PG's own on-disk version, while the
// `struct_v` used in decode() is the version of this export section itself
// (provided by the DECODE_START macro).
struct metadata_section {
  // struct_ver is the on-disk version of original pg
  __u8 struct_ver;  // for reference
  epoch_t map_epoch;
  pg_info_t info;
  pg_log_t log;
  PastIntervals past_intervals;
  OSDMap osdmap;
  bufferlist osdmap_bl;  // Used in lieu of encoding osdmap due to crc checking
  std::map<eversion_t, hobject_t> divergent_priors;
  pg_missing_t missing;
  metadata_section(
    __u8 struct_ver,
    epoch_t map_epoch,
    const pg_info_t &info,
    const pg_log_t &log,
    const PastIntervals &past_intervals,
    const pg_missing_t &missing)
    : struct_ver(struct_ver),
      map_epoch(map_epoch),
      info(info),
      log(log),
      past_intervals(past_intervals),
      missing(missing) {}
  metadata_section()
    : struct_ver(0),
      map_epoch(0) { }
  void encode(bufferlist& bl) const {
    ENCODE_START(6, 6, bl);
    encode(struct_ver, bl);
    encode(map_epoch, bl);
    encode(info, bl);
    encode(log, bl);
    encode(past_intervals, bl);
    // Equivalent to osdmap.encode(bl, features); but
    // preserving exact layout for CRC checking.
    bl.append(osdmap_bl);
    encode(divergent_priors, bl);
    encode(missing, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(6, bl);
    decode(struct_ver, bl);
    decode(map_epoch, bl);
    decode(info, bl);
    decode(log, bl);
    // Version gates below keep compatibility with older exports; fields that
    // were not present in the export are simply left default-constructed.
    if (struct_v >= 6) {
      decode(past_intervals, bl);
    } else if (struct_v > 1) {
      std::cout << "NOTICE: Older export with classic past_intervals" << std::endl;
    } else {
      std::cout << "NOTICE: Older export without past_intervals" << std::endl;
    }
    if (struct_v > 2) {
      // Consumes the raw osdmap bytes written by encode() via osdmap_bl.
      osdmap.decode(bl);
    } else {
      std::cout << "WARNING: Older export without OSDMap information" << std::endl;
    }
    if (struct_v > 3) {
      decode(divergent_priors, bl);
    }
    if (struct_v > 4) {
      decode(missing, bl);
    }
    DECODE_FINISH(bl);
  }
};
/**
 * Superclass for classes that will need to handle a serialized RADOS
 * dump. Requires that the serialized dump be opened with a known FD.
 * All write_* helpers are no-ops when dry_run is set.
 */
class RadosDump
{
  protected:
    int file_fd;      // fd of the opened dump file
    super_header sh;  // parsed super header (filled by read_super())
    bool dry_run;     // when true, suppress all writes

  public:
    RadosDump(int file_fd_, bool dry_run_)
      : file_fd(file_fd_), dry_run(dry_run_)
    {}

    int read_super();
    int get_header(header *h);
    int get_footer(footer *f);
    int read_section(sectiontype_t *type, bufferlist *bl);
    int skip_object(bufferlist &bl);
    void write_super();

    // Define this in .h because it's templated
    // Encode `obj` and write it to `fd` framed as header + payload + footer.
    // Returns 0 on success or the first non-zero error from write_fd().
    template <typename T>
    int write_section(sectiontype_t type, const T& obj, int fd) {
      if (dry_run)
        return 0;
      bufferlist blhdr, bl, blftr;
      obj.encode(bl);
      header hdr(type, bl.length());
      hdr.encode(blhdr);
      footer ft;
      ft.encode(blftr);

      int ret = blhdr.write_fd(fd);
      if (ret) return ret;
      ret = bl.write_fd(fd);
      if (ret) return ret;
      ret = blftr.write_fd(fd);
      return ret;
    }

    // Write a zero-length section: a bare header (no payload, no footer).
    int write_simple(sectiontype_t type, int fd)
    {
      if (dry_run)
        return 0;
      bufferlist hbl;

      header hdr(type, 0);
      hdr.encode(hbl);
      return hbl.write_fd(fd);
    }
};
#endif
| 10,275 | 24.063415 | 83 | h |
null | ceph-main/src/tools/ceph_objectstore_tool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OBJECTSTORE_TOOL_H_
#define CEPH_OBJECTSTORE_TOOL_H_
#include "RadosDump.h"
// Import/export of PGs to and from an ObjectStore, layered on the RADOS
// dump framing provided by RadosDump. Declarations only; definitions live
// in the corresponding .cc file.
class ObjectStoreTool : public RadosDump
{
  public:
    ObjectStoreTool(int file_fd, bool dry_run)
      : RadosDump(file_fd, dry_run)
    {}

    // Pretty-print an export stream; object data is optionally written
    // under dump_data_dir.
    int dump_export(Formatter *formatter, const std::string &dump_data_dir);
    // Read an export stream and recreate the PG in `store`.
    int do_import(ObjectStore *store, OSDSuperblock& sb, bool force,
		  std::string pgidstr);
    // Serialize PG `pgid` from `fs` into the dump file.
    int do_export(CephContext *cct, ObjectStore *fs, coll_t coll, spg_t pgid,
		  pg_info_t &info, epoch_t map_epoch, __u8 struct_ver,
		  const OSDSuperblock& superblock,
		  PastIntervals &past_intervals);
    int dump_object(Formatter *formatter, bufferlist &bl,
		    const std::string &dump_data_dir = "");
    int get_object(
      ObjectStore *store, OSDriver& driver, SnapMapper& mapper, coll_t coll,
      bufferlist &bl, OSDMap &curmap, bool *skipped_objects);
    int export_file(
      ObjectStore *store, coll_t cid, ghobject_t &obj);
    int export_files(ObjectStore *store, coll_t coll);
};
#endif // CEPH_OBJECTSTORE_TOOL_H_
| 1,481 | 31.933333 | 77 | h |
null | ceph-main/src/tools/kvstore_tool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <memory>
#include <string>
#include "acconfig.h"
#include "include/buffer_fwd.h"
#ifdef WITH_BLUESTORE
#include "os/bluestore/BlueStore.h"
#endif
class KeyValueDB;
// Command-line helper for inspecting and mutating a KeyValueDB store
// (optionally the embedded RocksDB of a BlueStore instance).
class StoreTool
{
#ifdef WITH_BLUESTORE
  // Custom deleter: when the KeyValueDB is owned by a BlueStore instance,
  // tearing down means umounting and deleting the BlueStore (which owns the
  // db); otherwise the db itself is deleted directly.
  struct Deleter {
    BlueStore *bluestore;
    Deleter()
      : bluestore(nullptr) {}
    Deleter(BlueStore *store)
      : bluestore(store) {}
    void operator()(KeyValueDB *db) {
      if (bluestore) {
        bluestore->umount();
        delete bluestore;
      } else {
        delete db;
      }
    }
  };
  std::unique_ptr<KeyValueDB, Deleter> db;
#else
  std::unique_ptr<KeyValueDB> db;
#endif
  const std::string store_path;

public:
  StoreTool(const std::string& type,
	    const std::string& path,
	    bool need_open_db = true,
	    bool need_stats = false);
  int load_bluestore(const std::string& path, bool need_open_db);
  // Walk all keys under `prefix`; optionally CRC and/or dump values to *out.
  uint32_t traverse(const std::string& prefix,
		    const bool do_crc,
		    const bool do_value_dump,
		    std::ostream *out);
  void list(const std::string& prefix,
	    const bool do_crc,
	    const bool do_value_dump);
  bool exists(const std::string& prefix);
  bool exists(const std::string& prefix, const std::string& key);
  // Fetch one value; `exists` reports whether the key was found.
  ceph::bufferlist get(const std::string& prefix,
		       const std::string& key,
		       bool& exists);
  uint64_t get_size();
  bool set(const std::string& prefix,
	   const std::string& key,
	   ceph::bufferlist& val);
  bool rm(const std::string& prefix, const std::string& key);
  bool rm_prefix(const std::string& prefix);
  void print_summary(const uint64_t total_keys, const uint64_t total_size,
		     const uint64_t total_txs, const std::string& store_path,
		     const std::string& other_path, const int duration) const;
  // Copy every key/value into a freshly created store at other_path,
  // batching num_keys_per_tx keys per transaction.
  int copy_store_to(const std::string& type, const std::string& other_path,
		    const int num_keys_per_tx, const std::string& other_type);
  void compact();
  void compact_prefix(const std::string& prefix);
  void compact_range(const std::string& prefix,
		     const std::string& start,
		     const std::string& end);
  int destructive_repair();
  int print_stats() const;
  int build_size_histogram(const std::string& prefix) const;
};
| 2,363 | 27.829268 | 78 | h |
null | ceph-main/src/tools/scratchtool.c | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/rados/librados.h"
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/* Store xattr `key` = `val` (NUL included) on `oid`.
 * Returns 0 on success, 1 on failure; progress is reported on stdout. */
static int do_rados_setxattr(rados_ioctx_t io_ctx, const char *oid,
			     const char *key, const char *val)
{
	size_t len = strlen(val) + 1;
	int rc = rados_setxattr(io_ctx, oid, key, val, len);
	if (rc >= 0) {
		printf("rados_setxattr %s=%s\n", key, val);
		return 0;
	}
	printf("rados_setxattr failed with error %d\n", rc);
	return 1;
}
/* Read back xattr `key` from `oid` and verify it matches `expected`
 * (including the trailing NUL). Returns 0 on success, 1 on error or
 * mismatch; diagnostics go to stdout. */
static int do_rados_getxattr(rados_ioctx_t io_ctx, const char *oid,
			     const char *key, const char *expected)
{
	size_t want = strlen(expected) + 1;
	char got[want];			/* VLA sized to the expected value */

	memset(got, 0, sizeof(got));
	int rc = rados_getxattr(io_ctx, oid, key, got, want);
	if (rc < 0) {
		printf("rados_getxattr(%s) failed with error %d\n", key, rc);
		return 1;
	}
	if (strcmp(got, expected) != 0) {
		printf("rados_getxattr(%s) got wrong result! "
		       "expected: '%s'. got '%s'\n", key, expected, got);
		return 1;
	}
	printf("rados_getxattr %s=%s\n", key, got);
	return 0;
}
/* Iterate all xattrs of `oid` and check that every (exkeys[i], exvals[i])
 * pair is present with exactly the expected value (length includes the NUL).
 * Keys not listed in exkeys are silently ignored; a listed key with a wrong
 * value is an error. Returns 0 on success, 1 on failure. */
static int do_rados_getxattrs(rados_ioctx_t io_ctx, const char *oid,
			      const char **exkeys, const char **exvals)
{
  rados_xattrs_iter_t iter;
  int nval = 0, i, nfound = 0, r = 0, ret = 1;
  /* count expected entries (exvals is NULL-terminated) */
  for (i = 0; exvals[i]; ++i) {
    ++nval;
  }
  r = rados_getxattrs(io_ctx, oid, &iter);
  if (r) {
    printf("rados_getxattrs(%s) failed with error %d\n", oid, r);
    return 1;
  }
  while (1) {
    size_t len;
    const char *key, *val;
    r = rados_getxattrs_next(iter, &key, &val, &len);
    if (r) {
      printf("rados_getxattrs(%s): rados_getxattrs_next "
             "returned error %d\n", oid, r);
      goto out_err;
    }
    if (!key)   /* NULL key marks end of iteration */
      break;
    for (i = 0; i < nval; ++i) {
      if (strcmp(exkeys[i], key))
        continue;
      /* key matched: value must match in both length and content */
      if ((len == strlen(exvals[i]) + 1) && (val != NULL) && (!strcmp(exvals[i], val))) {
        nfound++;
        break;
      }
      printf("rados_getxattrs(%s): got key %s, but the "
             "value was %s rather than %s.\n",
             oid, key, val, exvals[i]);
      goto out_err;
    }
  }
  if (nfound != nval) {
    printf("rados_getxattrs(%s): only found %d extended attributes. "
           "Expected %d\n", oid, nfound, nval);
    goto out_err;
  }
  ret = 0;
  printf("rados_getxattrs(%s)\n", oid);
out_err:
  /* single exit path so the iterator is always released */
  rados_getxattrs_end(iter);
  return ret;
}
/* End-to-end smoke test of the librados C API: configuration, connect,
 * pool create/list/stat, snapshots, sync and async I/O, xattrs, exec,
 * object listing, and pool deletion. Returns 0 on success, 1 on failure.
 * Error paths jump to out_err_cleanup (pool/ioctx created) or out_err
 * (cluster handle only). */
static int testrados(void)
{
  char tmp[32];
  int i, r, safe;
  int ret = 1; //set 1 as error case
  rados_t cl;
  const char *oid = "foo_object";
  const char *exkeys[] = { "a", "b", "c", NULL };
  const char *exvals[] = { "1", "2", "3", NULL };
  if (rados_create(&cl, NULL) < 0) {
    printf("error initializing\n");
    return 1;
  }
  if (rados_conf_read_file(cl, NULL)) {
    printf("error reading configuration file\n");
    goto out_err;
  }
  // Try to set a configuration option that doesn't exist.
  // This should fail.
  if (!rados_conf_set(cl, "config option that doesn't exist",
                    "some random value")) {
    printf("error: succeeded in setting nonexistent config option\n");
    goto out_err;
  }
  if (rados_conf_get(cl, "log to stderr", tmp, sizeof(tmp))) {
    printf("error: failed to read log_to_stderr from config\n");
    goto out_err;
  }
  // Can we change it?
  if (rados_conf_set(cl, "log to stderr", "true")) {
    printf("error: error setting log_to_stderr\n");
    goto out_err;
  }
  if (rados_conf_get(cl, "log to stderr", tmp, sizeof(tmp))) {
    printf("error: failed to read log_to_stderr from config\n");
    goto out_err;
  }
  if (strcmp(tmp, "true")) {
    printf("error: new setting for log_to_stderr failed to take effect.\n");
    goto out_err;
  }
  if (rados_connect(cl)) {
    printf("error connecting\n");
    goto out_err;
  }
  // A second connect on the same handle is expected to fail.
  if (rados_connect(cl) == 0) {
    printf("second connect attempt didn't return an error\n");
    goto out_err;
  }
  /* create an io_ctx */
  r = rados_pool_create(cl, "foo");
  printf("rados_pool_create = %d\n", r);
  rados_ioctx_t io_ctx;
  r = rados_ioctx_create(cl, "foo", &io_ctx);
  if (r < 0) {
    printf("error creating ioctx\n");
    goto out_err;
  }
  printf("rados_ioctx_create = %d, io_ctx = %p\n", r, io_ctx);
  /* list all pools */
  {
    // First call with a NULL buffer returns the required size; pool names
    // come back as consecutive NUL-terminated strings ending with "".
    int buf_sz = rados_pool_list(cl, NULL, 0);
    printf("need buffer size of %d\n", buf_sz);
    char buf[buf_sz];
    int r = rados_pool_list(cl, buf, buf_sz);
    if (r != buf_sz) {
      printf("buffer size mismatch: got %d the first time, but %d "
             "the second.\n", buf_sz, r);
      goto out_err_cleanup;
    }
    const char *b = buf;
    printf("begin pools.\n");
    while (1) {
      if (b[0] == '\0')
        break;
      printf(" pool: '%s'\n", b);
      b += strlen(b) + 1;
    };
    printf("end pools.\n");
  }
  /* stat */
  struct rados_pool_stat_t st;
  r = rados_ioctx_pool_stat(io_ctx, &st);
  printf("rados_ioctx_pool_stat = %d, %lld KB, %lld objects\n", r, (long long)st.num_kb, (long long)st.num_objects);
  /* snapshots */
  r = rados_ioctx_snap_create(io_ctx, "snap1");
  printf("rados_ioctx_snap_create snap1 = %d\n", r);
  rados_snap_t snaps[10];
  r = rados_ioctx_snap_list(io_ctx, snaps, 10);
  for (i=0; i<r; i++) {
    char name[100];
    rados_ioctx_snap_get_name(io_ctx, snaps[i], name, sizeof(name));
    printf("rados_ioctx_snap_list got snap %lld %s\n", (long long)snaps[i], name);
  }
  rados_snap_t snapid;
  r = rados_ioctx_snap_lookup(io_ctx, "snap1", &snapid);
  printf("rados_ioctx_snap_lookup snap1 got %lld, result %d\n", (long long)snapid, r);
  r = rados_ioctx_snap_remove(io_ctx, "snap1");
  printf("rados_ioctx_snap_remove snap1 = %d\n", r);
  /* sync io: write the current timestamp and read it back */
  time_t tm;
  char buf[128], buf2[128];
  time(&tm);
  snprintf(buf, 128, "%s", ctime(&tm));
  r = rados_write(io_ctx, oid, buf, strlen(buf) + 1, 0);
  printf("rados_write = %d\n", r);
  r = rados_read(io_ctx, oid, buf2, sizeof(buf2), 0);
  printf("rados_read = %d\n", r);
  if (memcmp(buf, buf2, r))
    printf("*** content mismatch ***\n");
  /* attrs */
  if (do_rados_setxattr(io_ctx, oid, "b", "2"))
    goto out_err_cleanup;
  if (do_rados_setxattr(io_ctx, oid, "a", "1"))
    goto out_err_cleanup;
  if (do_rados_setxattr(io_ctx, oid, "c", "3"))
    goto out_err_cleanup;
  if (do_rados_getxattr(io_ctx, oid, "a", "1"))
    goto out_err_cleanup;
  if (do_rados_getxattr(io_ctx, oid, "b", "2"))
    goto out_err_cleanup;
  if (do_rados_getxattr(io_ctx, oid, "c", "3"))
    goto out_err_cleanup;
  if (do_rados_getxattrs(io_ctx, oid, exkeys, exvals))
    goto out_err_cleanup;
  uint64_t size;
  time_t mtime;
  r = rados_stat(io_ctx, oid, &size, &mtime);
  printf("rados_stat size = %lld mtime = %d = %d\n", (long long)size, (int)mtime, r);
  r = rados_stat(io_ctx, "does_not_exist", NULL, NULL);
  printf("rados_stat(does_not_exist) = %d\n", r);
  /* exec: run the crypto/md5 class method on the object; result reuses buf */
  rados_exec(io_ctx, oid, "crypto", "md5", buf, strlen(buf) + 1, buf, 128);
  printf("exec result=%s\n", buf);
  r = rados_read(io_ctx, oid, buf2, 128, 0);
  printf("read result=%s\n", buf2);
  printf("size=%d\n", r);
  /* aio */
  rados_completion_t a, b;
  rados_aio_create_completion2(NULL, NULL, &a);
  rados_aio_create_completion2(NULL, NULL, &b);
  rados_aio_write(io_ctx, "a", a, buf, 100, 0);
  rados_aio_write(io_ctx, "../b/bb_bb_bb\\foo\\bar", b, buf, 100, 0);
  rados_aio_wait_for_complete(a);
  printf("a safe\n");
  rados_aio_wait_for_complete(b);
  printf("b safe\n");
  rados_aio_release(a);
  rados_aio_release(b);
  /* test flush: the op must not be safe before the flush, and must be after */
  printf("testing aio flush\n");
  rados_completion_t c;
  rados_aio_create_completion2(NULL, NULL, &c);
  rados_aio_write(io_ctx, "c", c, buf, 100, 0);
  safe = rados_aio_is_safe(c);
  printf("a should not yet be safe and ... %s\n", safe ? "is":"is not");
  assert(!safe);
  rados_aio_flush(io_ctx);
  safe = rados_aio_is_safe(c);
  printf("a should be safe and ... %s\n", safe ? "is":"is not");
  assert(safe);
  rados_aio_release(c);
  rados_read(io_ctx, "../b/bb_bb_bb\\foo\\bar", buf2, 128, 0);
  /* list objects */
  rados_list_ctx_t h;
  r = rados_nobjects_list_open(io_ctx, &h);
  printf("rados_nobjects_list_open = %d, h = %p\n", r, h);
  const char *poolname;
  while (rados_nobjects_list_next2(h, &poolname, NULL, NULL, NULL, NULL, NULL) == 0)
    printf("rados_nobjects_list_next2 got object '%s'\n", poolname);
  rados_nobjects_list_close(h);
  /* stat */
  r = rados_ioctx_pool_stat(io_ctx, &st);
  printf("rados_stat_pool = %d, %lld KB, %lld objects\n", r, (long long)st.num_kb, (long long)st.num_objects);
  ret = 0;
out_err_cleanup:
  /* delete a pool */
  rados_ioctx_destroy(io_ctx);
  r = rados_pool_delete(cl, "foo");
  printf("rados_delete_pool = %d\n", r);
out_err:
  rados_shutdown(cl);
  return ret;
}
/* Entry point: arguments are unused; the exit status is testrados()'s. */
int main(int argc, const char **argv)
{
	(void)argc;
	(void)argv;
	return testrados();
}
| 8,798 | 26.496875 | 115 | c |
null | ceph-main/src/tools/ceph-dencoder/ceph_time.h | #ifndef TEST_CEPH_TIME_H
#define TEST_CEPH_TIME_H
#include <list>
#include "include/encoding.h"
#include "common/ceph_time.h"
#include "common/Formatter.h"
// wrapper for ceph::real_time that implements the dencoder interface
template <typename Clock>
class time_point_wrapper {
using time_point = typename Clock::time_point;
time_point t;
public:
time_point_wrapper() = default;
explicit time_point_wrapper(const time_point& t) : t(t) {}
void encode(bufferlist& bl) const {
using ceph::encode;
encode(t, bl);
}
void decode(bufferlist::const_iterator &p) {
using ceph::decode;
decode(t, p);
}
void dump(Formatter* f) {
auto epoch_time = Clock::to_time_t(t);
f->dump_string("time", std::ctime(&epoch_time));
}
static void generate_test_instances(std::list<time_point_wrapper*>& ls) {
constexpr time_t t{455500800}; // Ghostbusters release date
ls.push_back(new time_point_wrapper(Clock::from_time_t(t)));
}
};
using real_time_wrapper = time_point_wrapper<ceph::real_clock>;
WRITE_CLASS_ENCODER(real_time_wrapper)
using coarse_real_time_wrapper = time_point_wrapper<ceph::coarse_real_clock>;
WRITE_CLASS_ENCODER(coarse_real_time_wrapper)
// wrapper for ceph::timespan that implements the dencoder interface
class timespan_wrapper {
ceph::timespan d;
public:
timespan_wrapper() = default;
explicit timespan_wrapper(const ceph::timespan& d) : d(d) {}
void encode(bufferlist& bl) const {
using ceph::encode;
encode(d, bl);
}
void decode(bufferlist::const_iterator &p) {
using ceph::decode;
decode(d, p);
}
void dump(Formatter* f) {
f->dump_int("timespan", d.count());
}
static void generate_test_instances(std::list<timespan_wrapper*>& ls) {
constexpr std::chrono::seconds d{7377}; // marathon world record (2:02:57)
ls.push_back(new timespan_wrapper(d));
}
};
WRITE_CLASS_ENCODER(timespan_wrapper)
#endif
| 1,918 | 26.811594 | 78 | h |
null | ceph-main/src/tools/ceph-dencoder/denc_plugin.h | #include <dlfcn.h>
#include <filesystem>
#include <vector>
#include "denc_registry.h"
namespace fs = std::filesystem;
// RAII wrapper around a dlopen()'d dencoder plugin shared object.
// A plugin exports a C symbol `register_dencoders` which, when invoked,
// calls emplace() below for every type it knows how to encode/decode.
// Movable but not copyable (a user-declared move constructor suppresses the
// implicit copy operations; no move-assignment is provided either).
class DencoderPlugin {
  using dencoders_t = std::vector<std::pair<std::string, Dencoder*>>;
public:
  // RTLD_NOW: fail fast if the plugin has unresolved symbols.
  DencoderPlugin(const fs::path& path) {
    mod = dlopen(path.c_str(), RTLD_NOW);
    if (mod == nullptr) {
      std::cerr << "failed to dlopen(" << path << "): " << dlerror() << std::endl;
    }
  }
  // Transfer ownership of the dl handle; the source is left empty.
  DencoderPlugin(DencoderPlugin&& other)
    : mod{other.mod},
      dencoders{std::move(other.dencoders)}
  {
    other.mod = nullptr;
    other.dencoders.clear();
  }
  ~DencoderPlugin() {
#if !defined(__FreeBSD__)
    // dlclose() is deliberately skipped on FreeBSD.
    if (mod) {
      dlclose(mod);
    }
#endif
  }
  // Invoke the plugin's registration entry point and return the dencoders
  // it produced. On dlsym() failure the (empty) list is returned unchanged.
  const dencoders_t& register_dencoders() {
    static constexpr std::string_view REGISTER_DENCODERS_FUNCTION = "register_dencoders\0";
    assert(mod);
    using register_dencoders_t = void (*)(DencoderPlugin*);
    const auto do_register =
      reinterpret_cast<register_dencoders_t>(dlsym(mod, REGISTER_DENCODERS_FUNCTION.data()));
    if (do_register == nullptr) {
      std::cerr << "failed to dlsym(" << REGISTER_DENCODERS_FUNCTION << "): "
                << dlerror() << std::endl;
      return dencoders;
    }
    do_register(this);
    return dencoders;
  }

  // True when the shared object was loaded successfully.
  bool good() const {
    return mod != nullptr;
  }

  // Destroy all registered dencoders (in reverse registration order).
  void unregister_dencoders() {
    while (!dencoders.empty()) {
      delete dencoders.back().second;
      dencoders.pop_back();
    }
  }
  // Called (via the TYPE* macros) from the plugin's register_dencoders().
  template<typename DencoderT, typename...Args>
  void emplace(const char* name, Args&&...args) {
    dencoders.emplace_back(name, new DencoderT(std::forward<Args>(args)...));
  }

private:
  void *mod = nullptr;      // dlopen handle, nullptr if load failed
  dencoders_t dencoders;    // owned; released by unregister_dencoders()
};
// Registration helpers used inside a plugin's register_dencoders(DencoderPlugin*
// plugin) body. Each macro registers a dencoder for type `t` under its
// stringified name; the two boolean arguments are (stray_okay,
// nondeterministic) as taken by DencoderBase. Variants:
//   FEATUREFUL       - encode() takes a feature bitmask
//   STRAYDATA        - trailing bytes after decode are tolerated
//   NONDETERMINISTIC - re-encoding is not expected to be byte-identical
//   NOCOPY           - type is not copy-assignable/copy-constructible
// MESSAGE registers a Message subclass; DENC_API marks the exported plugin
// entry point.
#define TYPE(t) plugin->emplace<DencoderImplNoFeature<t>>(#t, false, false);
#define TYPE_STRAYDATA(t) plugin->emplace<DencoderImplNoFeature<t>>(#t, true, false);
#define TYPE_NONDETERMINISTIC(t) plugin->emplace<DencoderImplNoFeature<t>>(#t, false, true);
#define TYPE_FEATUREFUL(t) plugin->emplace<DencoderImplFeatureful<t>>(#t, false, false);
#define TYPE_FEATUREFUL_STRAYDATA(t) plugin->emplace<DencoderImplFeatureful<t>>(#t, true, false);
#define TYPE_FEATUREFUL_NONDETERMINISTIC(t) plugin->emplace<DencoderImplFeatureful<t>>(#t, false, true);
#define TYPE_FEATUREFUL_NOCOPY(t) plugin->emplace<DencoderImplFeaturefulNoCopy<t>>(#t, false, false);
#define TYPE_NOCOPY(t) plugin->emplace<DencoderImplNoFeatureNoCopy<t>>(#t, false, false);
#define MESSAGE(t) plugin->emplace<MessageDencoderImpl<t>>(#t);
#define DENC_API extern "C" [[gnu::visibility("default")]]
| 2,566 | 31.493671 | 104 | h |
null | ceph-main/src/tools/ceph-dencoder/denc_registry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <string>
#include <string_view>
#include "include/buffer_fwd.h"
#include "msg/Message.h"
namespace ceph {
class Formatter;
}
// Abstract interface over "a type that can be encoded/decoded/dumped".
// decode() returns an error string, empty on success.
struct Dencoder {
  virtual ~Dencoder() {}
  virtual std::string decode(bufferlist bl, uint64_t seek) = 0;
  virtual void encode(bufferlist& out, uint64_t features) = 0;
  virtual void dump(ceph::Formatter *f) = 0;
  // Copy-assignment/copy-construction round trips; overridden by copyable
  // implementations, otherwise they just report "not supported".
  virtual void copy() {
    std::cerr << "copy operator= not supported" << std::endl;
  }
  virtual void copy_ctor() {
    std::cerr << "copy ctor not supported" << std::endl;
  }
  // Test-instance management: generate() populates a list of canned
  // instances; select_generated(n) makes instance n the current object.
  virtual void generate() = 0;
  virtual int num_generated() = 0;
  virtual std::string select_generated(unsigned n) = 0;
  virtual bool is_deterministic() = 0;
  // Peek the leading one-byte version of an encoded blob at `seek`.
  unsigned get_struct_v(bufferlist bl, uint64_t seek) const {
    auto p = bl.cbegin(seek);
    uint8_t struct_v = 0;
    ceph::decode(struct_v, p);
    return struct_v;
  }
  //virtual void print(ostream& out) = 0;
};
// Common dencoder machinery for a concrete type T with the usual
// encode/decode/dump/generate_test_instances members.
// Ownership notes (NOTE(review)): the destructor deletes only m_object,
// never the instances accumulated in m_list; select_generated() repoints
// m_object at a list element without freeing the previous object. Both leak,
// which is tolerated in this short-lived CLI tool.
template<class T>
class DencoderBase : public Dencoder {
protected:
  T* m_object;           // current working instance (owned)
  std::list<T*> m_list;  // canned test instances from generate()
  bool stray_okay;       // tolerate trailing bytes after decode
  bool nondeterministic; // re-encode need not be byte-identical

public:
  DencoderBase(bool stray_okay, bool nondeterministic)
    : m_object(new T),
      stray_okay(stray_okay),
      nondeterministic(nondeterministic) {}
  ~DencoderBase() override {
    delete m_object;
  }

  // Decode bl (starting at `seek`) into m_object; returns an error string,
  // empty on success. Unless stray_okay, leftover bytes are an error.
  std::string decode(bufferlist bl, uint64_t seek) override {
    auto p = bl.cbegin();
    p.seek(seek);
    try {
      using ceph::decode;
      decode(*m_object, p);
    }
    catch (buffer::error& e) {
      return e.what();
    }
    if (!stray_okay && !p.end()) {
      std::ostringstream ss;
      ss << "stray data at end of buffer, offset " << p.get_off();
      return ss.str();
    }
    return {};
  }

  void encode(bufferlist& out, uint64_t features) override = 0;

  void dump(ceph::Formatter *f) override {
    m_object->dump(f);
  }
  void generate() override {
    T::generate_test_instances(m_list);
  }
  int num_generated() override {
    return m_list.size();
  }
  std::string select_generated(unsigned i) override {
    // allow 0- or 1-based (by wrapping)
    if (i == 0)
      i = m_list.size();
    if ((i == 0) || (i > m_list.size()))
      return "invalid id for generated object";
    m_object = *(std::next(m_list.begin(), i-1));
    return {};
  }
  bool is_deterministic() override {
    return !nondeterministic;
  }
};
// Dencoder for types whose encode() does not take a feature bitmask and
// which are not copyable (copy()/copy_ctor() stay at the base "not
// supported" behavior).
template<class T>
class DencoderImplNoFeatureNoCopy : public DencoderBase<T> {
public:
  DencoderImplNoFeatureNoCopy(bool stray_ok, bool nondeterministic)
    : DencoderBase<T>(stray_ok, nondeterministic) {}
  // `features` is ignored for feature-less types.
  void encode(bufferlist& out, uint64_t features) override {
    out.clear();
    using ceph::encode;
    encode(*this->m_object, out);
  }
};
// Feature-less dencoder for copyable types: adds copy()/copy_ctor() round
// trips that replace m_object with a copy of itself.
// NOTE(review): if m_object currently aliases an entry of m_list (after
// select_generated()), the delete below leaves a dangling pointer in the
// list; selecting that entry again would be use-after-free.
template<class T>
class DencoderImplNoFeature : public DencoderImplNoFeatureNoCopy<T> {
public:
  DencoderImplNoFeature(bool stray_ok, bool nondeterministic)
    : DencoderImplNoFeatureNoCopy<T>(stray_ok, nondeterministic) {}
  // Exercise copy-assignment.
  void copy() override {
    T *n = new T;
    *n = *this->m_object;
    delete this->m_object;
    this->m_object = n;
  }
  // Exercise the copy constructor.
  void copy_ctor() override {
    T *n = new T(*this->m_object);
    delete this->m_object;
    this->m_object = n;
  }
};
// Dencoder for types whose encode() takes a feature bitmask, non-copyable
// variant (copy()/copy_ctor() stay at the base "not supported" behavior).
template<class T>
class DencoderImplFeaturefulNoCopy : public DencoderBase<T> {
public:
  DencoderImplFeaturefulNoCopy(bool stray_ok, bool nondeterministic)
    : DencoderBase<T>(stray_ok, nondeterministic) {}
  // The feature bitmask is forwarded to the type's encode().
  void encode(bufferlist& out, uint64_t features) override {
    out.clear();
    using ceph::encode;
    encode(*(this->m_object), out, features);
  }
};
// Feature-aware dencoder for copyable types; adds copy()/copy_ctor() round
// trips (same dangling-list-pointer caveat as DencoderImplNoFeature).
template<class T>
class DencoderImplFeatureful : public DencoderImplFeaturefulNoCopy<T> {
public:
  DencoderImplFeatureful(bool stray_ok, bool nondeterministic)
    : DencoderImplFeaturefulNoCopy<T>(stray_ok, nondeterministic) {}
  // Exercise copy-assignment.
  void copy() override {
    T *n = new T;
    *n = *this->m_object;
    delete this->m_object;
    this->m_object = n;
  }
  // Exercise the copy constructor.
  void copy_ctor() override {
    T *n = new T(*this->m_object);
    delete this->m_object;
    this->m_object = n;
  }
};
template<class T>
class MessageDencoderImpl : public Dencoder {
ref_t<T> m_object;
std::list<ref_t<T>> m_list;
public:
MessageDencoderImpl() : m_object{make_message<T>()} {}
~MessageDencoderImpl() override {}
std::string decode(bufferlist bl, uint64_t seek) override {
auto p = bl.cbegin();
p.seek(seek);
try {
ref_t<Message> n(decode_message(g_ceph_context, 0, p), false);
if (!n)
throw std::runtime_error("failed to decode");
if (n->get_type() != m_object->get_type()) {
std::stringstream ss;
ss << "decoded type " << n->get_type() << " instead of expected " << m_object->get_type();
throw std::runtime_error(ss.str());
}
m_object = ref_cast<T>(n);
}
catch (buffer::error& e) {
return e.what();
}
if (!p.end()) {
std::ostringstream ss;
ss << "stray data at end of buffer, offset " << p.get_off();
return ss.str();
}
return {};
}
void encode(bufferlist& out, uint64_t features) override {
out.clear();
encode_message(m_object.get(), features, out);
}
void dump(ceph::Formatter *f) override {
m_object->dump(f);
}
void generate() override {
//T::generate_test_instances(m_list);
}
int num_generated() override {
return m_list.size();
}
std::string select_generated(unsigned i) override {
// allow 0- or 1-based (by wrapping)
if (i == 0)
i = m_list.size();
if ((i == 0) || (i > m_list.size()))
return "invalid id for generated object";
m_object = *(std::next(m_list.begin(), i-1));
return {};
}
bool is_deterministic() override {
return true;
}
//void print(ostream& out) {
//out << m_object << std::endl;
//}
};
/// Process-wide name -> Dencoder lookup table. std::map::emplace keeps the
/// first dencoder registered under a given name; later duplicates are
/// silently ignored.
class DencoderRegistry
{
  using dencoders_t = std::map<std::string_view, Dencoder*>;

  dencoders_t dencoders;

public:
  dencoders_t& get() {
    return dencoders;
  }

  void register_dencoder(std::string_view name, Dencoder* denc) {
    dencoders.emplace(name, denc);
  }
};
| 6,138 | 24.367769 | 91 | h |
null | ceph-main/src/tools/ceph-dencoder/mds_types.h | #ifdef WITH_CEPHFS
// One dencoder registration per CephFS type; each TYPE* macro (see
// denc_plugin.h) creates a Dencoder keyed by the stringified type name.
#include "mds/JournalPointer.h"
TYPE(JournalPointer)
#include "osdc/Journaler.h"
TYPE(Journaler::Header)
#include "mds/snap.h"
TYPE(SnapInfo)
TYPE(snaplink_t)
TYPE(sr_t)
#include "mds/mdstypes.h"
#include "include/cephfs/types.h"
TYPE(frag_info_t)
TYPE(nest_info_t)
TYPE(quota_info_t)
TYPE(client_writeable_range_t)
TYPE_FEATUREFUL(inode_t<std::allocator>)
TYPE_FEATUREFUL(old_inode_t<std::allocator>)
TYPE(fnode_t)
TYPE(old_rstat_t)
TYPE_FEATUREFUL(session_info_t)
TYPE(string_snap_t)
TYPE(MDSCacheObjectInfo)
TYPE(mds_table_pending_t)
TYPE(cap_reconnect_t)
TYPE(inode_load_vec_t)
TYPE(dirfrag_load_vec_t)
TYPE(mds_load_t)
// The duplicate TYPE(MDSCacheObjectInfo) that used to sit here was dropped:
// it is already registered above, and a second registration is ignored by
// the registry while leaking a Dencoder instance.
TYPE(inode_backtrace_t)
TYPE(inode_backpointer_t)
// Cache/metadata container types and the MDS/FS maps.
#include "mds/CInode.h"
TYPE_FEATUREFUL(InodeStore)
TYPE_FEATUREFUL(InodeStoreBare)
#include "mds/MDSMap.h"
TYPE_FEATUREFUL(MDSMap)
TYPE_FEATUREFUL(MDSMap::mds_info_t)
#include "mds/FSMap.h"
//TYPE_FEATUREFUL(Filesystem)
TYPE_FEATUREFUL(FSMap)
#include "mds/Capability.h"
TYPE_NOCOPY(Capability)
#include "mds/inode_backtrace.h"
// inode_backpointer_t / inode_backtrace_t were registered a second time
// here; the duplicates were dropped (they are already registered above, and
// a second registration is ignored by the registry while leaking a
// Dencoder instance).
// Tables, servers and the MDS journal event types.
#include "mds/InoTable.h"
TYPE(InoTable)
#include "mds/SnapServer.h"
TYPE_STRAYDATA(SnapServer)
#include "mds/events/ECommitted.h"
TYPE_FEATUREFUL_NOCOPY(ECommitted)
#include "mds/events/EExport.h"
TYPE_FEATUREFUL_NOCOPY(EExport)
#include "mds/events/EFragment.h"
TYPE_FEATUREFUL_NOCOPY(EFragment)
#include "mds/events/EImportFinish.h"
TYPE_FEATUREFUL_NOCOPY(EImportFinish)
#include "mds/events/EImportStart.h"
TYPE_FEATUREFUL_NOCOPY(EImportStart)
#include "mds/events/EMetaBlob.h"
TYPE_FEATUREFUL_NOCOPY(EMetaBlob::fullbit)
TYPE(EMetaBlob::remotebit)
TYPE(EMetaBlob::nullbit)
TYPE_FEATUREFUL_NOCOPY(EMetaBlob::dirlump)
TYPE_FEATUREFUL_NOCOPY(EMetaBlob)
#include "mds/events/EOpen.h"
TYPE_FEATUREFUL_NOCOPY(EOpen)
#include "mds/events/EResetJournal.h"
TYPE_FEATUREFUL_NOCOPY(EResetJournal)
#include "mds/events/ESession.h"
TYPE_FEATUREFUL_NOCOPY(ESession)
#include "mds/events/ESessions.h"
TYPE_FEATUREFUL_NOCOPY(ESessions)
#include "mds/events/EPeerUpdate.h"
TYPE(link_rollback)
TYPE(rmdir_rollback)
TYPE(rename_rollback::drec)
TYPE(rename_rollback)
TYPE_FEATUREFUL_NOCOPY(EPeerUpdate)
#include "mds/events/ESubtreeMap.h"
TYPE_FEATUREFUL_NOCOPY(ESubtreeMap)
#include "mds/events/ETableClient.h"
TYPE_FEATUREFUL_NOCOPY(ETableClient)
#include "mds/events/ETableServer.h"
TYPE_FEATUREFUL_NOCOPY(ETableServer)
#include "mds/events/EUpdate.h"
TYPE_FEATUREFUL_NOCOPY(EUpdate)
#endif // WITH_CEPHFS
| 2,515 | 21.265487 | 44 | h |
null | ceph-main/src/tools/ceph-dencoder/osd_types.h | #include "osd/OSDMap.h"
// One dencoder registration per OSD/mon type; each TYPE* macro (see
// denc_plugin.h) creates a Dencoder keyed by the stringified type name.
TYPE(osd_info_t)
TYPE_FEATUREFUL(osd_xinfo_t)
TYPE_FEATUREFUL_NOCOPY(OSDMap)
TYPE_FEATUREFUL_STRAYDATA(OSDMap::Incremental)
#include "osd/osd_types.h"
TYPE(osd_reqid_t)
TYPE(object_locator_t)
TYPE(request_redirect_t)
TYPE(pg_t)
TYPE(coll_t)
TYPE_FEATUREFUL(objectstore_perf_stat_t)
TYPE_FEATUREFUL(osd_stat_t)
TYPE(OSDSuperblock)
TYPE_FEATUREFUL(pool_snap_info_t)
TYPE_FEATUREFUL(pg_pool_t)
TYPE(object_stat_sum_t)
TYPE(object_stat_collection_t)
TYPE(pg_stat_t)
TYPE_FEATUREFUL(pool_stat_t)
TYPE(pg_hit_set_info_t)
TYPE(pg_hit_set_history_t)
TYPE(pg_history_t)
TYPE(pg_info_t)
TYPE(PastIntervals)
TYPE_FEATUREFUL(pg_query_t)
TYPE(ObjectModDesc)
TYPE(pg_log_entry_t)
TYPE(pg_log_dup_t)
TYPE(pg_log_t)
TYPE_FEATUREFUL(pg_missing_item)
TYPE_FEATUREFUL(pg_missing_t)
TYPE(pg_nls_response_t)
TYPE(pg_ls_response_t)
TYPE(object_copy_cursor_t)
TYPE_FEATUREFUL(object_copy_data_t)
TYPE(pg_create_t)
// A duplicate TYPE(OSDSuperblock) and a second TYPE(SnapSet) used to appear
// in this run; both were dropped — the types are registered once already,
// and a second registration is ignored by the registry while leaking a
// Dencoder instance.
TYPE(SnapSet)
TYPE_FEATUREFUL(watch_info_t)
TYPE_FEATUREFUL(watch_item_t)
TYPE(object_manifest_t)
TYPE_FEATUREFUL(object_info_t)
// Recovery, scrub, EC, hit sets, object store and monitor types.
TYPE_FEATUREFUL(ObjectRecoveryInfo)
TYPE(ObjectRecoveryProgress)
TYPE(PushReplyOp)
TYPE_FEATUREFUL(PullOp)
TYPE_FEATUREFUL(PushOp)
TYPE(ScrubMap::object)
TYPE(ScrubMap)
TYPE_FEATUREFUL(obj_list_watch_response_t)
TYPE(clone_info)
TYPE(obj_list_snap_response_t)
TYPE(pool_pg_num_history_t)
#include "osd/ECUtil.h"
// TYPE(stripe_info_t) non-standard encoding/decoding functions
TYPE(ECUtil::HashInfo)
#include "osd/ECMsgTypes.h"
TYPE_NOCOPY(ECSubWrite)
TYPE(ECSubWriteReply)
TYPE_FEATUREFUL(ECSubRead)
TYPE(ECSubReadReply)
#include "osd/HitSet.h"
TYPE_NONDETERMINISTIC(ExplicitHashHitSet)
TYPE_NONDETERMINISTIC(ExplicitObjectHitSet)
TYPE(BloomHitSet)
TYPE_NONDETERMINISTIC(HitSet) // because some subclasses are
TYPE(HitSet::Params)
#include "os/ObjectStore.h"
TYPE(ObjectStore::Transaction)
#include "os/SequencerPosition.h"
TYPE(SequencerPosition)
#ifdef WITH_BLUESTORE
#include "os/bluestore/bluestore_types.h"
TYPE(bluestore_bdev_label_t)
TYPE(bluestore_cnode_t)
TYPE(bluestore_compression_header_t)
TYPE(bluestore_extent_ref_map_t)
TYPE(bluestore_pextent_t)
TYPE(bluestore_blob_use_tracker_t)
// TODO: bluestore_blob_t repurposes the "feature" param of encode() for its
// struct_v. at a higher level, BlueStore::ExtentMap encodes the extends using
// a different interface than the normal ones. see
// BlueStore::ExtentMap::encode_some(). maybe we can test it using another
// approach.
// TYPE_FEATUREFUL(bluestore_blob_t)
// TYPE(bluestore_shared_blob_t) there is no encode here
TYPE(bluestore_onode_t)
TYPE(bluestore_deferred_op_t)
TYPE(bluestore_deferred_transaction_t)
// TYPE(bluestore_compression_header_t) there is no encode here
#include "os/bluestore/bluefs_types.h"
TYPE(bluefs_extent_t)
TYPE(bluefs_fnode_t)
TYPE(bluefs_super_t)
TYPE(bluefs_transaction_t)
#endif
#include "mon/AuthMonitor.h"
TYPE_FEATUREFUL(AuthMonitor::Incremental)
#include "mon/PGMap.h"
TYPE_FEATUREFUL_NONDETERMINISTIC(PGMapDigest)
TYPE_FEATUREFUL_NONDETERMINISTIC(PGMap)
#include "mon/MonitorDBStore.h"
TYPE(MonitorDBStore::Transaction)
TYPE(MonitorDBStore::Op)
#include "mon/MonMap.h"
TYPE_FEATUREFUL(MonMap)
#include "mon/MonCap.h"
TYPE(MonCap)
#include "mon/MgrMap.h"
TYPE_FEATUREFUL(MgrMap)
#include "mon/mon_types.h"
TYPE(MonitorDBStoreStats)
TYPE(ScrubResult)
#include "mon/CreatingPGs.h"
TYPE_FEATUREFUL(creating_pgs_t)
#include "mgr/ServiceMap.h"
TYPE_FEATUREFUL(ServiceMap)
TYPE_FEATUREFUL(ServiceMap::Service)
TYPE_FEATUREFUL(ServiceMap::Daemon)
#include "mon/ConnectionTracker.h"
TYPE(ConnectionReport);
TYPE(ConnectionTracker);
#include "os/DBObjectMap.h"
TYPE(DBObjectMap::_Header)
TYPE(DBObjectMap::State)
#include "os/kstore/kstore_types.h"
TYPE(kstore_cnode_t)
TYPE(kstore_onode_t)
| 3,786 | 24.07947 | 78 | h |
null | ceph-main/src/tools/ceph-dencoder/rbd_types.h | #ifdef WITH_RBD
// One dencoder registration per RBD type; each TYPE* macro (see
// denc_plugin.h) creates a Dencoder keyed by the stringified type name.
#include "librbd/journal/Types.h"
TYPE(librbd::journal::EventEntry)
TYPE(librbd::journal::ClientData)
TYPE(librbd::journal::TagData)
#include "librbd/mirroring_watcher/Types.h"
TYPE(librbd::mirroring_watcher::NotifyMessage)
#include "librbd/trash_watcher/Types.h"
TYPE(librbd::trash_watcher::NotifyMessage)
#include "librbd/WatchNotifyTypes.h"
TYPE_NOCOPY(librbd::watch_notify::NotifyMessage)
TYPE(librbd::watch_notify::ResponseMessage)
#include "rbd_replay/ActionTypes.h"
TYPE(rbd_replay::action::Dependency)
TYPE(rbd_replay::action::ActionEntry)
#include "tools/rbd_mirror/image_map/Types.h"
TYPE(rbd::mirror::image_map::PolicyData)
#endif
// Persistent write-back cache types, only with the SSD cache enabled.
#if defined(WITH_RBD) && defined(WITH_RBD_SSD_CACHE)
#include "librbd/cache/pwl/Types.h"
#include "librbd/cache/pwl/ssd/Types.h"
TYPE(librbd::cache::pwl::WriteLogCacheEntry)
TYPE(librbd::cache::pwl::WriteLogPoolRoot)
TYPE(librbd::cache::pwl::ssd::SuperBlock)
#endif
#ifdef WITH_RBD
#include "cls/rbd/cls_rbd.h"
TYPE_FEATUREFUL(cls_rbd_parent)
TYPE_FEATUREFUL(cls_rbd_snap)
#include "cls/rbd/cls_rbd_types.h"
TYPE(cls::rbd::ParentImageSpec)
TYPE(cls::rbd::ChildImageSpec)
TYPE(cls::rbd::MigrationSpec)
TYPE(cls::rbd::MirrorPeer)
TYPE(cls::rbd::MirrorImage)
TYPE(cls::rbd::MirrorImageMap)
TYPE(cls::rbd::MirrorImageStatus)
TYPE(cls::rbd::MirrorImageSiteStatus)
TYPE_FEATUREFUL(cls::rbd::MirrorImageSiteStatusOnDisk)
TYPE(cls::rbd::GroupImageSpec)
TYPE(cls::rbd::GroupImageStatus)
TYPE(cls::rbd::GroupSnapshot)
TYPE(cls::rbd::GroupSpec)
TYPE(cls::rbd::ImageSnapshotSpec)
TYPE(cls::rbd::SnapshotInfo)
TYPE(cls::rbd::SnapshotNamespace)
#endif
| 1,601 | 29.226415 | 54 | h |
null | ceph-main/src/tools/ceph-dencoder/rgw_types.h | #ifdef WITH_RADOSGW
// ceph-dencoder type registrations for RGW (radosgw) on-disk and on-wire
// structures.  Only compiled when the build enables RGW support; the
// opening #ifdef WITH_RADOSGW precedes this list.
#include "rgw_rados.h"
TYPE(RGWOLHInfo)
TYPE(RGWObjManifestPart)
TYPE(RGWObjManifest)
TYPE(objexp_hint_entry)
// multisite topology (zones, zonegroups, realms, periods)
#include "rgw_zone.h"
TYPE(RGWZoneParams)
TYPE(RGWZone)
TYPE(RGWZoneGroup)
TYPE(RGWRealm)
TYPE(RGWPeriod)
TYPE(RGWPeriodLatestEpochInfo)
// S3/Swift ACLs
#include "rgw_acl.h"
TYPE(ACLPermission)
TYPE(ACLGranteeType)
TYPE(ACLGrant)
TYPE(RGWAccessControlList)
TYPE(ACLOwner)
TYPE(RGWAccessControlPolicy)
#include "rgw_cache.h"
TYPE(ObjectMetaInfo)
TYPE(ObjectCacheInfo)
TYPE(RGWCacheNotifyInfo)
#include "rgw_lc.h"
TYPE(RGWLifecycleConfiguration)
#include "cls/log/cls_log_types.h"
TYPE(cls_log_entry)
// cls_rgw: bucket index, garbage collection and reshard object-class types
#include "cls/rgw/cls_rgw_types.h"
TYPE(rgw_bucket_pending_info)
TYPE(rgw_bucket_dir_entry_meta)
TYPE(rgw_bucket_entry_ver)
TYPE(rgw_bucket_dir_entry)
TYPE(rgw_bucket_category_stats)
TYPE(rgw_bucket_dir_header)
TYPE(rgw_bucket_dir)
TYPE(rgw_bucket_entry_ver)
TYPE(cls_rgw_obj_key)
TYPE(rgw_bucket_olh_log_entry)
TYPE(rgw_usage_log_entry)
TYPE(rgw_cls_bi_entry)
TYPE(rgw_bucket_olh_entry)
TYPE(rgw_usage_data)
TYPE(rgw_usage_log_info)
TYPE(rgw_user_bucket)
TYPE(cls_rgw_lc_entry)
#include "cls/rgw/cls_rgw_ops.h"
TYPE(cls_rgw_lc_get_entry_ret)
TYPE(rgw_cls_obj_prepare_op)
TYPE(rgw_cls_obj_complete_op)
TYPE(rgw_cls_list_op)
TYPE(rgw_cls_list_ret)
TYPE(cls_rgw_gc_defer_entry_op)
TYPE(cls_rgw_gc_list_op)
TYPE(cls_rgw_gc_list_ret)
TYPE(cls_rgw_gc_obj_info)
TYPE(cls_rgw_gc_remove_op)
TYPE(cls_rgw_gc_set_entry_op)
TYPE(cls_rgw_obj)
TYPE(cls_rgw_obj_chain)
TYPE(rgw_cls_tag_timeout_op)
TYPE(cls_rgw_bi_log_list_op)
TYPE(cls_rgw_bi_log_trim_op)
TYPE(cls_rgw_bi_log_list_ret)
TYPE(rgw_cls_link_olh_op)
TYPE(rgw_cls_unlink_instance_op)
TYPE(rgw_cls_read_olh_log_op)
TYPE(rgw_cls_read_olh_log_ret)
TYPE(rgw_cls_trim_olh_log_op)
TYPE(rgw_cls_bucket_clear_olh_op)
TYPE(rgw_cls_check_index_ret)
TYPE(cls_rgw_reshard_add_op)
TYPE(cls_rgw_reshard_list_op)
TYPE(cls_rgw_reshard_list_ret)
TYPE(cls_rgw_reshard_get_op)
TYPE(cls_rgw_reshard_get_ret)
TYPE(cls_rgw_reshard_remove_op)
TYPE(cls_rgw_set_bucket_resharding_op)
TYPE(cls_rgw_clear_bucket_resharding_op)
TYPE(cls_rgw_lc_obj_head)
#include "cls/rgw/cls_rgw_client.h"
TYPE(rgw_bi_log_entry)
TYPE(cls_rgw_reshard_entry)
TYPE(cls_rgw_bucket_instance_entry)
// cls_user: per-user bucket stats object class
#include "cls/user/cls_user_types.h"
TYPE(cls_user_bucket)
TYPE(cls_user_bucket_entry)
TYPE(cls_user_stats)
TYPE(cls_user_header)
#include "cls/user/cls_user_ops.h"
TYPE(cls_user_set_buckets_op)
TYPE(cls_user_remove_bucket_op)
TYPE(cls_user_list_buckets_op)
TYPE(cls_user_list_buckets_ret)
TYPE(cls_user_get_header_op)
TYPE(cls_user_get_header_ret)
TYPE(cls_user_complete_stats_sync_op)
#include "cls/journal/cls_journal_types.h"
TYPE(cls::journal::ObjectPosition)
TYPE(cls::journal::ObjectSetPosition)
TYPE(cls::journal::Client)
TYPE(cls::journal::Tag)
// core RGW user/bucket/object metadata
#include "rgw_common.h"
TYPE(RGWAccessKey)
TYPE(RGWSubUser)
TYPE(RGWUserInfo)
TYPE(rgw_bucket)
TYPE(RGWBucketInfo)
TYPE(RGWBucketEnt)
TYPE(rgw_obj)
#include "rgw_log.h"
TYPE(rgw_log_entry)
#include "rgw_datalog.h"
TYPE(rgw_data_change)
#include "rgw_mdlog.h"
TYPE(RGWMetadataLogData)
// multisite sync status markers
#include "rgw_meta_sync_status.h"
TYPE(rgw_meta_sync_info)
TYPE(rgw_meta_sync_marker)
TYPE(rgw_meta_sync_status)
#include "rgw_multi.h"
TYPE(RGWUploadPartInfo)
#include "rgw_data_sync.h"
TYPE(rgw_data_sync_info)
TYPE(rgw_data_sync_marker)
TYPE(rgw_data_sync_status)
#include "rgw_bucket_encryption.h"
TYPE(RGWBucketEncryptionConfig)
#endif
| 3,408 | 21.576159 | 42 | h |
null | ceph-main/src/tools/ceph-dencoder/sstring.h | #ifndef TEST_SSTRING_H
#define TEST_SSTRING_H
#include "common/sstring.hh"
// Wrapper exposing two differently-parameterized basic_sstring
// instantiations through the standard dencoder interface
// (DENC / dump / generate_test_instances) so ceph-dencoder can
// exercise sstring encoding, including both the internal-storage
// and heap-allocated code paths.
class sstring_wrapper {
  // char elements, 32-bit length, 16 bytes of in-object (SSO) storage
  using sstring16 = basic_sstring<char, uint32_t, 16>;
  sstring16 s1;
  // unsigned char elements, 16-bit length, 24 bytes of in-object storage
  using sstring24 = basic_sstring<unsigned char, uint16_t, 24>;
  sstring24 s2;
public:
  sstring_wrapper() = default;
  sstring_wrapper(sstring16&& s1, sstring24&& s2)
    : s1(std::move(s1)), s2(std::move(s2))
  {}
  // dencoder interface: both strings encoded under a v1/compat-1 envelope
  DENC(sstring_wrapper, w, p) {
    DENC_START(1, 1, p);
    denc(w.s1, p);
    denc(w.s2, p);
    DENC_FINISH(p);
  }
  void dump(Formatter* f) {
    f->dump_string("s1", s1.c_str());
    // s2 stores unsigned char; reinterpret for the char*-based dump API
    f->dump_string("s2", reinterpret_cast<const char*>(s2.c_str()));
  }
  static void generate_test_instances(std::list<sstring_wrapper*>& ls) {
    ls.push_back(new sstring_wrapper());
    // initialize sstrings that fit in internal storage
    constexpr auto cstr6 = "abcdef";
    ls.push_back(new sstring_wrapper(sstring16{cstr6}, sstring24{cstr6}));
    // initialize sstrings that overflow into external storage
    constexpr auto cstr26 = "abcdefghijklmnopqrstuvwxyz";
    ls.push_back(new sstring_wrapper(sstring16{cstr26}, sstring24{cstr26}));
  }
};
WRITE_CLASS_DENC(sstring_wrapper)
#endif
| 1,240 | 29.268293 | 76 | h |
null | ceph-main/src/tools/ceph-dencoder/str.h | #ifndef TEST_STRING_H
#define TEST_STRING_H
#include <list>
#include <string>
#include <utility>

#include "common/Formatter.h"
// Wrapper exposing std::string through the standard dencoder interface
// (encode / decode / dump / generate_test_instances) so ceph-dencoder
// can exercise string encoding.
class string_wrapper {
  std::string s;
public:
  string_wrapper() = default;
  // Take the string by value and move it into place: callers handing over
  // temporaries or moved-from values pay no extra copy.  (The original
  // relied on an unqualified `string` name and copied the parameter.)
  string_wrapper(std::string s1)
    : s(std::move(s1))
  {}
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    encode(s, bl);
  }
  void decode(ceph::buffer::list::const_iterator &bl) {
    using ceph::decode;
    decode(s, bl);
  }
  void dump(Formatter* f) {
    f->dump_string("s", s);
  }
  static void generate_test_instances(std::list<string_wrapper*>& ls) {
    ls.push_back(new string_wrapper());
    // initialize strings that fit in internal storage
    std::string s1 = "abcdef";
    ls.push_back(new string_wrapper(std::move(s1)));
  }
};
WRITE_CLASS_ENCODER(string_wrapper)
#endif
| 821 | 20.076923 | 71 | h |
null | ceph-main/src/tools/cephfs/DataScan.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "MDSUtility.h"
#include "include/rados/librados.hpp"
class InodeStore;
class MDSTable;
/**
 * Abstract sink for recovered CephFS metadata.  Concrete drivers either
 * write back into a metadata pool (MetadataDriver) or materialize files
 * on a local filesystem (LocalFileDriver).
 */
class RecoveryDriver {
  protected:
    // If true, overwrite structures that generate decoding errors.
    bool force_corrupt;

    // If true, overwrite root objects during init_roots even if they
    // exist
    bool force_init;

  public:
    /// Bind the driver to a cluster/filesystem before any injection.
    virtual int init(
        librados::Rados &rados,
        std::string &metadata_pool_name,
        const FSMap *fsmap,
        fs_cluster_id_t fscid) = 0;

    void set_force_corrupt(const bool val)
    {
      force_corrupt = val;
    }

    void set_force_init(const bool val)
    {
      force_init = val;
    }

    /**
     * Inject an inode + dentry parents into the metadata pool,
     * based on a backtrace recovered from the data pool
     */
    virtual int inject_with_backtrace(
        const inode_backtrace_t &bt,
        const InodeStore &dentry) = 0;

    /**
     * Inject an inode + dentry into the lost+found directory,
     * when all we know about a file is its inode.
     */
    virtual int inject_lost_and_found(
        inodeno_t ino,
        const InodeStore &dentry) = 0;

    /**
     * Create any missing roots (i.e. mydir, strays, root inode)
     */
    virtual int init_roots(
        int64_t data_pool_id) = 0;

    /**
     * Pre-injection check that all the roots are present in
     * the metadata pool.  Used to avoid parallel workers interfering
     * with one another, by cueing the user to go run 'init' on a
     * single node before running a parallel scan.
     *
     * @param result: set to true if roots are present, else set to false
     * @returns 0 on no unexpected errors, else error code.  Missing objects
     *          are not considered an unexpected error: check *result for
     *          this case.
     */
    virtual int check_roots(bool *result) = 0;

    /**
     * Helper to compose dnames for links to lost+found
     * inodes.  Formats the inode number as lowercase hex
     * (16 hex digits max, so the 20-byte buffer is sufficient).
     */
    std::string lost_found_dname(inodeno_t ino)
    {
      char s[20];
      snprintf(s, sizeof(s), "%llx", (unsigned long long)ino);
      return std::string(s);
    }

    RecoveryDriver()
      : force_corrupt(false),
        force_init(false)
    {}

    virtual ~RecoveryDriver() {}
};
/**
 * RecoveryDriver that materializes recovered metadata as real files and
 * directories under a local path, pulling file contents from the data
 * pool via `data_io`.
 */
class LocalFileDriver : public RecoveryDriver
{
  protected:
    const std::string path;       //< local directory to write files into
    librados::IoCtx &data_io;     //< data pool handle for reading file content

    // Copy an object's file data (in chunk_size pieces) into file_path.
    int inject_data(
        const std::string &file_path,
        uint64_t size,
        uint32_t chunk_size,
        inodeno_t ino);
  public:

    LocalFileDriver(const std::string &path_, librados::IoCtx &data_io_)
      : RecoveryDriver(), path(path_), data_io(data_io_)
    {}

    // Implement RecoveryDriver interface
    int init(
        librados::Rados &rados,
        std::string &metadata_pool_name,
        const FSMap *fsmap,
        fs_cluster_id_t fscid) override;

    int inject_with_backtrace(
        const inode_backtrace_t &bt,
        const InodeStore &dentry) override;

    int inject_lost_and_found(
        inodeno_t ino,
        const InodeStore &dentry) override;

    int init_roots(int64_t data_pool_id) override;

    int check_roots(bool *result) override;
};
/**
* A class that knows how to work with objects in a CephFS
* metadata pool.
*/
class MetadataTool
{
  protected:

    librados::IoCtx metadata_io;  //< handle to the CephFS metadata pool

    /**
     * Construct a synthetic InodeStore for a normal file
     * (or a symlink, when `symlink` is non-empty).
     */
    void build_file_dentry(
        inodeno_t ino, uint64_t file_size, time_t file_mtime,
        const file_layout_t &layout,
        InodeStore *out,
        std::string symlink);

    /**
     * Construct a synthetic InodeStore for a directory
     */
    void build_dir_dentry(
        inodeno_t ino,
        const frag_info_t &fragstat,
        const file_layout_t &layout,
        InodeStore *out);

    /**
     * Try and read an fnode from a dirfrag
     * @param read_version set to the object version the fnode was read at
     */
    int read_fnode(inodeno_t ino, frag_t frag,
                   fnode_t *fnode, uint64_t *read_version);

    /**
     * Try and read a dentry from a dirfrag
     * @param dnfirst if non-null, receives the dentry's "first" snapid
     */
    int read_dentry(inodeno_t parent_ino, frag_t frag,
                    const std::string &dname, InodeStore *inode, snapid_t *dnfirst=nullptr);
};
/**
* A class that knows how to manipulate CephFS metadata pools
*/
/**
 * RecoveryDriver that writes recovered inodes/dentries back into the
 * CephFS metadata pool (dirfrag omap entries, root objects, MDS tables).
 */
class MetadataDriver : public RecoveryDriver, public MetadataTool
{
  protected:
    /**
     * Create a .inode object, i.e. root or mydir
     */
    int inject_unlinked_inode(inodeno_t inono, int mode, int64_t data_pool_id);

    /**
     * Check for existence of .inode objects, before
     * trying to go ahead and inject metadata.
     */
    int root_exists(inodeno_t ino, bool *result);
    int find_or_create_dirfrag(
        inodeno_t ino,
        frag_t fragment,
        bool *created);

    /**
     * Work out which fragment of a directory should contain a named
     * dentry, recursing up the trace as necessary to retrieve
     * fragtrees.
     */
    int get_frag_of(
        inodeno_t dirino,
        const std::string &dname,
        frag_t *result_ft);

  public:

    // Implement RecoveryDriver interface
    int init(
        librados::Rados &rados,
        std::string &metadata_pool_name,
        const FSMap *fsmap,
        fs_cluster_id_t fscid) override;

    /// Write a single dentry->inode linkage into a dirfrag's omap.
    int inject_linkage(
        inodeno_t dir_ino, const std::string &dname,
        const frag_t fragment, const InodeStore &inode, snapid_t dnfirst=CEPH_NOSNAP);

    int inject_with_backtrace(
        const inode_backtrace_t &bt,
        const InodeStore &dentry) override;

    int inject_lost_and_found(
        inodeno_t ino,
        const InodeStore &dentry) override;

    int init_roots(int64_t data_pool_id) override;

    int check_roots(bool *result) override;

    // Load/store an MDS table (e.g. InoTable) from/to the metadata pool.
    int load_table(MDSTable *table);
    int save_table(MDSTable *table);
};
/**
 * cephfs-data-scan: rebuilds damaged metadata by scraping backtraces and
 * object sizes out of the data pool(s) and re-injecting them via a
 * RecoveryDriver.  Supports sharded parallel operation (worker n of m).
 */
class DataScan : public MDSUtility, public MetadataTool
{
  protected:
    RecoveryDriver *driver;          //< owned; deleted in destructor
    fs_cluster_id_t fscid;

    std::string metadata_pool_name;
    std::vector<int64_t> data_pools;

    // IoCtx for data pool (where we scrape file backtraces from)
    librados::IoCtx data_io;
    // Remember the data pool ID for use in layouts
    int64_t data_pool_id;
    // IoCtxs for extra data pools
    std::vector<librados::IoCtx> extra_data_ios;

    uint32_t n;   //< this worker's index (0-based)
    uint32_t m;   //< total number of parallel workers

    /**
     * Scan data pool for backtraces, and inject inodes to metadata pool
     */
    int scan_inodes();

    /**
     * Scan data pool for file sizes and mtimes
     */
    int scan_extents();

    /**
     * Scan metadata pool for 0th dirfrags to link orphaned
     * directory inodes.
     */
    int scan_frags();

    /**
     * Cleanup xattrs from data pool
     */
    int cleanup();

    /**
     * Check if an inode number is in the permitted ranges
     */
    bool valid_ino(inodeno_t ino) const;

    int scan_links();

    // Accept pools which are not in the FSMap
    bool force_pool;
    // Respond to decode errors by overwriting
    bool force_corrupt;
    // Overwrite root objects even if they exist
    bool force_init;
    // Only scan inodes without this scrub tag
    std::string filter_tag;

    /**
     * @param r set to error on valid key with invalid value
     * @return true if argument consumed, else false
     */
    bool parse_kwarg(
        const std::vector<const char*> &args,
        std::vector<const char *>::const_iterator &i,
        int *r);

    /**
     * @return true if argument consumed, else false
     */
    bool parse_arg(
      const std::vector<const char*> &arg,
      std::vector<const char *>::const_iterator &i);

    int probe_filter(librados::IoCtx &ioctx);

    /**
     * Apply a function to all objects in an ioctx's pool, optionally
     * restricted to only those objects with a 00000000 offset and
     * no tag matching DataScan::scrub_tag.
     */
    int forall_objects(
        librados::IoCtx &ioctx,
        bool untagged_only,
        std::function<int(std::string, uint64_t, uint64_t)> handler);

  public:
    static void usage();
    int main(const std::vector<const char *> &args);

    DataScan()
      : driver(NULL), fscid(FS_CLUSTER_ID_NONE),
        data_pool_id(-1), n(0), m(1),
        force_pool(false), force_corrupt(false),
        force_init(false)
    {
    }

    ~DataScan() override
    {
      delete driver;
    }
};
null | ceph-main/src/tools/cephfs/Dumper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2010 Greg Farnum <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef JOURNAL_DUMPER_H_
#define JOURNAL_DUMPER_H_
#include "MDSUtility.h"
class Journaler;
/**
* This class lets you dump out an mds journal for troubleshooting or whatever.
*
* It was built to work with cmds so some of the design choices are random.
* To use, create a Dumper, call init(), and then call dump() with the name
* of the file to dump to.
*/
class Dumper : public MDSUtility {
private:
  // NOTE(review): `role` is left default-constructed by the constructor;
  // presumably init() is expected to set it before use — confirm.
  mds_role_t role;
  inodeno_t ino;   //< journal inode to operate on; -1 until init()

public:
  Dumper() : ino(-1)
  {}

  /// Resolve the journal ino for `role_` and the given journal type.
  int init(mds_role_t role_, const std::string &type);
  /// Recover the journal's head/tail pointers before dumping.
  int recover_journal(Journaler *journaler);
  /// Write the raw journal to `dumpfile`.
  int dump(const char *dumpfile);
  /// Write a previously dumped journal back into the cluster.
  int undump(const char *dumpfile, bool force);
};
#endif /* JOURNAL_DUMPER_H_ */
| 1,125 | 23.478261 | 79 | h |
null | ceph-main/src/tools/cephfs/EventOutput.h | // -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* ceph - scalable distributed file system
*
* copyright (c) 2014 john spray <[email protected]>
*
* this is free software; you can redistribute it and/or
* modify it under the terms of the gnu lesser general public
* license version 2.1, as published by the free software
* foundation. see file copying.
*/
#ifndef EVENT_OUTPUT_H
#define EVENT_OUTPUT_H
#include <string>
class JournalScanner;
/**
* Different output formats for the results of a journal scan
*/
/**
 * Different output formats for the results of a journal scan
 */
class EventOutput
{
  private:
    JournalScanner const &scan;   //< scan results to render (not owned)
    std::string const path;       //< output path for json()/binary()

  public:
    EventOutput(JournalScanner const &scan_, std::string const &path_)
      : scan(scan_), path(path_) {}

    void summary() const;  //< per-event-type counts to stdout
    void list() const;     //< one line per event to stdout
    int json() const;      //< write events as JSON to `path`
    int binary() const;    //< write raw event payloads under `path`
};
#endif // EVENT_OUTPUT_H
| 920 | 20.418605 | 71 | h |
null | ceph-main/src/tools/cephfs/JournalFilter.h | // -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* ceph - scalable distributed file system
*
* copyright (c) 2014 john spray <[email protected]>
*
* this is free software; you can redistribute it and/or
* modify it under the terms of the gnu lesser general public
* license version 2.1, as published by the free software
* foundation. see file copying.
*/
#ifndef JOURNAL_FILTER_H
#define JOURNAL_FILTER_H
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/PurgeQueue.h"
/**
* A set of conditions for narrowing down a search through the journal
*/
/**
 * A set of conditions for narrowing down a search through the journal.
 * All configured conditions must match (they are ANDed together by the
 * apply() overloads).
 */
class JournalFilter
{
  private:

    /* Filtering by journal offset range */
    uint64_t range_start;
    uint64_t range_end;
    static const std::string range_separator;

    /* Filtering by file (sub) path */
    std::string path_expr;

    /* Filtering by inode */
    inodeno_t inode;

    /* Filtering by type */
    LogEvent::EventType event_type;

    std::string type;   //< journal type this filter applies to ("mdlog"/"purge_queue")

    /* Filtering by PurgeItem::Action */
    PurgeItem::Action purge_action;

    /* Filtering by dirfrag */
    dirfrag_t frag;
    std::string frag_dentry;  //< optional, filter dentry name within fragment

    /* Filtering by metablob client name */
    entity_name_t client_name;

  public:
    JournalFilter(std::string t) :
      range_start(0),
      range_end(-1),
      inode(0),
      event_type(0),
      type(t),
      purge_action(PurgeItem::NONE) {}

    /// Report the configured offset range; see implementation for semantics.
    bool get_range(uint64_t &start, uint64_t &end) const;
    /// Does the mdlog event at `pos` pass the filter?
    bool apply(uint64_t pos, LogEvent &le) const;
    /// Does the purge-queue item at `pos` pass the filter?
    bool apply(uint64_t pos, PurgeItem &pi) const;
    /// Consume `--filter-*`-style arguments starting at `arg`.
    int parse_args(
      std::vector<const char*> &argv,
      std::vector<const char*>::iterator &arg);
};
#endif // JOURNAL_FILTER_H
| 1,719 | 22.243243 | 76 | h |
null | ceph-main/src/tools/cephfs/JournalScanner.h | // -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* ceph - scalable distributed file system
*
* copyright (c) 2014 john spray <[email protected]>
*
* this is free software; you can redistribute it and/or
* modify it under the terms of the gnu lesser general public
* license version 2.1, as published by the free software
* foundation. see file copying.
*/
#ifndef JOURNAL_SCANNER_H
#define JOURNAL_SCANNER_H
#include "include/rados/librados_fwd.hpp"
// For Journaler::Header, can't forward-declare nested classes
#include <osdc/Journaler.h>
#include "JournalFilter.h"
/**
* A simple sequential reader for metadata journals. Unlike
* the MDS Journaler class, this is written to detect, record,
* and read past corruptions and missing objects. It is also
* less efficient but more plainly written.
*/
/**
 * A simple sequential reader for metadata journals.  Unlike
 * the MDS Journaler class, this is written to detect, record,
 * and read past corruptions and missing objects.  It is also
 * less efficient but more plainly written.
 */
class JournalScanner
{
  private:
  librados::IoCtx &io;

  // Input constraints
  const int rank;           //< MDS rank whose journal we scan
  std::string type;         //< journal type, e.g. "mdlog" or "purge_queue"
  JournalFilter const filter;

  void gap_advance();

  public:
  JournalScanner(
      librados::IoCtx &io_,
      int rank_,
      const std::string &type_,
      JournalFilter const &filter_) :
    io(io_),
    rank(rank_),
    type(type_),
    filter(filter_),
    is_mdlog(false),
    pointer_present(false),
    pointer_valid(false),
    header_present(false),
    header_valid(false),
    header(NULL) {};

  // Convenience overload: a pass-through filter built from the type alone.
  JournalScanner(
      librados::IoCtx &io_,
      int rank_,
      const std::string &type_) :
    io(io_),
    rank(rank_),
    type(type_),
    filter(type_),
    is_mdlog(false),
    pointer_present(false),
    pointer_valid(false),
    header_present(false),
    header_valid(false),
    header(NULL) {};

  ~JournalScanner();

  /// Resolve `ino` from rank and journal type.
  int set_journal_ino();
  /// Run the scan; with full=false only pointer/header are examined.
  int scan(bool const full=true);
  int scan_pointer();
  int scan_header();
  int scan_events();
  void report(std::ostream &out) const;

  std::string obj_name(uint64_t offset) const;
  std::string obj_name(inodeno_t ino, uint64_t offset) const;

  // The results of the scan
  inodeno_t ino;  // Corresponds to journal ino according their type
  struct EventRecord {
    EventRecord(std::unique_ptr<LogEvent> le, uint32_t rs) : log_event(std::move(le)), raw_size(rs) {}
    EventRecord(std::unique_ptr<PurgeItem> p, uint32_t rs) : pi(std::move(p)), raw_size(rs) {}
    // Exactly one of log_event (mdlog) or pi (purge_queue) is set.
    std::unique_ptr<LogEvent> log_event;
    std::unique_ptr<PurgeItem> pi;
    uint32_t raw_size = 0; //< Size from start offset including all encoding overhead
  };

  class EventError {
    public:
    int r;
    std::string description;
    EventError(int r_, const std::string &desc_)
      : r(r_), description(desc_) {}
  };

  typedef std::map<uint64_t, EventRecord> EventMap;
  typedef std::map<uint64_t, EventError> ErrorMap;
  typedef std::pair<uint64_t, uint64_t> Range;
  bool is_mdlog;
  bool pointer_present; //mdlog specific
  bool pointer_valid;   //mdlog specific
  bool header_present;
  bool header_valid;
  // NOTE(review): raw owning pointer, presumably freed in ~JournalScanner —
  // confirm in the implementation.
  Journaler::Header *header;

  bool is_healthy() const;
  bool is_readable() const;
  std::vector<std::string> objects_valid;
  std::vector<uint64_t> objects_missing;
  std::vector<Range> ranges_invalid;
  std::vector<uint64_t> events_valid;
  EventMap events;

  // For events present in ::events (i.e. scanned successfully),
  // any subsequent errors handling them (e.g. replaying)
  ErrorMap errors;

  private:
  // Forbid copy construction because I have ptr members
  JournalScanner(const JournalScanner &rhs);
};
#endif // JOURNAL_SCANNER_H
| 3,519 | 25.268657 | 102 | h |
null | ceph-main/src/tools/cephfs/JournalTool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "MDSUtility.h"
#include "RoleSelector.h"
#include <vector>
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/events/EMetaBlob.h"
#include "include/rados/librados.hpp"
#include "JournalFilter.h"
class JournalScanner;
/**
* Command line tool for investigating and repairing filesystems
* with damaged metadata logs
*/
/**
 * Command line tool for investigating and repairing filesystems
 * with damaged metadata logs
 */
class JournalTool : public MDSUtility
{
  private:
    MDSRoleSelector role_selector;
    // Bit hacky, use this `rank` member to control behaviour of the
    // various main_ functions.
    mds_rank_t rank;
    // when set, generate per rank dump file path
    bool all_ranks = false;

    std::string type;   //< journal type being operated on

    // Entry points
    int main_journal(std::vector<const char*> &argv);
    int main_header(std::vector<const char*> &argv);
    int main_event(std::vector<const char*> &argv);

    // Shared functionality
    int recover_journal();

    // Journal operations
    int journal_inspect();
    int journal_export(std::string const &path, bool import, bool force);
    int journal_reset(bool hard);

    // Header operations
    int header_set();

    // I/O handles
    librados::Rados rados;
    librados::IoCtx input;
    librados::IoCtx output;

    bool other_pool;   //< true when output targets a pool other than input

    // Metadata backing store manipulation
    int read_lost_found(std::set<std::string> &lost);
    int recover_dentries(
        EMetaBlob const &metablob,
        bool const dry_run,
        std::set<inodeno_t> *consumed_inos);

    // Splicing
    int erase_region(JournalScanner const &jp, uint64_t const pos, uint64_t const length);

    // Backing store helpers
    void encode_fullbit_as_inode(
        const EMetaBlob::fullbit &fb,
        const bool bare,
        bufferlist *out_bl);
    int consume_inos(const std::set<inodeno_t> &inos);

    //validate type
    int validate_type(const std::string &type);

    // generate output file path for dump/export
    std::string gen_dump_file_path(const std::string &prefix);

    // check if an operation (mode, command) is safe to be
    // executed on all ranks.
    bool can_execute_for_all_ranks(const std::string &mode,
                                   const std::string &command);
  public:
    static void usage();
    JournalTool() :
      rank(0), other_pool(false) {}
    int main(std::vector<const char*> &argv);
};
| 2,745 | 25.921569 | 90 | h |
null | ceph-main/src/tools/cephfs/MDSUtility.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef MDS_UTILITY_H_
#define MDS_UTILITY_H_
#include "osdc/Objecter.h"
#include "mds/FSMap.h"
#include "messages/MFSMap.h"
#include "msg/Dispatcher.h"
#include "msg/Messenger.h"
#include "auth/Auth.h"
#include "common/async/context_pool.h"
#include "common/Finisher.h"
#include "common/Timer.h"
/// MDS Utility
/**
* This class is the parent for MDS utilities, i.e. classes that
* need access the objects belonging to the MDS without actually
* acting as an MDS daemon themselves.
*/
/**
 * Parent class for MDS utilities: connects to the cluster (messenger,
 * monitor client, objecter) and tracks the FSMap, without acting as an
 * MDS daemon itself.  Subclasses call init() before use and shutdown()
 * when done.
 */
class MDSUtility : public Dispatcher {
protected:
  Objecter *objecter;
  FSMap *fsmap;                 //< latest FSMap received from the monitors
  Messenger *messenger;
  MonClient *monc;

  ceph::mutex lock = ceph::make_mutex("MDSUtility::lock");
  Finisher finisher;
  ceph::async::io_context_pool poolctx;

  Context *waiting_for_mds_map; //< completion fired on first FSMap arrival

  bool inited;                  //< guards double shutdown
public:
  MDSUtility();
  ~MDSUtility() override;

  void handle_fs_map(MFSMap* m);
  bool ms_dispatch(Message *m) override;
  bool ms_handle_reset(Connection *con) override { return false; }
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override { return false; }
  int init();
  void shutdown();
};
#endif /* MDS_UTILITY_H_ */
| 1,602 | 25.278689 | 70 | h |
null | ceph-main/src/tools/cephfs/MetaTool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef METATOOL_H__
#define METATOOL_H__
#include "MDSUtility.h"
#include "RoleSelector.h"
#include <vector>
#include <stack>
using std::stack;
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/events/EMetaBlob.h"
#include "include/rados/librados.hpp"
#include "common/ceph_json.h"
using ::ceph::bufferlist;
/**
 * cephfs-meta-tool: inspect and amend raw CephFS metadata objects
 * (dirfrag omap entries and fnodes) directly in the metadata pool.
 *
 * Fixes vs. previous revision: corrected "unknow" typos in the
 * user-visible fallback strings, removed stray semicolons, and made the
 * read-only enum-to-name helpers take their argument by const reference.
 */
class MetaTool : public MDSUtility
{
public:
  // One (snapid, type, payload) metadata entry decoded from a dirfrag.
  // Only entries of type 'I' carry an InodeStore payload.
  class inode_meta_t {
  public:
    inode_meta_t(snapid_t f = CEPH_NOSNAP, char t = char(255), InodeStore* i = NULL):
      _f(f),_t(t),_i(i) {
    }
    snapid_t get_snapid() const {
      return _f;
    }
    // Returns the decoded inode for 'I' entries, NULL for any other type.
    InodeStore* get_meta() const {
      if (_t == 'I')
        return _i;
      else
        return NULL;
    }
    int get_type() const {
      return _t;
    }
    void decode_json(JSONObj *obj);
    void encode(::ceph::bufferlist& bl, uint64_t features);
  private:
    snapid_t _f;     // dentry "first" snapid
    char _t;         // entry type tag; 'I' marks an inode entry
    InodeStore* _i;  // payload for 'I' entries, otherwise NULL
  };
private:
  // A pending operation plus the state (ancestor map, decoded inodes,
  // omap keys) accumulated while walking the metadata pool.  Sub-steps
  // are kept on a LIFO work stack of sub_op records.
  class meta_op {
  public:
    meta_op(bool debug = false, std::string out = "", std::string in = "", bool confirm = false):
      _debug(debug),
      _out(out),
      _in(in),
      _confirm(confirm)
    {}
    void release();
    typedef enum {
      OP_LIST = 0,
      OP_LTRACE,
      OP_SHOW,
      OP_AMEND,
      OP_SHOW_FN,
      OP_AMEND_FN,
      OP_NO
    } op_type;
    typedef enum {
      INO_DIR = 0,
      INO_F
    } ino_type;
    /// Human-readable name for an op_type (for debug/trace output).
    static std::string op_type_name(const op_type& t) {
      std::string name;
      switch (t) {
      case OP_LIST:
        name = "list dir";
        break;
      case OP_LTRACE:
        name = "load trace";
        break;
      case OP_SHOW:
        name = "show info";
        break;
      case OP_AMEND:
        name = "amend info";
        break;
      case OP_SHOW_FN:
        name = "show fnode";
        break;
      case OP_AMEND_FN:
        name = "amend fnode";
        break;
      case OP_NO:
        name = "noop";
        break;
      default:
        name = "unknown op type";
      }
      return name;
    }
    /// Human-readable name for an ino_type (for debug/trace output).
    static std::string ino_type_name(const ino_type& t) {
      std::string name;
      switch (t) {
      case INO_DIR:
        name = "dir";
        break;
      case INO_F:
        name = "file";
        break;
      default:
        name = "unknown file type";
      }
      return name;
    }
    // One step on the work stack: which op to run against which
    // inode/fragment, plus backlinks into the owning meta_op's state.
    class sub_op {
    public:
      sub_op(meta_op* mop):
        trace_level(0),
        _proc(false),
        _mop(mop)
      {}
      void print() {
        std::cout << detail() << std::endl;
      }
      std::string detail() {
        std::stringstream ds;
        ds << " [sub_op]" << op_type_name(sub_op_t) << "|"
           << ino_type_name(sub_ino_t) << "|"
           << ino << "|"
           << frag << "|"
           << ino_c << "|"
           << trace_level << "|"
           << name;
        return ds.str();
      }
      // Look up the backtrace ancestor of the child inode (ino_c).
      bool get_c_ancestor(inode_backpointer_t& bp) {
        if (!_mop || !ino_c)
          return false;
        auto item = _mop->ancestors.find(ino_c);
        if (item != _mop->ancestors.end()) {
          bp = item->second;
          return true;
        } else
          return false;
      }
      // Look up the backtrace ancestor of this sub_op's inode.
      bool get_ancestor(inode_backpointer_t& bp) {
        if (!_mop || !ino)
          return false;
        auto item = _mop->ancestors.find(ino);
        if (item != _mop->ancestors.end()) {
          bp = item->second;
          return true;
        } else
          return false;
      }
      op_type sub_op_t;
      ino_type sub_ino_t;
      inodeno_t ino;
      frag_t frag;
      inodeno_t ino_c;        // child inode, when tracing upwards
      unsigned trace_level;
      std::string name;
      bool _proc;             // true once this step has been processed
      meta_op* _mop;          // owning meta_op (not owned)
    };

    std::map<inodeno_t, inode_backpointer_t > ancestors;
    std::map<inodeno_t, inode_meta_t* > inodes;
    std::map<inodeno_t, std::string > okeys;

    void clear_sops() {
      while(!no_sops())
        pop_op();
    }
    bool no_sops() {
      return sub_ops.empty();
    }
    void push_op(sub_op* sop) {
      if (_debug)
        std::cout << "<<====" << sop->detail() << std::endl;
      sub_ops.push(sop);
    }
    sub_op* top_op() {
      return sub_ops.top();
    }
    // Pop and delete the top work item (meta_op owns queued sub_ops).
    void pop_op() {
      sub_op* sop = sub_ops.top();
      if (_debug)
        std::cout << "====>>" << sop->detail() << std::endl;
      delete sop;
      sub_ops.pop();
    }
    std::string outfile() {
      return _out;
    }
    std::string infile() {
      return _in;
    }
    bool is_debug() {
      return _debug;
    }
    bool confirm_chg() {
      return _confirm;
    }
  private:
    stack<sub_op*> sub_ops;
    bool _debug;
    std::string _out;     // output file path ("" = stdout/default)
    std::string _in;      // input file path for amend operations
    bool _confirm;        // actually write changes (vs. dry run)
  };
  MDSRoleSelector role_selector;
  mds_rank_t rank;

  // I/O handles
  librados::Rados rados;
  librados::IoCtx io_meta;
  std::vector<librados::IoCtx*> io_data_v;
  librados::IoCtx output;
  bool _debug;
  uint64_t features;

  // Compose RADOS object names for dirfrag / journal-style objects.
  std::string obj_name(inodeno_t ino, frag_t fg = frag_t(), const char *suffix = NULL) const;
  std::string obj_name(inodeno_t ino, uint64_t offset, const char *suffix = NULL) const;
  std::string obj_name(const char* ino, uint64_t offset, const char *suffix = NULL) const;

  // 0 : continue to find
  // 1 : stop to find it
  int show_child(std::string_view key,
                 std::string_view dname,
                 const snapid_t last,
                 bufferlist &bl,
                 const int pos,
                 const std::set<snapid_t> *snaps,
                 bool *force_dirty,
                 inodeno_t sp_ino = 0,
                 meta_op* op = NULL
                 );

  // Dispatch `mode` ("showm"/"listc"/...) into the matching operation.
  int process(std::string& mode, std::string& ino, std::string out, std::string in, bool confirm);
  int show_meta_info(std::string& ino, std::string& out);
  int list_meta_info(std::string& ino, std::string& out);
  int amend_meta_info(std::string& ino, std::string& in, bool confirm);
  int show_fnode(std::string& ino, std::string& out);
  int amend_fnode(std::string& in, bool confirm);
  // Drive the sub_op work stack for one meta_op.
  int op_process(meta_op &op);
  int list_meta(meta_op &op);
  int file_meta(meta_op &op);
  int show_meta(meta_op &op);
  int amend_meta(meta_op &op);
  int show_fn(meta_op &op);
  int amend_fn(meta_op &op);
public:
  int _file_meta(meta_op &op, librados::IoCtx& io);
  int _show_meta(inode_meta_t& i, const std::string& fn);
  int _amend_meta(std::string &k, inode_meta_t& i, const std::string& fn, meta_op& op);
  int _show_fn(inode_meta_t& i, const std::string& fn);
  int _amend_fn(const std::string& fn, bool confirm);
  void usage();
  MetaTool(bool debug=false):
    _debug(debug) {}
  ~MetaTool() {}

  int main(std::string& mode,
           std::string& rank_str,
           std::string& minfo,
           std::string& ino,
           std::string& out,
           std::string& in,
           bool confirm = false
           );
};
#endif // METATOOL_H__
| 6,916 | 24.336996 | 98 | h |
null | ceph-main/src/tools/cephfs/PgFiles.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef PG_EFFECTS_H_
#define PG_EFFECTS_H_
#include "include/cephfs/libcephfs.h"
#include "osd/osd_types.h"
#include <set>
#include "osdc/Objecter.h"
/**
* This utility scans the files (via an online MDS) and works out
* which ones rely on named PGs. For use when someone has
* some bad/damaged PGs and wants to see which files might be
* affected.
*/
/**
 * This utility scans the files (via an online MDS) and works out
 * which ones rely on named PGs.  For use when someone has
 * some bad/damaged PGs and wants to see which files might be
 * affected.
 */
class PgFiles
{
private:
  Objecter *objecter;                      //< for object->PG mapping (not owned)
  struct ceph_mount_info *cmount = nullptr; //< libcephfs mount, created in init()

  std::set<pg_t> pgs;       //< the PGs of interest
  std::set<uint64_t> pools; //< pools covering those PGs, derived from `pgs`

  // Check one file's objects against `pgs`; report if any land in them.
  void hit_file(std::string const &path, const struct ceph_statx &stx);
  // Recurse into a directory, visiting files and subdirectories.
  void hit_dir(std::string const &path);


public:
  PgFiles(Objecter *o, const std::set<pg_t> &pgs_);
  ~PgFiles();

  int init();
  int scan_path(std::string const &path);
};
#endif
| 1,161 | 21.346154 | 71 | h |
null | ceph-main/src/tools/cephfs/Resetter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2010 Greg Farnum <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef JOURNAL_RESETTER_H_
#define JOURNAL_RESETTER_H_
#include "MDSUtility.h"
class Journaler;
/**
* This class lets you reset an mds journal for troubleshooting or whatever.
*
* To use, create a Resetter, call init(), and then call reset() with the name
* of the file to dump to.
*/
class Resetter : public MDSUtility {
private:
  mds_role_t role;   // which MDS rank's journal to reset
  inodeno_t ino;     // journal inode number resolved in init()
  bool is_mdlog;     // true when resetting the mdlog journal (vs. purge queue)

protected:
  // Append a fresh reset event to the (re-initialized) journal.
  int _write_reset_event(Journaler *journaler);

public:
  Resetter() {}
  ~Resetter() {}

  // Resolve `role_` and journal `type` ("mdlog"/"purge_queue"); `hard`
  // selects reset_hard() behaviour.
  int init(mds_role_t role_, const std::string &type, bool hard);
  /**
   * For use when no journal header/pointer was present: write one
   * out from scratch.
   */
  int reset_hard();
  int reset();
};
#endif /* JOURNAL_RESETTER_H_ */
| 1,161 | 21.784314 | 78 | h |
null | ceph-main/src/tools/cephfs/TableTool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "MDSUtility.h"
#include "RoleSelector.h"
#include "include/rados/librados.hpp"
/**
* Command line tool for debugging the backing store of
* MDSTable instances.
*/
class TableTool : public MDSUtility
{
  private:
    // Which MDS rank(s) the requested operation applies to.
    MDSRoleSelector role_selector;

    // I/O handles
    librados::Rados rados;
    librados::IoCtx io;

    // Run `fptr` once per selected role, aggregating output into `f`.
    int apply_role_fn(std::function<int(mds_role_t, Formatter *)> fptr, Formatter *f);

  public:
    static void usage();
    // Entry point: parses argv and dispatches the table operation.
    int main(std::vector<const char*> &argv);
};
| 931 | 21.731707 | 86 | h |
null | ceph-main/src/tools/cephfs_mirror/ClusterWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_CLUSTER_WATCHER_H
#define CEPHFS_MIRROR_CLUSTER_WATCHER_H
#include <map>
#include "common/ceph_mutex.h"
#include "common/async/context_pool.h"
#include "messages/MFSMap.h"
#include "msg/Dispatcher.h"
#include "Types.h"
class MonClient;
namespace cephfs {
namespace mirror {
class ServiceDaemon;
// watch peer changes for filesystems via FSMap updates
class ClusterWatcher : public Dispatcher {
public:
  // Callback interface invoked as FSMap updates reveal mirroring/peer changes.
  struct Listener {
    virtual ~Listener() {
    }

    virtual void handle_mirroring_enabled(const FilesystemSpec &spec) = 0;
    virtual void handle_mirroring_disabled(const Filesystem &filesystem) = 0;

    virtual void handle_peers_added(const Filesystem &filesystem, const Peer &peer) = 0;
    virtual void handle_peers_removed(const Filesystem &filesystem, const Peer &peer) = 0;
  };

  ClusterWatcher(CephContext *cct, MonClient *monc, ServiceDaemon *service_daemon,
                 Listener &listener);
  ~ClusterWatcher();

  // Dispatcher interface: only MFSMap messages are consumed (see ms_dispatch2).
  bool ms_can_fast_dispatch_any() const override {
    return true;
  }
  bool ms_can_fast_dispatch2(const cref_t<Message> &m) const override;
  void ms_fast_dispatch2(const ref_t<Message> &m) override;
  bool ms_dispatch2(const ref_t<Message> &m) override;

  // Connection lifecycle hooks: no special handling required here.
  void ms_handle_connect(Connection *c) override {
  }
  bool ms_handle_reset(Connection *c) override {
    return false;
  }
  void ms_handle_remote_reset(Connection *c) override {
  }
  bool ms_handle_refused(Connection *c) override {
    return false;
  }

  // Subscribe to FSMap updates / stop watching.
  int init();
  void shutdown();

private:
  ceph::mutex m_lock = ceph::make_mutex("cephfs::mirror::cluster_watcher");
  MonClient *m_monc;
  ServiceDaemon *m_service_daemon;
  Listener &m_listener;

  bool m_stopping = false;
  // Last-seen peer set per filesystem; diffed against each new FSMap to
  // generate add/remove notifications.
  std::map<Filesystem, Peers> m_filesystem_peers;

  // Process an incoming FSMap and fire the appropriate Listener callbacks.
  void handle_fsmap(const cref_t<MFSMap> &m);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_CLUSTER_WATCHER_H
| 2,000 | 24.653846 | 90 | h |
null | ceph-main/src/tools/cephfs_mirror/FSMirror.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_FS_MIRROR_H
#define CEPHFS_MIRROR_FS_MIRROR_H
#include "common/Formatter.h"
#include "common/Thread.h"
#include "mds/FSMap.h"
#include "Types.h"
#include "InstanceWatcher.h"
#include "MirrorWatcher.h"
class ContextWQ;
namespace cephfs {
namespace mirror {
class MirrorAdminSocketHook;
class PeerReplayer;
class ServiceDaemon;
// handle mirroring for a filesystem to a set of peers
class FSMirror {
public:
  FSMirror(CephContext *cct, const Filesystem &filesystem, uint64_t pool_id,
           ServiceDaemon *service_daemon, std::vector<const char*> args,
           ContextWQ *work_queue);
  ~FSMirror();

  // Asynchronous bring-up/tear-down; completion is signalled via the Context.
  void init(Context *on_finish);
  void shutdown(Context *on_finish);

  void add_peer(const Peer &peer);
  void remove_peer(const Peer &peer);

  bool is_stopping() {
    std::scoped_lock locker(m_lock);
    return m_stopping;
  }

  bool is_init_failed() {
    std::scoped_lock locker(m_lock);
    return m_init_failed;
  }

  // Failed if init failed or either watcher reports failure.
  bool is_failed() {
    std::scoped_lock locker(m_lock);
    return m_init_failed ||
           m_instance_watcher->is_failed() ||
           m_mirror_watcher->is_failed();
  }

  bool is_blocklisted() {
    std::scoped_lock locker(m_lock);
    return is_blocklisted(locker);
  }

  Peers get_peers() {
    std::scoped_lock locker(m_lock);
    return m_all_peers;
  }

  std::string get_instance_addr() {
    std::scoped_lock locker(m_lock);
    return m_addrs;
  }

  // admin socket helpers
  void mirror_status(Formatter *f);

  void reopen_logs();

private:
  // Requires m_lock held (enforced by the scoped_lock parameter); true if
  // either watcher observed a blocklist event.
  bool is_blocklisted(const std::scoped_lock<ceph::mutex> &locker) const {
    bool blocklisted = false;
    if (m_instance_watcher) {
      blocklisted = m_instance_watcher->is_blocklisted();
    }
    if (m_mirror_watcher) {
      blocklisted |= m_mirror_watcher->is_blocklisted();
    }

    return blocklisted;
  }

  // Forwards directory acquire/release notifications from the instance
  // watcher back into this FSMirror.
  struct SnapListener : public InstanceWatcher::Listener {
    FSMirror *fs_mirror;

    SnapListener(FSMirror *fs_mirror)
      : fs_mirror(fs_mirror) {
    }

    void acquire_directory(std::string_view dir_path) override {
      fs_mirror->handle_acquire_directory(dir_path);
    }

    void release_directory(std::string_view dir_path) override {
      fs_mirror->handle_release_directory(dir_path);
    }
  };

  CephContext *m_cct;
  Filesystem m_filesystem;
  uint64_t m_pool_id;
  ServiceDaemon *m_service_daemon;
  std::vector<const char *> m_args;
  ContextWQ *m_work_queue;

  ceph::mutex m_lock = ceph::make_mutex("cephfs::mirror::fs_mirror");
  SnapListener m_snap_listener;
  std::set<std::string, std::less<>> m_directories;  // dirs currently acquired for mirroring
  Peers m_all_peers;
  std::map<Peer, std::unique_ptr<PeerReplayer>> m_peer_replayers;  // one replayer per peer

  RadosRef m_cluster;
  std::string m_addrs;        // rados instance address, exposed via get_instance_addr()
  librados::IoCtx m_ioctx;

  InstanceWatcher *m_instance_watcher = nullptr;
  MirrorWatcher *m_mirror_watcher = nullptr;

  int m_retval = 0;
  bool m_stopping = false;
  bool m_init_failed = false;
  Context *m_on_init_finish = nullptr;
  Context *m_on_shutdown_finish = nullptr;

  MirrorAdminSocketHook *m_asok_hook = nullptr;

  MountRef m_mount;

  int init_replayer(PeerReplayer *peer_replayer);
  void shutdown_replayer(PeerReplayer *peer_replayer);
  void cleanup();

  // init()/shutdown() state machine steps and their completions.
  void init_instance_watcher(Context *on_finish);
  void handle_init_instance_watcher(int r);

  void init_mirror_watcher();
  void handle_init_mirror_watcher(int r);

  void shutdown_peer_replayers();

  void shutdown_mirror_watcher();
  void handle_shutdown_mirror_watcher(int r);

  void shutdown_instance_watcher();
  void handle_shutdown_instance_watcher(int r);

  // Invoked (via SnapListener) when a directory is assigned to / removed
  // from this mirror instance.
  void handle_acquire_directory(std::string_view dir_path);
  void handle_release_directory(std::string_view dir_path);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_FS_MIRROR_H
| 3,867 | 23.327044 | 76 | h |
null | ceph-main/src/tools/cephfs_mirror/InstanceWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_INSTANCE_WATCHER_H
#define CEPHFS_MIRROR_INSTANCE_WATCHER_H
#include <string_view>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "Watcher.h"
class ContextWQ;
namespace cephfs {
namespace mirror {
// watch directory update notifications via per daemon rados
// object and invoke listener callback.
class InstanceWatcher : public Watcher {
public:
  // Callback interface for directory assignment notifications.
  struct Listener {
    virtual ~Listener() {
    }

    virtual void acquire_directory(std::string_view dir_path) = 0;
    virtual void release_directory(std::string_view dir_path) = 0;
  };

  static InstanceWatcher *create(librados::IoCtx &ioctx,
                                 Listener &listener, ContextWQ *work_queue) {
    return new InstanceWatcher(ioctx, listener, work_queue);
  }

  InstanceWatcher(librados::IoCtx &ioctx, Listener &listener, ContextWQ *work_queue);
  ~InstanceWatcher();

  // Create the per-daemon rados object and register the watch / undo both.
  void init(Context *on_finish);
  void shutdown(Context *on_finish);

  // Watcher overrides: decode notifications and drive the Listener.
  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist& bl) override;
  void handle_rewatch_complete(int r) override;

  bool is_blocklisted() {
    std::scoped_lock locker(m_lock);
    return m_blocklisted;
  }

  bool is_failed() {
    std::scoped_lock locker(m_lock);
    return m_failed;
  }

private:
  librados::IoCtx &m_ioctx;
  Listener &m_listener;
  ContextWQ *m_work_queue;

  ceph::mutex m_lock;
  Context *m_on_init_finish = nullptr;
  Context *m_on_shutdown_finish = nullptr;

  // Set from handle_rewatch_complete(); read by the status accessors above.
  bool m_blocklisted = false;
  bool m_failed = false;

  // init() steps and completions.
  void create_instance();
  void handle_create_instance(int r);

  void register_watcher();
  void handle_register_watcher(int r);

  // shutdown() steps and completions.
  void remove_instance();
  void handle_remove_instance(int r);

  void unregister_watcher();
  void handle_unregister_watcher(int r);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_INSTANCE_WATCHER_H
| 2,071 | 23.093023 | 85 | h |
null | ceph-main/src/tools/cephfs_mirror/Mirror.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_H
#define CEPHFS_MIRROR_H
#include <map>
#include <set>
#include <vector>
#include "common/ceph_mutex.h"
#include "common/WorkQueue.h"
#include "mds/FSMap.h"
#include "ClusterWatcher.h"
#include "FSMirror.h"
#include "ServiceDaemon.h"
#include "Types.h"
class Messenger;
class MonClient;
class ContextWQ;
namespace cephfs {
namespace mirror {
// this wraps up ClusterWatcher and FSMirrors to implement mirroring
// for ceph filesystems.
class Mirror {
public:
  Mirror(CephContext *cct, const std::vector<const char*> &args,
         MonClient *monc, Messenger *msgr);
  ~Mirror();

  // On failure init() stores a human-readable cause in `reason`.
  int init(std::string &reason);
  void shutdown();
  // Main loop; blocks until shutdown is requested.
  void run();

  void handle_signal(int signum);

private:
  // mgr module name checked for mirroring support.
  static constexpr std::string_view MIRRORING_MODULE = "mirroring";

  // Completion contexts for the asynchronous state transitions below.
  struct C_EnableMirroring;
  struct C_DisableMirroring;
  struct C_PeerUpdate;
  struct C_RestartMirroring;

  // Translates ClusterWatcher events into Mirror member calls.
  struct ClusterListener : ClusterWatcher::Listener {
    Mirror *mirror;

    ClusterListener(Mirror *mirror)
      : mirror(mirror) {
    }

    void handle_mirroring_enabled(const FilesystemSpec &spec) override {
      mirror->mirroring_enabled(spec.filesystem, spec.pool_id);
    }

    void handle_mirroring_disabled(const Filesystem &filesystem) override {
      mirror->mirroring_disabled(filesystem);
    }

    void handle_peers_added(const Filesystem &filesystem, const Peer &peer) override {
      mirror->peer_added(filesystem, peer);
    }

    void handle_peers_removed(const Filesystem &filesystem, const Peer &peer) override {
      mirror->peer_removed(filesystem, peer);
    }
  };

  // Per-filesystem mirroring state plus queued enable/disable/peer actions.
  struct MirrorAction {
    MirrorAction(uint64_t pool_id) :
      pool_id(pool_id) {
    }

    uint64_t pool_id; // for restarting blocklisted mirror instance
    bool action_in_progress = false;
    std::list<Context *> action_ctxs;   // pending actions, run one at a time
    std::unique_ptr<FSMirror> fs_mirror;
  };

  ceph::mutex m_lock = ceph::make_mutex("cephfs::mirror::Mirror");
  ceph::condition_variable m_cond;

  CephContext *m_cct;
  std::vector<const char *> m_args;
  MonClient *m_monc;
  Messenger *m_msgr;
  ClusterListener m_listener;

  ThreadPool *m_thread_pool = nullptr;
  ContextWQ *m_work_queue = nullptr;
  SafeTimer *m_timer = nullptr;
  ceph::mutex *m_timer_lock = nullptr;
  Context *m_timer_task = nullptr;

  bool m_stopping = false;
  std::unique_ptr<ClusterWatcher> m_cluster_watcher;
  std::map<Filesystem, MirrorAction> m_mirror_actions;

  // Timestamps used by the periodic update task to pace checks.
  utime_t m_last_blocklist_check;
  utime_t m_last_failure_check;

  RadosRef m_local;
  std::unique_ptr<ServiceDaemon> m_service_daemon;

  int init_mon_client();

  // called via listener
  void mirroring_enabled(const Filesystem &filesystem, uint64_t local_pool_id);
  void mirroring_disabled(const Filesystem &filesystem);
  void peer_added(const Filesystem &filesystem, const Peer &peer);
  void peer_removed(const Filesystem &filesystem, const Peer &peer);

  // mirror enable callback
  void enable_mirroring(const Filesystem &filesystem, uint64_t local_pool_id,
                        Context *on_finish, bool is_restart=false);
  void handle_enable_mirroring(const Filesystem &filesystem, int r);
  void handle_enable_mirroring(const Filesystem &filesystem, const Peers &peers, int r);

  // mirror disable callback
  void disable_mirroring(const Filesystem &filesystem, Context *on_finish);
  void handle_disable_mirroring(const Filesystem &filesystem, int r);

  // peer update callback
  void add_peer(const Filesystem &filesystem, const Peer &peer);
  void remove_peer(const Filesystem &filesystem, const Peer &peer);

  // Periodic task: restart blocklisted/failed FSMirror instances.
  void schedule_mirror_update_task();
  void update_fs_mirrors();

  void reopen_logs();
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_H
| 3,834 | 26.198582 | 88 | h |
null | ceph-main/src/tools/cephfs_mirror/MirrorWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_MIRROR_WATCHER_H
#define CEPHFS_MIRROR_MIRROR_WATCHER_H
#include <string_view>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "Watcher.h"
class ContextWQ;
class Messenger;
namespace cephfs {
namespace mirror {
class FSMirror;
// watch for notifications via cephfs_mirror object (in metadata
// pool). this is used sending keepalived with keepalive payload
// being the rados instance address (used by the manager module
// to blocklist when needed).
class MirrorWatcher : public Watcher {
public:
  static MirrorWatcher *create(librados::IoCtx &ioctx, FSMirror *fs_mirror,
                               ContextWQ *work_queue) {
    return new MirrorWatcher(ioctx, fs_mirror, work_queue);
  }

  MirrorWatcher(librados::IoCtx &ioctx, FSMirror *fs_mirror,
                ContextWQ *work_queue);
  ~MirrorWatcher();

  // Register / unregister the watch on the mirror object.
  void init(Context *on_finish);
  void shutdown(Context *on_finish);

  // Watcher overrides.
  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist& bl) override;
  void handle_rewatch_complete(int r) override;

  bool is_blocklisted() {
    std::scoped_lock locker(m_lock);
    return m_blocklisted;
  }

  bool is_failed() {
    std::scoped_lock locker(m_lock);
    return m_failed;
  }

private:
  librados::IoCtx &m_ioctx;
  FSMirror *m_fs_mirror;
  ContextWQ *m_work_queue;

  ceph::mutex m_lock;
  std::string m_instance_id;

  Context *m_on_init_finish = nullptr;
  Context *m_on_shutdown_finish = nullptr;

  // Set from handle_rewatch_complete(); read by the status accessors above.
  bool m_blocklisted = false;
  bool m_failed = false;

  void register_watcher();
  void handle_register_watcher(int r);

  void unregister_watcher();
  void handle_unregister_watcher(int r);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_MIRROR_WATCHER_H
| 1,937 | 23.225 | 75 | h |
null | ceph-main/src/tools/cephfs_mirror/PeerReplayer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_PEER_REPLAYER_H
#define CEPHFS_MIRROR_PEER_REPLAYER_H
#include "common/Formatter.h"
#include "common/Thread.h"
#include "mds/FSMap.h"
#include "ServiceDaemon.h"
#include "Types.h"
namespace cephfs {
namespace mirror {
class FSMirror;
class PeerReplayerAdminSocketHook;
// Replays (synchronizes) snapshots of mirrored directories from the local
// filesystem to a single remote peer, tracking per-directory sync statistics.
class PeerReplayer {
public:
  PeerReplayer(CephContext *cct, FSMirror *fs_mirror,
               RadosRef local_cluster, const Filesystem &filesystem,
               const Peer &peer, const std::set<std::string, std::less<>> &directories,
               MountRef mount, ServiceDaemon *service_daemon);
  ~PeerReplayer();

  // initialize replayer for a peer
  int init();

  // shutdown replayer for a peer
  void shutdown();

  // add a directory to mirror queue
  void add_directory(std::string_view dir_root);

  // remove a directory from queue
  void remove_directory(std::string_view dir_root);

  // admin socket helpers
  void peer_status(Formatter *f);

  // reopen logs
  void reopen_logs();

private:
  inline static const std::string PRIMARY_SNAP_ID_KEY = "primary_snap_id";

  // keys for the per-peer counters reported to the service daemon
  inline static const std::string SERVICE_DAEMON_FAILED_DIR_COUNT_KEY = "failure_count";
  inline static const std::string SERVICE_DAEMON_RECOVERED_DIR_COUNT_KEY = "recovery_count";

  // (snapshot name, snapshot id)
  using Snapshot = std::pair<std::string, uint64_t>;

  // file descriptor "triplet" for synchronizing a snapshot
  // w/ an added MountRef for accessing "previous" snapshot.
  struct FHandles {
    // open file descriptor on the snap directory for snapshot
    // currently being synchronized. Always use this fd with
    // @m_local_mount.
    int c_fd;

    // open file descriptor on the "previous" snapshot or on
    // dir_root on remote filesystem (based on if the snapshot
    // can be used for incremental transfer). Always use this
    // fd with p_mnt which either points to @m_local_mount (
    // for local incremental comparison) or @m_remote_mount (
    // for remote incremental comparison).
    int p_fd;
    MountRef p_mnt;

    // open file descriptor on dir_root on remote filesystem.
    // Always use this fd with @m_remote_mount.
    int r_fd_dir_root;
  };

  bool is_stopping() {
    return m_stopping;
  }

  struct Replayer;
  // Worker thread that repeatedly picks a directory and syncs its snapshots.
  class SnapshotReplayerThread : public Thread {
  public:
    SnapshotReplayerThread(PeerReplayer *peer_replayer)
      : m_peer_replayer(peer_replayer) {
    }

    void *entry() override {
      m_peer_replayer->run(this);
      return 0;
    }

  private:
    PeerReplayer *m_peer_replayer;
  };

  // Per-directory registration: lock fd, cancel flag and owning thread.
  struct DirRegistry {
    int fd;
    bool canceled = false;
    SnapshotReplayerThread *replayer;
  };

  // Work item for the (iterative) directory tree walk during a sync.
  struct SyncEntry {
    std::string epath;
    ceph_dir_result *dirp; // valid for directories
    struct ceph_statx stx;
    // set by incremental sync _after_ ensuring missing entries
    // in the currently synced snapshot have been propagated to
    // the remote filesystem.
    bool remote_synced = false;

    SyncEntry(std::string_view path,
              const struct ceph_statx &stx)
      : epath(path),
        stx(stx) {
    }
    SyncEntry(std::string_view path,
              ceph_dir_result *dirp,
              const struct ceph_statx &stx)
      : epath(path),
        dirp(dirp),
        stx(stx) {
    }

    bool is_directory() const {
      return S_ISDIR(stx.stx_mode);
    }

    bool needs_remote_sync() const {
      return remote_synced;
    }
    void set_remote_synced() {
      remote_synced = true;
    }
  };

  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;

  // stats sent to service daemon
  struct ServiceDaemonStats {
    uint64_t failed_dir_count = 0;
    uint64_t recovered_dir_count = 0;
  };

  // per-directory synchronization statistics
  struct SnapSyncStat {
    uint64_t nr_failures = 0; // number of consecutive failures
    boost::optional<time> last_failed; // last failed timestamp
    bool failed = false; // hit upper cap for consecutive failures
    boost::optional<std::pair<uint64_t, std::string>> last_synced_snap;
    boost::optional<std::pair<uint64_t, std::string>> current_syncing_snap;
    uint64_t synced_snap_count = 0;
    uint64_t deleted_snap_count = 0;
    uint64_t renamed_snap_count = 0;
    time last_synced = clock::zero();
    boost::optional<double> last_sync_duration;
  };

  // Record a sync failure; marks the dir "failed" (and notifies the service
  // daemon) once the configured consecutive-failure cap is hit.
  // Callers are expected to hold m_lock ("_"-prefix convention here).
  void _inc_failed_count(const std::string &dir_root) {
    auto max_failures = g_ceph_context->_conf.get_val<uint64_t>(
      "cephfs_mirror_max_consecutive_failures_per_directory");
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.last_failed = clock::now();
    if (++sync_stat.nr_failures >= max_failures && !sync_stat.failed) {
      sync_stat.failed = true;
      ++m_service_daemon_stats.failed_dir_count;
      m_service_daemon->add_or_update_peer_attribute(m_filesystem.fscid, m_peer,
                                                     SERVICE_DAEMON_FAILED_DIR_COUNT_KEY,
                                                     m_service_daemon_stats.failed_dir_count);
    }
  }
  // Clear failure state after a successful sync; bumps the recovery counter
  // if the directory had previously been marked failed.
  void _reset_failed_count(const std::string &dir_root) {
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    if (sync_stat.failed) {
      ++m_service_daemon_stats.recovered_dir_count;
      m_service_daemon->add_or_update_peer_attribute(m_filesystem.fscid, m_peer,
                                                     SERVICE_DAEMON_RECOVERED_DIR_COUNT_KEY,
                                                     m_service_daemon_stats.recovered_dir_count);
    }
    sync_stat.nr_failures = 0;
    sync_stat.failed = false;
    sync_stat.last_failed = boost::none;
  }

  // Lock-free variant; callers hold m_lock.
  void _set_last_synced_snap(const std::string &dir_root, uint64_t snap_id,
                             const std::string &snap_name) {
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.last_synced_snap = std::make_pair(snap_id, snap_name);
    sync_stat.current_syncing_snap = boost::none;
  }
  void set_last_synced_snap(const std::string &dir_root, uint64_t snap_id,
                            const std::string &snap_name) {
    std::scoped_lock locker(m_lock);
    _set_last_synced_snap(dir_root, snap_id, snap_name);
  }
  void set_current_syncing_snap(const std::string &dir_root, uint64_t snap_id,
                                const std::string &snap_name) {
    std::scoped_lock locker(m_lock);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.current_syncing_snap = std::make_pair(snap_id, snap_name);
  }
  void clear_current_syncing_snap(const std::string &dir_root) {
    std::scoped_lock locker(m_lock);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.current_syncing_snap = boost::none;
  }
  void inc_deleted_snap(const std::string &dir_root) {
    std::scoped_lock locker(m_lock);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    ++sync_stat.deleted_snap_count;
  }
  void inc_renamed_snap(const std::string &dir_root) {
    std::scoped_lock locker(m_lock);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    ++sync_stat.renamed_snap_count;
  }
  void set_last_synced_stat(const std::string &dir_root, uint64_t snap_id,
                            const std::string &snap_name, double duration) {
    std::scoped_lock locker(m_lock);
    _set_last_synced_snap(dir_root, snap_id, snap_name);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.last_synced = clock::now();
    sync_stat.last_sync_duration = duration;
    ++sync_stat.synced_snap_count;
  }

  // Returns true (with *retval set to the reason) if the current sync should
  // abort: blocklisted, shutting down, or the directory was canceled.
  bool should_backoff(const std::string &dir_root, int *retval) {
    if (m_fs_mirror->is_blocklisted()) {
      *retval = -EBLOCKLISTED;
      return true;
    }

    std::scoped_lock locker(m_lock);
    if (is_stopping()) {
      // ceph defines EBLOCKLISTED to ESHUTDOWN (108). so use
      // EINPROGRESS to identify shutdown.
      *retval = -EINPROGRESS;
      return true;
    }
    auto &dr = m_registered.at(dir_root);
    if (dr.canceled) {
      *retval = -ECANCELED;
      return true;
    }

    *retval = 0;
    return false;
  }

  typedef std::vector<std::unique_ptr<SnapshotReplayerThread>> SnapshotReplayers;

  CephContext *m_cct;
  FSMirror *m_fs_mirror;
  RadosRef m_local_cluster;
  Filesystem m_filesystem;
  Peer m_peer;
  // probably need to be encapsulated when supporting cancelations
  std::map<std::string, DirRegistry> m_registered;
  std::vector<std::string> m_directories;
  std::map<std::string, SnapSyncStat> m_snap_sync_stats;
  MountRef m_local_mount;
  ServiceDaemon *m_service_daemon;
  PeerReplayerAdminSocketHook *m_asok_hook = nullptr;

  ceph::mutex m_lock;
  ceph::condition_variable m_cond;
  RadosRef m_remote_cluster;
  MountRef m_remote_mount;
  bool m_stopping = false;
  SnapshotReplayers m_replayers;

  ServiceDaemonStats m_service_daemon_stats;

  // Thread body for SnapshotReplayerThread.
  void run(SnapshotReplayerThread *replayer);

  // Directory selection and registration (locking against other instances).
  boost::optional<std::string> pick_directory();
  int register_directory(const std::string &dir_root, SnapshotReplayerThread *replayer);
  void unregister_directory(const std::string &dir_root);
  int try_lock_directory(const std::string &dir_root, SnapshotReplayerThread *replayer,
                         DirRegistry *registry);
  void unlock_directory(const std::string &dir_root, const DirRegistry &registry);
  void sync_snaps(const std::string &dir_root, std::unique_lock<ceph::mutex> &locker);

  // Build (snap id -> snap name) map for a dir_root, locally or on the peer.
  int build_snap_map(const std::string &dir_root, std::map<uint64_t, std::string> *snap_map,
                     bool is_remote=false);

  // Propagate snapshot-level changes (deletes/renames) to the peer.
  int propagate_snap_deletes(const std::string &dir_root, const std::set<std::string> &snaps);
  int propagate_snap_renames(const std::string &dir_root,
                             const std::set<std::pair<std::string,std::string>> &snaps);
  int propagate_deleted_entries(const std::string &dir_root, const std::string &epath,
                                const FHandles &fh);
  int cleanup_remote_dir(const std::string &dir_root, const std::string &epath,
                         const FHandles &fh);

  // Decide whether an entry needs data and/or attribute synchronization.
  int should_sync_entry(const std::string &epath, const struct ceph_statx &cstx,
                        const FHandles &fh, bool *need_data_sync, bool *need_attr_sync);

  // Snapshot synchronization driver and its helpers.
  int open_dir(MountRef mnt, const std::string &dir_path, boost::optional<uint64_t> snap_id);
  int pre_sync_check_and_open_handles(const std::string &dir_root, const Snapshot &current,
                                      boost::optional<Snapshot> prev, FHandles *fh);
  void post_sync_close_handles(const FHandles &fh);

  int do_synchronize(const std::string &dir_root, const Snapshot &current,
                     boost::optional<Snapshot> prev);

  int synchronize(const std::string &dir_root, const Snapshot &current,
                  boost::optional<Snapshot> prev);
  int do_sync_snaps(const std::string &dir_root);

  // Per-entry transfer operations against the remote filesystem.
  int remote_mkdir(const std::string &epath, const struct ceph_statx &stx, const FHandles &fh);
  int remote_file_op(const std::string &dir_root, const std::string &epath, const struct ceph_statx &stx,
                     const FHandles &fh, bool need_data_sync, bool need_attr_sync);
  int copy_to_remote(const std::string &dir_root, const std::string &epath, const struct ceph_statx &stx,
                     const FHandles &fh);
  int sync_perms(const std::string& path);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_PEER_REPLAYER_H
| 11,372 | 34.429907 | 105 | h |
null | ceph-main/src/tools/cephfs_mirror/ServiceDaemon.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_SERVICE_DAEMON_H
#define CEPHFS_MIRROR_SERVICE_DAEMON_H
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "mds/FSMap.h"
#include "Types.h"
namespace cephfs {
namespace mirror {
class ServiceDaemon {
public:
  ServiceDaemon(CephContext *cct, RadosRef rados);
  ~ServiceDaemon();

  int init();

  // Track/untrack a filesystem in the status report.
  void add_filesystem(fs_cluster_id_t fscid, std::string_view fs_name);
  void remove_filesystem(fs_cluster_id_t fscid);

  // Track/untrack a peer under a filesystem.
  void add_peer(fs_cluster_id_t fscid, const Peer &peer);
  void remove_peer(fs_cluster_id_t fscid, const Peer &peer);

  // Set a key/value attribute on a filesystem or on one of its peers;
  // changes are flushed by the (debounced) update_status() task.
  void add_or_update_fs_attribute(fs_cluster_id_t fscid, std::string_view key,
                                  AttributeValue value);
  void add_or_update_peer_attribute(fs_cluster_id_t fscid, const Peer &peer,
                                    std::string_view key, AttributeValue value);

private:
  // Per-filesystem status: name plus attribute maps (fs-wide and per-peer).
  struct Filesystem {
    std::string fs_name;
    Attributes fs_attributes;
    std::map<Peer, Attributes> peer_attributes;

    Filesystem(std::string_view fs_name)
      : fs_name(fs_name) {
    }
  };

  const std::string CEPHFS_MIRROR_AUTH_ID_PREFIX = "cephfs-mirror.";

  CephContext *m_cct;
  RadosRef m_rados;
  SafeTimer *m_timer;
  ceph::mutex m_timer_lock = ceph::make_mutex("cephfs::mirror::ServiceDaemon");

  ceph::mutex m_lock = ceph::make_mutex("cephfs::mirror::service_daemon");
  Context *m_timer_ctx = nullptr;   // pending status-update timer task, if any
  std::map<fs_cluster_id_t, Filesystem> m_filesystems;

  // Coalesce attribute changes into a single deferred status update.
  void schedule_update_status();
  void update_status();
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_SERVICE_DAEMON_H
| 1,713 | 26.206349 | 80 | h |
null | ceph-main/src/tools/cephfs_mirror/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_TYPES_H
#define CEPHFS_MIRROR_TYPES_H
#include <set>
#include <iostream>
#include <string_view>
#include "include/rados/librados.hpp"
#include "include/cephfs/libcephfs.h"
#include "mds/mdstypes.h"
namespace cephfs {
namespace mirror {
static const std::string CEPHFS_MIRROR_OBJECT("cephfs_mirror");
typedef boost::variant<bool, uint64_t, std::string> AttributeValue;
typedef std::map<std::string, AttributeValue> Attributes;
// Uniquely identifies a ceph filesystem: cluster-scoped id plus name.
struct Filesystem {
  fs_cluster_id_t fscid;
  std::string fs_name;

  // Two filesystems are the same only when both id and name match.
  bool operator==(const Filesystem &rhs) const {
    return fscid == rhs.fscid && fs_name == rhs.fs_name;
  }

  bool operator!=(const Filesystem &rhs) const {
    return !operator==(rhs);
  }

  // Strict weak ordering: primary key is fscid, ties broken by fs_name.
  bool operator<(const Filesystem &rhs) const {
    if (fscid < rhs.fscid) {
      return true;
    }
    if (rhs.fscid < fscid) {
      return false;
    }
    return fs_name < rhs.fs_name;
  }
};
// Specification of a filesystem: the Filesystem identity together with
// its metadata pool id.
struct FilesystemSpec {
  FilesystemSpec() = default;
  FilesystemSpec(const Filesystem &filesystem, uint64_t pool_id)
    : filesystem(filesystem),
      pool_id(pool_id) {
  }
  FilesystemSpec(fs_cluster_id_t fscid, std::string_view fs_name, uint64_t pool_id)
    : filesystem(Filesystem{fscid, std::string(fs_name)}),
      pool_id(pool_id) {
  }

  Filesystem filesystem;
  uint64_t pool_id;

  // Specs are equal when both the filesystem and the pool id match.
  bool operator==(const FilesystemSpec &rhs) const {
    return filesystem == rhs.filesystem && pool_id == rhs.pool_id;
  }

  // Strict weak ordering: primary key is the filesystem, ties broken by
  // the metadata pool id.
  bool operator<(const FilesystemSpec &rhs) const {
    if (filesystem < rhs.filesystem) {
      return true;
    }
    if (rhs.filesystem < filesystem) {
      return false;
    }
    return pool_id < rhs.pool_id;
  }
};
std::ostream& operator<<(std::ostream& out, const Filesystem &filesystem);
std::ostream& operator<<(std::ostream& out, const FilesystemSpec &spec);
typedef std::shared_ptr<librados::Rados> RadosRef;
typedef std::shared_ptr<librados::IoCtx> IoCtxRef;
// not a shared_ptr since the type is incomplete
typedef ceph_mount_info *MountRef;
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_TYPES_H
| 2,221 | 24.25 | 83 | h |
null | ceph-main/src/tools/cephfs_mirror/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_UTILS_H
#define CEPHFS_MIRROR_UTILS_H
#include "Types.h"
namespace cephfs {
namespace mirror {
// Connect to a ceph cluster as `client_name`, returning the handle in
// *cluster. `mon_host`/`cephx_key` override config-derived values when
// non-empty; `args` are extra config arguments. Returns 0 on success,
// negative errno otherwise.
int connect(std::string_view client_name, std::string_view cluster_name,
            RadosRef *cluster, std::string_view mon_host={}, std::string_view cephx_key={},
            std::vector<const char *> args={});

// Mount `filesystem` via the connected cluster, returning the handle in
// *mount; `cross_check_fscid` additionally verifies the fscid matches.
// Returns 0 on success, negative errno otherwise.
int mount(RadosRef cluster, const Filesystem &filesystem, bool cross_check_fscid,
          MountRef *mount);
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_UTILS_H
| 621 | 26.043478 | 91 | h |
null | ceph-main/src/tools/cephfs_mirror/Watcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_WATCHER_H
#define CEPHFS_MIRROR_WATCHER_H
#include <string_view>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
class ContextWQ;
namespace cephfs {
namespace mirror {
// generic watcher class -- establish watch on a given rados object
// and invoke handle_notify() when notified. On notify error, try
// to re-establish the watch. Errors during rewatch are notified via
// handle_rewatch_complete().
class Watcher {
public:
  Watcher(librados::IoCtx &ioctx, std::string_view oid, ContextWQ *work_queue);
  virtual ~Watcher();

  // Establish / tear down the watch on m_oid; completion via Context.
  void register_watch(Context *on_finish);
  void unregister_watch(Context *on_finish);

protected:
  std::string m_oid;

  // Reply to a received notification so the notifier unblocks.
  // NOTE: first parameter renamed from the misspelled "notify_if" -- it is
  // the notify id paired with `handle`, matching handle_notify() below.
  void acknowledge_notify(uint64_t notify_id, uint64_t handle, bufferlist &bl);

  bool is_registered() const {
    return m_state == STATE_IDLE && m_watch_handle != 0;
  }
  bool is_unregistered() const {
    return m_state == STATE_IDLE && m_watch_handle == 0;
  }

  // Invoked after a rewatch attempt finishes; default is a no-op.
  virtual void handle_rewatch_complete(int r) { }

private:
  enum State {
    STATE_IDLE,
    STATE_REGISTERING,
    STATE_REWATCHING
  };

  // Bridges librados watch callbacks back into this Watcher.
  struct WatchCtx : public librados::WatchCtx2 {
    Watcher &watcher;

    WatchCtx(Watcher &parent) : watcher(parent) {}

    void handle_notify(uint64_t notify_id,
                       uint64_t handle,
                       uint64_t notifier_id,
                       bufferlist& bl) override;
    void handle_error(uint64_t handle, int err) override;
  };

  // Completion for the initial watch registration.
  struct C_RegisterWatch : public Context {
    Watcher *watcher;
    Context *on_finish;

    C_RegisterWatch(Watcher *watcher, Context *on_finish)
      : watcher(watcher),
        on_finish(on_finish) {
    }

    void finish(int r) override {
      watcher->handle_register_watch(r, on_finish);
    }
  };

  librados::IoCtx &m_ioctx;
  ContextWQ *m_work_queue;

  mutable ceph::shared_mutex m_lock;
  State m_state;
  bool m_watch_error = false;        // a watch error occurred; triggers rewatch
  bool m_watch_blocklisted = false;  // rewatch failed with -EBLOCKLISTED
  uint64_t m_watch_handle;           // 0 when no watch is established
  WatchCtx m_watch_ctx;
  Context *m_unregister_watch_ctx = nullptr;

  // Subclass hook for incoming notifications.
  virtual void handle_notify(uint64_t notify_id, uint64_t handle,
                             uint64_t notifier_id, bufferlist& bl) = 0;
  void handle_error(uint64_t handle, int err);

  // Re-establish a broken watch and process the result.
  void rewatch();
  void handle_rewatch(int r);
  void handle_rewatch_callback(int r);
  void handle_register_watch(int r, Context *on_finish);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_WATCHER_H
| 2,590 | 24.15534 | 79 | h |
null | ceph-main/src/tools/cephfs_mirror/aio_utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_AIO_UTILS_H
#define CEPHFS_MIRROR_AIO_UTILS_H
#include "include/rados/librados.hpp"
namespace cephfs {
namespace mirror {
// Bridge a librados C-style AIO completion callback to a C++ member
// function: recovers the object from the opaque `arg`, extracts the
// completion's return value and invokes (obj->*MF)(r).
template <typename T, void(T::*MF)(int)>
void rados_callback(rados_completion_t c, void *arg) {
  // `arg` is the T* registered with the AIO op; static_cast is the
  // idiomatic (and sufficient) cast from void*.
  T *obj = static_cast<T*>(arg);
  int r = rados_aio_get_return_value(c);
  (obj->*MF)(r);
}
// Context adapter: when the context completes, forwards the result code
// to the bound member function MF on the wrapped object.
template <typename T, void (T::*MF)(int)>
class C_CallbackAdapter : public Context {
public:
  C_CallbackAdapter(T *obj)
    : m_obj(obj) {
  }

protected:
  void finish(int r) override {
    (m_obj->*MF)(r);
  }

private:
  T *m_obj;
};
// Context that defers completion of on_finish to a work queue instead of
// running it inline. Ownership: if this context is destroyed without being
// finished, it deletes the wrapped on_finish; once queued, ownership passes
// to the work queue (on_finish is nulled to avoid the destructor's delete).
template <typename WQ>
struct C_AsyncCallback : public Context {
  WQ *op_work_queue;
  Context *on_finish;

  C_AsyncCallback(WQ *op_work_queue, Context *on_finish)
    : op_work_queue(op_work_queue), on_finish(on_finish) {
  }
  ~C_AsyncCallback() override {
    delete on_finish;
  }
  void finish(int r) override {
    op_work_queue->queue(on_finish, r);
    on_finish = nullptr;
  }
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_AIO_UTILS_H
| 1,137 | 20.074074 | 70 | h |
null | ceph-main/src/tools/cephfs_mirror/watcher/RewatchRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_WATCHER_REWATCH_REQUEST_H
#define CEPHFS_MIRROR_WATCHER_REWATCH_REQUEST_H
#include "common/ceph_mutex.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
struct Context;
namespace cephfs {
namespace mirror {
namespace watcher {
// Rewatch an existing watch -- the watch can be in an operational
// or error state.
class RewatchRequest {
public:
  // Factory wrapper around the constructor below.
  static RewatchRequest *create(librados::IoCtx &ioctx, const std::string &oid,
                                ceph::shared_mutex &watch_lock,
                                librados::WatchCtx2 *watch_ctx,
                                uint64_t *watch_handle, Context *on_finish) {
    return new RewatchRequest(ioctx, oid, watch_lock, watch_ctx, watch_handle,
                              on_finish);
  }

  // watch_handle: holds the current handle; presumably updated with the
  // re-established handle on success -- confirm in RewatchRequest.cc.
  // on_finish is completed when the request finishes.
  RewatchRequest(librados::IoCtx &ioctx, const std::string &oid,
                 ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx,
                 uint64_t *watch_handle, Context *on_finish);

  // Kick off the request state machine.
  void send();

private:
  librados::IoCtx& m_ioctx;
  std::string m_oid;
  ceph::shared_mutex &m_lock;
  librados::WatchCtx2 *m_watch_ctx;
  uint64_t *m_watch_handle;
  Context *m_on_finish;
  uint64_t m_rewatch_handle = 0;  // handle of the freshly established watch

  // State machine steps: drop the old watch, then establish a new one.
  void unwatch();
  void handle_unwatch(int r);
  void rewatch();
  void handle_rewatch(int r);
  void finish(int r);
};
} // namespace watcher
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_WATCHER_REWATCH_REQUEST_H
| 1,581 | 24.934426 | 80 | h |
null | ceph-main/src/tools/immutable_object_cache/CacheClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_CACHE_CLIENT_H
#define CEPH_CACHE_CACHE_CLIENT_H
#include <atomic>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/algorithm/string.hpp>
#include "include/ceph_assert.h"
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "Types.h"
#include "SocketCommon.h"
using boost::asio::local::stream_protocol;
namespace ceph {
namespace immutable_obj_cache {
// Client side of the immutable-object-cache domain-socket protocol.
// Connects to the cache daemon over a UNIX stream socket, registers
// itself, and issues asynchronous object-lookup requests matched to
// replies via a per-request sequence id.
class CacheClient {
public:
  // file: path of the daemon's UNIX domain socket.
  CacheClient(const std::string& file, CephContext* ceph_ctx);
  ~CacheClient();
  void run();
  // Whether the session with the daemon is currently usable.
  bool is_session_work();
  void close();
  int stop();
  // Synchronous and asynchronous connect variants.
  int connect();
  void connect(Context* on_finish);
  // Ask the daemon whether the given object is cached; on_finish is
  // invoked with the daemon's reply.
  void lookup_object(std::string pool_nspace, uint64_t pool_id,
                     uint64_t snap_id, uint64_t object_size, std::string oid,
                     CacheGenContextURef&& on_finish);
  int register_client(Context* on_finish);

private:
  // Outgoing-path helpers.
  void send_message();
  void try_send();
  // Common error path; err_type is one of the ASIO_ERROR_* codes.
  void fault(const int err_type, const boost::system::error_code& err);
  void handle_connect(Context* on_finish, const boost::system::error_code& err);
  // Incoming-path helpers: read the fixed header, then the payload, then
  // dispatch the reply to the request with the matching sequence id.
  void try_receive();
  void receive_message();
  void process(ObjectCacheRequest* reply, uint64_t seq_id);
  void read_reply_header();
  void handle_reply_header(bufferptr bp_head,
                           const boost::system::error_code& ec,
                           size_t bytes_transferred);
  void read_reply_data(bufferptr&& bp_head, bufferptr&& bp_data,
                       const uint64_t data_len);
  void handle_reply_data(bufferptr bp_head, bufferptr bp_data,
                         const uint64_t data_len,
                         const boost::system::error_code& ec,
                         size_t bytes_transferred);

private:
  CephContext* m_cct;
  boost::asio::io_service m_io_service;
  boost::asio::io_service::work m_io_service_work;  // keeps m_io_service alive
  stream_protocol::socket m_dm_socket;              // socket to the daemon
  stream_protocol::endpoint m_ep;                   // daemon socket path
  std::shared_ptr<std::thread> m_io_thread;
  std::atomic<bool> m_session_work;                 // session healthy flag
  uint64_t m_worker_thread_num;
  boost::asio::io_service* m_worker;
  std::vector<std::thread*> m_worker_threads;
  boost::asio::io_service::work* m_worker_io_service_work;
  std::atomic<bool> m_writing;                      // a send is in flight
  std::atomic<bool> m_reading;                      // a receive is in flight
  std::atomic<uint64_t> m_sequence_id;              // next request sequence id
  ceph::mutex m_lock =
    ceph::make_mutex("ceph::cache::cacheclient::m_lock");
  // Outstanding requests awaiting replies, keyed by sequence id.
  std::map<uint64_t, ObjectCacheRequest*> m_seq_to_req;
  bufferlist m_outcoming_bl;                        // queued outgoing bytes
  bufferptr m_bp_header;                            // scratch header buffer
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_CACHE_CLIENT_H
| 2,677 | 30.505882 | 80 | h |
null | ceph-main/src/tools/immutable_object_cache/CacheController.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_CACHE_CONTROLLER_H
#define CEPH_CACHE_CACHE_CONTROLLER_H
#include "common/ceph_context.h"
#include "common/WorkQueue.h"
#include "CacheServer.h"
#include "ObjectCacheStore.h"
namespace ceph {
namespace immutable_obj_cache {
// Top-level driver for the immutable-object-cache daemon: owns the
// domain-socket server and the backing object cache store, and routes
// client requests between them.
class CacheController {
public:
  CacheController(CephContext *cct, const std::vector<const char*> &args);
  ~CacheController();

  int init();
  int shutdown();
  void handle_signal(int sinnum);
  // Run the server loop (blocks); returns on shutdown/error.
  int run();
  // Callback given to the server: process one client request message.
  void handle_request(CacheSession* session, ObjectCacheRequest* msg);

private:
  CacheServer *m_cache_server = nullptr;
  std::vector<const char*> m_args;
  CephContext *m_cct;
  ObjectCacheStore *m_object_cache_store = nullptr;
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_CACHE_CONTROLLER_H
| 894 | 20.829268 | 74 | h |
null | ceph-main/src/tools/immutable_object_cache/CacheServer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_CACHE_SERVER_H
#define CEPH_CACHE_CACHE_SERVER_H
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include "Types.h"
#include "SocketCommon.h"
#include "CacheSession.h"
using boost::asio::local::stream_protocol;
namespace ceph {
namespace immutable_obj_cache {
// UNIX-domain-socket acceptor for the cache daemon: accepts client
// connections and hands each one to a CacheSession, forwarding parsed
// requests to the supplied ProcessMsg callback.
class CacheServer {
public:
  // file: socket path to listen on; processmsg: per-request handler.
  CacheServer(CephContext* cct, const std::string& file, ProcessMsg processmsg);
  ~CacheServer();

  int run();
  int start_accept();
  int stop();

private:
  void accept();
  void handle_accept(CacheSessionPtr new_session,
                     const boost::system::error_code& error);

private:
  CephContext* cct;
  boost::asio::io_service m_io_service;
  ProcessMsg m_server_process_msg;
  stream_protocol::endpoint m_local_path;  // listening socket path
  stream_protocol::acceptor m_acceptor;
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif
| 969 | 20.086957 | 80 | h |
null | ceph-main/src/tools/immutable_object_cache/CacheSession.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_SESSION_H
#define CEPH_CACHE_SESSION_H
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include "Types.h"
#include "SocketCommon.h"
using boost::asio::local::stream_protocol;
using boost::asio::io_service;
namespace ceph {
namespace immutable_obj_cache {
// One accepted client connection on the cache daemon side: reads the
// fixed-size request header, then the payload, dispatches the decoded
// request to the server's handler and writes replies back.
// enable_shared_from_this lets async handlers keep the session alive.
class CacheSession : public std::enable_shared_from_this<CacheSession> {
public:
  CacheSession(io_service& io_service, ProcessMsg process_msg,
               CephContext* ctx);
  ~CacheSession();

  stream_protocol::socket& socket();
  void close();
  // Begin the read loop on this connection.
  void start();
  void read_request_header();
  void handle_request_header(const boost::system::error_code& err,
                             size_t bytes_transferred);
  void read_request_data(uint64_t data_len);
  void handle_request_data(bufferptr bp, uint64_t data_len,
                           const boost::system::error_code& err,
                           size_t bytes_transferred);
  // Hand a fully decoded request to the server's ProcessMsg callback.
  void process(ObjectCacheRequest* req);
  // Error path for any failed socket operation.
  void fault(const boost::system::error_code& ec);
  // Serialize and send a reply to the client.
  void send(ObjectCacheRequest* msg);

  void set_client_version(const std::string &version);
  const std::string &client_version() const;

private:
  stream_protocol::socket m_dm_socket;
  ProcessMsg m_server_process_msg;
  CephContext* m_cct;
  std::string m_client_version;  // version string reported at registration
  bufferptr m_bp_header;         // scratch buffer for the request header
};
typedef std::shared_ptr<CacheSession> CacheSessionPtr;
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_SESSION_H
| 1,578 | 26.701754 | 72 | h |
null | ceph-main/src/tools/immutable_object_cache/ObjectCacheStore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_OBJECT_CACHE_STORE_H
#define CEPH_CACHE_OBJECT_CACHE_STORE_H
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "common/Throttle.h"
#include "common/Cond.h"
#include "include/rados/librados.hpp"
#include "SimplePolicy.h"
using librados::Rados;
using librados::IoCtx;
class Context;
namespace ceph {
namespace immutable_obj_cache {
typedef std::shared_ptr<librados::Rados> RadosRef;
typedef std::shared_ptr<librados::IoCtx> IoCtxRef;
// Local on-disk store for the immutable-object-cache daemon: promotes RADOS
// objects into cache files, looks them up on behalf of clients, evicts per
// policy, and optionally throttles promotion by bytes/objects (QoS).
class ObjectCacheStore {
 public:
  ObjectCacheStore(CephContext *cct);
  ~ObjectCacheStore();
  // reset: if true, start from an empty cache directory.
  int init(bool reset);
  int shutdown();
  int init_cache();
  // Look up (and promote if needed) an object; on success the local cache
  // file path is returned via target_cache_file_path. return_dne_path
  // controls whether a path is returned for objects that do not exist.
  int lookup_object(std::string pool_nspace,
                    uint64_t pool_id, uint64_t snap_id,
                    uint64_t object_size,
                    std::string object_name,
                    bool return_dne_path,
                    std::string& target_cache_file_path);

 private:
  // Keys for the per-resource throttles in m_throttles.
  enum ThrottleTypeCode {
    THROTTLE_CODE_BYTE,
    THROTTLE_CODE_OBJECT
  };

  std::string get_cache_file_name(std::string pool_nspace, uint64_t pool_id,
                                  uint64_t snap_id, std::string oid);
  std::string get_cache_file_path(std::string cache_file_name,
                                  bool mkdir = false);
  int evict_objects();
  // Read the object from RADOS and write it into the local cache.
  int do_promote(std::string pool_nspace, uint64_t pool_id,
                 uint64_t snap_id, std::string object_name);
  int promote_object(librados::IoCtx*, std::string object_name,
                     librados::bufferlist* read_buf,
                     Context* on_finish);
  int handle_promote_callback(int, bufferlist*, std::string);
  int do_evict(std::string cache_file);

  // QoS helpers: acquire tokens before promoting; resume when refilled.
  bool take_token_from_throttle(uint64_t object_size, uint64_t object_num);
  void handle_throttle_ready(uint64_t tokens, uint64_t type);
  void apply_qos_tick_and_limit(const uint64_t flag,
                                std::chrono::milliseconds min_tick,
                                uint64_t limit, uint64_t burst,
                                std::chrono::seconds burst_seconds);

  CephContext *m_cct;
  RadosRef m_rados;
  // Cached IoCtx per pool id, guarded by m_ioctx_map_lock.
  std::map<uint64_t, librados::IoCtx> m_ioctx_map;
  ceph::mutex m_ioctx_map_lock =
    ceph::make_mutex("ceph::cache::ObjectCacheStore::m_ioctx_map_lock");
  Policy* m_policy;                  // eviction/promotion policy
  std::string m_cache_root_dir;      // root of the on-disk cache tree

  // throttle mechanism
  uint64_t m_qos_enabled_flag{0};
  std::map<uint64_t, TokenBucketThrottle*> m_throttles;
  bool m_io_throttled{false};
  // Fixed stray double semicolon that followed this initializer.
  ceph::mutex m_throttle_lock =
    ceph::make_mutex("ceph::cache::ObjectCacheStore::m_throttle_lock");
  uint64_t m_iops_tokens{0};
  uint64_t m_bps_tokens{0};
};
} // namespace immutable_obj_cache
} // ceph
#endif // CEPH_CACHE_OBJECT_CACHE_STORE_H
| 2,857 | 32.232558 | 76 | h |
null | ceph-main/src/tools/immutable_object_cache/Policy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_POLICY_H
#define CEPH_CACHE_POLICY_H
#include <list>
#include <string>
namespace ceph {
namespace immutable_obj_cache {
// Cache state of an object as tracked by the policy.
typedef enum {
  OBJ_CACHE_NONE = 0,   // not tracked / not in cache
  OBJ_CACHE_PROMOTED,   // present in the local cache
  OBJ_CACHE_SKIP,       // deliberately not cached
  OBJ_CACHE_DNE,        // object does not exist in the backing pool
} cache_status_t;
// Abstract cache admission/eviction policy; keys are cache file names.
class Policy {
public:
  Policy() {}
  virtual ~Policy() {}

  virtual cache_status_t lookup_object(std::string) = 0;
  virtual int evict_entry(std::string) = 0;
  // Record an entry's new state (and size, when it becomes relevant).
  virtual void update_status(std::string, cache_status_t,
                             uint64_t size = 0) = 0;
  virtual cache_status_t get_status(std::string) = 0;
  // Fill obj_list with entries that should be evicted now.
  virtual void get_evict_list(std::list<std::string>* obj_list) = 0;
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif
| 818 | 22.4 | 70 | h |
null | ceph-main/src/tools/immutable_object_cache/SimplePolicy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_SIMPLE_POLICY_H
#define CEPH_CACHE_SIMPLE_POLICY_H
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "include/lru.h"
#include "Policy.h"
#include <unordered_map>
#include <string>
namespace ceph {
namespace immutable_obj_cache {
// LRU-based cache policy: tracks entries in a hash map plus an LRU list of
// promoted entries, limits in-flight promotions, and evicts past a
// watermark of the cache capacity.
class SimplePolicy : public Policy {
public:
  // block_num: cache capacity; max_inflight: cap on concurrent promotions;
  // watermark: fill fraction that triggers eviction.
  SimplePolicy(CephContext *cct, uint64_t block_num, uint64_t max_inflight,
               double watermark);
  ~SimplePolicy();

  cache_status_t lookup_object(std::string file_name);
  cache_status_t get_status(std::string file_name);
  void update_status(std::string file_name,
                     cache_status_t new_status,
                     uint64_t size = 0);
  int evict_entry(std::string file_name);
  void get_evict_list(std::list<std::string>* obj_list);

  uint64_t get_free_size();
  uint64_t get_promoting_entry_num();
  uint64_t get_promoted_entry_num();
  std::string get_evict_entry();

private:
  cache_status_t alloc_entry(std::string file_name);

  // Per-object bookkeeping; LRUObject lets entries live on m_promoted_lru.
  class Entry : public LRUObject {
  public:
    cache_status_t status;
    Entry() : status(OBJ_CACHE_NONE) {}
    std::string file_name;
    uint64_t size;
  };

  CephContext* cct;
  double m_watermark;
  uint64_t m_max_inflight_ops;
  uint64_t m_max_cache_size;
  std::atomic<uint64_t> inflight_ops = 0;   // promotions currently in flight

  // Entry lookup table, guarded by m_cache_map_lock.
  std::unordered_map<std::string, Entry*> m_cache_map;
  ceph::shared_mutex m_cache_map_lock =
    ceph::make_shared_mutex("rbd::cache::SimplePolicy::m_cache_map_lock");

  std::atomic<uint64_t> m_cache_size;
  LRU m_promoted_lru;                       // promoted entries in LRU order
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_SIMPLE_POLICY_H
| 1,735 | 24.15942 | 75 | h |
null | ceph-main/src/tools/immutable_object_cache/SocketCommon.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_SOCKET_COMMON_H
#define CEPH_CACHE_SOCKET_COMMON_H
namespace ceph {
namespace immutable_obj_cache {
// Wire op-codes for the cache client <-> daemon protocol.
static const int RBDSC_REGISTER = 0X11;
static const int RBDSC_READ = 0X12;
static const int RBDSC_REGISTER_REPLY = 0X13;
static const int RBDSC_READ_REPLY = 0X14;
static const int RBDSC_READ_RADOS = 0X15;  // reply: read directly from RADOS

// Error-site codes passed to the fault() handlers.
static const int ASIO_ERROR_READ = 0X01;
static const int ASIO_ERROR_WRITE = 0X02;
static const int ASIO_ERROR_CONNECT = 0X03;
static const int ASIO_ERROR_ACCEPT = 0X04;
static const int ASIO_ERROR_MSG_INCOMPLETE = 0X05;

class ObjectCacheRequest;
class CacheSession;

// Continuation carrying a decoded request/reply message.
typedef GenContextURef<ObjectCacheRequest*> CacheGenContextURef;
// Server-side per-message handler signature.
typedef std::function<void(CacheSession*, ObjectCacheRequest*)> ProcessMsg;
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_SOCKET_COMMON_H
| 968 | 29.28125 | 75 | h |
null | ceph-main/src/tools/immutable_object_cache/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_TYPES_H
#define CEPH_CACHE_TYPES_H
#include "include/encoding.h"
#include "include/Context.h"
#include "SocketCommon.h"
namespace ceph {
namespace immutable_obj_cache {
namespace {

// On-wire message header. `v` and `c_v` are presumably the encode and
// compat versions (Ceph encoding convention -- confirm against encode());
// `len` is the little-endian payload length. Packed so it maps 1:1 onto
// the wire bytes.
struct HeaderHelper {
  uint8_t v;
  uint8_t c_v;
  ceph_le32 len;
}__attribute__((packed));

// Size of the fixed header that precedes every payload.
inline uint8_t get_header_size() {
  return sizeof(HeaderHelper);
}

// Extract the payload length from a raw header buffer.
// NOTE(review): the reinterpret_cast assumes buf points at bytes laid out
// exactly as HeaderHelper -- callers must pass the raw header read off the
// socket.
inline uint32_t get_data_len(char* buf) {
  HeaderHelper* header = reinterpret_cast<HeaderHelper*>(buf);
  return header->len;
}

} // namespace
// Base class for all protocol messages: an op-code (`type`), a sequence id
// used to pair requests with replies, and an encoded payload.
class ObjectCacheRequest {
public:
  uint16_t type;
  uint64_t seq;
  bufferlist payload;                 // encoded message bytes
  CacheGenContextURef process_msg;    // client-side completion for the reply

  ObjectCacheRequest();
  ObjectCacheRequest(uint16_t type, uint64_t seq);
  virtual ~ObjectCacheRequest();

  // encode consists of two steps:
  // step 1: directly encode the common bits using this base class.
  // step 2: depending on payload_empty(), encode the additional bits,
  //         which is implemented by the child class (encode_payload()).
  void encode();
  void decode(bufferlist& bl);
  bufferlist get_payload_bufferlist() { return payload; }

  virtual void encode_payload() = 0;
  virtual void decode_payload(bufferlist::const_iterator bl_it,
                              __u8 encode_version) = 0;
  // The RBDSC_* op-code identifying this message type.
  virtual uint16_t get_request_type() = 0;
  // Whether this message type carries no extra payload.
  virtual bool payload_empty() = 0;
};
// RBDSC_REGISTER: client registration request, carrying the client's
// version string.
class ObjectCacheRegData : public ObjectCacheRequest {
public:
  std::string version;

  ObjectCacheRegData();
  ObjectCacheRegData(uint16_t t, uint64_t s, const std::string &version);
  ObjectCacheRegData(uint16_t t, uint64_t s);
  ~ObjectCacheRegData() override;

  void encode_payload() override;
  void decode_payload(bufferlist::const_iterator bl,
                      __u8 encode_version) override;
  uint16_t get_request_type() override { return RBDSC_REGISTER; }
  bool payload_empty() override { return false; }
};
// RBDSC_REGISTER_REPLY: daemon's acknowledgement of a registration; carries
// no payload of its own.
class ObjectCacheRegReplyData : public ObjectCacheRequest {
public:
  ObjectCacheRegReplyData();
  ObjectCacheRegReplyData(uint16_t t, uint64_t s);
  ~ObjectCacheRegReplyData() override;

  void encode_payload() override;
  void decode_payload(bufferlist::const_iterator iter,
                      __u8 encode_version) override;
  uint16_t get_request_type() override { return RBDSC_REGISTER_REPLY; }
  bool payload_empty() override { return true; }
};
// RBDSC_READ: lookup request identifying a RADOS object (pool, namespace,
// snapshot, oid) plus the requested read extent.
class ObjectCacheReadData : public ObjectCacheRequest {
public:
  uint64_t read_offset;
  uint64_t read_len;
  uint64_t pool_id;
  uint64_t snap_id;
  uint64_t object_size = 0;
  std::string oid;
  std::string pool_namespace;

  ObjectCacheReadData(uint16_t t, uint64_t s, uint64_t read_offset,
                      uint64_t read_len, uint64_t pool_id,
                      uint64_t snap_id, uint64_t object_size,
                      std::string oid, std::string pool_namespace);
  ObjectCacheReadData(uint16_t t, uint64_t s);
  ~ObjectCacheReadData() override;

  void encode_payload() override;
  void decode_payload(bufferlist::const_iterator bl,
                      __u8 encode_version) override;
  uint16_t get_request_type() override { return RBDSC_READ; }
  bool payload_empty() override { return false; }
};
// RBDSC_READ_REPLY: successful lookup; carries the local cache file path
// the client should read from.
class ObjectCacheReadReplyData : public ObjectCacheRequest {
public:
  std::string cache_path;

  ObjectCacheReadReplyData(uint16_t t, uint64_t s, std::string cache_path);
  ObjectCacheReadReplyData(uint16_t t, uint64_t s);
  ~ObjectCacheReadReplyData() override;

  void encode_payload() override;
  void decode_payload(bufferlist::const_iterator bl,
                      __u8 encode_version) override;
  uint16_t get_request_type() override { return RBDSC_READ_REPLY; }
  bool payload_empty() override { return false; }
};
// RBDSC_READ_RADOS: lookup miss; instructs the client to read the object
// from RADOS directly. No payload.
class ObjectCacheReadRadosData : public ObjectCacheRequest {
public:
  ObjectCacheReadRadosData();
  ObjectCacheReadRadosData(uint16_t t, uint64_t s);
  ~ObjectCacheReadRadosData() override;

  void encode_payload() override;
  void decode_payload(bufferlist::const_iterator bl,
                      __u8 encode_version) override;
  uint16_t get_request_type() override { return RBDSC_READ_RADOS; }
  bool payload_empty() override { return true; }
};
ObjectCacheRequest* decode_object_cache_request(bufferlist payload_buffer);
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_TYPES_H
| 4,398 | 31.109489 | 77 | h |
null | ceph-main/src/tools/immutable_object_cache/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_UTILS_H
#define CEPH_CACHE_UTILS_H
#include "include/rados/librados.hpp"
#include "include/Context.h"
namespace ceph {
namespace immutable_obj_cache {
namespace detail {

// C-style librados AIO callback trampoline: recovers the object pointer
// from the opaque argument and invokes member function MF with the
// operation's return value.
template <typename T, void(T::*MF)(int)>
void rados_callback(rados_completion_t c, void *arg) {
  T *obj = reinterpret_cast<T*>(arg);
  int r = rados_aio_get_return_value(c);
  (obj->*MF)(r);
}

} // namespace detail
// Build an AioCompletion that, on completion, invokes obj->*MF (defaults
// to T::complete) with the operation's return value.
template <typename T, void(T::*MF)(int)=&T::complete>
librados::AioCompletion *create_rados_callback(T *obj) {
  return librados::Rados::aio_create_completion(
    obj, &detail::rados_callback<T, MF>);
}
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_UTILS_H
| 802 | 24.09375 | 70 | h |
null | ceph-main/src/tools/rados/PoolDump.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef POOL_DUMP_H_
#define POOL_DUMP_H_
#include "include/rados/librados_fwd.hpp"
#include "tools/RadosDump.h"
// Exports the contents of a pool to the file descriptor held by the
// RadosDump base class.
class PoolDump : public RadosDump
{
  public:
    // file_fd_: destination fd for the dump stream.
    explicit PoolDump(int file_fd_) : RadosDump(file_fd_, false) {}
    // Dump all objects reachable via io_ctx; returns 0 on success.
    int dump(librados::IoCtx *io_ctx);
};
#endif // POOL_DUMP_H_
| 695 | 22.2 | 70 | h |
null | ceph-main/src/tools/rados/RadosImport.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef RADOS_IMPORT_H_
#define RADOS_IMPORT_H_
#include <string>
#include "include/rados/librados.hpp"
#include "include/buffer_fwd.h"
#include "tools/RadosDump.h"
/**
* Specialization of RadosDump that adds
* methods for importing objects from a stream
* to a live cluster.
*/
/**
 * Specialization of RadosDump that adds
 * methods for importing objects from a stream
 * to a live cluster.
 */
class RadosImport : public RadosDump
{
  protected:
    uint64_t align;  // write alignment applied to imported objects
    int get_object_rados(librados::IoCtx &ioctx, bufferlist &bl, bool no_overwrite);

  public:
    // file_fd_: source stream fd; dry_run_: parse without writing.
    RadosImport(int file_fd_, uint64_t align_, bool dry_run_)
      : RadosDump(file_fd_, dry_run_), align(align_)
    {}

    // Import into the named pool, or directly into an open IoCtx.
    // no_overwrite: skip objects that already exist.
    int import(std::string pool, bool no_overwrite);
    int import(librados::IoCtx &io_ctx, bool no_overwrite);
};
#endif // RADOS_IMPORT_H_
| 1,120 | 23.369565 | 84 | h |
null | ceph-main/src/tools/rbd/ArgumentTypes.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_ARGUMENT_TYPES_H
#define CEPH_RBD_ARGUMENT_TYPES_H
#include "include/int_types.h"
#include <set>
#include <string>
#include <vector>
#include <boost/any.hpp>
#include <boost/program_options.hpp>
#include <boost/shared_ptr.hpp>
namespace ceph { class Formatter; }
namespace rbd {
namespace argument_types {
// Distinguishes plain arguments from "source-"/"dest-" prefixed variants
// (see SOURCE_PREFIX / DEST_PREFIX below).
enum ArgumentModifier {
  ARGUMENT_MODIFIER_NONE,
  ARGUMENT_MODIFIER_SOURCE,
  ARGUMENT_MODIFIER_DEST
};
// Which positional spec a command accepts: an image, a snapshot, or either.
enum SpecFormat {
  SPEC_FORMAT_IMAGE,
  SPEC_FORMAT_SNAPSHOT,
  SPEC_FORMAT_IMAGE_OR_SNAPSHOT
};
static const std::string SOURCE_PREFIX("source-");
static const std::string DEST_PREFIX("dest-");
// positional arguments
static const std::string POSITIONAL_COMMAND_SPEC("positional-command-spec");
static const std::string POSITIONAL_ARGUMENTS("positional-arguments");
static const std::string IMAGE_SPEC("image-spec");
static const std::string SNAPSHOT_SPEC("snap-spec");
static const std::string IMAGE_OR_SNAPSHOT_SPEC("image-or-snap-spec");
static const std::string PATH_NAME("path-name");
static const std::string IMAGE_ID("image-id");
// optional arguments
static const std::string CONFIG_PATH("conf");
static const std::string POOL_NAME("pool");
static const std::string DEST_POOL_NAME("dest-pool");
static const std::string NAMESPACE_NAME("namespace");
static const std::string DEST_NAMESPACE_NAME("dest-namespace");
static const std::string IMAGE_NAME("image");
static const std::string DEST_IMAGE_NAME("dest");
static const std::string SNAPSHOT_NAME("snap");
static const std::string SNAPSHOT_ID("snap-id");
static const std::string DEST_SNAPSHOT_NAME("dest-snap");
static const std::string PATH("path");
static const std::string FROM_SNAPSHOT_NAME("from-snap");
static const std::string WHOLE_OBJECT("whole-object");
// encryption arguments
static const std::string ENCRYPTION_FORMAT("encryption-format");
static const std::string ENCRYPTION_PASSPHRASE_FILE("encryption-passphrase-file");
static const std::string IMAGE_FORMAT("image-format");
static const std::string IMAGE_NEW_FORMAT("new-format");
static const std::string IMAGE_ORDER("order");
static const std::string IMAGE_OBJECT_SIZE("object-size");
static const std::string IMAGE_FEATURES("image-feature");
static const std::string IMAGE_SHARED("image-shared");
static const std::string IMAGE_SIZE("size");
static const std::string IMAGE_STRIPE_UNIT("stripe-unit");
static const std::string IMAGE_STRIPE_COUNT("stripe-count");
static const std::string IMAGE_DATA_POOL("data-pool");
static const std::string IMAGE_SPARSE_SIZE("sparse-size");
static const std::string IMAGE_THICK_PROVISION("thick-provision");
static const std::string IMAGE_FLATTEN("flatten");
static const std::string IMAGE_MIRROR_IMAGE_MODE("mirror-image-mode");
static const std::string JOURNAL_OBJECT_SIZE("journal-object-size");
static const std::string JOURNAL_SPLAY_WIDTH("journal-splay-width");
static const std::string JOURNAL_POOL("journal-pool");
static const std::string NO_PROGRESS("no-progress");
static const std::string FORMAT("format");
static const std::string PRETTY_FORMAT("pretty-format");
static const std::string VERBOSE("verbose");
static const std::string NO_ERR("no-error");
static const std::string LIMIT("limit");
static const std::string SKIP_QUIESCE("skip-quiesce");
static const std::string IGNORE_QUIESCE_ERROR("ignore-quiesce-error");
static const std::set<std::string> SWITCH_ARGUMENTS = {
WHOLE_OBJECT, IMAGE_SHARED, IMAGE_THICK_PROVISION, IMAGE_FLATTEN,
NO_PROGRESS, PRETTY_FORMAT, VERBOSE, NO_ERR, SKIP_QUIESCE,
IGNORE_QUIESCE_ERROR
};
// Empty tag types: each selects the matching boost::program_options
// validate() overload declared below, giving per-option parsing/validation.
struct ImageSize {};
struct ImageOrder {};
struct ImageObjectSize {};
struct ImageFormat {};
struct ImageNewFormat {};

struct ImageFeatures {
  // Maps RBD_FEATURE_* bit values to their command-line names.
  static const std::map<uint64_t, std::string> FEATURE_MAPPING;

  uint64_t features;
};

struct MirrorImageMode {};

// Wrapper that tags a parsed value with a distinct type for validate().
template <typename T>
struct TypedValue {
  T value;
  TypedValue(const T& t) : value(t) {}
};

// Output-format option ("json", "xml", ...); can build the corresponding
// ceph::Formatter.
struct Format : public TypedValue<std::string> {
  typedef boost::shared_ptr<ceph::Formatter> Formatter;

  Format(const std::string &format) : TypedValue<std::string>(format) {}

  Formatter create_formatter(bool pretty) const;
};

struct JournalObjectSize {};
struct ExportFormat {};
struct Secret {};
struct EncryptionAlgorithm {};
struct EncryptionFormat {
  uint64_t format;
};
void add_export_format_option(boost::program_options::options_description *opt);
std::string get_name_prefix(ArgumentModifier modifier);
std::string get_description_prefix(ArgumentModifier modifier);
void add_all_option(boost::program_options::options_description *opt,
std::string description);
void add_pool_option(boost::program_options::options_description *opt,
ArgumentModifier modifier,
const std::string &desc_suffix = "");
void add_namespace_option(boost::program_options::options_description *opt,
ArgumentModifier modifier);
void add_image_option(boost::program_options::options_description *opt,
ArgumentModifier modifier,
const std::string &desc_suffix = "");
void add_image_id_option(boost::program_options::options_description *opt,
const std::string &desc_suffix = "");
void add_snap_option(boost::program_options::options_description *opt,
ArgumentModifier modifier);
void add_snap_id_option(boost::program_options::options_description *opt);
void add_pool_options(boost::program_options::options_description *pos,
boost::program_options::options_description *opt,
bool namespaces_supported);
void add_image_spec_options(boost::program_options::options_description *pos,
boost::program_options::options_description *opt,
ArgumentModifier modifier);
void add_snap_spec_options(boost::program_options::options_description *pos,
boost::program_options::options_description *opt,
ArgumentModifier modifier);
void add_image_or_snap_spec_options(
boost::program_options::options_description *pos,
boost::program_options::options_description *opt,
ArgumentModifier modifier);
void add_create_image_options(boost::program_options::options_description *opt,
bool include_format);
void add_create_journal_options(
boost::program_options::options_description *opt);
void add_size_option(boost::program_options::options_description *opt);
void add_sparse_size_option(boost::program_options::options_description *opt);
void add_path_options(boost::program_options::options_description *pos,
boost::program_options::options_description *opt,
const std::string &description);
void add_limit_option(boost::program_options::options_description *opt);
void add_no_progress_option(boost::program_options::options_description *opt);
void add_format_options(boost::program_options::options_description *opt);
void add_verbose_option(boost::program_options::options_description *opt);
void add_no_error_option(boost::program_options::options_description *opt);
void add_flatten_option(boost::program_options::options_description *opt);
void add_snap_create_options(boost::program_options::options_description *opt);
void add_encryption_options(boost::program_options::options_description *opt);
std::string get_short_features_help(bool append_suffix);
std::string get_long_features_help();
void validate(boost::any& v, const std::vector<std::string>& values,
ExportFormat *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
ImageSize *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
ImageOrder *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
ImageObjectSize *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
ImageFormat *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
ImageNewFormat *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
ImageFeatures *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
Format *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
JournalObjectSize *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
EncryptionAlgorithm *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
EncryptionFormat *target_type, int);
void validate(boost::any& v, const std::vector<std::string>& values,
Secret *target_type, int);
std::ostream &operator<<(std::ostream &os, const ImageFeatures &features);
} // namespace argument_types
} // namespace rbd
#endif // CEPH_RBD_ARGUMENT_TYPES_H
| 9,170 | 36.432653 | 82 | h |
null | ceph-main/src/tools/rbd/IndentStream.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_INDENT_STREAM_H
#define CEPH_RBD_INDENT_STREAM_H
#include "include/int_types.h"
#include <iostream>
#include <streambuf>
#include <iomanip>
namespace rbd {
// streambuf decorator that word-wraps output to a fixed line length and
// prefixes continuation lines with an indent, forwarding the result to a
// wrapped streambuf.
class IndentBuffer : public std::streambuf {
public:
  // indent: spaces prefixed to wrapped lines; initial_offset: columns
  // already consumed on the first line; line_length: wrap width.
  IndentBuffer(size_t indent, size_t initial_offset, size_t line_length,
               std::streambuf *streambuf)
    : m_indent(indent), m_initial_offset(initial_offset),
      m_line_length(line_length), m_streambuf(streambuf),
      m_delim(" "), m_indent_prefix(m_indent, ' ') {
  }

  // Delimiter at which lines may be broken (default: space).
  void set_delimiter(const std::string &delim) {
    m_delim = delim;
  }

protected:
  int overflow (int c) override;

private:
  size_t m_indent;
  size_t m_initial_offset;
  size_t m_line_length;
  std::streambuf *m_streambuf;   // destination for the wrapped text

  std::string m_delim;
  std::string m_indent_prefix;   // cached string of m_indent spaces
  std::string m_buffer;          // characters pending until a wrap point

  void flush_line();
};
// ostream whose output is word-wrapped/indented via an IndentBuffer before
// reaching the wrapped stream's buffer.
class IndentStream : public std::ostream {
public:
  IndentStream(size_t indent, size_t initial_offset, size_t line_length,
               std::ostream &os)
    : std::ostream(&m_indent_buffer),
      m_indent_buffer(indent, initial_offset, line_length, os.rdbuf()) {
  }

  // Forwarded to IndentBuffer::set_delimiter().
  void set_delimiter(const std::string &delim) {
    m_indent_buffer.set_delimiter(delim);
  }

private:
  IndentBuffer m_indent_buffer;
};
} // namespace rbd
#endif // CEPH_RBD_INDENT_STREAM_ITERATOR_H
| 1,429 | 22.442623 | 72 | h |
null | ceph-main/src/tools/rbd/MirrorDaemonServiceInfo.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_DAEMON_SERVICE_INFO_H
#define CEPH_RBD_MIRROR_DAEMON_SERVICE_INFO_H
#include "include/rados/librados_fwd.hpp"
#include "tools/rbd/ArgumentTypes.h"
#include <iosfwd>
#include <list>
#include <map>
#include <string>
namespace rbd {
// Aggregate health of rbd-mirror daemon(s), ordered by severity so callers
// can take the max across services.
enum MirrorHealth {
  MIRROR_HEALTH_OK = 0,
  MIRROR_HEALTH_UNKNOWN = 1,
  MIRROR_HEALTH_WARNING = 2,
  MIRROR_HEALTH_ERROR = 3
};
std::ostream& operator<<(std::ostream& os, MirrorHealth mirror_health);
// Snapshot of one rbd-mirror daemon instance as reported by the service
// map / status dump.
struct MirrorService {
  MirrorService() {}
  explicit MirrorService(const std::string& service_id)
    : service_id(service_id) {
  }

  std::string service_id;
  std::string instance_id;
  bool leader = false;                  // whether this daemon is the leader
  std::string client_id;
  std::string ceph_version;
  std::string hostname;
  std::list<std::string> callouts;      // outstanding health callout messages
  MirrorHealth health = MIRROR_HEALTH_UNKNOWN;

  // Human-readable summary / formatter dump for status output.
  std::string get_image_description() const;
  void dump_image(argument_types::Format::Formatter formatter) const;
};
typedef std::list<MirrorService> MirrorServices;
// Collects and indexes rbd-mirror daemon service information for a pool,
// queryable by service id or instance id after init().
class MirrorDaemonServiceInfo {
public:
  MirrorDaemonServiceInfo(librados::IoCtx &io_ctx) : m_io_ctx(io_ctx) {
  }

  // Populate the service maps; must be called before the getters below.
  int init();

  // Lookups return nullptr when the id is unknown.
  const MirrorService* get_by_service_id(const std::string& service_id) const;
  const MirrorService* get_by_instance_id(const std::string& instance_id) const;
  MirrorServices get_mirror_services() const;
  MirrorHealth get_daemon_health() const {
    return m_daemon_health;
  }

private:
  librados::IoCtx &m_io_ctx;

  std::map<std::string, MirrorService> m_mirror_services;
  // maps instance id -> service id for get_by_instance_id()
  std::map<std::string, std::string> m_instance_to_service_ids;
  MirrorHealth m_daemon_health = MIRROR_HEALTH_UNKNOWN;

  int get_mirror_service_dump();
  int get_mirror_service_status();
};
} // namespace rbd
#endif // CEPH_RBD_MIRROR_DAEMON_SERVICE_INFO_H
| 1,886 | 22.886076 | 80 | h |
null | ceph-main/src/tools/rbd/OptionPrinter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_OPTION_PRINTER_H
#define CEPH_RBD_OPTION_PRINTER_H
#include "include/int_types.h"
#include <string>
#include <vector>
#include <boost/algorithm/string.hpp>
#include <boost/program_options.hpp>
namespace rbd {
// Formats positional and optional command-line arguments for help output.
class OptionPrinter {
public:
  typedef boost::program_options::options_description OptionsDescription;

  static const std::string POSITIONAL_ARGUMENTS;
  static const std::string OPTIONAL_ARGUMENTS;

  // Layout constraints for the generated help text (columns).
  static const size_t LINE_WIDTH = 80;
  static const size_t MIN_NAME_WIDTH = 20;
  static const size_t MAX_DESCRIPTION_OFFSET = 37;

  OptionPrinter(const OptionsDescription &positional,
                const OptionsDescription &optional);

  // Single-line usage summary starting at the given column.
  void print_short(std::ostream &os, size_t initial_offset);
  // Full multi-line help listing.
  void print_detailed(std::ostream &os);
  static void print_optional(const OptionsDescription &global_opts,
                             size_t &name_width, std::ostream &os);

private:
  const OptionsDescription &m_positional;
  const OptionsDescription &m_optional;

  size_t compute_name_width(size_t indent);
};
} // namespace rbd
#endif // CEPH_RBD_OPTION_PRINTER_H
| 1,207 | 26.454545 | 73 | h |
null | ceph-main/src/tools/rbd/Schedule.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_SCHEDULE_H
#define CEPH_RBD_SCHEDULE_H
#include "json_spirit/json_spirit.h"
#include <iostream>
#include <list>
#include <map>
#include <string>
#include <boost/program_options.hpp>
namespace ceph { class Formatter; }
namespace rbd {
void add_level_spec_options(
boost::program_options::options_description *options, bool allow_image=true);
int get_level_spec_args(const boost::program_options::variables_map &vm,
std::map<std::string, std::string> *args);
void normalize_level_spec_args(std::map<std::string, std::string> *args);
void add_schedule_options(
boost::program_options::options_description *positional, bool mandatory);
int get_schedule_args(const boost::program_options::variables_map &vm,
bool mandatory, std::map<std::string, std::string> *args);
// A single named schedule: an ordered list of (interval, start-time) style
// string pairs parsed from JSON.
class Schedule {
public:
  Schedule() {
  }

  // Parse one schedule entry from the JSON returned by the manager module.
  int parse(json_spirit::mValue &schedule_val);
  void dump(ceph::Formatter *f);

  friend std::ostream& operator<<(std::ostream& os, Schedule &s);

private:
  std::string name;
  std::list<std::pair<std::string, std::string>> items;
};
std::ostream& operator<<(std::ostream& os, Schedule &s);
// Collection of Schedule objects keyed by name, parsed from a JSON blob.
class ScheduleList {
public:
  // 'allow_images' controls whether image-level schedules are accepted;
  // presumably affects parse() validation -- confirm in implementation.
  ScheduleList(bool allow_images=true) : allow_images(allow_images) {
  }

  int parse(const std::string &list);
  // Returns nullptr when no schedule with the given name exists.
  Schedule *find(const std::string &name);
  void dump(ceph::Formatter *f);

  friend std::ostream& operator<<(std::ostream& os, ScheduleList &l);

private:
  bool allow_images;
  std::map<std::string, Schedule> schedules;
};
std::ostream& operator<<(std::ostream& os, ScheduleList &l);
} // namespace rbd
#endif // CEPH_RBD_SCHEDULE_H
| 1,759 | 24.882353 | 80 | h |
null | ceph-main/src/tools/rbd/Shell.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_SHELL_H
#define CEPH_RBD_SHELL_H
#include "include/int_types.h"
#include <set>
#include <string>
#include <vector>
#include <boost/program_options.hpp>
namespace rbd {
// Command-line dispatcher for the rbd tool.  Subcommands register
// themselves at static-initialization time by constructing Shell::Action
// objects; execute() then matches argv against the registered specs.
class Shell {
public:
  typedef std::vector<std::string> CommandSpec;

  struct Action {
    typedef void (*GetArguments)(boost::program_options::options_description *,
                                 boost::program_options::options_description *);
    typedef int (*Execute)(const boost::program_options::variables_map &,
                           const std::vector<std::string> &);

    CommandSpec command_spec;
    CommandSpec alias_command_spec;
    const std::string description;
    const std::string help;
    GetArguments get_arguments;
    Execute execute;
    bool visible;    // hidden actions are excluded from help output

    // Self-registering constructor: every statically-constructed Action
    // appends itself to the global action list (see push_back below).
    template <typename Args, typename Execute>
    Action(const std::initializer_list<std::string> &command_spec,
           const std::initializer_list<std::string> &alias_command_spec,
           const std::string &description, const std::string &help,
           Args args, Execute execute, bool visible = true)
      : command_spec(command_spec), alias_command_spec(alias_command_spec),
        description(description), help(help), get_arguments(args),
        execute(execute), visible(visible) {
      Shell::get_actions().push_back(this);
    }
  };

  // Registers option names that act as boolean switches (no value).
  struct SwitchArguments {
    SwitchArguments(const std::initializer_list<std::string> &arguments) {
      Shell::get_switch_arguments().insert(arguments.begin(), arguments.end());
    }
  };

  int execute(int argc, const char **argv);

private:
  // Meyers-style accessors avoid static-initialization-order issues for
  // the registration lists used by the constructors above.
  static std::vector<Action *>& get_actions();
  static std::set<std::string>& get_switch_arguments();

  void get_command_spec(const std::vector<std::string> &arguments,
                        std::vector<std::string> *command_spec);
  Action *find_action(const CommandSpec &command_spec,
                      CommandSpec **matching_spec, bool *is_alias);

  void get_global_options(boost::program_options::options_description *opts);

  void print_help();
  void print_action_help(Action *action, bool is_alias);
  void print_unknown_action(const CommandSpec &command_spec);

  void print_bash_completion(const CommandSpec &command_spec);
  void print_bash_completion_options(
    const boost::program_options::options_description &ops);
};
} // namespace rbd
#endif // CEPH_RBD_SHELL_H
| 2,482 | 31.246753 | 80 | h |
null | ceph-main/src/tools/rbd/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_UTILS_H
#define CEPH_RBD_UTILS_H
#include "include/compat.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "tools/rbd/ArgumentTypes.h"
#include <map>
#include <string>
#include <boost/program_options.hpp>
namespace rbd {
namespace utils {
namespace detail {
// Adapter used as a librbd AIO completion callback: extracts the
// completion's return code, releases the completion, then invokes member
// function MF on the object passed through 'arg'.
template <typename T, void(T::*MF)(int)>
void aio_completion_callback(librbd::completion_t completion,
                             void *arg) {
  auto *comp = reinterpret_cast<librbd::RBD::AioCompletion*>(completion);
  auto *obj = reinterpret_cast<T *>(arg);

  // capture the result before dropping our reference on the completion,
  // then complete the AIO callback in this (separate) thread context
  int result = comp->get_return_value();
  comp->release();
  (obj->*MF)(result);
}
} // namespace detail
static const std::string RBD_DIFF_BANNER ("rbd diff v1\n");
static const size_t RBD_DEFAULT_SPARSE_SIZE = 4096;
static const std::string RBD_IMAGE_BANNER_V2 ("rbd image v2\n");
static const std::string RBD_IMAGE_DIFFS_BANNER_V2 ("rbd image diffs v2\n");
static const std::string RBD_DIFF_BANNER_V2 ("rbd diff v2\n");
#define RBD_DIFF_FROM_SNAP 'f'
#define RBD_DIFF_TO_SNAP 't'
#define RBD_DIFF_IMAGE_SIZE 's'
#define RBD_DIFF_WRITE 'w'
#define RBD_DIFF_ZERO 'z'
#define RBD_DIFF_END 'e'
#define RBD_SNAP_PROTECTION_STATUS 'p'
#define RBD_EXPORT_IMAGE_ORDER 'O'
#define RBD_EXPORT_IMAGE_FEATURES 'T'
#define RBD_EXPORT_IMAGE_STRIPE_UNIT 'U'
#define RBD_EXPORT_IMAGE_STRIPE_COUNT 'C'
#define RBD_EXPORT_IMAGE_META 'M'
#define RBD_EXPORT_IMAGE_END 'E'
// Whether a snapshot name may/must accompany an image spec argument.
enum SnapshotPresence {
  SNAPSHOT_PRESENCE_NONE,       // snapshot must not be specified
  SNAPSHOT_PRESENCE_PERMITTED,  // snapshot is optional
  SNAPSHOT_PRESENCE_REQUIRED    // snapshot must be specified
};
// How strictly to validate the components of an image/snapshot spec.
enum SpecValidation {
  SPEC_VALIDATION_FULL,
  SPEC_VALIDATION_SNAP,
  SPEC_VALIDATION_NONE
};
// CLI progress reporter for long-running librbd operations.
struct ProgressContext : public librbd::ProgressContext {
  const char *operation;  // label shown to the user
  bool progress;          // false suppresses progress output
  int last_pc;            // last percentage printed, to avoid duplicates

  ProgressContext(const char *o, bool no_progress)
    : operation(o), progress(!no_progress), last_pc(0) {
  }

  int update_progress(uint64_t offset, uint64_t total) override;
  void finish();
  void fail();
};
int get_percentage(uint64_t part, uint64_t whole);
// Owns a list of librbd encryption format specs.  The destructor scrubs
// each format's passphrase from memory (ceph_memzero_s) before freeing the
// per-format options struct, so secrets do not linger on the heap.
struct EncryptionOptions {
  std::vector<librbd::encryption_spec_t> specs;

  ~EncryptionOptions() {
    for (auto& spec : specs) {
      switch (spec.format) {
      case RBD_ENCRYPTION_FORMAT_LUKS: {
        auto opts =
            static_cast<librbd::encryption_luks_format_options_t*>(spec.opts);
        ceph_memzero_s(opts->passphrase.data(), opts->passphrase.size(),
                       opts->passphrase.size());
        delete opts;
        break;
      }
      case RBD_ENCRYPTION_FORMAT_LUKS1: {
        auto opts =
            static_cast<librbd::encryption_luks1_format_options_t*>(spec.opts);
        ceph_memzero_s(opts->passphrase.data(), opts->passphrase.size(),
                       opts->passphrase.size());
        delete opts;
        break;
      }
      case RBD_ENCRYPTION_FORMAT_LUKS2: {
        auto opts =
            static_cast<librbd::encryption_luks2_format_options_t*>(spec.opts);
        ceph_memzero_s(opts->passphrase.data(), opts->passphrase.size(),
                       opts->passphrase.size());
        delete opts;
        break;
      }
      default:
        // unknown format would mean we don't know how to free spec.opts
        ceph_abort();
      }
    }
  }
};
// Convenience factory: build an AioCompletion that invokes member function
// MF on 't' (via detail::aio_completion_callback) when the AIO completes.
template <typename T, void(T::*MF)(int)>
librbd::RBD::AioCompletion *create_aio_completion(T *t) {
  return new librbd::RBD::AioCompletion(
    t, &detail::aio_completion_callback<T, MF>);
}
void aio_context_callback(librbd::completion_t completion, void *arg);
int read_string(int fd, unsigned max, std::string *out);
int extract_spec(const std::string &spec, std::string *pool_name,
std::string *namespace_name, std::string *name,
std::string *snap_name, SpecValidation spec_validation);
std::string get_positional_argument(
const boost::program_options::variables_map &vm, size_t index);
void normalize_pool_name(std::string* pool_name);
std::string get_default_pool_name();
int get_image_or_snap_spec(const boost::program_options::variables_map &vm,
std::string *spec);
void append_options_as_args(const std::vector<std::string> &options,
std::vector<std::string> *args);
int get_pool_and_namespace_names(
const boost::program_options::variables_map &vm, bool validate_pool_name,
std::string* pool_name, std::string* namespace_name, size_t *arg_index);
int get_pool_image_snapshot_names(
const boost::program_options::variables_map &vm,
argument_types::ArgumentModifier mod, size_t *spec_arg_index,
std::string *pool_name, std::string *namespace_name,
std::string *image_name, std::string *snap_name, bool image_name_required,
SnapshotPresence snapshot_presence, SpecValidation spec_validation);
int get_pool_generic_snapshot_names(
const boost::program_options::variables_map &vm,
argument_types::ArgumentModifier mod, size_t *spec_arg_index,
const std::string& pool_key, std::string *pool_name,
std::string *namespace_name, const std::string& generic_key,
const std::string& generic_key_desc, std::string *generic_name,
std::string *snap_name, bool generic_name_required,
SnapshotPresence snapshot_presence, SpecValidation spec_validation);
int get_pool_image_id(const boost::program_options::variables_map &vm,
size_t *spec_arg_index,
std::string *pool_name,
std::string *namespace_name,
std::string *image_id);
int validate_snapshot_name(argument_types::ArgumentModifier mod,
const std::string &snap_name,
SnapshotPresence snapshot_presence,
SpecValidation spec_validation);
int get_image_options(const boost::program_options::variables_map &vm,
bool get_format, librbd::ImageOptions* opts);
int get_journal_options(const boost::program_options::variables_map &vm,
librbd::ImageOptions *opts);
int get_flatten_option(const boost::program_options::variables_map &vm,
librbd::ImageOptions *opts);
int get_image_size(const boost::program_options::variables_map &vm,
uint64_t *size);
int get_path(const boost::program_options::variables_map &vm,
size_t *arg_index, std::string *path);
int get_formatter(const boost::program_options::variables_map &vm,
argument_types::Format::Formatter *formatter);
int get_snap_create_flags(const boost::program_options::variables_map &vm,
uint32_t *flags);
int get_encryption_options(const boost::program_options::variables_map &vm,
EncryptionOptions* result);
void init_context();
int init_rados(librados::Rados *rados);
int init(const std::string& pool_name, const std::string& namespace_name,
librados::Rados *rados, librados::IoCtx *io_ctx);
int init_io_ctx(librados::Rados &rados, std::string pool_name,
const std::string& namespace_name, librados::IoCtx *io_ctx);
int set_namespace(const std::string& namespace_name, librados::IoCtx *io_ctx);
void disable_cache();
int open_image(librados::IoCtx &io_ctx, const std::string &image_name,
bool read_only, librbd::Image *image);
int open_image_by_id(librados::IoCtx &io_ctx, const std::string &image_id,
bool read_only, librbd::Image *image);
int init_and_open_image(const std::string &pool_name,
const std::string &namespace_name,
const std::string &image_name,
const std::string &image_id,
const std::string &snap_name, bool read_only,
librados::Rados *rados, librados::IoCtx *io_ctx,
librbd::Image *image);
int snap_set(librbd::Image &image, const std::string &snap_name);
void calc_sparse_extent(const bufferptr &bp,
size_t sparse_size,
size_t buffer_offset,
uint64_t length,
size_t *write_length,
bool *zeroed);
bool is_not_user_snap_namespace(librbd::Image* image,
const librbd::snap_info_t &snap_info);
std::string image_id(librbd::Image& image);
std::string mirror_image_mode(
librbd::mirror_image_mode_t mirror_image_mode);
std::string mirror_image_state(
librbd::mirror_image_state_t mirror_image_state);
std::string mirror_image_status_state(
librbd::mirror_image_status_state_t state);
std::string mirror_image_site_status_state(
const librbd::mirror_image_site_status_t& status);
std::string mirror_image_global_status_state(
const librbd::mirror_image_global_status_t& status);
int get_local_mirror_image_status(
const librbd::mirror_image_global_status_t& status,
librbd::mirror_image_site_status_t* local_status);
std::string timestr(time_t t);
// duplicate here to not include librbd_internal lib
uint64_t get_rbd_default_features(CephContext* cct);
void get_mirror_peer_sites(
librados::IoCtx& io_ctx,
std::vector<librbd::mirror_peer_site_t>* mirror_peers);
void get_mirror_peer_mirror_uuids_to_names(
const std::vector<librbd::mirror_peer_site_t>& mirror_peers,
std::map<std::string, std::string>* fsid_to_name);
void populate_unknown_mirror_image_site_statuses(
const std::vector<librbd::mirror_peer_site_t>& mirror_peers,
librbd::mirror_image_global_status_t* global_status);
int mgr_command(librados::Rados& rados, const std::string& cmd,
const std::map<std::string, std::string> &args,
std::ostream *out_os, std::ostream *err_os);
} // namespace utils
} // namespace rbd
#endif // CEPH_RBD_UTILS_H
| 9,916 | 33.919014 | 79 | h |
null | ceph-main/src/tools/rbd_ggate/ggate_drv.c | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/linker.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <geom/gate/g_gate.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libgeom.h>
#include "debug.h"
#include "ggate_drv.h"
/* Kernel-assigned sequence number identifying this I/O request. */
uint64_t ggate_drv_req_id(ggate_drv_req_t req) {
  return ((struct g_gate_ctl_io *)req)->gctl_seq;
}
/* Translate the kernel BIO command into the matching GGATE_DRV_CMD_*
 * value; unrecognized commands map to GGATE_DRV_CMD_UNKNOWN. */
int ggate_drv_req_cmd(ggate_drv_req_t req) {
  int bio_cmd = ((struct g_gate_ctl_io *)req)->gctl_cmd;

  if (bio_cmd == BIO_WRITE) {
    return GGATE_DRV_CMD_WRITE;
  } else if (bio_cmd == BIO_READ) {
    return GGATE_DRV_CMD_READ;
  } else if (bio_cmd == BIO_FLUSH) {
    return GGATE_DRV_CMD_FLUSH;
  } else if (bio_cmd == BIO_DELETE) {
    return GGATE_DRV_CMD_DISCARD;
  }
  return GGATE_DRV_CMD_UNKNOWN;
}
/* Byte offset within the device for this I/O request. */
uint64_t ggate_drv_req_offset(ggate_drv_req_t req) {
  return ((struct g_gate_ctl_io *)req)->gctl_offset;
}
/* Length in bytes of this I/O request. */
size_t ggate_drv_req_length(ggate_drv_req_t req) {
  return ((struct g_gate_ctl_io *)req)->gctl_length;
}
/* Data buffer associated with this request (owned by the request). */
void *ggate_drv_req_buf(ggate_drv_req_t req) {
  return ((struct g_gate_ctl_io *)req)->gctl_data;
}
/* Error code currently recorded on the request (0 == success). */
int ggate_drv_req_error(ggate_drv_req_t req) {
  return ((struct g_gate_ctl_io *)req)->gctl_error;
}
/* Record the completion status to be reported back to the kernel. */
void ggate_drv_req_set_error(ggate_drv_req_t req, int error) {
  ((struct g_gate_ctl_io *)req)->gctl_error = error;
}
/* Detach and return the request's data buffer.  The caller takes
 * ownership; ggate_drv_send() will no longer free it (free(NULL) is a
 * no-op). */
void *ggate_drv_req_release_buf(ggate_drv_req_t req) {
  struct g_gate_ctl_io *ggio = (struct g_gate_ctl_io *)req;
  void *buf = ggio->gctl_data;

  ggio->gctl_data = NULL;
  return buf;
}
struct ggate_drv {
int fd;
int unit;
};
/* Ensure the geom_gate kernel module is available, loading it on demand.
 * Returns 0 on success or a negative errno. */
int ggate_drv_load() {
  /* Already present in kernel? */
  if (modfind("g_gate") != -1) {
    return 0;
  }

  /* Load it and confirm it registered. */
  if (kldload("geom_gate") != -1 && modfind("g_gate") != -1) {
    return 0;
  }
  /* A concurrent load may have raced us -- that is not an error. */
  if (errno == EEXIST) {
    return 0;
  }

  err("failed to load geom_gate module");
  return -errno;
}
/*
 * Create a ggate provider backed by this process.
 *
 * If *name is non-empty it is used as the provider name (and must fit in
 * the kernel's gctl_name field); otherwise a unit number is auto-allocated
 * and the resulting provider name ("<G_GATE_PROVIDER_NAME><unit>") is
 * written back into 'name' (buffer of size 'namelen').
 *
 * On success stores the driver handle in *drv_ and returns 0; returns a
 * negative errno on failure.
 */
int ggate_drv_create(char *name, size_t namelen, size_t sectorsize,
    size_t mediasize, bool readonly, const char *info, ggate_drv_t *drv_) {
  struct ggate_drv *drv;
  struct g_gate_ctl_create ggiocreate;
  int fd, r;

  debug(20, "%s: name=%s, sectorsize=%zd, mediasize=%zd, readonly=%d, info=%s",
        __func__, name, sectorsize, mediasize, (int)readonly, info);

  if (*name != '\0') {
    if (namelen > sizeof(ggiocreate.gctl_name) - 1) {
      return -ENAMETOOLONG;
    }
  }

  /*
   * We communicate with ggate via /dev/ggctl. Open it.
   */
  fd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
  if (fd == -1) {
    err("failed to open /dev/" G_GATE_CTL_NAME);
    return -errno;
  }

  drv = calloc(1, sizeof(*drv));
  if (drv == NULL) {
    /* BUG FIX: this path previously did 'errno = -ENOMEM' and then
     * 'return -errno', yielding a *positive* ENOMEM in violation of the
     * negative-errno convention used everywhere else in this file. */
    r = -ENOMEM;
    goto fail_close;
  }

  /*
   * Create provider.
   */
  memset(&ggiocreate, 0, sizeof(ggiocreate));
  ggiocreate.gctl_version = G_GATE_VERSION;
  ggiocreate.gctl_mediasize = mediasize;
  ggiocreate.gctl_sectorsize = sectorsize;
  ggiocreate.gctl_flags = readonly ? G_GATE_FLAG_READONLY : 0;
  ggiocreate.gctl_maxcount = 0;
  ggiocreate.gctl_timeout = 0;
  if (*name != '\0') {
    ggiocreate.gctl_unit = G_GATE_NAME_GIVEN;
    strlcpy(ggiocreate.gctl_name, name, sizeof(ggiocreate.gctl_name));
  } else {
    ggiocreate.gctl_unit = G_GATE_UNIT_AUTO;
  }
  strlcpy(ggiocreate.gctl_info, info, sizeof(ggiocreate.gctl_info));
  if (ioctl(fd, G_GATE_CMD_CREATE, &ggiocreate) == -1) {
    /* capture errno before err()/free()/close() can clobber it */
    r = -errno;
    err("failed to create " G_GATE_PROVIDER_NAME " device");
    goto fail;
  }

  debug(20, "%s: created, unit: %d, name: %s", __func__, ggiocreate.gctl_unit,
        ggiocreate.gctl_name);

  drv->fd = fd;
  drv->unit = ggiocreate.gctl_unit;
  *drv_ = drv;

  if (*name == '\0') {
    snprintf(name, namelen, "%s%d", G_GATE_PROVIDER_NAME, drv->unit);
  }
  return 0;

fail:
  free(drv);
fail_close:
  close(fd);
  return r;
}
/* Force-destroy the provider owned by 'drv_' and free the handle.  This
 * function deliberately preserves the caller's errno so it can be used on
 * error-cleanup paths without disturbing the pending error code.
 * NOTE(review): drv->fd (the /dev/ggctl descriptor opened in
 * ggate_drv_create()) is not closed here -- looks like a descriptor leak
 * unless another thread still needs it; verify against callers. */
void ggate_drv_destroy(ggate_drv_t drv_) {
  struct ggate_drv *drv = (struct ggate_drv *)drv_;
  struct g_gate_ctl_destroy ggiodestroy;

  debug(20, "%s %p", __func__, drv);

  memset(&ggiodestroy, 0, sizeof(ggiodestroy));
  ggiodestroy.gctl_version = G_GATE_VERSION;
  ggiodestroy.gctl_unit = drv->unit;
  ggiodestroy.gctl_force = 1;

  // Remember errno.
  int rerrno = errno;

  int r = ioctl(drv->fd, G_GATE_CMD_DESTROY, &ggiodestroy);
  if (r == -1) {
    err("failed to destroy /dev/%s%d device", G_GATE_PROVIDER_NAME,
        drv->unit);
  }
  // Restore errno.
  errno = rerrno;

  free(drv);
}
/* Change the provider's media size to 'newsize' bytes.
 * Returns 0 on success or a negative errno. */
int ggate_drv_resize(ggate_drv_t drv_, size_t newsize) {
  struct ggate_drv *drv = (struct ggate_drv *)drv_;
  struct g_gate_ctl_modify mod;
  int ret;

  debug(20, "%s %p: newsize=%zd", __func__, drv, newsize);

  memset(&mod, 0, sizeof(mod));
  mod.gctl_version = G_GATE_VERSION;
  mod.gctl_unit = drv->unit;
  mod.gctl_modify = GG_MODIFY_MEDIASIZE;
  mod.gctl_mediasize = newsize;

  ret = ioctl(drv->fd, G_GATE_CMD_MODIFY, &mod);
  if (ret == -1) {
    ret = -errno;
    err("failed to resize /dev/%s%d device", G_GATE_PROVIDER_NAME, drv->unit);
  }
  return ret;
}
int ggate_drv_kill(const char *devname) {
debug(20, "%s %s", __func__, devname);
int fd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
if (fd == -1) {
err("failed to open /dev/" G_GATE_CTL_NAME);
return -errno;
}
struct g_gate_ctl_destroy ggiodestroy;
memset(&ggiodestroy, 0, sizeof(ggiodestroy));
ggiodestroy.gctl_version = G_GATE_VERSION;
ggiodestroy.gctl_unit = G_GATE_NAME_GIVEN;
ggiodestroy.gctl_force = 1;
strlcpy(ggiodestroy.gctl_name, devname, sizeof(ggiodestroy.gctl_name));
int r = ioctl(fd, G_GATE_CMD_DESTROY, &ggiodestroy);
if (r == -1) {
r = -errno;
err("failed to destroy %s device", devname);
}
close(fd);
return r;
}
/*
 * Block waiting for the next I/O request from the kernel.
 *
 * On success stores a request handle in *req (to be completed and freed
 * via ggate_drv_send()) and returns 0.  Returns a negative errno on
 * failure; -ECANCELED means the device is being torn down and the caller
 * should exit gracefully.
 */
int ggate_drv_recv(ggate_drv_t drv_, ggate_drv_req_t *req) {
  struct ggate_drv *drv = (struct ggate_drv *)drv_;
  struct g_gate_ctl_io *ggio;
  int error, r;

  debug(20, "%s", __func__);

  ggio = calloc(1, sizeof(*ggio));
  if (ggio == NULL) {
    return -ENOMEM;
  }

  ggio->gctl_version = G_GATE_VERSION;
  ggio->gctl_unit = drv->unit;
  /* a request can never exceed MAXPHYS, so a fixed-size buffer suffices */
  ggio->gctl_data = malloc(MAXPHYS);
  if (ggio->gctl_data == NULL) {
    /* BUG FIX: this allocation was previously unchecked */
    free(ggio);
    return -ENOMEM;
  }
  ggio->gctl_length = MAXPHYS;

  debug(20, "%s: waiting for request from kernel", __func__);
  if (ioctl(drv->fd, G_GATE_CMD_START, ggio) == -1) {
    /* BUG FIX: capture errno before err() can clobber it, and free the
     * request instead of leaking ggio and its data buffer on this path */
    r = -errno;
    err("%s: G_GATE_CMD_START failed", __func__);
    goto fail;
  }

  debug(20, "%s: got request from kernel: "
        "unit=%d, seq=%ju, cmd=%u, offset=%ju, length=%ju, error=%d, data=%p",
        __func__, ggio->gctl_unit, (uintmax_t)ggio->gctl_seq, ggio->gctl_cmd,
        (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length,
        ggio->gctl_error, ggio->gctl_data);

  error = ggio->gctl_error;
  switch (error) {
  case 0:
    break;
  case ECANCELED:
    debug(10, "%s: canceled: exit gracefully", __func__);
    r = -error;
    goto fail;
  case ENOMEM:
    /*
     * Buffer too small? Impossible, we allocate MAXPHYS
     * bytes - request can't be bigger than that.
     */
    /* FALLTHROUGH */
  case ENXIO:
  default:
    errno = error;
    err("%s: G_GATE_CMD_START failed", __func__);
    r = -error;
    goto fail;
  }

  *req = ggio;
  return 0;

fail:
  free(ggio->gctl_data);
  free(ggio);
  return r;
}
/* Hand a completed request back to the kernel.  Always consumes (frees)
 * the request and its data buffer.  Returns 0 or a negative errno. */
int ggate_drv_send(ggate_drv_t drv_, ggate_drv_req_t req) {
  struct ggate_drv *drv = (struct ggate_drv *)drv_;
  struct g_gate_ctl_io *ggio = (struct g_gate_ctl_io *)req;
  int ret = 0;

  debug(20, "%s: send request to kernel: "
        "unit=%d, seq=%ju, cmd=%u, offset=%ju, length=%ju, error=%d, data=%p",
        __func__, ggio->gctl_unit, (uintmax_t)ggio->gctl_seq, ggio->gctl_cmd,
        (uintmax_t)ggio->gctl_offset, (uintmax_t)ggio->gctl_length,
        ggio->gctl_error, ggio->gctl_data);

  if (ioctl(drv->fd, G_GATE_CMD_DONE, ggio) == -1) {
    err("%s: G_GATE_CMD_DONE failed", __func__);
    ret = -errno;
  }

  free(ggio->gctl_data);
  free(ggio);
  return ret;
}
/* Look up a geom config value by key; returns "" when the key is absent. */
static const char * get_conf(struct ggeom *gp, const char *name) {
  struct gconfig *cfg;

  LIST_FOREACH(cfg, &gp->lg_config, lg_config) {
    if (strcmp(cfg->lg_name, name) == 0) {
      return cfg->lg_val;
    }
  }
  return "";
}
/* Enumerate existing ggate devices.  On input *size is the capacity of
 * 'info'; on output it is the number of devices found.  Returns 0 on
 * success, -ERANGE when the caller's array is too small (with *size set
 * to the required count), or a negative errno on geom failure. */
int ggate_drv_list(struct ggate_drv_info *info, size_t *size) {
  struct gmesh mesh;
  struct gclass *class;
  struct ggeom *gp;
  int r;
  size_t max_size;

  r = geom_gettree(&mesh);
  if (r != 0) {
    return -errno;
  }

  max_size = *size;
  *size = 0;

  LIST_FOREACH(class, &mesh.lg_class, lg_class) {
    if (strcmp(class->lg_name, G_GATE_CLASS_NAME) == 0) {
      /* first pass: count devices so we can detect overflow up front */
      LIST_FOREACH(gp, &class->lg_geom, lg_geom) {
        (*size)++;
      }
      if (*size > max_size) {
        r = -ERANGE;
        goto done;
      }
      /* second pass: fill in the caller's array */
      LIST_FOREACH(gp, &class->lg_geom, lg_geom) {
        strlcpy(info->id, get_conf(gp, "unit"), sizeof(info->id));
        strlcpy(info->name, gp->lg_name, sizeof(info->name));
        strlcpy(info->info, get_conf(gp, "info"), sizeof(info->info));
        info++;
      }
    }
  }

done:
  geom_deletetree(&mesh);
  return r;
}
| 9,251 | 23.347368 | 79 | c |
null | ceph-main/src/tools/rbd_ggate/ggate_drv.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_GGATE_GGATE_DRV_H
#define CEPH_RBD_GGATE_GGATE_DRV_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/param.h>
#include <stdbool.h>
#include <stdint.h>
typedef void *ggate_drv_t;
typedef void *ggate_drv_req_t;
/*
* GGATE driver commands. They are mapped to GgateReq::Command.
*/
enum {
  GGATE_DRV_CMD_UNKNOWN = 0,  /* unrecognized BIO command */
  GGATE_DRV_CMD_WRITE = 1,    /* BIO_WRITE */
  GGATE_DRV_CMD_READ = 2,     /* BIO_READ */
  GGATE_DRV_CMD_FLUSH = 3,    /* BIO_FLUSH */
  GGATE_DRV_CMD_DISCARD = 4,  /* BIO_DELETE */
};
/* Summary of one existing ggate device, as filled by ggate_drv_list(). */
struct ggate_drv_info {
  char id[16];          /* geom "unit" config value */
  char name[NAME_MAX];  /* provider name, e.g. "ggate0" */
  char info[2048]; /* G_GATE_INFOSIZE */
};
uint64_t ggate_drv_req_id(ggate_drv_req_t req);
int ggate_drv_req_cmd(ggate_drv_req_t req);
void *ggate_drv_req_buf(ggate_drv_req_t req);
size_t ggate_drv_req_length(ggate_drv_req_t req);
uint64_t ggate_drv_req_offset(ggate_drv_req_t req);
int ggate_drv_req_error(ggate_drv_req_t req);
void ggate_drv_req_set_error(ggate_drv_req_t req, int error);
void *ggate_drv_req_release_buf(ggate_drv_req_t req);
int ggate_drv_load();
int ggate_drv_create(char *name, size_t namelen, size_t sectorsize,
size_t mediasize, bool readonly, const char *info, ggate_drv_t *drv);
void ggate_drv_destroy(ggate_drv_t drv);
int ggate_drv_recv(ggate_drv_t drv, ggate_drv_req_t *req);
int ggate_drv_send(ggate_drv_t drv, ggate_drv_req_t req);
int ggate_drv_resize(ggate_drv_t drv, size_t newsize);
int ggate_drv_kill(const char *devname);
int ggate_drv_list(struct ggate_drv_info *info, size_t *size);
#ifdef __cplusplus
}
#endif
#endif // CEPH_RBD_GGATE_GGATE_DRV_H
| 1,618 | 23.907692 | 73 | h |
null | ceph-main/src/tools/rbd_mirror/CancelableRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_CANCELABLE_REQUEST_H
#define CEPH_RBD_MIRROR_CANCELABLE_REQUEST_H
#include "common/RefCountedObj.h"
#include "include/Context.h"
namespace rbd {
namespace mirror {
// Base class for asynchronous, cancelable requests.  Reference counted;
// finish() invokes the completion context and drops the self-reference.
class CancelableRequest : public RefCountedObject {
public:
  CancelableRequest(const std::string& name, CephContext *cct,
                    Context *on_finish)
    : RefCountedObject(cct), m_name(name), m_cct(cct),
      m_on_finish(on_finish) {
  }

  // Start the request.
  virtual void send() = 0;
  // Best-effort cancellation; no-op by default.
  virtual void cancel() {}

protected:
  // Complete the request: log, fire the callback (if any), then release
  // the reference taken for the lifetime of the operation.
  virtual void finish(int r) {
    if (m_cct) {
      lsubdout(m_cct, rbd_mirror, 20) << m_name << "::finish: r=" << r << dendl;
    }

    if (m_on_finish) {
      m_on_finish->complete(r);
    }
    put();
  }

private:
  const std::string m_name;  // used for log messages
  CephContext *m_cct;
  Context *m_on_finish;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_CANCELABLE_REQUEST_H
| 998 | 21.2 | 80 | h |
null | ceph-main/src/tools/rbd_mirror/ClusterWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_CLUSTER_WATCHER_H
#define CEPH_RBD_MIRROR_CLUSTER_WATCHER_H
#include <map>
#include <memory>
#include <set>
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "include/rados/librados.hpp"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/service_daemon/Types.h"
#include <unordered_map>
namespace librbd { struct ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> class ServiceDaemon;
/**
* Tracks mirroring configuration for pools in a single
* cluster.
*/
class ClusterWatcher {
public:
  // Orders peers by uuid so the Peers set has a stable ordering.
  struct PeerSpecCompare {
    bool operator()(const PeerSpec& lhs, const PeerSpec& rhs) const {
      return (lhs.uuid < rhs.uuid);
    }
  };
  typedef std::set<PeerSpec, PeerSpecCompare> Peers;
  typedef std::map<int64_t, Peers>  PoolPeers;  // pool id -> mirror peers

  ClusterWatcher(RadosRef cluster, ceph::mutex &lock,
                 ServiceDaemon<librbd::ImageCtx>* service_daemon);
  ~ClusterWatcher() = default;
  ClusterWatcher(const ClusterWatcher&) = delete;
  ClusterWatcher& operator=(const ClusterWatcher&) = delete;

  // Caller controls frequency of calls
  void refresh_pools();

  const PoolPeers& get_pool_peers() const;
  std::string get_site_name() const;

private:
  typedef std::unordered_map<int64_t, service_daemon::CalloutId> ServicePools;

  RadosRef m_cluster;
  ceph::mutex &m_lock;
  ServiceDaemon<librbd::ImageCtx>* m_service_daemon;

  ServicePools m_service_pools;  // per-pool health callouts
  PoolPeers m_pool_peers;        // result of the last refresh_pools()
  std::string m_site_name;

  void read_pool_peers(PoolPeers *pool_peers);

  int read_site_name(std::string* site_name);

  int resolve_peer_site_config_keys(
      int64_t pool_id, const std::string& pool_name, PeerSpec* peer);
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_CLUSTER_WATCHER_H
| 1,918 | 24.932432 | 78 | h |
null | ceph-main/src/tools/rbd_mirror/ImageDeleter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_RBD_MIRROR_IMAGE_DELETER_H
#define CEPH_RBD_MIRROR_IMAGE_DELETER_H
#include "include/utime.h"
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/image_deleter/Types.h"
#include <atomic>
#include <deque>
#include <iosfwd>
#include <map>
#include <memory>
#include <vector>
class AdminSocketHook;
class Context;
namespace librbd {
struct ImageCtx;
namespace asio { struct ContextWQ; }
} // namespace librbd
namespace rbd {
namespace mirror {
template <typename> class ServiceDaemon;
template <typename> class Threads;
template <typename> class Throttler;
namespace image_deleter { template <typename> struct TrashWatcher; }
/**
* Manage deletion of non-primary images.
*/
template <typename ImageCtxT = librbd::ImageCtx>
class ImageDeleter {
public:
  static ImageDeleter* create(
      librados::IoCtx& local_io_ctx, Threads<librbd::ImageCtx>* threads,
      Throttler<librbd::ImageCtx>* image_deletion_throttler,
      ServiceDaemon<librbd::ImageCtx>* service_daemon) {
    return new ImageDeleter(local_io_ctx, threads, image_deletion_throttler,
                            service_daemon);
  }

  ImageDeleter(librados::IoCtx& local_io_ctx,
               Threads<librbd::ImageCtx>* threads,
               Throttler<librbd::ImageCtx>* image_deletion_throttler,
               ServiceDaemon<librbd::ImageCtx>* service_daemon);

  ImageDeleter(const ImageDeleter&) = delete;
  ImageDeleter& operator=(const ImageDeleter&) = delete;

  // Move the image identified by global_image_id to the trash, optionally
  // flagging it for resync.
  static void trash_move(librados::IoCtx& local_io_ctx,
                         const std::string& global_image_id, bool resync,
                         librbd::asio::ContextWQ* work_queue,
                         Context* on_finish);

  void init(Context* on_finish);
  void shut_down(Context* on_finish);

  void print_status(Formatter *f);

  // for testing purposes
  void wait_for_deletion(const std::string &image_id,
                         bool scheduled_only, Context* on_finish);

  std::vector<std::string> get_delete_queue_items();
  std::vector<std::pair<std::string, int> > get_failed_queue_items();

  inline void set_busy_timer_interval(double interval) {
    m_busy_interval = interval;
  }

private:
  using clock_t = ceph::real_clock;

  // Forwards trash notifications from the TrashWatcher to the deleter.
  struct TrashListener : public image_deleter::TrashListener {
    ImageDeleter *image_deleter;

    TrashListener(ImageDeleter *image_deleter) : image_deleter(image_deleter) {
    }

    void handle_trash_image(const std::string& image_id,
                            const ceph::real_clock::time_point& deferment_end_time) override {
      image_deleter->handle_trash_image(image_id, deferment_end_time);
    }
  };

  // Per-image deletion state tracked through the queues below.
  struct DeleteInfo {
    std::string image_id;

    image_deleter::ErrorResult error_result = {};
    int error_code = 0;
    clock_t::time_point retry_time;  // earliest time for the next attempt
    int retries = 0;

    DeleteInfo(const std::string& image_id)
      : image_id(image_id) {
    }

    // Entries are considered equal when they refer to the same image.
    inline bool operator==(const DeleteInfo& delete_info) const {
      return (image_id == delete_info.image_id);
    }

    friend std::ostream& operator<<(std::ostream& os, DeleteInfo& delete_info) {
      os << "[image_id=" << delete_info.image_id << "]";
      return os;
    }

    void print_status(Formatter *f,
                      bool print_failure_info=false);
  };
  typedef std::shared_ptr<DeleteInfo> DeleteInfoRef;
  typedef std::deque<DeleteInfoRef> DeleteQueue;
  typedef std::map<std::string, Context*> OnDeleteContexts;

  librados::IoCtx& m_local_io_ctx;
  Threads<librbd::ImageCtx>* m_threads;
  Throttler<librbd::ImageCtx>* m_image_deletion_throttler;
  ServiceDaemon<librbd::ImageCtx>* m_service_daemon;

  image_deleter::TrashWatcher<ImageCtxT>* m_trash_watcher = nullptr;
  TrashListener m_trash_listener;

  std::atomic<unsigned> m_running { 1 };  // cleared on shut_down

  double m_busy_interval = 1;  // retry timer interval (seconds)

  AsyncOpTracker m_async_op_tracker;

  // m_lock protects the queues and contexts below.
  ceph::mutex m_lock;
  DeleteQueue m_delete_queue;
  DeleteQueue m_retry_delete_queue;
  DeleteQueue m_in_flight_delete_queue;

  OnDeleteContexts m_on_delete_contexts;

  AdminSocketHook *m_asok_hook = nullptr;

  Context *m_timer_ctx = nullptr;  // pending retry timer event, if any

  bool process_image_delete();

  void complete_active_delete(DeleteInfoRef* delete_info, int r);
  void enqueue_failed_delete(DeleteInfoRef* delete_info, int error_code,
                             double retry_delay);
  DeleteInfoRef find_delete_info(const std::string &image_id);

  void remove_images();
  void remove_image(DeleteInfoRef delete_info);
  void handle_remove_image(DeleteInfoRef delete_info, int r);

  void schedule_retry_timer();
  void cancel_retry_timer();
  void handle_retry_timer();

  void handle_trash_image(const std::string& image_id,
                          const clock_t::time_point& deferment_end_time);

  void shut_down_trash_watcher(Context* on_finish);
  void wait_for_ops(Context* on_finish);
  void cancel_all_deletions(Context* on_finish);

  void notify_on_delete(const std::string& image_id, int r);
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::ImageDeleter<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_IMAGE_DELETER_H
| 5,576 | 28.352632 | 80 | h |
null | ceph-main/src/tools/rbd_mirror/ImageMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_IMAGE_MAP_H
#define CEPH_RBD_MIRROR_IMAGE_MAP_H
#include <vector>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "common/AsyncOpTracker.h"
#include "cls/rbd/cls_rbd_types.h"
#include "include/rados/librados.hpp"
#include "image_map/Policy.h"
#include "image_map/Types.h"
namespace librbd { class ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct Threads;
// ImageMap: decides which rbd-mirror daemon instance is responsible for
// each mirrored image (keyed by global image id), persists that mapping
// (cls::rbd::MirrorImageMap entries loaded via C_LoadMap), and notifies
// the registered image_map::Listener when images must be acquired,
// released, or removed. The actual placement decision is delegated to a
// pluggable image_map::Policy.
template <typename ImageCtxT = librbd::ImageCtx>
class ImageMap {
public:
  // Factory helper; callers own the returned pointer.
  static ImageMap *create(librados::IoCtx &ioctx, Threads<ImageCtxT> *threads,
                          const std::string& instance_id,
                          image_map::Listener &listener) {
    return new ImageMap(ioctx, threads, instance_id, listener);
  }
  ~ImageMap();
  // init (load) the instance map from disk
  void init(Context *on_finish);
  // shut down map operations
  void shut_down(Context *on_finish);
  // update (add/remove) images
  void update_images(const std::string &mirror_uuid,
                     std::set<std::string> &&added_global_image_ids,
                     std::set<std::string> &&removed_global_image_ids);
  // add/remove instances
  void update_instances_added(const std::vector<std::string> &instances);
  void update_instances_removed(const std::vector<std::string> &instances);
private:
  struct C_NotifyInstance;
  ImageMap(librados::IoCtx &ioctx, Threads<ImageCtxT> *threads,
           const std::string& instance_id, image_map::Listener &listener);
  // A single mapping change: image -> instance, stamped with the time the
  // mapping was made (defaults to "now" via the delegating constructor).
  struct Update {
    std::string global_image_id;
    std::string instance_id;
    utime_t mapped_time;
    Update(const std::string &global_image_id, const std::string &instance_id,
           utime_t mapped_time)
      : global_image_id(global_image_id),
        instance_id(instance_id),
        mapped_time(mapped_time) {
    }
    Update(const std::string &global_image_id, const std::string &instance_id)
      : Update(global_image_id, instance_id, ceph_clock_now()) {
    }
    friend std::ostream& operator<<(std::ostream& os,
                                    const Update& update) {
      os << "{global_image_id=" << update.global_image_id << ", "
         << "instance_id=" << update.instance_id << "}";
      return os;
    }
  };
  typedef std::list<Update> Updates;
  // Lock ordering: m_threads->timer_lock, m_lock
  librados::IoCtx &m_ioctx;
  Threads<ImageCtxT> *m_threads;
  std::string m_instance_id;
  image_map::Listener &m_listener;
  std::unique_ptr<image_map::Policy> m_policy; // our mapping policy
  Context *m_timer_task = nullptr;  // pending timer callback, if scheduled
  ceph::mutex m_lock;
  bool m_shutting_down = false;
  AsyncOpTracker m_async_op_tracker;
  // global_image_id -> registered peers ("" == local, remote otherwise)
  std::map<std::string, std::set<std::string> > m_peer_map;
  std::set<std::string> m_global_image_ids;
  Context *m_rebalance_task = nullptr;  // pending rebalance callback, if scheduled
  // Completion used to load the persisted image mapping; on success the
  // decoded mapping is handed to handle_load() before completing on_finish.
  struct C_LoadMap : Context {
    ImageMap *image_map;
    Context *on_finish;
    std::map<std::string, cls::rbd::MirrorImageMap> image_mapping;
    C_LoadMap(ImageMap *image_map, Context *on_finish)
      : image_map(image_map),
        on_finish(on_finish) {
    }
    void finish(int r) override {
      if (r == 0) {
        image_map->handle_load(image_mapping);
      }
      image_map->finish_async_op();
      on_finish->complete(r);
    }
  };
  // async op-tracker helper routines
  void start_async_op() {
    m_async_op_tracker.start_op();
  }
  void finish_async_op() {
    m_async_op_tracker.finish_op();
  }
  void wait_for_async_ops(Context *on_finish) {
    m_async_op_tracker.wait_for_ops(on_finish);
  }
  void handle_peer_ack(const std::string &global_image_id, int r);
  void handle_peer_ack_remove(const std::string &global_image_id, int r);
  void handle_load(const std::map<std::string, cls::rbd::MirrorImageMap> &image_mapping);
  void handle_update_request(const Updates &updates,
                             const std::set<std::string> &remove_global_image_ids, int r);
  // continue (retry or resume depending on state machine) processing
  // current action.
  void continue_action(const std::set<std::string> &global_image_ids, int r);
  // schedule an image for update
  void schedule_action(const std::string &global_image_id);
  // timer-driven batching of pending updates; the overload taking
  // timer_lock is the variant invoked with the timer lock already held
  void schedule_update_task();
  void schedule_update_task(const ceph::mutex &timer_lock);
  void process_updates();
  void update_image_mapping(Updates&& map_updates,
                            std::set<std::string>&& map_removals);
  // periodic redistribution of images across instances
  void rebalance();
  void schedule_rebalance_task();
  // listener notifications (acquire/release/remove of images)
  void notify_listener_acquire_release_images(const Updates &acquire, const Updates &release);
  void notify_listener_remove_images(const std::string &mirror_uuid,
                                     const Updates &remove);
  void update_images_added(const std::string &mirror_uuid,
                           const std::set<std::string> &global_image_ids);
  void update_images_removed(const std::string &mirror_uuid,
                             const std::set<std::string> &global_image_ids);
  void filter_instance_ids(const std::vector<std::string> &instance_ids,
                           std::vector<std::string> *filtered_instance_ids,
                           bool removal) const;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_IMAGE_MAP_H
| 5,441 | 29.745763 | 94 | h |
null | ceph-main/src/tools/rbd_mirror/ImageReplayer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_IMAGE_REPLAYER_H
#define CEPH_RBD_MIRROR_IMAGE_REPLAYER_H
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "ProgressContext.h"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/image_replayer/Types.h"
#include <boost/optional.hpp>
#include <string>
class AdminSocketHook;
namespace journal { struct CacheManagerHandler; }
namespace librbd { class ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct InstanceWatcher;
template <typename> struct MirrorStatusUpdater;
struct PoolMetaCache;
template <typename> struct Threads;
namespace image_replayer {
class Replayer;
template <typename> class BootstrapRequest;
template <typename> class StateBuilder;
} // namespace image_replayer
/**
 * Replays changes from a remote cluster for a single image.
 *
 * One ImageReplayer instance exists per mirrored image. It bootstraps the
 * local image from its remote peer, then drives an image_replayer::Replayer
 * through the state machine documented below. Lifecycle: create() ->
 * start()/stop()/restart() -> destroy(). Most state is guarded by m_lock.
 */
template <typename ImageCtxT = librbd::ImageCtx>
class ImageReplayer {
public:
  // Factory helper; pair with destroy().
  static ImageReplayer *create(
      librados::IoCtx &local_io_ctx, const std::string &local_mirror_uuid,
      const std::string &global_image_id, Threads<ImageCtxT> *threads,
      InstanceWatcher<ImageCtxT> *instance_watcher,
      MirrorStatusUpdater<ImageCtxT>* local_status_updater,
      journal::CacheManagerHandler *cache_manager_handler,
      PoolMetaCache* pool_meta_cache) {
    return new ImageReplayer(local_io_ctx, local_mirror_uuid, global_image_id,
                             threads, instance_watcher, local_status_updater,
                             cache_manager_handler, pool_meta_cache);
  }
  void destroy() {
    delete this;
  }
  ImageReplayer(librados::IoCtx &local_io_ctx,
                const std::string &local_mirror_uuid,
                const std::string &global_image_id,
                Threads<ImageCtxT> *threads,
                InstanceWatcher<ImageCtxT> *instance_watcher,
                MirrorStatusUpdater<ImageCtxT>* local_status_updater,
                journal::CacheManagerHandler *cache_manager_handler,
                PoolMetaCache* pool_meta_cache);
  virtual ~ImageReplayer();
  ImageReplayer(const ImageReplayer&) = delete;
  ImageReplayer& operator=(const ImageReplayer&) = delete;
  // State queries; each takes m_lock and consults the corresponding
  // unlocked helper (is_stopped_() etc.) below.
  bool is_stopped() { std::lock_guard l{m_lock}; return is_stopped_(); }
  bool is_running() { std::lock_guard l{m_lock}; return is_running_(); }
  bool is_replaying() { std::lock_guard l{m_lock}; return is_replaying_(); }
  std::string get_name() { std::lock_guard l{m_lock}; return m_image_spec; };
  void set_state_description(int r, const std::string &desc);
  // TODO temporary until policy handles release of image replayers
  inline bool is_finished() const {
    std::lock_guard locker{m_lock};
    return m_finished;
  }
  inline void set_finished(bool finished) {
    std::lock_guard locker{m_lock};
    m_finished = finished;
  }
  // True when the last operation failed because this client was
  // blocklisted by the cluster.
  inline bool is_blocklisted() const {
    std::lock_guard locker{m_lock};
    return (m_last_r == -EBLOCKLISTED);
  }
  image_replayer::HealthState get_health_state() const;
  void add_peer(const Peer<ImageCtxT>& peer);
  inline int64_t get_local_pool_id() const {
    return m_local_io_ctx.get_id();
  }
  inline const std::string& get_global_image_id() const {
    return m_global_image_id;
  }
  // manual == user-initiated (vs policy-driven); restart == part of a
  // restart() sequence rather than a standalone start/stop.
  void start(Context *on_finish, bool manual = false, bool restart = false);
  void stop(Context *on_finish, bool manual = false, bool restart = false);
  void restart(Context *on_finish = nullptr);
  void flush();
  void print_status(Formatter *f);
protected:
  /**
   * @verbatim
   *                   (error)
   * <uninitialized> <------------------------------------ FAIL
   *    |                                                   ^
   *    v                                                   *
   * <starting>                                             *
   *    |                                                   *
   *    v                                           (error) *
   * BOOTSTRAP_IMAGE  * * * * * * * * * * * * * * * * * * * *
   *    |                                                   *
   *    v                                           (error) *
   * START_REPLAY * * * * * * * * * * * * * * * * * * * * * *
   *    |
   *    v
   * REPLAYING
   *    |
   *    v
   * JOURNAL_REPLAY_SHUT_DOWN
   *    |
   *    v
   * LOCAL_IMAGE_CLOSE
   *    |
   *    v
   * <stopped>
   *
   * @endverbatim
   */
  void on_start_fail(int r, const std::string &desc);
  bool on_start_interrupted();
  bool on_start_interrupted(ceph::mutex& lock);
  void on_stop_journal_replay(int r = 0, const std::string &desc = "");
  bool on_replay_interrupted();
private:
  typedef std::set<Peer<ImageCtxT>> Peers;
  typedef std::list<Context *> Contexts;
  // Coarse lifecycle state protected by m_lock.
  enum State {
    STATE_UNKNOWN,
    STATE_STARTING,
    STATE_REPLAYING,
    STATE_STOPPING,
    STATE_STOPPED,
  };
  struct ReplayerListener;
  typedef boost::optional<State> OptionalState;
  typedef boost::optional<cls::rbd::MirrorImageStatusState>
      OptionalMirrorImageStatusState;
  // Forwards bootstrap progress descriptions into the owning replayer.
  class BootstrapProgressContext : public ProgressContext {
  public:
    BootstrapProgressContext(ImageReplayer<ImageCtxT> *replayer) :
      replayer(replayer) {
    }
    void update_progress(const std::string &description,
                         bool flush = true) override;
  private:
    ImageReplayer<ImageCtxT> *replayer;
  };
  librados::IoCtx &m_local_io_ctx;
  std::string m_local_mirror_uuid;
  std::string m_global_image_id;
  Threads<ImageCtxT> *m_threads;
  InstanceWatcher<ImageCtxT> *m_instance_watcher;
  MirrorStatusUpdater<ImageCtxT>* m_local_status_updater;
  journal::CacheManagerHandler *m_cache_manager_handler;
  PoolMetaCache* m_pool_meta_cache;
  Peers m_peers;
  Peer<ImageCtxT> m_remote_image_peer;
  std::string m_local_image_name;
  std::string m_image_spec;
  mutable ceph::mutex m_lock;          // guards the mutable state below
  State m_state = STATE_STOPPED;
  std::string m_state_desc;
  // boost::make_optional(false, ...) yields an *empty* optional of the
  // right type (avoids maybe-uninitialized warnings).
  OptionalMirrorImageStatusState m_mirror_image_status_state =
    boost::make_optional(false, cls::rbd::MIRROR_IMAGE_STATUS_STATE_UNKNOWN);
  int m_last_r = 0;                    // result of the most recent operation
  BootstrapProgressContext m_progress_cxt;
  bool m_finished = false;
  bool m_delete_in_progress = false;
  bool m_delete_requested = false;
  bool m_resync_requested = false;
  bool m_restart_requested = false;
  bool m_status_removed = false;
  image_replayer::StateBuilder<ImageCtxT>* m_state_builder = nullptr;
  image_replayer::Replayer* m_replayer = nullptr;
  ReplayerListener* m_replayer_listener = nullptr;
  Context *m_on_start_finish = nullptr;   // completion for an in-flight start()
  Contexts m_on_stop_contexts;            // completions for in-flight stop()s
  bool m_stop_requested = false;
  bool m_manual_stop = false;
  AdminSocketHook *m_asok_hook = nullptr;
  image_replayer::BootstrapRequest<ImageCtxT> *m_bootstrap_request = nullptr;
  AsyncOpTracker m_in_flight_op_tracker;
  Context* m_update_status_task = nullptr;
  static std::string to_string(const State state);
  // Unlocked state helpers: caller must hold m_lock.
  bool is_stopped_() const {
    return m_state == STATE_STOPPED;
  }
  bool is_running_() const {
    return !is_stopped_() && m_state != STATE_STOPPING && !m_stop_requested;
  }
  bool is_replaying_() const {
    return (m_state == STATE_REPLAYING);
  }
  // periodic mirror-image status reporting
  void schedule_update_mirror_image_replay_status();
  void handle_update_mirror_image_replay_status(int r);
  void cancel_update_mirror_image_replay_status();
  void update_mirror_image_status(bool force, const OptionalState &state);
  void set_mirror_image_status_update(bool force, const OptionalState &state);
  void shut_down(int r);
  void handle_shut_down(int r);
  // bootstrap + replay state-machine steps (see diagram above)
  void bootstrap();
  void handle_bootstrap(int r);
  void start_replay();
  void handle_start_replay(int r);
  void handle_replayer_notification();
  // admin socket ("rbd mirror status" etc.) hook management
  void register_admin_socket_hook();
  void unregister_admin_socket_hook();
  void reregister_admin_socket_hook();
  void remove_image_status(bool force, Context *on_finish);
  void remove_image_status_remote(bool force, Context *on_finish);
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::ImageReplayer<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_IMAGE_REPLAYER_H
| 8,195 | 28.912409 | 78 | h |
null | ceph-main/src/tools/rbd_mirror/ImageSync.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef RBD_MIRROR_IMAGE_SYNC_H
#define RBD_MIRROR_IMAGE_SYNC_H
#include "include/int_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/CancelableRequest.h"
#include "tools/rbd_mirror/image_sync/Types.h"
class Context;
namespace journal { class Journaler; }
namespace librbd { template <typename> class DeepCopyRequest; }
namespace rbd {
namespace mirror {
class ProgressContext;
template <typename> class InstanceWatcher;
template <typename> class Threads;
namespace image_sync { struct SyncPointHandler; }
// ImageSync: one-shot cancelable request that performs a full/partial deep
// copy of a remote image to its local counterpart, tracking progress via
// sync points (see the state diagram below). Throttled through the
// InstanceWatcher's sync-request mechanism and reported through the
// supplied ProgressContext.
template <typename ImageCtxT = librbd::ImageCtx>
class ImageSync : public CancelableRequest {
public:
  // Factory helper; the request deletes itself on completion (standard
  // CancelableRequest semantics — TODO confirm against base class).
  static ImageSync* create(
      Threads<ImageCtxT>* threads,
      ImageCtxT *local_image_ctx,
      ImageCtxT *remote_image_ctx,
      const std::string &local_mirror_uuid,
      image_sync::SyncPointHandler* sync_point_handler,
      InstanceWatcher<ImageCtxT> *instance_watcher,
      ProgressContext *progress_ctx,
      Context *on_finish) {
    return new ImageSync(threads, local_image_ctx, remote_image_ctx,
                         local_mirror_uuid, sync_point_handler,
                         instance_watcher, progress_ctx, on_finish);
  }
  ImageSync(
      Threads<ImageCtxT>* threads,
      ImageCtxT *local_image_ctx,
      ImageCtxT *remote_image_ctx,
      const std::string &local_mirror_uuid,
      image_sync::SyncPointHandler* sync_point_handler,
      InstanceWatcher<ImageCtxT> *instance_watcher,
      ProgressContext *progress_ctx,
      Context *on_finish);
  ~ImageSync() override;
  void send() override;    // kick off the state machine
  void cancel() override;  // request cancellation of an in-flight sync
protected:
  void finish(int r) override;
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * NOTIFY_SYNC_REQUEST
   *    |
   *    v
   * PRUNE_CATCH_UP_SYNC_POINT
   *    |
   *    v
   * CREATE_SYNC_POINT (skip if already exists and
   *    |               not disconnected)
   *    v
   * COPY_IMAGE . . . . . . . . . . . . . .
   *    |                                 .
   *    v                                 .
   * FLUSH_SYNC_POINT                     .
   *    |                                 . (image sync canceled)
   *    v                                 .
   * PRUNE_SYNC_POINTS                    .
   *    |                                 .
   *    v                                 .
   * <finish> < . . . . . . . . . . . . . .
   *
   * @endverbatim
   */
  class ImageCopyProgressHandler;
  Threads<ImageCtxT>* m_threads;
  ImageCtxT *m_local_image_ctx;
  ImageCtxT *m_remote_image_ctx;
  std::string m_local_mirror_uuid;
  image_sync::SyncPointHandler* m_sync_point_handler;
  InstanceWatcher<ImageCtxT> *m_instance_watcher;
  ProgressContext *m_progress_ctx;
  ceph::mutex m_lock;                  // guards cancel/progress state
  bool m_canceled = false;
  librbd::DeepCopyRequest<ImageCtxT> *m_image_copy_request = nullptr;
  ImageCopyProgressHandler *m_image_copy_prog_handler = nullptr;
  bool m_updating_sync_point = false;  // a sync-point update is in flight
  Context *m_update_sync_ctx = nullptr;
  double m_update_sync_point_interval;
  // deep-copy progress: current object / total objects
  uint64_t m_image_copy_object_no = 0;
  uint64_t m_image_copy_object_count = 0;
  // local copies used while updating sync points
  librbd::SnapSeqs m_snap_seqs_copy;
  image_sync::SyncPoints m_sync_points_copy;
  int m_ret_val = 0;
  // state-machine steps (send_* initiates, handle_* consumes the result)
  void send_notify_sync_request();
  void handle_notify_sync_request(int r);
  void send_prune_catch_up_sync_point();
  void handle_prune_catch_up_sync_point(int r);
  void send_create_sync_point();
  void handle_create_sync_point(int r);
  void send_update_max_object_count();
  void handle_update_max_object_count(int r);
  void send_copy_image();
  void handle_copy_image(int r);
  void handle_copy_image_update_progress(uint64_t object_no,
                                         uint64_t object_count);
  void send_update_sync_point();
  void handle_update_sync_point(int r);
  void send_flush_sync_point();
  void handle_flush_sync_point(int r);
  void send_prune_sync_points();
  void handle_prune_sync_points(int r);
  void update_progress(const std::string &description);
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::ImageSync<librbd::ImageCtx>;
#endif // RBD_MIRROR_IMAGE_SYNC_H
| 4,287 | 27.210526 | 70 | h |
null | ceph-main/src/tools/rbd_mirror/InstanceReplayer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef RBD_MIRROR_INSTANCE_REPLAYER_H
#define RBD_MIRROR_INSTANCE_REPLAYER_H
#include <map>
#include <sstream>
#include "common/AsyncOpTracker.h"
#include "common/Formatter.h"
#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/Types.h"
namespace journal { struct CacheManagerHandler; }
namespace librbd { class ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> class ImageReplayer;
template <typename> class InstanceWatcher;
template <typename> class MirrorStatusUpdater;
struct PoolMetaCache;
template <typename> class ServiceDaemon;
template <typename> struct Threads;
// InstanceReplayer: owns the set of ImageReplayer objects handled by this
// rbd-mirror daemon instance. Images are handed to it (acquire_image) or
// taken away (release_image) by the instance-level coordination layer; it
// starts/stops/restarts the per-image replayers and periodically checks
// their state via a timer task.
template <typename ImageCtxT = librbd::ImageCtx>
class InstanceReplayer {
public:
  // Factory helper; pair with destroy().
  static InstanceReplayer* create(
      librados::IoCtx &local_io_ctx, const std::string &local_mirror_uuid,
      Threads<ImageCtxT> *threads, ServiceDaemon<ImageCtxT> *service_daemon,
      MirrorStatusUpdater<ImageCtxT>* local_status_updater,
      journal::CacheManagerHandler *cache_manager_handler,
      PoolMetaCache* pool_meta_cache) {
    return new InstanceReplayer(local_io_ctx, local_mirror_uuid, threads,
                                service_daemon, local_status_updater,
                                cache_manager_handler, pool_meta_cache);
  }
  void destroy() {
    delete this;
  }
  InstanceReplayer(librados::IoCtx &local_io_ctx,
                   const std::string &local_mirror_uuid,
                   Threads<ImageCtxT> *threads,
                   ServiceDaemon<ImageCtxT> *service_daemon,
                   MirrorStatusUpdater<ImageCtxT>* local_status_updater,
                   journal::CacheManagerHandler *cache_manager_handler,
                   PoolMetaCache* pool_meta_cache);
  ~InstanceReplayer();
  bool is_blocklisted() const;
  // synchronous and asynchronous init/shutdown variants
  int init();
  void shut_down();
  void init(Context *on_finish);
  void shut_down(Context *on_finish);
  void add_peer(const Peer<ImageCtxT>& peer);
  // image ownership handoff (driven by the leader/image-map layer)
  void acquire_image(InstanceWatcher<ImageCtxT> *instance_watcher,
                     const std::string &global_image_id, Context *on_finish);
  void release_image(const std::string &global_image_id, Context *on_finish);
  void remove_peer_image(const std::string &global_image_id,
                         const std::string &peer_mirror_uuid,
                         Context *on_finish);
  void release_all(Context *on_finish);
  void print_status(Formatter *f);
  // bulk control of all owned image replayers
  void start();
  void stop();
  void restart();
  void flush();
  void stop(Context *on_finish);
private:
  /**
   * @verbatim
   *
   * <uninitialized> <-------------------\
   *    | (init)                         |                    (repeat for each
   *    v                             STOP_IMAGE_REPLAYER ---\  image replayer)
   * SCHEDULE_IMAGE_STATE_CHECK_TASK     ^         ^         |
   *    |                                |         |         |
   *    v          (shut_down)           |         \---------/
   * <initialized> -----------------> WAIT_FOR_OPS
   *
   * @endverbatim
   */
  typedef std::set<Peer<ImageCtxT>> Peers;
  librados::IoCtx &m_local_io_ctx;
  std::string m_local_mirror_uuid;
  Threads<ImageCtxT> *m_threads;
  ServiceDaemon<ImageCtxT> *m_service_daemon;
  MirrorStatusUpdater<ImageCtxT>* m_local_status_updater;
  journal::CacheManagerHandler *m_cache_manager_handler;
  PoolMetaCache* m_pool_meta_cache;
  mutable ceph::mutex m_lock;          // guards the containers/flags below
  AsyncOpTracker m_async_op_tracker;
  // global image id -> replayer for every image this instance owns
  std::map<std::string, ImageReplayer<ImageCtxT> *> m_image_replayers;
  Peers m_peers;
  Context *m_image_state_check_task = nullptr;  // periodic timer task
  Context *m_on_shut_down = nullptr;
  bool m_manual_stop = false;
  bool m_blocklisted = false;
  void wait_for_ops();
  void handle_wait_for_ops(int r);
  void start_image_replayer(ImageReplayer<ImageCtxT> *image_replayer);
  void queue_start_image_replayers();
  void start_image_replayers(int r);
  void stop_image_replayer(ImageReplayer<ImageCtxT> *image_replayer,
                           Context *on_finish);
  void stop_image_replayers();
  void handle_stop_image_replayers(int r);
  void schedule_image_state_check_task();
  void cancel_image_state_check_task();
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::InstanceReplayer<librbd::ImageCtx>;
#endif // RBD_MIRROR_INSTANCE_REPLAYER_H
| 4,375 | 30.482014 | 78 | h |
null | ceph-main/src/tools/rbd_mirror/InstanceWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_INSTANCE_WATCHER_H
#define CEPH_RBD_MIRROR_INSTANCE_WATCHER_H
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "common/AsyncOpTracker.h"
#include "librbd/Watcher.h"
#include "librbd/managed_lock/Types.h"
#include "tools/rbd_mirror/instance_watcher/Types.h"
namespace librbd {
class AsioEngine;
class ImageCtx;
template <typename> class ManagedLock;
} // namespace librbd
namespace rbd {
namespace mirror {
template <typename> class InstanceReplayer;
template <typename> class Throttler;
template <typename> struct Threads;
// InstanceWatcher: per-daemon-instance RADOS object watcher used for
// inter-instance messaging (image acquire/release, peer-image removal,
// sync request throttling). Each instance registers itself, creates and
// watches its own instance object, and holds a ManagedLock on it; notify
// payloads received on the watch are dispatched via HandlePayloadVisitor
// to the matching handle_payload() overload.
template <typename ImageCtxT = librbd::ImageCtx>
class InstanceWatcher : protected librbd::Watcher {
  using librbd::Watcher::unregister_watch; // Silence overloaded virtual warning
public:
  // Enumerate the ids of all registered rbd-mirror instances in the pool.
  static void get_instances(librados::IoCtx &io_ctx,
                            std::vector<std::string> *instance_ids,
                            Context *on_finish);
  // Forcibly remove a (dead) instance's registration and object.
  static void remove_instance(librados::IoCtx &io_ctx,
                              librbd::AsioEngine& asio_engine,
                              const std::string &instance_id,
                              Context *on_finish);
  static InstanceWatcher *create(
    librados::IoCtx &io_ctx, librbd::AsioEngine& asio_engine,
    InstanceReplayer<ImageCtxT> *instance_replayer,
    Throttler<ImageCtxT> *image_sync_throttler);
  void destroy() {
    delete this;
  }
  InstanceWatcher(librados::IoCtx &io_ctx, librbd::AsioEngine& asio_engine,
                  InstanceReplayer<ImageCtxT> *instance_replayer,
                  Throttler<ImageCtxT> *image_sync_throttler,
                  const std::string &instance_id);
  ~InstanceWatcher() override;
  inline std::string &get_instance_id() {
    return m_instance_id;
  }
  // synchronous and asynchronous init/shutdown variants
  int init();
  void shut_down();
  void init(Context *on_finish);
  void shut_down(Context *on_finish);
  void remove(Context *on_finish);
  // outbound notifications to a (possibly remote) instance
  void notify_image_acquire(const std::string &instance_id,
                            const std::string &global_image_id,
                            Context *on_notify_ack);
  void notify_image_release(const std::string &instance_id,
                            const std::string &global_image_id,
                            Context *on_notify_ack);
  void notify_peer_image_removed(const std::string &instance_id,
                                 const std::string &global_image_id,
                                 const std::string &peer_mirror_uuid,
                                 Context *on_notify_ack);
  // sync throttling: request a slot, cancel a pending request, or signal
  // completion of a granted sync
  void notify_sync_request(const std::string &sync_id, Context *on_sync_start);
  bool cancel_sync_request(const std::string &sync_id);
  void notify_sync_complete(const std::string &sync_id);
  void cancel_notify_requests(const std::string &instance_id);
  // leader-state transitions forwarded from the leader watcher
  void handle_acquire_leader();
  void handle_release_leader();
  void handle_update_leader(const std::string &leader_instance_id);
private:
  /**
   * @verbatim
   *
   *       BREAK_INSTANCE_LOCK -------\
   *          ^                       |
   *          |               (error) |
   *       GET_INSTANCE_LOCKER  * * *>|
   *          ^ (remove)              |
   *          |                       |
   * <uninitialized> <----------------+---- WAIT_FOR_NOTIFY_OPS
   *    | (init)         ^            |        ^
   *    v        (error) *            |        |
   * REGISTER_INSTANCE * * * * * *|* *> UNREGISTER_INSTANCE
   *    |                *            |        ^
   *    v        (error) *            v        |
   * CREATE_INSTANCE_OBJECT * * * * * *> REMOVE_INSTANCE_OBJECT
   *    |                *                     ^
   *    v        (error) *                     |
   * REGISTER_WATCH * * * * * * * * * *> UNREGISTER_WATCH
   *    |                *                     ^
   *    v        (error) *                     |
   * ACQUIRE_LOCK * * * * * * * * * * *  RELEASE_LOCK
   *    |                                      ^
   *    v            (shut_down)               |
   * <watching> -------------------------------/
   *
   * @endverbatim
   */
  struct C_NotifyInstanceRequest;
  struct C_SyncRequest;
  typedef std::pair<std::string, std::string> Id;
  // Visits a decoded notify payload variant and forwards it to the
  // matching handle_payload() overload on the owning watcher.
  struct HandlePayloadVisitor : public boost::static_visitor<void> {
    InstanceWatcher *instance_watcher;
    std::string instance_id;
    C_NotifyAck *on_notify_ack;
    HandlePayloadVisitor(InstanceWatcher *instance_watcher,
                         const std::string &instance_id,
                         C_NotifyAck *on_notify_ack)
      : instance_watcher(instance_watcher), instance_id(instance_id),
        on_notify_ack(on_notify_ack) {
    }
    template <typename Payload>
    inline void operator()(const Payload &payload) const {
      instance_watcher->handle_payload(instance_id, payload, on_notify_ack);
    }
  };
  // An in-flight inbound request, ordered by (instance_id, request_id) so
  // it can live in the std::set below.
  struct Request {
    std::string instance_id;
    uint64_t request_id;
    C_NotifyAck *on_notify_ack = nullptr;
    Request(const std::string &instance_id, uint64_t request_id)
      : instance_id(instance_id), request_id(request_id) {
    }
    inline bool operator<(const Request &rhs) const {
      return instance_id < rhs.instance_id ||
             (instance_id == rhs.instance_id && request_id < rhs.request_id);
    }
  };
  Threads<ImageCtxT> *m_threads;
  InstanceReplayer<ImageCtxT> *m_instance_replayer;
  Throttler<ImageCtxT> *m_image_sync_throttler;
  std::string m_instance_id;
  mutable ceph::mutex m_lock;          // guards the mutable state below
  librbd::ManagedLock<ImageCtxT> *m_instance_lock;
  Context *m_on_finish = nullptr;
  int m_ret_val = 0;
  std::string m_leader_instance_id;
  librbd::managed_lock::Locker m_instance_locker;
  std::set<std::pair<std::string, C_NotifyInstanceRequest *>> m_notify_ops;
  AsyncOpTracker m_notify_op_tracker;
  uint64_t m_request_seq = 0;          // monotonically increasing request id
  std::set<Request> m_requests;
  std::set<C_NotifyInstanceRequest *> m_suspended_ops;
  std::map<std::string, C_SyncRequest *> m_inflight_sync_reqs;
  inline bool is_leader() const {
    return m_leader_instance_id == m_instance_id;
  }
  // init/shutdown/removal state-machine steps (see diagram above)
  void register_instance();
  void handle_register_instance(int r);
  void create_instance_object();
  void handle_create_instance_object(int r);
  void register_watch();
  void handle_register_watch(int r);
  void acquire_lock();
  void handle_acquire_lock(int r);
  void release_lock();
  void handle_release_lock(int r);
  void unregister_watch();
  void handle_unregister_watch(int r);
  void remove_instance_object();
  void handle_remove_instance_object(int r);
  void unregister_instance();
  void handle_unregister_instance(int r);
  void wait_for_notify_ops();
  void handle_wait_for_notify_ops(int r);
  void get_instance_locker();
  void handle_get_instance_locker(int r);
  void break_instance_lock();
  void handle_break_instance_lock(int r);
  // suspension of outbound notifies (e.g. while not watching)
  void suspend_notify_request(C_NotifyInstanceRequest *req);
  bool unsuspend_notify_request(C_NotifyInstanceRequest *req);
  void unsuspend_notify_requests();
  void notify_sync_complete(const ceph::mutex& lock, const std::string &sync_id);
  void handle_notify_sync_request(C_SyncRequest *sync_ctx, int r);
  void handle_notify_sync_complete(C_SyncRequest *sync_ctx, int r);
  void notify_sync_start(const std::string &instance_id,
                         const std::string &sync_id);
  Context *prepare_request(const std::string &instance_id, uint64_t request_id,
                           C_NotifyAck *on_notify_ack);
  void complete_request(const std::string &instance_id, uint64_t request_id,
                        int r);
  // librbd::Watcher entry point for inbound notifications
  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist &bl) override;
  // inbound request handlers (invoked from the payload overloads below)
  void handle_image_acquire(const std::string &global_image_id,
                            Context *on_finish);
  void handle_image_release(const std::string &global_image_id,
                            Context *on_finish);
  void handle_peer_image_removed(const std::string &global_image_id,
                                 const std::string &peer_mirror_uuid,
                                 Context *on_finish);
  void handle_sync_request(const std::string &instance_id,
                           const std::string &sync_id, Context *on_finish);
  void handle_sync_start(const std::string &instance_id,
                         const std::string &sync_id, Context *on_finish);
  // one overload per notify payload type (dispatched via the visitor)
  void handle_payload(const std::string &instance_id,
                      const instance_watcher::ImageAcquirePayload &payload,
                      C_NotifyAck *on_notify_ack);
  void handle_payload(const std::string &instance_id,
                      const instance_watcher::ImageReleasePayload &payload,
                      C_NotifyAck *on_notify_ack);
  void handle_payload(const std::string &instance_id,
                      const instance_watcher::PeerImageRemovedPayload &payload,
                      C_NotifyAck *on_notify_ack);
  void handle_payload(const std::string &instance_id,
                      const instance_watcher::SyncRequestPayload &payload,
                      C_NotifyAck *on_notify_ack);
  void handle_payload(const std::string &instance_id,
                      const instance_watcher::SyncStartPayload &payload,
                      C_NotifyAck *on_notify_ack);
  void handle_payload(const std::string &instance_id,
                      const instance_watcher::UnknownPayload &payload,
                      C_NotifyAck *on_notify_ack);
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_INSTANCE_WATCHER_H
| 9,612 | 34.603704 | 81 | h |
null | ceph-main/src/tools/rbd_mirror/Instances.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_INSTANCES_H
#define CEPH_RBD_MIRROR_INSTANCES_H
#include <map>
#include <vector>
#include "include/buffer_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "librbd/Watcher.h"
#include "tools/rbd_mirror/instances/Types.h"
namespace librbd { class ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct Threads;
// Instances: tracks the set of live rbd-mirror daemon instances in a pool.
// Loads the initial list (GET_INSTANCES), records acked heartbeats per
// instance, removes instances that stop acking via a timer task, and
// notifies the instances::Listener of additions/removals (notifications
// are held back until unblock_listener() is called).
template <typename ImageCtxT = librbd::ImageCtx>
class Instances {
public:
  typedef std::vector<std::string> InstanceIds;
  // Factory helper; pair with destroy().
  static Instances *create(Threads<ImageCtxT> *threads,
                           librados::IoCtx &ioctx,
                           const std::string& instance_id,
                           instances::Listener& listener) {
    return new Instances(threads, ioctx, instance_id, listener);
  }
  void destroy() {
    delete this;
  }
  Instances(Threads<ImageCtxT> *threads, librados::IoCtx &ioctx,
            const std::string& instance_id, instances::Listener& listener);
  virtual ~Instances();
  void init(Context *on_finish);
  void shut_down(Context *on_finish);
  // allow queued add/remove notifications to reach the listener
  void unblock_listener();
  // record heartbeat acks for the given instances
  void acked(const InstanceIds& instance_ids);
  void list(std::vector<std::string> *instance_ids);
private:
  /**
   * @verbatim
   *
   * <uninitialized> <---------------------\
   *    | (init)           ^               |
   *    v        (error)   *               |
   * GET_INSTANCES * * * * *       WAIT_FOR_OPS
   *    |                                  ^
   *    v          (shut_down)             |
   * <initialized> ------------------------/
   *      .
   *      . (remove_instance)
   *      v
   *   REMOVE_INSTANCE
   *
   * @endverbatim
   */
  // Per-instance lifecycle within the tracked map.
  enum InstanceState {
    INSTANCE_STATE_ADDING,
    INSTANCE_STATE_IDLE,
    INSTANCE_STATE_REMOVING
  };
  using clock_t = ceph::real_clock;
  struct Instance {
    clock_t::time_point acked_time{};              // last heartbeat ack
    InstanceState state = INSTANCE_STATE_ADDING;
  };
  // Base completion that keeps the async-op tracker balanced around a
  // deferred notification; subclasses implement execute().
  struct C_NotifyBase : public Context {
    Instances *instances;
    InstanceIds instance_ids;
    C_NotifyBase(Instances *instances, const InstanceIds& instance_ids)
      : instances(instances), instance_ids(instance_ids) {
      instances->m_async_op_tracker.start_op();
    }
    void finish(int r) override {
      execute();
      instances->m_async_op_tracker.finish_op();
    }
    virtual void execute() = 0;
  };
  struct C_HandleAcked : public C_NotifyBase {
    C_HandleAcked(Instances *instances, const InstanceIds& instance_ids)
      : C_NotifyBase(instances, instance_ids) {
    }
    void execute() override {
      this->instances->handle_acked(this->instance_ids);
    }
  };
  struct C_NotifyInstancesAdded : public C_NotifyBase {
    C_NotifyInstancesAdded(Instances *instances,
                           const InstanceIds& instance_ids)
      : C_NotifyBase(instances, instance_ids) {
    }
    void execute() override {
      this->instances->notify_instances_added(this->instance_ids);
    }
  };
  struct C_NotifyInstancesRemoved : public C_NotifyBase {
    C_NotifyInstancesRemoved(Instances *instances,
                             const InstanceIds& instance_ids)
      : C_NotifyBase(instances, instance_ids) {
    }
    void execute() override {
      this->instances->notify_instances_removed(this->instance_ids);
    }
  };
  Threads<ImageCtxT> *m_threads;
  librados::IoCtx &m_ioctx;
  std::string m_instance_id;
  instances::Listener& m_listener;
  CephContext *m_cct;
  ceph::mutex m_lock;                        // guards the state below
  InstanceIds m_instance_ids;
  std::map<std::string, Instance> m_instances;
  Context *m_on_finish = nullptr;
  AsyncOpTracker m_async_op_tracker;
  Context *m_timer_task = nullptr;           // pending removal timer task
  bool m_listener_blocked = true;            // hold notifications until unblock_listener()
  void handle_acked(const InstanceIds& instance_ids);
  void notify_instances_added(const InstanceIds& instance_ids);
  void notify_instances_removed(const InstanceIds& instance_ids);
  void get_instances();
  void handle_get_instances(int r);
  void wait_for_ops();
  void handle_wait_for_ops(int r);
  // timer-driven removal of instances whose acks have gone stale
  void remove_instances(const clock_t::time_point& time);
  void handle_remove_instances(int r, const InstanceIds& instance_ids);
  void cancel_remove_task();
  void schedule_remove_task(const clock_t::time_point& time);
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_INSTANCES_H
| 4,452 | 25.349112 | 75 | h |
null | ceph-main/src/tools/rbd_mirror/LeaderWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_LEADER_WATCHER_H
#define CEPH_RBD_MIRROR_LEADER_WATCHER_H
#include <list>
#include <memory>
#include <string>
#include "common/AsyncOpTracker.h"
#include "librbd/ManagedLock.h"
#include "librbd/Watcher.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/watcher/Types.h"
#include "Instances.h"
#include "tools/rbd_mirror/instances/Types.h"
#include "tools/rbd_mirror/leader_watcher/Types.h"
namespace librbd {
class ImageCtx;
namespace asio { struct ContextWQ; }
} // namespace librbd
namespace rbd {
namespace mirror {
template <typename> struct Threads;
/**
 * Participates in leader election for a pool: maintains a managed lock on
 * the leader object, broadcasts heartbeat / lock-acquired / lock-released
 * notifications to peer rbd-mirror instances, and tracks peer instances
 * while this daemon holds the leader role.
 */
template <typename ImageCtxT = librbd::ImageCtx>
class LeaderWatcher : protected librbd::Watcher {
  using librbd::Watcher::unregister_watch; // Silence overloaded virtual warning
public:
  static LeaderWatcher* create(Threads<ImageCtxT> *threads,
                               librados::IoCtx &io_ctx,
                               leader_watcher::Listener *listener) {
    return new LeaderWatcher(threads, io_ctx, listener);
  }

  LeaderWatcher(Threads<ImageCtxT> *threads, librados::IoCtx &io_ctx,
                leader_watcher::Listener *listener);
  ~LeaderWatcher() override;

  // synchronous wrappers around the async init/shut_down below
  int init();
  void shut_down();

  void init(Context *on_finish);
  void shut_down(Context *on_finish);

  bool is_blocklisted() const;
  bool is_leader() const;
  bool is_releasing_leader() const;
  bool get_leader_instance_id(std::string *instance_id) const;
  void release_leader();
  void list_instances(std::vector<std::string> *instance_ids);

  std::string get_instance_id();

private:
  /**
   * @verbatim
   *
   *  <uninitialized> <------------------------------ WAIT_FOR_TASKS
   *     | (init)               ^                           ^
   *     v                      *                           |
   *  CREATE_OBJECT * * * * * (error)                UNREGISTER_WATCH
   *     |                      *                           ^
   *     v                      *                           |
   *  REGISTER_WATCH * * * * * *                     SHUT_DOWN_LEADER_LOCK
   *     |                                                  ^
   *     |   (no leader heartbeat and acquire failed)       |
   *     | BREAK_LOCK <-------------------------------------\               |
   *     |    |            (no leader heartbeat)            |  | (shut down)
   *     |    |  /----------------------------------------\ |  |
   *     |    |  |            (lock_released received)    | |  |
   *     |    |  |  /-------------------------------------\ |  |
   *     |    |  |  |              (lock_acquired or      | |  |
   *     |    |  |  |               heartbeat received)   | |  |
   *     |    |  |  |       (ENOENT)        /-----------\ | |  |
   *     |    |  |  |  * * * * * * * * * *  |           | | |  |
   *     v    v  v  v  v      (error)    *  v           | | |  |
   *  ACQUIRE_LEADER_LOCK  * * * * *> GET_LOCKER ---> <secondary>
   *     |                   *                           ^
   *  ....|...................*....................  .....|.....................
   *  .   v                   *                   .  .    | post_release       .
   *  .INIT_INSTANCES  * * * * *                  .  .NOTIFY_LOCK_RELEASED     .
   *  .   |                                       .  .....^.....................
   *  .   v                                       .       |
   *  .NOTIFY_LISTENER                            .  RELEASE_LEADER_LOCK
   *  .   |                                       .       ^
   *  .   v                                       .  .....|.....................
   *  .NOTIFY_LOCK_ACQUIRED                       .  .    |                    .
   *  .   |                         post_acquire  .  .SHUT_DOWN_INSTANCES     .
   *  ....|........................................  .    ^                   .
   *      v                                          .    |                   .
   *  <leader> -----------------------------------> .NOTIFY_LISTENER          .
   *            (shut_down, release_leader,          .             pre_release .
   *             notify error)                       ...........................
   * @endverbatim
   */

  // bridges Instances add/remove events to the external listener
  struct InstancesListener : public instances::Listener {
    LeaderWatcher* leader_watcher;

    InstancesListener(LeaderWatcher* leader_watcher)
      : leader_watcher(leader_watcher) {
    }

    void handle_added(const InstanceIds& instance_ids) override {
      leader_watcher->m_listener->handle_instances_added(instance_ids);
    }

    void handle_removed(const InstanceIds& instance_ids) override {
      leader_watcher->m_listener->handle_instances_removed(instance_ids);
    }
  };

  // exclusive managed lock on the leader object; routes the lock
  // acquire/release handlers back into the watcher's state machine
  class LeaderLock : public librbd::ManagedLock<ImageCtxT> {
  public:
    typedef librbd::ManagedLock<ImageCtxT> Parent;

    LeaderLock(librados::IoCtx& ioctx, librbd::AsioEngine& asio_engine,
               const std::string& oid, LeaderWatcher *watcher,
               bool blocklist_on_break_lock,
               uint32_t blocklist_expire_seconds)
      : Parent(ioctx, asio_engine, oid, watcher,
               librbd::managed_lock::EXCLUSIVE, blocklist_on_break_lock,
               blocklist_expire_seconds),
        watcher(watcher) {
    }

    bool is_leader() const {
      std::lock_guard locker{Parent::m_lock};
      return Parent::is_state_post_acquiring() || Parent::is_state_locked();
    }

    bool is_releasing_leader() const {
      std::lock_guard locker{Parent::m_lock};
      return Parent::is_state_pre_releasing();
    }

  protected:
    void post_acquire_lock_handler(int r, Context *on_finish) {
      if (r == 0) {
        // lock is owned at this point
        std::lock_guard locker{Parent::m_lock};
        Parent::set_state_post_acquiring();
      }
      watcher->handle_post_acquire_leader_lock(r, on_finish);
    }

    void pre_release_lock_handler(bool shutting_down,
                                  Context *on_finish) {
      watcher->handle_pre_release_leader_lock(on_finish);
    }

    void post_release_lock_handler(bool shutting_down, int r,
                                   Context *on_finish) {
      watcher->handle_post_release_leader_lock(r, on_finish);
    }

  private:
    LeaderWatcher *watcher;
  };

  // dispatches a decoded notification payload to the matching
  // handle_payload() overload below
  struct HandlePayloadVisitor : public boost::static_visitor<void> {
    LeaderWatcher *leader_watcher;
    Context *on_notify_ack;

    HandlePayloadVisitor(LeaderWatcher *leader_watcher, Context *on_notify_ack)
      : leader_watcher(leader_watcher), on_notify_ack(on_notify_ack) {
    }

    template <typename Payload>
    inline void operator()(const Payload &payload) const {
      leader_watcher->handle_payload(payload, on_notify_ack);
    }
  };

  // completion for an async locker query; the locker field is filled in
  // by the caller before completion fires
  struct C_GetLocker : public Context {
    LeaderWatcher *leader_watcher;
    librbd::managed_lock::Locker locker;

    C_GetLocker(LeaderWatcher *leader_watcher)
      : leader_watcher(leader_watcher) {
    }

    void finish(int r) override {
      leader_watcher->handle_get_locker(r, locker);
    }
  };

  typedef void (LeaderWatcher<ImageCtxT>::*TimerCallback)();

  // single-slot gate ensuring only one timer task is pending at a time;
  // clears m_timer_gate before executing the stored callback
  struct C_TimerGate : public Context {
    LeaderWatcher *leader_watcher;

    bool leader = false;
    TimerCallback timer_callback = nullptr;

    C_TimerGate(LeaderWatcher *leader_watcher)
      : leader_watcher(leader_watcher) {
    }

    void finish(int r) override {
      leader_watcher->m_timer_gate = nullptr;
      leader_watcher->execute_timer_task(leader, timer_callback);
    }
  };

  Threads<ImageCtxT> *m_threads;
  leader_watcher::Listener *m_listener;

  InstancesListener m_instances_listener;

  mutable ceph::mutex m_lock;
  uint64_t m_notifier_id;
  std::string m_instance_id;
  LeaderLock *m_leader_lock;
  Context *m_on_finish = nullptr;
  Context *m_on_shut_down_finish = nullptr;
  uint64_t m_acquire_attempts = 0;
  int m_ret_val = 0;
  Instances<ImageCtxT> *m_instances = nullptr;
  librbd::managed_lock::Locker m_locker;

  bool m_blocklisted = false;

  AsyncOpTracker m_timer_op_tracker;
  Context *m_timer_task = nullptr;
  C_TimerGate *m_timer_gate = nullptr;

  librbd::watcher::NotifyResponse m_heartbeat_response;

  // lock-held variants (parameter shadows the member to document intent)
  bool is_leader(ceph::mutex &m_lock) const;
  bool is_releasing_leader(ceph::mutex &m_lock) const;

  void cancel_timer_task();
  void schedule_timer_task(const std::string &name,
                           int delay_factor, bool leader,
                           TimerCallback callback, bool shutting_down);
  void execute_timer_task(bool leader, TimerCallback timer_callback);

  void create_leader_object();
  void handle_create_leader_object(int r);

  void register_watch();
  void handle_register_watch(int r);

  void shut_down_leader_lock();
  void handle_shut_down_leader_lock(int r);

  void unregister_watch();
  void handle_unregister_watch(int r);

  void wait_for_tasks();
  void handle_wait_for_tasks();

  void break_leader_lock();
  void handle_break_leader_lock(int r);

  void schedule_get_locker(bool reset_leader, uint32_t delay_factor);
  void get_locker();
  void handle_get_locker(int r, librbd::managed_lock::Locker& locker);

  void schedule_acquire_leader_lock(uint32_t delay_factor);
  void acquire_leader_lock();
  void handle_acquire_leader_lock(int r);

  void release_leader_lock();
  void handle_release_leader_lock(int r);

  void init_instances();
  void handle_init_instances(int r);

  void shut_down_instances();
  void handle_shut_down_instances(int r);

  void notify_listener();
  void handle_notify_listener(int r);

  void notify_lock_acquired();
  void handle_notify_lock_acquired(int r);

  void notify_lock_released();
  void handle_notify_lock_released(int r);

  void notify_heartbeat();
  void handle_notify_heartbeat(int r);

  void handle_post_acquire_leader_lock(int r, Context *on_finish);
  void handle_pre_release_leader_lock(Context *on_finish);
  void handle_post_release_leader_lock(int r, Context *on_finish);

  // librbd::Watcher overrides
  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist &bl) override;

  void handle_rewatch_complete(int r) override;

  // inbound peer notifications
  void handle_heartbeat(Context *on_ack);
  void handle_lock_acquired(Context *on_ack);
  void handle_lock_released(Context *on_ack);

  void handle_payload(const leader_watcher::HeartbeatPayload &payload,
                      Context *on_notify_ack);
  void handle_payload(const leader_watcher::LockAcquiredPayload &payload,
                      Context *on_notify_ack);
  void handle_payload(const leader_watcher::LockReleasedPayload &payload,
                      Context *on_notify_ack);
  void handle_payload(const leader_watcher::UnknownPayload &payload,
                      Context *on_notify_ack);
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_LEADER_WATCHER_H
| 10,829 | 33.490446 | 80 | h |
null | ceph-main/src/tools/rbd_mirror/Mirror.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_H
#define CEPH_RBD_MIRROR_H
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "include/rados/librados.hpp"
#include "include/utime.h"
#include "ClusterWatcher.h"
#include "PoolReplayer.h"
#include "tools/rbd_mirror/Types.h"
#include <set>
#include <map>
#include <memory>
#include <atomic>
namespace journal { class CacheManagerHandler; }
namespace librbd { struct ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct ServiceDaemon;
template <typename> struct Threads;
class CacheManagerHandler;
class MirrorAdminSocketHook;
class PoolMetaCache;
/**
 * Contains the main loop and overall state for rbd-mirror.
 *
 * Sets up mirroring, and coordinates between noticing config
 * changes (via ClusterWatcher) and applying them (via the per-peer
 * PoolReplayer instances).
 */
class Mirror {
public:
  Mirror(CephContext *cct, const std::vector<const char*> &args);
  Mirror(const Mirror&) = delete;
  Mirror& operator=(const Mirror&) = delete;
  ~Mirror();

  int init();
  void run();                       // blocks until stop() / signal
  void handle_signal(int signum);

  // admin-socket entry points
  void print_status(Formatter *f);
  void start();
  void stop();
  void restart();
  void flush();
  void release_leader();

private:
  typedef ClusterWatcher::PoolPeers PoolPeers;
  typedef std::pair<int64_t, PeerSpec> PoolPeer;

  // reconcile m_pool_replayers with the currently configured peers
  void update_pool_replayers(const PoolPeers &pool_peers,
                             const std::string& site_name);

  void create_cache_manager();
  void run_cache_manager(utime_t *next_run_interval);

  CephContext *m_cct;
  std::vector<const char*> m_args;
  Threads<librbd::ImageCtx> *m_threads = nullptr;
  ceph::mutex m_lock = ceph::make_mutex("rbd::mirror::Mirror");
  ceph::condition_variable m_cond;
  RadosRef m_local;
  std::unique_ptr<ServiceDaemon<librbd::ImageCtx>> m_service_daemon;

  // monitor local cluster for config changes in peers
  std::unique_ptr<ClusterWatcher> m_local_cluster_watcher;
  std::unique_ptr<CacheManagerHandler> m_cache_manager_handler;
  std::unique_ptr<PoolMetaCache> m_pool_meta_cache;
  std::map<PoolPeer, std::unique_ptr<PoolReplayer<>>> m_pool_replayers;
  std::atomic<bool> m_stopping = { false };
  bool m_manual_stop = false;       // true when stopped via admin socket
  MirrorAdminSocketHook *m_asok_hook;
  std::string m_site_name;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_H
| 2,376 | 25.411111 | 71 | h |
null | ceph-main/src/tools/rbd_mirror/MirrorStatusUpdater.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_MIRROR_STATUS_UPDATER_H
#define CEPH_RBD_MIRROR_MIRROR_STATUS_UPDATER_H
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "cls/rbd/cls_rbd_types.h"
#include <list>
#include <map>
#include <set>
#include <string>
struct Context;
namespace librbd { class ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct MirrorStatusWatcher;
template <typename> struct Threads;
/**
 * Batches mirror image status updates for a pool and periodically flushes
 * them to the cluster, de-duplicating per global image id.  Also owns the
 * MirrorStatusWatcher used to keep the on-disk status fresh.
 */
template <typename ImageCtxT = librbd::ImageCtx>
class MirrorStatusUpdater {
public:
  static MirrorStatusUpdater* create(librados::IoCtx& io_ctx,
                                     Threads<ImageCtxT> *threads,
                                     const std::string& local_mirror_uuid) {
    return new MirrorStatusUpdater(io_ctx, threads, local_mirror_uuid);
  }

  MirrorStatusUpdater(librados::IoCtx& io_ctx, Threads<ImageCtxT> *threads,
                      const std::string& local_mirror_uuid);
  ~MirrorStatusUpdater();

  void init(Context* on_finish);
  void shut_down(Context* on_finish);

  // true if a (possibly not yet flushed) status exists for the image
  bool exists(const std::string& global_image_id);
  void set_mirror_image_status(
      const std::string& global_image_id,
      const cls::rbd::MirrorImageSiteStatus& mirror_image_site_status,
      bool immediate_update);
  void remove_mirror_image_status(const std::string& global_image_id,
                                  bool immediate_update, Context* on_finish);
  void remove_refresh_mirror_image_status(const std::string& global_image_id,
                                          Context* on_finish);

private:
  /**
   * @verbatim
   *
   * <uninitialized> <----------------------\
   *    | (init)             ^ (error)      |
   *    v                    *              |
   * INIT_STATUS_WATCHER * * *              |
   *    |                                   |
   *    |                    SHUT_DOWN_STATUS_WATCHER
   *    |                                   ^
   *    |                                   |
   *    | (shutdown)                        |
   * <initialized> -------------------------/
   *
   * @endverbatim
   */

  typedef std::list<Context*> Contexts;
  typedef std::set<std::string> GlobalImageIds;
  typedef std::map<std::string, cls::rbd::MirrorImageSiteStatus>
      GlobalImageStatus;

  librados::IoCtx m_io_ctx;
  Threads<ImageCtxT>* m_threads;
  std::string m_local_mirror_uuid;

  Context* m_timer_task = nullptr;

  ceph::mutex m_lock;

  bool m_initialized = false;

  MirrorStatusWatcher<ImageCtxT>* m_mirror_status_watcher = nullptr;

  // statuses queued but not yet flushed
  GlobalImageIds m_update_global_image_ids;
  GlobalImageStatus m_global_image_status;

  // flush pipeline bookkeeping (coalesces concurrent update requests)
  bool m_update_in_progress = false;
  bool m_update_in_flight = false;
  bool m_update_requested = false;
  Contexts m_update_on_finish_ctxs;
  GlobalImageIds m_updating_global_image_ids;

  bool try_remove_mirror_image_status(const std::string& global_image_id,
                                      bool queue_update, bool immediate_update,
                                      Context* on_finish);

  void init_mirror_status_watcher(Context* on_finish);
  void handle_init_mirror_status_watcher(int r, Context* on_finish);

  void shut_down_mirror_status_watcher(Context* on_finish);
  void handle_shut_down_mirror_status_watcher(int r, Context* on_finish);
  void finalize_shutdown(int r, Context* on_finish);

  void schedule_timer_task();
  void handle_timer_task(int r);

  void queue_update_task(std::unique_lock<ceph::mutex>&& locker);
  void update_task(int r);
  void handle_update_task(int r);
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::MirrorStatusUpdater<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_MIRROR_STATUS_UPDATER_H
| 3,803 | 30.7 | 79 | h |
null | ceph-main/src/tools/rbd_mirror/MirrorStatusWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_MIRROR_STATUS_WATCHER_H
#define CEPH_RBD_MIRROR_MIRROR_STATUS_WATCHER_H
#include "librbd/Watcher.h"
namespace librbd {
class ImageCtx;
namespace asio { struct ContextWQ; }
} // namespace librbd
namespace rbd {
namespace mirror {
/**
 * Thin librbd::Watcher subclass for the mirror status object; its only
 * override acknowledges incoming notifications (see handle_notify).
 */
template <typename ImageCtxT = librbd::ImageCtx>
class MirrorStatusWatcher : protected librbd::Watcher {
public:
  static MirrorStatusWatcher *create(librados::IoCtx &io_ctx,
                                     librbd::asio::ContextWQ *work_queue) {
    return new MirrorStatusWatcher(io_ctx, work_queue);
  }
  void destroy() {
    delete this;
  }

  MirrorStatusWatcher(librados::IoCtx &io_ctx,
                      librbd::asio::ContextWQ *work_queue);
  ~MirrorStatusWatcher() override;

  void init(Context *on_finish);
  void shut_down(Context *on_finish);

protected:
  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist &bl) override;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_MIRROR_STATUS_WATCHER_H
| 1,157 | 25.318182 | 75 | h |
null | ceph-main/src/tools/rbd_mirror/NamespaceReplayer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_NAMESPACE_REPLAYER_H
#define CEPH_RBD_MIRROR_NAMESPACE_REPLAYER_H
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "include/rados/librados.hpp"
#include "tools/rbd_mirror/ImageDeleter.h"
#include "tools/rbd_mirror/ImageMap.h"
#include "tools/rbd_mirror/InstanceReplayer.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/MirrorStatusUpdater.h"
#include "tools/rbd_mirror/PoolWatcher.h"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/image_map/Types.h"
#include "tools/rbd_mirror/pool_watcher/Types.h"
#include <memory>
#include <string>
#include <vector>
class AdminSocketHook;
namespace journal { struct CacheManagerHandler; }
namespace librbd { class ImageCtx; }
namespace rbd {
namespace mirror {
struct PoolMetaCache;
template <typename> class ServiceDaemon;
template <typename> class Throttler;
template <typename> struct Threads;
/**
 * Controls mirroring for a single namespace of a pool (the default
 * namespace when the name is empty): owns the pool watchers, instance
 * replayer/watcher, image map and image deleter for that namespace.
 */
template <typename ImageCtxT = librbd::ImageCtx>
class NamespaceReplayer {
public:
  static NamespaceReplayer *create(
      const std::string &name,
      librados::IoCtx &local_ioctx,
      librados::IoCtx &remote_ioctx,
      const std::string &local_mirror_uuid,
      const std::string &local_mirror_peer_uuid,
      const RemotePoolMeta& remote_pool_meta,
      Threads<ImageCtxT> *threads,
      Throttler<ImageCtxT> *image_sync_throttler,
      Throttler<ImageCtxT> *image_deletion_throttler,
      ServiceDaemon<ImageCtxT> *service_daemon,
      journal::CacheManagerHandler *cache_manager_handler,
      PoolMetaCache* pool_meta_cache) {
    return new NamespaceReplayer(name, local_ioctx, remote_ioctx,
                                 local_mirror_uuid, local_mirror_peer_uuid,
                                 remote_pool_meta, threads,
                                 image_sync_throttler, image_deletion_throttler,
                                 service_daemon, cache_manager_handler,
                                 pool_meta_cache);
  }

  NamespaceReplayer(const std::string &name,
                    librados::IoCtx &local_ioctx,
                    librados::IoCtx &remote_ioctx,
                    const std::string &local_mirror_uuid,
                    const std::string& local_mirror_peer_uuid,
                    const RemotePoolMeta& remote_pool_meta,
                    Threads<ImageCtxT> *threads,
                    Throttler<ImageCtxT> *image_sync_throttler,
                    Throttler<ImageCtxT> *image_deletion_throttler,
                    ServiceDaemon<ImageCtxT> *service_daemon,
                    journal::CacheManagerHandler *cache_manager_handler,
                    PoolMetaCache* pool_meta_cache);
  NamespaceReplayer(const NamespaceReplayer&) = delete;
  NamespaceReplayer& operator=(const NamespaceReplayer&) = delete;

  bool is_blocklisted() const;

  void init(Context *on_finish);
  void shut_down(Context *on_finish);

  // leader role transitions (driven by the owning PoolReplayer)
  void handle_acquire_leader(Context *on_finish);
  void handle_release_leader(Context *on_finish);
  void handle_update_leader(const std::string &leader_instance_id);
  void handle_instances_added(const std::vector<std::string> &instance_ids);
  void handle_instances_removed(const std::vector<std::string> &instance_ids);

  void print_status(Formatter *f);
  void start();
  void stop();
  void restart();
  void flush();

private:
  /**
   * @verbatim
   *
   * <uninitialized> <------------------------------------\
   *    | (init)                        ^ (error)         |
   *    v                               *                 |
   * INIT_LOCAL_STATUS_UPDATER * * * * * * > SHUT_DOWN_LOCAL_STATUS_UPDATER
   *    |                               * (error)         ^
   *    v                               *                 |
   * INIT_REMOTE_STATUS_UPDATER * * * * * > SHUT_DOWN_REMOTE_STATUS_UPDATER
   *    |                               * (error)         ^
   *    v                               *                 |
   * INIT_INSTANCE_REPLAYER * * * * * * * > SHUT_DOWN_INSTANCE_REPLAYER
   *    |                               *                 ^
   *    v                               *                 |
   * INIT_INSTANCE_WATCHER  * * * * * * *   SHUT_DOWN_INSTANCE_WATCHER
   *    |                  (error)                        ^
   *    |                                                 |
   *    v                                   STOP_INSTANCE_REPLAYER
   *    |                                                 ^
   *    |  (shut down)                                    |
   *    |  /----------------------------------------------/
   *    v  |
   * <follower> <---------------------------\
   *    .                                   |
   *    .                                   |
   *    v (leader acquired)                 |
   * INIT_IMAGE_MAP                         |
   *    |                                   |
   *    v                                   |
   * INIT_LOCAL_POOL_WATCHER      SHUT_DOWN_IMAGE_MAP
   *    |                                   ^
   *    v                                   |
   * INIT_REMOTE_POOL_WATCHER     SHUT_DOWN_POOL_WATCHERS
   *    |                                   ^
   *    v                                   |
   * INIT_IMAGE_DELETER           SHUT_DOWN_IMAGE_DELETER
   *    |                                   ^
   *    v                                   .
   * <leader> <-----------\                 .
   *    .                 |                 .
   *    . (image update)  |                 .
   *    . . > NOTIFY_INSTANCE_WATCHER       .
   *    .                                   .
   *    . (leader lost / shut down)         .
   *    . . . . . . . . . . . . . . . . . . .
   *
   * @endverbatim
   */

  // forwards pool-watcher image updates, tagging local updates with an
  // empty mirror uuid
  struct PoolWatcherListener : public pool_watcher::Listener {
    NamespaceReplayer *namespace_replayer;
    bool local;

    PoolWatcherListener(NamespaceReplayer *namespace_replayer, bool local)
      : namespace_replayer(namespace_replayer), local(local) {
    }

    void handle_update(const std::string &mirror_uuid,
                       ImageIds &&added_image_ids,
                       ImageIds &&removed_image_ids) override {
      namespace_replayer->handle_update((local ? "" : mirror_uuid),
                                        std::move(added_image_ids),
                                        std::move(removed_image_ids));
    }
  };

  // forwards image-map acquire/release/remove directives back to the
  // namespace replayer
  struct ImageMapListener : public image_map::Listener {
    NamespaceReplayer *namespace_replayer;

    ImageMapListener(NamespaceReplayer *namespace_replayer)
      : namespace_replayer(namespace_replayer) {
    }

    void acquire_image(const std::string &global_image_id,
                       const std::string &instance_id,
                       Context* on_finish) override {
      namespace_replayer->handle_acquire_image(global_image_id, instance_id,
                                               on_finish);
    }

    void release_image(const std::string &global_image_id,
                       const std::string &instance_id,
                       Context* on_finish) override {
      namespace_replayer->handle_release_image(global_image_id, instance_id,
                                               on_finish);
    }

    void remove_image(const std::string &mirror_uuid,
                      const std::string &global_image_id,
                      const std::string &instance_id,
                      Context* on_finish) override {
      namespace_replayer->handle_remove_image(mirror_uuid, global_image_id,
                                              instance_id, on_finish);
    }
  };

  void handle_update(const std::string &mirror_uuid,
                     ImageIds &&added_image_ids,
                     ImageIds &&removed_image_ids);

  int init_rados(const std::string &cluster_name,
                 const std::string &client_name,
                 const std::string &mon_host,
                 const std::string &key,
                 const std::string &description, RadosRef *rados_ref,
                 bool strip_cluster_overrides);

  void init_local_status_updater();
  void handle_init_local_status_updater(int r);

  void init_remote_status_updater();
  void handle_init_remote_status_updater(int r);

  void init_instance_replayer();
  void handle_init_instance_replayer(int r);

  void init_instance_watcher();
  void handle_init_instance_watcher(int r);

  void stop_instance_replayer();
  void handle_stop_instance_replayer(int r);

  void shut_down_instance_watcher();
  void handle_shut_down_instance_watcher(int r);

  void shut_down_instance_replayer();
  void handle_shut_down_instance_replayer(int r);

  void shut_down_remote_status_updater();
  void handle_shut_down_remote_status_updater(int r);

  void shut_down_local_status_updater();
  void handle_shut_down_local_status_updater(int r);

  // leader-only components (image map, pool watchers, image deleter)
  void init_image_map(Context *on_finish);
  void handle_init_image_map(int r, ImageMap<ImageCtxT> *image_map,
                             Context *on_finish);

  void init_local_pool_watcher(Context *on_finish);
  void handle_init_local_pool_watcher(int r, Context *on_finish);

  void init_remote_pool_watcher(Context *on_finish);
  void handle_init_remote_pool_watcher(int r, Context *on_finish);

  void init_image_deleter(Context* on_finish);
  void handle_init_image_deleter(int r, Context* on_finish);

  void shut_down_image_deleter(Context* on_finish);
  void handle_shut_down_image_deleter(int r, Context* on_finish);

  void shut_down_pool_watchers(Context *on_finish);
  void handle_shut_down_pool_watchers(int r, Context *on_finish);

  void shut_down_image_map(Context *on_finish);
  void handle_shut_down_image_map(int r, Context *on_finish);

  void handle_acquire_image(const std::string &global_image_id,
                            const std::string &instance_id,
                            Context* on_finish);
  void handle_release_image(const std::string &global_image_id,
                            const std::string &instance_id,
                            Context* on_finish);
  void handle_remove_image(const std::string &mirror_uuid,
                           const std::string &global_image_id,
                           const std::string &instance_id,
                           Context* on_finish);

  std::string m_namespace_name;
  librados::IoCtx m_local_io_ctx;
  librados::IoCtx m_remote_io_ctx;
  std::string m_local_mirror_uuid;
  std::string m_local_mirror_peer_uuid;
  RemotePoolMeta m_remote_pool_meta;
  Threads<ImageCtxT> *m_threads;
  Throttler<ImageCtxT> *m_image_sync_throttler;
  Throttler<ImageCtxT> *m_image_deletion_throttler;
  ServiceDaemon<ImageCtxT> *m_service_daemon;
  journal::CacheManagerHandler *m_cache_manager_handler;
  PoolMetaCache* m_pool_meta_cache;

  mutable ceph::mutex m_lock;

  int m_ret_val = 0;
  Context *m_on_finish = nullptr;

  std::unique_ptr<MirrorStatusUpdater<ImageCtxT>> m_local_status_updater;
  std::unique_ptr<MirrorStatusUpdater<ImageCtxT>> m_remote_status_updater;

  PoolWatcherListener m_local_pool_watcher_listener;
  std::unique_ptr<PoolWatcher<ImageCtxT>> m_local_pool_watcher;

  PoolWatcherListener m_remote_pool_watcher_listener;
  std::unique_ptr<PoolWatcher<ImageCtxT>> m_remote_pool_watcher;

  std::unique_ptr<InstanceReplayer<ImageCtxT>> m_instance_replayer;
  std::unique_ptr<ImageDeleter<ImageCtxT>> m_image_deleter;

  ImageMapListener m_image_map_listener;
  std::unique_ptr<ImageMap<ImageCtxT>> m_image_map;

  std::unique_ptr<InstanceWatcher<ImageCtxT>> m_instance_watcher;
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::NamespaceReplayer<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_NAMESPACE_REPLAYER_H
| 11,836 | 37.307443 | 82 | h |
null | ceph-main/src/tools/rbd_mirror/PoolMetaCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_POOL_META_CACHE_H
#define CEPH_RBD_MIRROR_POOL_META_CACHE_H
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/Types.h"
#include <map>
namespace rbd {
namespace mirror {
/**
 * Thread-safe cache of local/remote pool mirroring metadata, keyed by
 * pool id.  Readers take the shared lock; mutators take it exclusively
 * (implementation in PoolMetaCache.cc).
 */
class PoolMetaCache {
public:
  // explicit: prevent accidental implicit conversion from CephContext*
  explicit PoolMetaCache(CephContext* cct)
    : m_cct(cct) {
  }
  PoolMetaCache(const PoolMetaCache&) = delete;
  PoolMetaCache& operator=(const PoolMetaCache&) = delete;

  /// @return 0 and fill *local_pool_meta on hit; -ENOENT style error on miss
  int get_local_pool_meta(int64_t pool_id,
                          LocalPoolMeta* local_pool_meta) const;
  void set_local_pool_meta(int64_t pool_id,
                           const LocalPoolMeta& local_pool_meta);
  void remove_local_pool_meta(int64_t pool_id);

  /// @return 0 and fill *remote_pool_meta on hit; error on miss
  int get_remote_pool_meta(int64_t pool_id,
                           RemotePoolMeta* remote_pool_meta) const;
  void set_remote_pool_meta(int64_t pool_id,
                            const RemotePoolMeta& remote_pool_meta);
  void remove_remote_pool_meta(int64_t pool_id);

private:
  CephContext* m_cct;

  // shared mutex: concurrent readers, exclusive writers
  mutable ceph::shared_mutex m_lock =
    ceph::make_shared_mutex("rbd::mirror::PoolMetaCache::m_lock");
  std::map<int64_t, LocalPoolMeta> m_local_pool_metas;
  std::map<int64_t, RemotePoolMeta> m_remote_pool_metas;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_POOL_META_CACHE_H
| 1,411 | 28.416667 | 70 | h |
null | ceph-main/src/tools/rbd_mirror/PoolReplayer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_POOL_REPLAYER_H
#define CEPH_RBD_MIRROR_POOL_REPLAYER_H
#include "common/Cond.h"
#include "common/ceph_mutex.h"
#include "include/rados/librados.hpp"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "tools/rbd_mirror/LeaderWatcher.h"
#include "tools/rbd_mirror/NamespaceReplayer.h"
#include "tools/rbd_mirror/Throttler.h"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/leader_watcher/Types.h"
#include "tools/rbd_mirror/service_daemon/Types.h"
#include <map>
#include <memory>
#include <string>
#include <vector>
class AdminSocketHook;
namespace journal { struct CacheManagerHandler; }
namespace librbd { class ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> class RemotePoolPoller;
namespace remote_pool_poller { struct Listener; }
struct PoolMetaCache;
template <typename> class ServiceDaemon;
template <typename> struct Threads;
/**
* Controls mirroring for a single remote cluster.
*/
template <typename ImageCtxT = librbd::ImageCtx>
class PoolReplayer {
public:
PoolReplayer(Threads<ImageCtxT> *threads,
ServiceDaemon<ImageCtxT> *service_daemon,
journal::CacheManagerHandler *cache_manager_handler,
PoolMetaCache* pool_meta_cache,
int64_t local_pool_id, const PeerSpec &peer,
const std::vector<const char*> &args);
~PoolReplayer();
PoolReplayer(const PoolReplayer&) = delete;
PoolReplayer& operator=(const PoolReplayer&) = delete;
bool is_blocklisted() const;
bool is_leader() const;
bool is_running() const;
void init(const std::string& site_name);
void shut_down();
void run();
void print_status(Formatter *f);
void start();
void stop(bool manual);
void restart();
void flush();
void release_leader();
void reopen_logs();
private:
/**
* @verbatim
*
* <start>
* |
* v
* INIT
* |
* v
* <follower> <---------------------\
* . |
* . (leader acquired) |
* v |
* NOTIFY_NAMESPACE_WATCHERS NOTIFY_NAMESPACE_WATCHERS
* | ^
* v .
* <leader> .
* . .
* . (leader lost / shut down) .
* . . . . . . . . . . . . . . . .
*
* @endverbatim
*/
struct RemotePoolPollerListener;
int init_rados(const std::string &cluster_name,
const std::string &client_name,
const std::string &mon_host,
const std::string &key,
const std::string &description, RadosRef *rados_ref,
bool strip_cluster_overrides);
void update_namespace_replayers();
int list_mirroring_namespaces(std::set<std::string> *namespaces);
void namespace_replayer_acquire_leader(const std::string &name,
Context *on_finish);
void handle_post_acquire_leader(Context *on_finish);
void handle_pre_release_leader(Context *on_finish);
void handle_update_leader(const std::string &leader_instance_id);
void handle_instances_added(const std::vector<std::string> &instance_ids);
void handle_instances_removed(const std::vector<std::string> &instance_ids);
// sync version, executed in the caller thread
template <typename L>
void with_namespace_replayers(L &&callback) {
std::lock_guard locker{m_lock};
if (m_namespace_replayers_locked) {
ceph_assert(m_on_namespace_replayers_unlocked == nullptr);
C_SaferCond cond;
m_on_namespace_replayers_unlocked = &cond;
m_lock.unlock();
cond.wait();
m_lock.lock();
} else {
m_namespace_replayers_locked = true;
}
ceph_assert(m_namespace_replayers_locked);
callback(); // may temporary release the lock
ceph_assert(m_namespace_replayers_locked);
if (m_on_namespace_replayers_unlocked == nullptr) {
m_namespace_replayers_locked = false;
return;
}
m_threads->work_queue->queue(m_on_namespace_replayers_unlocked);
m_on_namespace_replayers_unlocked = nullptr;
}
// async version
template <typename L>
void with_namespace_replayers(L &&callback, Context *on_finish) {
std::lock_guard locker{m_lock};
on_finish = librbd::util::create_async_context_callback(
m_threads->work_queue, new LambdaContext(
[this, on_finish](int r) {
{
std::lock_guard locker{m_lock};
ceph_assert(m_namespace_replayers_locked);
m_namespace_replayers_locked = false;
if (m_on_namespace_replayers_unlocked != nullptr) {
m_namespace_replayers_locked = true;
m_threads->work_queue->queue(m_on_namespace_replayers_unlocked);
m_on_namespace_replayers_unlocked = nullptr;
}
}
on_finish->complete(r);
}));
auto on_lock = new LambdaContext(
[this, callback, on_finish](int) {
std::lock_guard locker{m_lock};
ceph_assert(m_namespace_replayers_locked);
callback(on_finish);
});
if (m_namespace_replayers_locked) {
ceph_assert(m_on_namespace_replayers_unlocked == nullptr);
m_on_namespace_replayers_unlocked = on_lock;
return;
}
m_namespace_replayers_locked = true;
m_threads->work_queue->queue(on_lock);
}
void handle_remote_pool_meta_updated(const RemotePoolMeta& remote_pool_meta);
Threads<ImageCtxT> *m_threads;
ServiceDaemon<ImageCtxT> *m_service_daemon;
journal::CacheManagerHandler *m_cache_manager_handler;
PoolMetaCache* m_pool_meta_cache;
int64_t m_local_pool_id = -1;
PeerSpec m_peer;
std::vector<const char*> m_args;
mutable ceph::mutex m_lock;
ceph::condition_variable m_cond;
std::string m_site_name;
bool m_stopping = false;
bool m_manual_stop = false;
bool m_blocklisted = false;
RadosRef m_local_rados;
RadosRef m_remote_rados;
librados::IoCtx m_local_io_ctx;
librados::IoCtx m_remote_io_ctx;
std::string m_local_mirror_uuid;
RemotePoolMeta m_remote_pool_meta;
std::unique_ptr<remote_pool_poller::Listener> m_remote_pool_poller_listener;
std::unique_ptr<RemotePoolPoller<ImageCtxT>> m_remote_pool_poller;
std::unique_ptr<NamespaceReplayer<ImageCtxT>> m_default_namespace_replayer;
std::map<std::string, NamespaceReplayer<ImageCtxT> *> m_namespace_replayers;
std::string m_asok_hook_name;
AdminSocketHook *m_asok_hook = nullptr;
service_daemon::CalloutId m_callout_id = service_daemon::CALLOUT_ID_NONE;
bool m_leader = false;
bool m_namespace_replayers_locked = false;
Context *m_on_namespace_replayers_unlocked = nullptr;
class PoolReplayerThread : public Thread {
PoolReplayer *m_pool_replayer;
public:
PoolReplayerThread(PoolReplayer *pool_replayer)
: m_pool_replayer(pool_replayer) {
}
void *entry() override {
m_pool_replayer->run();
return 0;
}
} m_pool_replayer_thread;
  // Adapter that forwards leader-election callbacks from the LeaderWatcher
  // to the owning PoolReplayer.
  class LeaderListener : public leader_watcher::Listener {
  public:
    LeaderListener(PoolReplayer *pool_replayer)
      : m_pool_replayer(pool_replayer) {
    }

  protected:
    // Invoked after this instance has acquired the leader role.
    void post_acquire_handler(Context *on_finish) override {
      m_pool_replayer->handle_post_acquire_leader(on_finish);
    }

    // Invoked before this instance releases the leader role.
    void pre_release_handler(Context *on_finish) override {
      m_pool_replayer->handle_pre_release_leader(on_finish);
    }

    // Invoked when the identity of the current leader changes.
    void update_leader_handler(
        const std::string &leader_instance_id) override {
      m_pool_replayer->handle_update_leader(leader_instance_id);
    }

    // Invoked when mirror daemon instances are added.
    void handle_instances_added(const InstanceIds& instance_ids) override {
      m_pool_replayer->handle_instances_added(instance_ids);
    }

    // Invoked when mirror daemon instances are removed.
    void handle_instances_removed(const InstanceIds& instance_ids) override {
      m_pool_replayer->handle_instances_removed(instance_ids);
    }

  private:
    PoolReplayer *m_pool_replayer;
  } m_leader_listener;
std::unique_ptr<LeaderWatcher<ImageCtxT>> m_leader_watcher;
std::unique_ptr<Throttler<ImageCtxT>> m_image_sync_throttler;
std::unique_ptr<Throttler<ImageCtxT>> m_image_deletion_throttler;
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::PoolReplayer<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_POOL_REPLAYER_H
| 8,480 | 28.346021 | 80 | h |
null | ceph-main/src/tools/rbd_mirror/PoolWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_POOL_WATCHER_H
#define CEPH_RBD_MIRROR_POOL_WATCHER_H
#include <map>
#include <memory>
#include <set>
#include <string>
#include "common/AsyncOpTracker.h"
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "include/rados/librados.hpp"
#include "tools/rbd_mirror/Types.h"
#include <boost/functional/hash.hpp>
#include <boost/optional.hpp>
#include "include/ceph_assert.h"
#include "tools/rbd_mirror/pool_watcher/Types.h"
namespace librbd { struct ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct Threads;
/**
* Keeps track of images that have mirroring enabled within all
* pools.
*/
template <typename ImageCtxT = librbd::ImageCtx>
class PoolWatcher {
public:
  // Factory helper; caller owns the returned instance.
  static PoolWatcher* create(Threads<ImageCtxT> *threads,
                             librados::IoCtx &io_ctx,
                             const std::string& mirror_uuid,
                             pool_watcher::Listener &listener) {
    return new PoolWatcher(threads, io_ctx, mirror_uuid, listener);
  }

  PoolWatcher(Threads<ImageCtxT> *threads,
              librados::IoCtx &io_ctx,
              const std::string& mirror_uuid,
              pool_watcher::Listener &listener);
  ~PoolWatcher();
  // non-copyable: owns watcher / async-op state
  PoolWatcher(const PoolWatcher&) = delete;
  PoolWatcher& operator=(const PoolWatcher&) = delete;

  bool is_blocklisted() const;

  void init(Context *on_finish = nullptr);
  void shut_down(Context *on_finish);

  // Number of mirrored images currently known to the watcher.
  inline uint64_t get_image_count() const {
    std::lock_guard locker{m_lock};
    return m_image_ids.size();
  }

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   *  INIT
   *    |
   *    v
   * REGISTER_WATCHER
   *    |
   *    |/--------------------------------\
   *    |                                 |
   *    v                                 |
   * REFRESH_IMAGES                       |
   *    |                                 |
   *    |/----------------------------\   |
   *    |                             |   |
   *    v                             |   |
   * NOTIFY_LISTENER                  |   |
   *    |                             |   |
   *    v                             |   |
   *  IDLE ---\                       |   |
   *    |     |                       |   |
   *    |     |\---> IMAGE_UPDATED    |   |
   *    |     |         |             |   |
   *    |     |         v             |   |
   *    |     |     GET_IMAGE_NAME ---/   |
   *    |     |                           |
   *    |     \----> WATCH_ERROR ---------/
   *    v
   * SHUT_DOWN
   *    |
   *    v
   * UNREGISTER_WATCHER
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  class MirroringWatcher;

  Threads<ImageCtxT> *m_threads;
  librados::IoCtx m_io_ctx;
  std::string m_mirror_uuid;
  pool_watcher::Listener &m_listener;

  // scratch state for an in-flight refresh
  ImageIds m_refresh_image_ids;
  bufferlist m_out_bl;

  // guards all mutable state below
  mutable ceph::mutex m_lock;

  Context *m_on_init_finish = nullptr;

  // the current, listener-visible set of mirrored image ids
  ImageIds m_image_ids;

  // pending add/remove deltas accumulated while a notification is in flight
  bool m_pending_updates = false;
  bool m_notify_listener_in_progress = false;
  ImageIds m_pending_image_ids;
  ImageIds m_pending_added_image_ids;
  ImageIds m_pending_removed_image_ids;

  MirroringWatcher *m_mirroring_watcher;

  Context *m_timer_ctx = nullptr;

  AsyncOpTracker m_async_op_tracker;
  bool m_blocklisted = false;
  bool m_shutting_down = false;
  bool m_image_ids_invalid = true;
  bool m_refresh_in_progress = false;
  bool m_deferred_refresh = false;

  void register_watcher();
  void handle_register_watcher(int r);
  void unregister_watcher();

  void refresh_images();
  void handle_refresh_images(int r);

  void schedule_refresh_images(double interval);
  void process_refresh_images();

  void handle_rewatch_complete(int r);
  void handle_image_updated(const std::string &image_id,
                            const std::string &global_image_id,
                            bool enabled);

  void schedule_listener();
  void notify_listener();
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::PoolWatcher<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_POOL_WATCHER_H
| 4,213 | 25.012346 | 70 | h |
null | ceph-main/src/tools/rbd_mirror/RemotePoolPoller.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_REMOTE_POOL_POLLER_H
#define CEPH_RBD_MIRROR_REMOTE_POOL_POLLER_H
#include "include/rados/librados.hpp"
#include "tools/rbd_mirror/Types.h"
#include <string>
struct Context;
namespace librbd { struct ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct Threads;
namespace remote_pool_poller {
// Callback interface invoked by RemotePoolPoller when it observes updated
// remote pool metadata.
struct Listener {
  virtual ~Listener() {}

  virtual void handle_updated(const RemotePoolMeta& remote_pool_meta) = 0;
};
}; // namespace remote_pool_poller
// Periodically polls a remote pool's mirroring metadata (mirror uuid and
// peer list) and notifies a listener when it changes.
template <typename ImageCtxT>
class RemotePoolPoller {
public:
  // Factory helper; caller owns the returned instance.
  static RemotePoolPoller* create(
      Threads<ImageCtxT>* threads,
      librados::IoCtx& remote_io_ctx,
      const std::string& site_name,
      const std::string& local_mirror_uuid,
      remote_pool_poller::Listener& listener) {
    return new RemotePoolPoller(threads, remote_io_ctx, site_name,
                                local_mirror_uuid, listener);
  }

  RemotePoolPoller(
      Threads<ImageCtxT>* threads,
      librados::IoCtx& remote_io_ctx,
      const std::string& site_name,
      const std::string& local_mirror_uuid,
      remote_pool_poller::Listener& listener)
    : m_threads(threads),
      m_remote_io_ctx(remote_io_ctx),
      m_site_name(site_name),
      m_local_mirror_uuid(local_mirror_uuid),
      m_listener(listener) {
  }
  ~RemotePoolPoller();

  void init(Context* on_finish);
  void shut_down(Context* on_finish);

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |/----------------------------\
   *    |                             |
   *    v                             |
   * MIRROR_UUID_GET                  |
   *    |                             |
   *    v                             |
   * MIRROR_PEER_PING                 |
   *    |                             |
   *    v                             |
   * MIRROR_PEER_LIST                 |
   *    |                             |
   *    v                             |
   * MIRROR_UUID_GET                  |
   *    |                             |
   *    v (skip if no changes)        |
   * NOTIFY_LISTENER                  |
   *    |                             |
   *    |     (repeat periodically)   |
   *    |\----------------------------/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  enum State {
    STATE_INITIALIZING,
    STATE_POLLING,
    STATE_SHUTTING_DOWN
  };

  Threads<ImageCtxT>* m_threads;
  librados::IoCtx& m_remote_io_ctx;
  std::string m_site_name;
  std::string m_local_mirror_uuid;
  remote_pool_poller::Listener& m_listener;

  bufferlist m_out_bl;

  // most recently observed remote metadata and whether it changed since
  // the last listener notification
  RemotePoolMeta m_remote_pool_meta;
  bool m_updated = false;

  State m_state = STATE_INITIALIZING;
  Context* m_timer_task = nullptr;
  Context* m_on_finish = nullptr;

  void get_mirror_uuid();
  void handle_get_mirror_uuid(int r);

  void mirror_peer_ping();
  void handle_mirror_peer_ping(int r);

  void mirror_peer_list();
  void handle_mirror_peer_list(int r);

  void notify_listener();

  void schedule_task(int r);
  void handle_task();
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::RemotePoolPoller<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_REMOTE_POOL_POLLER_H
| 3,308 | 23.69403 | 74 | h |
null | ceph-main/src/tools/rbd_mirror/ServiceDaemon.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_SERVICE_DAEMON_H
#define CEPH_RBD_MIRROR_SERVICE_DAEMON_H
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/service_daemon/Types.h"
#include <map>
#include <string>
struct Context;
namespace librbd { struct ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct Threads;
// Publishes rbd-mirror daemon state (pools, namespaces, callouts and
// free-form attributes) to the cluster's service-daemon status reporting.
template <typename ImageCtxT = librbd::ImageCtx>
class ServiceDaemon {
public:
  ServiceDaemon(CephContext *cct, RadosRef rados, Threads<ImageCtxT>* threads);
  ~ServiceDaemon();

  int init();

  void add_pool(int64_t pool_id, const std::string& pool_name);
  void remove_pool(int64_t pool_id);

  void add_namespace(int64_t pool_id, const std::string& namespace_name);
  void remove_namespace(int64_t pool_id, const std::string& namespace_name);

  // Returns the callout id assigned to (or reused for) this callout.
  uint64_t add_or_update_callout(int64_t pool_id, uint64_t callout_id,
                                 service_daemon::CalloutLevel callout_level,
                                 const std::string& text);
  void remove_callout(int64_t pool_id, uint64_t callout_id);

  void add_or_update_attribute(int64_t pool_id, const std::string& key,
                               const service_daemon::AttributeValue& value);
  void add_or_update_namespace_attribute(
      int64_t pool_id, const std::string& namespace_name,
      const std::string& key, const service_daemon::AttributeValue& value);
  void remove_attribute(int64_t pool_id, const std::string& key);

private:
  // A single status callout: severity level plus human-readable text.
  struct Callout {
    service_daemon::CalloutLevel level;
    std::string text;

    Callout() : level(service_daemon::CALLOUT_LEVEL_INFO) {
    }
    Callout(service_daemon::CalloutLevel level, const std::string& text)
      : level(level), text(text) {
    }
  };
  typedef std::map<uint64_t, Callout> Callouts;
  typedef std::map<std::string, service_daemon::AttributeValue> Attributes;
  typedef std::map<std::string, Attributes> NamespaceAttributes;

  // Per-pool status: callouts plus pool- and namespace-level attributes.
  struct Pool {
    std::string name;
    Callouts callouts;
    Attributes attributes;
    NamespaceAttributes ns_attributes;

    Pool(const std::string& name) : name(name) {
    }
  };
  typedef std::map<int64_t, Pool> Pools;

  CephContext *m_cct;
  RadosRef m_rados;
  Threads<ImageCtxT>* m_threads;

  // guards m_pools and m_timer_ctx
  ceph::mutex m_lock = ceph::make_mutex("rbd::mirror::ServiceDaemon");
  Pools m_pools;
  uint64_t m_callout_id = service_daemon::CALLOUT_ID_NONE;
  Context* m_timer_ctx = nullptr;

  void schedule_update_status();
  void update_status();
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::ServiceDaemon<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_SERVICE_DAEMON_H
| 2,775 | 28.221053 | 79 | h |
null | ceph-main/src/tools/rbd_mirror/Threads.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_THREADS_H
#define CEPH_RBD_MIRROR_THREADS_H
#include "include/common_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include <memory>
class ThreadPool;
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace asio { struct ContextWQ; }
} // namespace librbd
namespace rbd {
namespace mirror {
// Shared threading resources (ASIO engine, work queue and safe timer) used
// by the rbd-mirror components.
template <typename ImageCtxT = librbd::ImageCtx>
class Threads {
public:
  librbd::AsioEngine* asio_engine = nullptr;
  librbd::asio::ContextWQ* work_queue = nullptr;

  SafeTimer *timer = nullptr;
  // must be held when scheduling/cancelling events on `timer`
  ceph::mutex timer_lock = ceph::make_mutex("Threads::timer_lock");

  explicit Threads(std::shared_ptr<librados::Rados>& rados);
  // non-copyable: owns the engine/timer resources
  Threads(const Threads&) = delete;
  Threads& operator=(const Threads&) = delete;
  ~Threads();
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::Threads<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_THREADS_H
| 1,059 | 22.043478 | 70 | h |
null | ceph-main/src/tools/rbd_mirror/Throttler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef RBD_MIRROR_THROTTLER_H
#define RBD_MIRROR_THROTTLER_H
#include <list>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include "common/ceph_mutex.h"
#include "common/config_obs.h"
#include "include/common_fwd.h"
class Context;
namespace ceph { class Formatter; }
namespace librbd { class ImageCtx; }
namespace rbd {
namespace mirror {
// Limits the number of concurrently running operations; the limit is read
// from the configuration key supplied at construction and tracked via the
// config-observer interface.
template <typename ImageCtxT = librbd::ImageCtx>
class Throttler : public md_config_obs_t {
public:
  // Factory helper; release with destroy().
  static Throttler *create(
      CephContext *cct,
      const std::string &config_key) {
    return new Throttler(cct, config_key);
  }
  void destroy() {
    delete this;
  }

  Throttler(CephContext *cct,
            const std::string &config_key);
  ~Throttler() override;

  void set_max_concurrent_ops(uint32_t max);
  // Queue (or immediately start) the op identified by (ns, id); on_start
  // is completed when the op is allowed to proceed.
  void start_op(const std::string &ns, const std::string &id,
                Context *on_start);
  bool cancel_op(const std::string &ns, const std::string &id);
  void finish_op(const std::string &ns, const std::string &id);
  // Complete all queued ops for the namespace with result `r`.
  void drain(const std::string &ns, int r);

  void print_status(ceph::Formatter *f);

private:
  // ops are keyed by (namespace, id)
  typedef std::pair<std::string, std::string> Id;

  CephContext *m_cct;
  const std::string m_config_key;
  mutable const char* m_config_keys[2];

  ceph::mutex m_lock;
  uint32_t m_max_concurrent_ops;
  std::list<Id> m_queue;
  std::map<Id, Context *> m_queued_ops;
  std::set<Id> m_inflight_ops;

  const char **get_tracked_conf_keys() const override;
  void handle_conf_change(const ConfigProxy& conf,
                          const std::set<std::string> &changed) override;
};
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::Throttler<librbd::ImageCtx>;
#endif // RBD_MIRROR_THROTTLER_H
| 1,856 | 23.76 | 73 | h |
null | ceph-main/src/tools/rbd_mirror/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_TYPES_H
#define CEPH_RBD_MIRROR_TYPES_H
#include <iostream>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
namespace rbd {
namespace mirror {
template <typename> struct MirrorStatusUpdater;
// Performance counters
// Perf-counter indices: journal-replay counters first, then
// snapshot-replay counters.
enum {
  l_rbd_mirror_journal_first = 27000,
  l_rbd_mirror_journal_entries,
  l_rbd_mirror_journal_replay_bytes,
  l_rbd_mirror_journal_replay_latency,
  l_rbd_mirror_journal_last,
  l_rbd_mirror_snapshot_first,
  l_rbd_mirror_snapshot_snapshots,
  l_rbd_mirror_snapshot_sync_time,
  l_rbd_mirror_snapshot_sync_bytes,
  // per-image only counters below
  l_rbd_mirror_snapshot_remote_timestamp,
  l_rbd_mirror_snapshot_local_timestamp,
  l_rbd_mirror_snapshot_last_sync_time,
  l_rbd_mirror_snapshot_last_sync_bytes,
  l_rbd_mirror_snapshot_last,
};
typedef std::shared_ptr<librados::Rados> RadosRef;
typedef std::shared_ptr<librados::IoCtx> IoCtxRef;
typedef std::shared_ptr<librbd::Image> ImageRef;
// Identity of a mirrored image: the cluster-wide global image id plus the
// pool-local image id.
struct ImageId {
  std::string global_id;
  std::string id;

  explicit ImageId(const std::string &global_id) : global_id(global_id) {
  }
  ImageId(const std::string &global_id, const std::string &id)
    : global_id(global_id), id(id) {
  }

  // Two ids are equal only when both components match.
  inline bool operator==(const ImageId &rhs) const {
    if (global_id != rhs.global_id) {
      return false;
    }
    return id == rhs.id;
  }

  // Ordering is by global id alone; the pool-local id is ignored, so an
  // ImageIds set is effectively keyed on the global id.
  inline bool operator<(const ImageId &rhs) const {
    return global_id.compare(rhs.global_id) < 0;
  }
};
std::ostream &operator<<(std::ostream &, const ImageId &image_id);
typedef std::set<ImageId> ImageIds;
// Mirroring metadata for the local side of a pool: its mirror uuid.
struct LocalPoolMeta {
  LocalPoolMeta() {}
  LocalPoolMeta(const std::string& mirror_uuid)
    : mirror_uuid(mirror_uuid) {
  }

  std::string mirror_uuid;
};
std::ostream& operator<<(std::ostream& lhs,
const LocalPoolMeta& local_pool_meta);
// Mirroring metadata for a remote pool: the remote cluster's mirror uuid
// and the uuid of the peer entry used to reach it.
struct RemotePoolMeta {
  RemotePoolMeta() {}
  RemotePoolMeta(const std::string& mirror_uuid,
                 const std::string& mirror_peer_uuid)
    : mirror_uuid(mirror_uuid),
      mirror_peer_uuid(mirror_peer_uuid) {
  }

  std::string mirror_uuid;
  std::string mirror_peer_uuid;
};
std::ostream& operator<<(std::ostream& lhs,
const RemotePoolMeta& remote_pool_meta);
// Runtime state for a single remote peer of a mirrored pool.
template <typename I>
struct Peer {
  std::string uuid;
  mutable librados::IoCtx io_ctx;
  RemotePoolMeta remote_pool_meta;
  MirrorStatusUpdater<I>* mirror_status_updater = nullptr;

  Peer() {
  }

  Peer(const std::string& uuid,
       librados::IoCtx& io_ctx,
       const RemotePoolMeta& remote_pool_meta,
       MirrorStatusUpdater<I>* mirror_status_updater)
    : uuid(uuid),  // BUG FIX: the uuid parameter was previously accepted but
                   // never stored, leaving every constructed Peer with an
                   // empty uuid -- and operator< below orders by uuid only,
                   // so all such peers compared equivalent in std::set
      io_ctx(io_ctx),
      remote_pool_meta(remote_pool_meta),
      mirror_status_updater(mirror_status_updater) {
  }

  // Peers are ordered (and deduplicated) by uuid alone.
  inline bool operator<(const Peer &rhs) const {
    return uuid < rhs.uuid;
  }
};
// Stream a peer by emitting only its remote pool metadata (the uuid,
// io_ctx and status updater are not printed).
template <typename I>
std::ostream& operator<<(std::ostream& lhs, const Peer<I>& peer) {
  return lhs << peer.remote_pool_meta;
}
// Connection specification for a remote mirroring peer (identity plus
// optional monitor address / key overrides).
struct PeerSpec {
  PeerSpec() = default;
  PeerSpec(const std::string &uuid, const std::string &cluster_name,
           const std::string &client_name)
    : uuid(uuid), cluster_name(cluster_name), client_name(client_name)
  {
  }

  // Construct from the librbd peer-site description.
  PeerSpec(const librbd::mirror_peer_site_t &peer) :
    uuid(peer.uuid),
    cluster_name(peer.site_name),
    client_name(peer.client_name)
  {
  }

  std::string uuid;
  std::string cluster_name;
  std::string client_name;

  /// optional config properties
  std::string mon_host;
  std::string key;

  bool operator==(const PeerSpec& rhs) const {
    return (uuid == rhs.uuid &&
            cluster_name == rhs.cluster_name &&
            client_name == rhs.client_name &&
            mon_host == rhs.mon_host &&
            key == rhs.key);
  }

  // Lexicographic ordering over all five fields.
  bool operator<(const PeerSpec& rhs) const {
    if (uuid != rhs.uuid) {
      return uuid < rhs.uuid;
    } else if (cluster_name != rhs.cluster_name) {
      return cluster_name < rhs.cluster_name;
    } else if (client_name != rhs.client_name) {
      return client_name < rhs.client_name;
    } else if (mon_host != rhs.mon_host) {
      // BUG FIX: this branch previously tested `mon_host < rhs.mon_host`,
      // so when mon_host > rhs.mon_host the comparison fell through to the
      // key tie-break.  That violates strict weak ordering (a < b and
      // b < a could both hold), which is undefined behaviour when PeerSpec
      // is used as a std::set / std::map key.
      return mon_host < rhs.mon_host;
    } else {
      return key < rhs.key;
    }
  }
};
std::ostream& operator<<(std::ostream& lhs, const PeerSpec &peer);
} // namespace mirror
} // namespace rbd
#endif // CEPH_RBD_MIRROR_TYPES_H
| 4,427 | 24.744186 | 73 | h |
null | ceph-main/src/tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_IMAGE_DELETER_SNAPSHOT_PURGE_REQUEST_H
#define CEPH_RBD_MIRROR_IMAGE_DELETER_SNAPSHOT_PURGE_REQUEST_H
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
#include <vector>
class Context;
namespace librbd { struct ImageCtx; }
namespace rbd {
namespace mirror {
namespace image_deleter {
// Async state machine that opens an image and removes all of its
// snapshots (unprotecting first where required), then closes it.
template <typename ImageCtxT = librbd::ImageCtx>
class SnapshotPurgeRequest {
public:
  // Factory helper; the request deletes itself on completion.
  static SnapshotPurgeRequest* create(librados::IoCtx &io_ctx,
                                      const std::string &image_id,
                                      Context *on_finish) {
    return new SnapshotPurgeRequest(io_ctx, image_id, on_finish);
  }

  SnapshotPurgeRequest(librados::IoCtx &io_ctx, const std::string &image_id,
                       Context *on_finish)
    : m_io_ctx(io_ctx), m_image_id(image_id), m_on_finish(on_finish) {
  }

  void send();

private:
  /*
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * OPEN_IMAGE
   *    |
   *    v
   * ACQUIRE_LOCK
   *    |
   *    |   (repeat for each snapshot)
   *    |/------------------------\
   *    |                         |
   *    v (skip if not needed)    |
   * SNAP_UNPROTECT               |
   *    |                         |
   *    v (skip if not needed)    |
   * SNAP_REMOVE -----------------/
   *    |
   *    v
   * CLOSE_IMAGE
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  librados::IoCtx &m_io_ctx;
  std::string m_image_id;
  Context *m_on_finish;

  ImageCtxT *m_image_ctx = nullptr;
  // first error encountered; propagated to m_on_finish
  int m_ret_val = 0;

  // snapshots remaining to purge plus details of the one being processed
  std::vector<librados::snap_t> m_snaps;
  cls::rbd::SnapshotNamespace m_snap_namespace;
  std::string m_snap_name;

  void open_image();
  void handle_open_image(int r);

  void acquire_lock();
  void handle_acquire_lock(int r);

  void start_snap_unprotect();
  void snap_unprotect();
  void handle_snap_unprotect(int r);

  void snap_remove();
  void handle_snap_remove(int r);

  void close_image();
  void handle_close_image(int r);

  void finish(int r);

  Context *start_lock_op(int* r);
};
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::image_deleter::SnapshotPurgeRequest<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_IMAGE_DELETER_SNAPSHOT_PURGE_REQUEST_H
| 2,399 | 21.641509 | 89 | h |
null | ceph-main/src/tools/rbd_mirror/image_deleter/TrashMoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_IMAGE_DELETE_TRASH_MOVE_REQUEST_H
#define CEPH_RBD_MIRROR_IMAGE_DELETE_TRASH_MOVE_REQUEST_H
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/mirror/Types.h"
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
namespace asio { struct ContextWQ; }
} // namespace librbd
namespace rbd {
namespace mirror {
namespace image_deleter {
// Async state machine that disables mirroring on an image and moves it to
// the RBD trash, notifying the trash watcher when done.
template <typename ImageCtxT = librbd::ImageCtx>
class TrashMoveRequest {
public:
  // Factory helper; the request deletes itself on completion.
  static TrashMoveRequest* create(librados::IoCtx& io_ctx,
                                  const std::string& global_image_id,
                                  bool resync,
                                  librbd::asio::ContextWQ* op_work_queue,
                                  Context* on_finish) {
    return new TrashMoveRequest(io_ctx, global_image_id, resync, op_work_queue,
                                on_finish);
  }

  TrashMoveRequest(librados::IoCtx& io_ctx, const std::string& global_image_id,
                   bool resync, librbd::asio::ContextWQ* op_work_queue,
                   Context* on_finish)
    : m_io_ctx(io_ctx), m_global_image_id(global_image_id), m_resync(resync),
      m_op_work_queue(op_work_queue), m_on_finish(on_finish) {
  }

  void send();

private:
  /*
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * GET_MIRROR_IMAGE_ID
   *    |
   *    v
   * GET_MIRROR_INFO
   *    |
   *    v
   * DISABLE_MIRROR_IMAGE
   *    |
   *    v
   * OPEN_IMAGE
   *    |
   *    v (skip if not needed)
   * RESET_JOURNAL
   *    |
   *    v (skip if not needed)
   * ACQUIRE_LOCK
   *    |
   *    v
   * TRASH_MOVE
   *    |
   *    v
   * REMOVE_MIRROR_IMAGE
   *    |
   *    v
   * CLOSE_IMAGE
   *    |
   *    v
   * NOTIFY_TRASH_ADD
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  librados::IoCtx &m_io_ctx;
  std::string m_global_image_id;
  // when set, a resync was requested for this image
  bool m_resync;
  librbd::asio::ContextWQ *m_op_work_queue;
  Context *m_on_finish;

  ceph::bufferlist m_out_bl;
  std::string m_image_id;
  cls::rbd::MirrorImage m_mirror_image;
  librbd::mirror::PromotionState m_promotion_state;
  std::string m_primary_mirror_uuid;
  cls::rbd::TrashImageSpec m_trash_image_spec;
  ImageCtxT *m_image_ctx = nullptr;  // fixed stray double semicolon
  // first error encountered; propagated to m_on_finish
  int m_ret_val = 0;
  bool m_moved_to_trash = false;

  void get_mirror_image_id();
  void handle_get_mirror_image_id(int r);

  void get_mirror_info();
  void handle_get_mirror_info(int r);

  void disable_mirror_image();
  void handle_disable_mirror_image(int r);

  void open_image();
  void handle_open_image(int r);

  void reset_journal();
  void handle_reset_journal(int r);

  void acquire_lock();
  void handle_acquire_lock(int r);

  void trash_move();
  void handle_trash_move(int r);

  void remove_mirror_image();
  void handle_remove_mirror_image(int r);

  void close_image();
  void handle_close_image(int r);

  void notify_trash_add();
  void handle_notify_trash_add(int r);

  void finish(int r);
};
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::image_deleter::TrashMoveRequest<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_IMAGE_DELETE_TRASH_WATCHER_H
| 3,322 | 22.237762 | 85 | h |
null | ceph-main/src/tools/rbd_mirror/image_deleter/TrashRemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_IMAGE_DELETER_TRASH_REMOVE_REQUEST_H
#define CEPH_RBD_MIRROR_IMAGE_DELETER_TRASH_REMOVE_REQUEST_H
#include "include/rados/librados.hpp"
#include "include/buffer.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/internal.h"
#include "tools/rbd_mirror/image_deleter/Types.h"
#include <string>
#include <vector>
class Context;
class ContextWQ;
namespace librbd {
struct ImageCtx;
namespace asio { struct ContextWQ; }
} // namespace librbd
namespace rbd {
namespace mirror {
namespace image_deleter {
// Async state machine that permanently removes a trashed image: purges its
// snapshots, deletes the image and notifies the trash watcher.
template <typename ImageCtxT = librbd::ImageCtx>
class TrashRemoveRequest {
public:
  // Factory helper; the request deletes itself on completion.
  static TrashRemoveRequest* create(librados::IoCtx &io_ctx,
                                    const std::string &image_id,
                                    ErrorResult *error_result,
                                    librbd::asio::ContextWQ *op_work_queue,
                                    Context *on_finish) {
    return new TrashRemoveRequest(io_ctx, image_id, error_result, op_work_queue,
                                  on_finish);
  }

  TrashRemoveRequest(librados::IoCtx &io_ctx, const std::string &image_id,
                     ErrorResult *error_result,
                     librbd::asio::ContextWQ *op_work_queue,
                     Context *on_finish)
    : m_io_ctx(io_ctx), m_image_id(image_id), m_error_result(error_result),
      m_op_work_queue(op_work_queue), m_on_finish(on_finish) {
  }

  void send();

private:
  /*
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * GET_TRASH_IMAGE_SPEC
   *    |
   *    v
   * SET_TRASH_STATE
   *    |
   *    v
   * GET_SNAP_CONTEXT
   *    |
   *    v
   * PURGE_SNAPSHOTS
   *    |
   *    v
   * TRASH_REMOVE
   *    |
   *    v
   * NOTIFY_TRASH_REMOVE
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  librados::IoCtx &m_io_ctx;
  std::string m_image_id;
  // out-param describing how a failure should be treated by the caller
  ErrorResult *m_error_result;
  librbd::asio::ContextWQ *m_op_work_queue;
  Context *m_on_finish;

  ceph::bufferlist m_out_bl;
  cls::rbd::TrashImageSpec m_trash_image_spec;
  bool m_has_snapshots = false;
  librbd::NoOpProgressContext m_progress_ctx;

  void get_trash_image_spec();
  void handle_get_trash_image_spec(int r);

  void set_trash_state();
  void handle_set_trash_state(int r);

  void get_snap_context();
  void handle_get_snap_context(int r);

  void purge_snapshots();
  void handle_purge_snapshots(int r);

  void remove_image();
  void handle_remove_image(int r);

  void notify_trash_removed();
  void handle_notify_trash_removed(int r);

  void finish(int r);
};
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::image_deleter::TrashRemoveRequest<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_IMAGE_DELETER_TRASH_REMOVE_REQUEST_H
| 2,883 | 23.440678 | 87 | h |
null | ceph-main/src/tools/rbd_mirror/image_deleter/TrashWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_MIRROR_IMAGE_DELETE_TRASH_WATCHER_H
#define CEPH_RBD_MIRROR_IMAGE_DELETE_TRASH_WATCHER_H
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "librbd/TrashWatcher.h"
#include <set>
#include <string>
struct Context;
namespace librbd { struct ImageCtx; }
namespace rbd {
namespace mirror {
template <typename> struct Threads;
namespace image_deleter {
struct TrashListener;
template <typename ImageCtxT = librbd::ImageCtx>
class TrashWatcher : public librbd::TrashWatcher<ImageCtxT> {
public:
static TrashWatcher* create(librados::IoCtx &io_ctx,
Threads<ImageCtxT> *threads,
TrashListener& trash_listener) {
return new TrashWatcher(io_ctx, threads, trash_listener);
}
TrashWatcher(librados::IoCtx &io_ctx, Threads<ImageCtxT> *threads,
TrashListener& trash_listener);
TrashWatcher(const TrashWatcher&) = delete;
TrashWatcher& operator=(const TrashWatcher&) = delete;
void init(Context *on_finish);
void shut_down(Context *on_finish);
protected:
void handle_image_added(const std::string &image_id,
const cls::rbd::TrashImageSpec& spec) override;
void handle_image_removed(const std::string &image_id) override;
void handle_rewatch_complete(int r) override;
private:
/**
* @verbatim
*
* <start>
* |
* v
* INIT
* |
* v
* CREATE_TRASH
* |
* v
* REGISTER_WATCHER
* |
* |/--------------------------------\
* | |
* |/---------\ |
* | | |
* v | (more images) |
* TRASH_LIST ---/ |
* | |
* |/----------------------------\ |
* | | |
* v | |
* <idle> --\ | |
* | | | |
* | |\---> IMAGE_ADDED -----/ |
* | | |
* | \----> WATCH_ERROR ---------/
* v
* SHUT_DOWN
* |
* v
* UNREGISTER_WATCHER
* |
* v
* <finish>
*
* @endverbatim
*/
librados::IoCtx m_io_ctx;
Threads<ImageCtxT> *m_threads;
TrashListener& m_trash_listener;
std::string m_last_image_id;
bufferlist m_out_bl;
mutable ceph::mutex m_lock;
Context *m_on_init_finish = nullptr;
Context *m_timer_ctx = nullptr;
AsyncOpTracker m_async_op_tracker;
bool m_trash_list_in_progress = false;
bool m_deferred_trash_list = false;
bool m_shutting_down = false;
void register_watcher();
void handle_register_watcher(int r);
void create_trash();
void handle_create_trash(int r);
void unregister_watcher(Context* on_finish);
void handle_unregister_watcher(int r, Context* on_finish);
void trash_list(bool initial_request);
void handle_trash_list(int r);
void schedule_trash_list(double interval);
void process_trash_list();
void get_mirror_uuid();
void handle_get_mirror_uuid(int r);
void add_image(const std::string& image_id,
const cls::rbd::TrashImageSpec& spec);
};
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
extern template class rbd::mirror::image_deleter::TrashWatcher<librbd::ImageCtx>;
#endif // CEPH_RBD_MIRROR_IMAGE_DELETE_TRASH_WATCHER_H
| 3,602 | 24.735714 | 81 | h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.