// ceph-main/src/librbd/ImageCtx.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <boost/assign/list_of.hpp>
#include <stddef.h>
#include "include/neorados/RADOS.hpp"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "common/Timer.h"
#include "librbd/AsioEngine.h"
#include "librbd/AsyncRequest.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/internal.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Journal.h"
#include "librbd/LibrbdAdminSocketHook.h"
#include "librbd/ObjectMap.h"
#include "librbd/Operations.h"
#include "librbd/PluginRegistry.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/exclusive_lock/AutomaticPolicy.h"
#include "librbd/exclusive_lock/StandardPolicy.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/ImageDispatcher.h"
#include "librbd/io/ObjectDispatcher.h"
#include "librbd/io/QosImageDispatch.h"
#include "librbd/io/IoOperations.h"
#include "librbd/io/Utils.h"
#include "librbd/journal/StandardPolicy.h"
#include "librbd/operation/ResizeRequest.h"
#include "osdc/Striper.h"
#include <boost/algorithm/string/predicate.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ImageCtx: "
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
using ceph::bufferlist;
using librados::snap_t;
using librados::IoCtx;
namespace librbd {
namespace {
class SafeTimerSingleton : public CommonSafeTimer<ceph::mutex> {
public:
ceph::mutex lock = ceph::make_mutex("librbd::SafeTimerSingleton::lock");
explicit SafeTimerSingleton(CephContext *cct)
: SafeTimer(cct, lock, true) {
init();
}
~SafeTimerSingleton() {
std::lock_guard locker{lock};
shutdown();
}
};
librados::IoCtx duplicate_io_ctx(librados::IoCtx& io_ctx) {
librados::IoCtx dup_io_ctx;
dup_io_ctx.dup(io_ctx);
return dup_io_ctx;
}
} // anonymous namespace
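// Hedged usage sketch (hypothetical caller, not part of this file): the
// ImageCtx constructor below duplicates the caller-supplied IoCtx twice via
// duplicate_io_ctx(), so data_ctx and md_ctx are independent handles and the
// caller's IoCtx stays usable afterwards, e.g.:
//
//   librados::IoCtx io_ctx;
//   rados.ioctx_create("rbd", io_ctx);
//   auto* ictx = new librbd::ImageCtx("myimage", "", nullptr, io_ctx, false);
//   // io_ctx remains owned by the caller; ictx holds its own duplicates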
const string ImageCtx::METADATA_CONF_PREFIX = "conf_";
ImageCtx::ImageCtx(const string &image_name, const string &image_id,
const char *snap, IoCtx& p, bool ro)
: cct((CephContext*)p.cct()),
config(cct->_conf),
perfcounter(NULL),
snap_id(CEPH_NOSNAP),
snap_exists(true),
read_only(ro),
read_only_flags(ro ? IMAGE_READ_ONLY_FLAG_USER : 0U),
exclusive_locked(false),
name(image_name),
asio_engine(std::make_shared<AsioEngine>(p)),
rados_api(asio_engine->get_rados_api()),
data_ctx(duplicate_io_ctx(p)),
md_ctx(duplicate_io_ctx(p)),
image_watcher(NULL),
journal(NULL),
owner_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::owner_lock", this))),
image_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::image_lock", this))),
timestamp_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::timestamp_lock", this))),
async_ops_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageCtx::async_ops_lock", this))),
copyup_list_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageCtx::copyup_list_lock", this))),
extra_read_flags(0),
old_format(false),
order(0), size(0), features(0),
format_string(NULL),
id(image_id), parent(NULL),
stripe_unit(0), stripe_count(0), flags(0),
readahead(),
total_bytes_read(0),
state(new ImageState<>(this)),
operations(new Operations<>(*this)),
exclusive_lock(nullptr), object_map(nullptr),
op_work_queue(asio_engine->get_work_queue()),
plugin_registry(new PluginRegistry<ImageCtx>(this)),
event_socket_completions(32),
asok_hook(nullptr),
trace_endpoint("librbd")
{
ldout(cct, 10) << this << " " << __func__ << ": "
<< "image_name=" << image_name << ", "
<< "image_id=" << image_id << dendl;
if (snap)
snap_name = snap;
rebuild_data_io_context();
// FIPS zeroization audit 20191117: this memset is not security related.
memset(&header, 0, sizeof(header));
io_image_dispatcher = new io::ImageDispatcher<ImageCtx>(this);
io_object_dispatcher = new io::ObjectDispatcher<ImageCtx>(this);
if (cct->_conf.get_val<bool>("rbd_auto_exclusive_lock_until_manual_request")) {
exclusive_lock_policy = new exclusive_lock::AutomaticPolicy(this);
} else {
exclusive_lock_policy = new exclusive_lock::StandardPolicy(this);
}
journal_policy = new journal::StandardPolicy(this);
}
ImageCtx::ImageCtx(const string &image_name, const string &image_id,
uint64_t snap_id, IoCtx& p, bool ro)
: ImageCtx(image_name, image_id, "", p, ro) {
open_snap_id = snap_id;
}
ImageCtx::~ImageCtx() {
ldout(cct, 10) << this << " " << __func__ << dendl;
ceph_assert(config_watcher == nullptr);
ceph_assert(image_watcher == NULL);
ceph_assert(exclusive_lock == NULL);
ceph_assert(object_map == NULL);
ceph_assert(journal == NULL);
ceph_assert(asok_hook == NULL);
if (perfcounter) {
perf_stop();
}
delete[] format_string;
md_ctx.aio_flush();
if (data_ctx.is_valid()) {
data_ctx.aio_flush();
}
delete io_object_dispatcher;
delete io_image_dispatcher;
delete journal_policy;
delete exclusive_lock_policy;
delete operations;
delete state;
delete plugin_registry;
}
void ImageCtx::init() {
ceph_assert(!header_oid.empty());
ceph_assert(old_format || !id.empty());
asok_hook = new LibrbdAdminSocketHook(this);
string pname = string("librbd-") + id + string("-") +
md_ctx.get_pool_name() + string("-") + name;
if (!snap_name.empty()) {
pname += "-";
pname += snap_name;
}
trace_endpoint.copy_name(pname);
perf_start(pname);
ceph_assert(image_watcher == NULL);
image_watcher = new ImageWatcher<>(*this);
}
void ImageCtx::shutdown() {
delete image_watcher;
image_watcher = nullptr;
delete asok_hook;
asok_hook = nullptr;
}
void ImageCtx::init_layout(int64_t pool_id)
{
if (stripe_unit == 0 || stripe_count == 0) {
stripe_unit = 1ull << order;
stripe_count = 1;
}
vector<uint64_t> alignments;
alignments.push_back(stripe_count << order); // object set (in file striping terminology)
alignments.push_back(stripe_unit * stripe_count); // stripe
alignments.push_back(stripe_unit); // stripe unit
readahead.set_alignments(alignments);
layout = file_layout_t();
layout.stripe_unit = stripe_unit;
layout.stripe_count = stripe_count;
layout.object_size = 1ull << order;
layout.pool_id = pool_id; // FIXME: pool id overflow?
delete[] format_string;
size_t len = object_prefix.length() + 16;
format_string = new char[len];
if (old_format) {
snprintf(format_string, len, "%s.%%012llx", object_prefix.c_str());
} else {
snprintf(format_string, len, "%s.%%016llx", object_prefix.c_str());
}
ldout(cct, 10) << "init_layout stripe_unit " << stripe_unit
<< " stripe_count " << stripe_count
<< " object_size " << layout.object_size
<< " prefix " << object_prefix
<< " format " << format_string
<< dendl;
}
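// Worked example (RBD defaults, for illustration only): order = 22 gives
// object_size = 1 << 22 = 4 MiB; with no explicit striping, stripe_unit is
// forced to the object size and stripe_count to 1, so get_stripe_period()
// also returns 4 MiB. For a v2 image the data objects are then named via
// format_string, e.g. "rbd_data.<id>.0000000000000000" for object 0.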
void ImageCtx::perf_start(string name) {
auto perf_prio = PerfCountersBuilder::PRIO_DEBUGONLY;
if (child == nullptr) {
// ensure top-level IO stats are exported for librbd daemons
perf_prio = PerfCountersBuilder::PRIO_USEFUL;
}
PerfCountersBuilder plb(cct, name, l_librbd_first, l_librbd_last);
plb.add_u64_counter(l_librbd_rd, "rd", "Reads", "r", perf_prio);
plb.add_u64_counter(l_librbd_rd_bytes, "rd_bytes", "Data size in reads",
"rb", perf_prio, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_rd_latency, "rd_latency", "Latency of reads",
"rl", perf_prio);
plb.add_u64_counter(l_librbd_wr, "wr", "Writes", "w", perf_prio);
plb.add_u64_counter(l_librbd_wr_bytes, "wr_bytes", "Written data",
"wb", perf_prio, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_wr_latency, "wr_latency", "Write latency",
"wl", perf_prio);
plb.add_u64_counter(l_librbd_discard, "discard", "Discards");
plb.add_u64_counter(l_librbd_discard_bytes, "discard_bytes", "Discarded data", NULL, 0, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_discard_latency, "discard_latency", "Discard latency");
plb.add_u64_counter(l_librbd_flush, "flush", "Flushes");
plb.add_time_avg(l_librbd_flush_latency, "flush_latency", "Latency of flushes");
plb.add_u64_counter(l_librbd_ws, "ws", "WriteSames");
plb.add_u64_counter(l_librbd_ws_bytes, "ws_bytes", "WriteSame data", NULL, 0, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_ws_latency, "ws_latency", "WriteSame latency");
plb.add_u64_counter(l_librbd_cmp, "cmp", "CompareAndWrites");
plb.add_u64_counter(l_librbd_cmp_bytes, "cmp_bytes", "Data size in cmps", NULL, 0, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_cmp_latency, "cmp_latency", "Latency of cmps");
plb.add_u64_counter(l_librbd_snap_create, "snap_create", "Snap creations");
plb.add_u64_counter(l_librbd_snap_remove, "snap_remove", "Snap removals");
plb.add_u64_counter(l_librbd_snap_rollback, "snap_rollback", "Snap rollbacks");
plb.add_u64_counter(l_librbd_snap_rename, "snap_rename", "Snap rename");
plb.add_u64_counter(l_librbd_notify, "notify", "Updated header notifications");
plb.add_u64_counter(l_librbd_resize, "resize", "Resizes");
plb.add_u64_counter(l_librbd_readahead, "readahead", "Read ahead");
plb.add_u64_counter(l_librbd_readahead_bytes, "readahead_bytes", "Data size in read ahead", NULL, 0, unit_t(UNIT_BYTES));
plb.add_u64_counter(l_librbd_invalidate_cache, "invalidate_cache", "Cache invalidates");
plb.add_time(l_librbd_opened_time, "opened_time", "Opened time",
"ots", perf_prio);
plb.add_time(l_librbd_lock_acquired_time, "lock_acquired_time",
"Lock acquired time", "lats", perf_prio);
perfcounter = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(perfcounter);
perfcounter->tset(l_librbd_opened_time, ceph_clock_now());
}
void ImageCtx::perf_stop() {
ceph_assert(perfcounter);
cct->get_perfcounters_collection()->remove(perfcounter);
delete perfcounter;
}
void ImageCtx::set_read_flag(unsigned flag) {
extra_read_flags |= flag;
}
int ImageCtx::get_read_flags(snap_t snap_id) {
int flags = librados::OPERATION_NOFLAG | read_flags;
if (flags != 0)
return flags;
flags = librados::OPERATION_NOFLAG | extra_read_flags;
if (snap_id == LIBRADOS_SNAP_HEAD)
return flags;
if (config.get_val<bool>("rbd_balance_snap_reads"))
flags |= librados::OPERATION_BALANCE_READS;
else if (config.get_val<bool>("rbd_localize_snap_reads"))
flags |= librados::OPERATION_LOCALIZE_READS;
return flags;
}
int ImageCtx::snap_set(uint64_t in_snap_id) {
ceph_assert(ceph_mutex_is_wlocked(image_lock));
auto it = snap_info.find(in_snap_id);
if (in_snap_id != CEPH_NOSNAP && it != snap_info.end()) {
snap_id = in_snap_id;
snap_namespace = it->second.snap_namespace;
snap_name = it->second.name;
snap_exists = true;
if (data_ctx.is_valid()) {
data_ctx.snap_set_read(snap_id);
rebuild_data_io_context();
}
return 0;
}
return -ENOENT;
}
void ImageCtx::snap_unset()
{
ceph_assert(ceph_mutex_is_wlocked(image_lock));
snap_id = CEPH_NOSNAP;
snap_namespace = {};
snap_name = "";
snap_exists = true;
if (data_ctx.is_valid()) {
data_ctx.snap_set_read(snap_id);
rebuild_data_io_context();
}
}
snap_t ImageCtx::get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace,
const string& in_snap_name) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
auto it = snap_ids.find({in_snap_namespace, in_snap_name});
if (it != snap_ids.end()) {
return it->second;
}
return CEPH_NOSNAP;
}
const SnapInfo* ImageCtx::get_snap_info(snap_t in_snap_id) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
map<snap_t, SnapInfo>::const_iterator it =
snap_info.find(in_snap_id);
if (it != snap_info.end())
return &it->second;
return nullptr;
}
int ImageCtx::get_snap_name(snap_t in_snap_id,
string *out_snap_name) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_snap_name = info->name;
return 0;
}
return -ENOENT;
}
int ImageCtx::get_snap_namespace(snap_t in_snap_id,
cls::rbd::SnapshotNamespace *out_snap_namespace) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_snap_namespace = info->snap_namespace;
return 0;
}
return -ENOENT;
}
int ImageCtx::get_parent_spec(snap_t in_snap_id,
cls::rbd::ParentImageSpec *out_pspec) const
{
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_pspec = info->parent.spec;
return 0;
}
return -ENOENT;
}
uint64_t ImageCtx::get_current_size() const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
return size;
}
uint64_t ImageCtx::get_object_size() const
{
return 1ull << order;
}
string ImageCtx::get_object_name(uint64_t num) const {
return util::data_object_name(this, num);
}
uint64_t ImageCtx::get_stripe_unit() const
{
return stripe_unit;
}
uint64_t ImageCtx::get_stripe_count() const
{
return stripe_count;
}
uint64_t ImageCtx::get_stripe_period() const
{
return stripe_count * (1ull << order);
}
utime_t ImageCtx::get_create_timestamp() const
{
return create_timestamp;
}
utime_t ImageCtx::get_access_timestamp() const
{
return access_timestamp;
}
utime_t ImageCtx::get_modify_timestamp() const
{
return modify_timestamp;
}
void ImageCtx::set_access_timestamp(utime_t at)
{
ceph_assert(ceph_mutex_is_wlocked(timestamp_lock));
access_timestamp = at;
}
void ImageCtx::set_modify_timestamp(utime_t mt)
{
ceph_assert(ceph_mutex_is_locked(timestamp_lock));
modify_timestamp = mt;
}
int ImageCtx::is_snap_protected(snap_t in_snap_id,
bool *is_protected) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*is_protected =
(info->protection_status == RBD_PROTECTION_STATUS_PROTECTED);
return 0;
}
return -ENOENT;
}
int ImageCtx::is_snap_unprotected(snap_t in_snap_id,
bool *is_unprotected) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*is_unprotected =
(info->protection_status == RBD_PROTECTION_STATUS_UNPROTECTED);
return 0;
}
return -ENOENT;
}
void ImageCtx::add_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
string in_snap_name,
snap_t id, uint64_t in_size,
const ParentImageInfo &parent,
uint8_t protection_status, uint64_t flags,
utime_t timestamp)
{
ceph_assert(ceph_mutex_is_wlocked(image_lock));
snaps.push_back(id);
SnapInfo info(in_snap_name, in_snap_namespace,
in_size, parent, protection_status, flags, timestamp);
snap_info.insert({id, info});
snap_ids.insert({{in_snap_namespace, in_snap_name}, id});
}
void ImageCtx::rm_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
string in_snap_name,
snap_t id)
{
ceph_assert(ceph_mutex_is_wlocked(image_lock));
snaps.erase(std::remove(snaps.begin(), snaps.end(), id), snaps.end());
snap_info.erase(id);
snap_ids.erase({in_snap_namespace, in_snap_name});
}
uint64_t ImageCtx::get_image_size(snap_t in_snap_id) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
if (in_snap_id == CEPH_NOSNAP) {
if (!resize_reqs.empty() &&
resize_reqs.front()->shrinking()) {
return resize_reqs.front()->get_image_size();
}
return size;
}
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
return info->size;
}
return 0;
}
uint64_t ImageCtx::get_area_size(io::ImageArea area) const {
// image areas are defined only for the "opened at" snap_id
// (i.e. where encryption may be loaded)
uint64_t raw_size = get_image_size(snap_id);
if (raw_size == 0) {
return 0;
}
auto size = io::util::raw_to_area_offset(*this, raw_size);
ceph_assert(size.first <= raw_size && size.second == io::ImageArea::DATA);
switch (area) {
case io::ImageArea::DATA:
return size.first;
case io::ImageArea::CRYPTO_HEADER:
// CRYPTO_HEADER area ends where DATA area begins
return raw_size - size.first;
default:
ceph_abort();
}
}
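// Worked example (hypothetical sizes, assuming encryption is loaded): for a
// raw image of 1 GiB with a 4 MiB crypto header, raw_to_area_offset() maps
// the raw size to (1 GiB - 4 MiB, DATA), so get_area_size(DATA) returns
// 1 GiB - 4 MiB and get_area_size(CRYPTO_HEADER) returns 4 MiB. With no
// encryption loaded the mapping is 1:1 and the CRYPTO_HEADER size is 0.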
uint64_t ImageCtx::get_object_count(snap_t in_snap_id) const {
ceph_assert(ceph_mutex_is_locked(image_lock));
uint64_t image_size = get_image_size(in_snap_id);
return Striper::get_num_objects(layout, image_size);
}
bool ImageCtx::test_features(uint64_t features) const
{
std::shared_lock l{image_lock};
return test_features(features, image_lock);
}
bool ImageCtx::test_features(uint64_t in_features,
const ceph::shared_mutex &in_image_lock) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
return ((features & in_features) == in_features);
}
bool ImageCtx::test_op_features(uint64_t in_op_features) const
{
std::shared_lock l{image_lock};
return test_op_features(in_op_features, image_lock);
}
bool ImageCtx::test_op_features(uint64_t in_op_features,
const ceph::shared_mutex &in_image_lock) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
return ((op_features & in_op_features) == in_op_features);
}
int ImageCtx::get_flags(librados::snap_t _snap_id, uint64_t *_flags) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
if (_snap_id == CEPH_NOSNAP) {
*_flags = flags;
return 0;
}
const SnapInfo *info = get_snap_info(_snap_id);
if (info) {
*_flags = info->flags;
return 0;
}
return -ENOENT;
}
int ImageCtx::test_flags(librados::snap_t in_snap_id,
uint64_t flags, bool *flags_set) const
{
std::shared_lock l{image_lock};
return test_flags(in_snap_id, flags, image_lock, flags_set);
}
int ImageCtx::test_flags(librados::snap_t in_snap_id,
uint64_t flags,
const ceph::shared_mutex &in_image_lock,
bool *flags_set) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
uint64_t snap_flags;
int r = get_flags(in_snap_id, &snap_flags);
if (r < 0) {
return r;
}
*flags_set = ((snap_flags & flags) == flags);
return 0;
}
int ImageCtx::update_flags(snap_t in_snap_id, uint64_t flag, bool enabled)
{
ceph_assert(ceph_mutex_is_wlocked(image_lock));
uint64_t *_flags;
if (in_snap_id == CEPH_NOSNAP) {
_flags = &flags;
} else {
map<snap_t, SnapInfo>::iterator it = snap_info.find(in_snap_id);
if (it == snap_info.end()) {
return -ENOENT;
}
_flags = &it->second.flags;
}
if (enabled) {
(*_flags) |= flag;
} else {
(*_flags) &= ~flag;
}
return 0;
}
const ParentImageInfo* ImageCtx::get_parent_info(snap_t in_snap_id) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
if (in_snap_id == CEPH_NOSNAP)
return &parent_md;
const SnapInfo *info = get_snap_info(in_snap_id);
if (info)
return &info->parent;
return NULL;
}
int64_t ImageCtx::get_parent_pool_id(snap_t in_snap_id) const
{
const auto info = get_parent_info(in_snap_id);
if (info)
return info->spec.pool_id;
return -1;
}
string ImageCtx::get_parent_image_id(snap_t in_snap_id) const
{
const auto info = get_parent_info(in_snap_id);
if (info)
return info->spec.image_id;
return "";
}
uint64_t ImageCtx::get_parent_snap_id(snap_t in_snap_id) const
{
const auto info = get_parent_info(in_snap_id);
if (info)
return info->spec.snap_id;
return CEPH_NOSNAP;
}
int ImageCtx::get_parent_overlap(snap_t in_snap_id,
uint64_t* raw_overlap) const {
const auto info = get_parent_info(in_snap_id);
if (info) {
*raw_overlap = info->overlap;
return 0;
}
return -ENOENT;
}
std::pair<uint64_t, io::ImageArea> ImageCtx::reduce_parent_overlap(
uint64_t raw_overlap, bool migration_write) const {
ceph_assert(ceph_mutex_is_locked(image_lock));
if (migration_write) {
// don't reduce migration write overlap -- it may be larger as
// it's the largest overlap across snapshots by construction
return io::util::raw_to_area_offset(*this, raw_overlap);
}
if (raw_overlap == 0 || parent == nullptr) {
// image opened with OPEN_FLAG_SKIP_OPEN_PARENT -> no overlap
return io::util::raw_to_area_offset(*this, 0);
}
// DATA area in the parent may be smaller than the part of DATA
// area in the clone that is still within the overlap (e.g. for
// LUKS2-encrypted parent + LUKS1-encrypted clone, due to LUKS2
// header usually being bigger than LUKS1 header)
auto overlap = io::util::raw_to_area_offset(*this, raw_overlap);
std::shared_lock parent_image_locker(parent->image_lock);
overlap.first = std::min(overlap.first,
parent->get_area_size(overlap.second));
return overlap;
}
uint64_t ImageCtx::prune_parent_extents(io::Extents& image_extents,
io::ImageArea area,
uint64_t raw_overlap,
bool migration_write) const {
ceph_assert(ceph_mutex_is_locked(image_lock));
ldout(cct, 10) << __func__ << ": image_extents=" << image_extents
<< " area=" << area << " raw_overlap=" << raw_overlap
<< " migration_write=" << migration_write << dendl;
if (raw_overlap == 0) {
image_extents.clear();
return 0;
}
auto overlap = reduce_parent_overlap(raw_overlap, migration_write);
if (area == overlap.second) {
// drop extents completely beyond the overlap
while (!image_extents.empty() &&
image_extents.back().first >= overlap.first) {
image_extents.pop_back();
}
if (!image_extents.empty()) {
// trim final overlapping extent
auto& last_extent = image_extents.back();
if (last_extent.first + last_extent.second > overlap.first) {
last_extent.second = overlap.first - last_extent.first;
}
}
} else if (area == io::ImageArea::DATA &&
overlap.second == io::ImageArea::CRYPTO_HEADER) {
// all extents completely beyond the overlap
image_extents.clear();
} else {
// all extents completely within the overlap
ceph_assert(area == io::ImageArea::CRYPTO_HEADER &&
overlap.second == io::ImageArea::DATA);
}
uint64_t overlap_bytes = 0;
for (auto [_, len] : image_extents) {
overlap_bytes += len;
}
ldout(cct, 10) << __func__ << ": overlap=" << overlap.first
<< "/" << overlap.second
<< " got overlap_bytes=" << overlap_bytes
<< " at " << image_extents << dendl;
return overlap_bytes;
}
void ImageCtx::register_watch(Context *on_finish) {
ceph_assert(image_watcher != NULL);
image_watcher->register_watch(on_finish);
}
void ImageCtx::cancel_async_requests() {
C_SaferCond ctx;
cancel_async_requests(&ctx);
ctx.wait();
}
void ImageCtx::cancel_async_requests(Context *on_finish) {
{
std::lock_guard async_ops_locker{async_ops_lock};
if (!async_requests.empty()) {
ldout(cct, 10) << "canceling async requests: count="
<< async_requests.size() << dendl;
for (auto req : async_requests) {
ldout(cct, 10) << "canceling async request: " << req << dendl;
req->cancel();
}
async_requests_waiters.push_back(on_finish);
return;
}
}
on_finish->complete(0);
}
void ImageCtx::apply_metadata(const std::map<std::string, bufferlist> &meta,
bool thread_safe) {
ldout(cct, 20) << __func__ << dendl;
std::unique_lock image_locker(image_lock);
// reset settings back to global defaults
config_overrides.clear();
config.set_config_values(cct->_conf.get_config_values());
// extract config overrides
for (auto meta_pair : meta) {
if (!boost::starts_with(meta_pair.first, METADATA_CONF_PREFIX)) {
continue;
}
std::string key = meta_pair.first.substr(METADATA_CONF_PREFIX.size());
if (!boost::starts_with(key, "rbd_")) {
// ignore non-RBD configuration keys
// TODO use option schema to determine applicable subsystem
ldout(cct, 0) << __func__ << ": ignoring config " << key << dendl;
continue;
}
if (config.find_option(key) != nullptr) {
std::string val(meta_pair.second.c_str(), meta_pair.second.length());
int r = config.set_val(key, val);
if (r >= 0) {
ldout(cct, 20) << __func__ << ": " << key << "=" << val << dendl;
config_overrides.insert(key);
} else {
lderr(cct) << __func__ << ": failed to set config " << key << " "
<< "with value " << val << ": " << cpp_strerror(r)
<< dendl;
}
}
}
image_locker.unlock();
#define ASSIGN_OPTION(param, type) \
param = config.get_val<type>("rbd_"#param)
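// Illustration of the macro above: ASSIGN_OPTION(cache, bool) expands to
//   cache = config.get_val<bool>("rbd_cache");
// i.e. each cached member below is refreshed from its "rbd_"-prefixed option.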
bool skip_partial_discard = true;
ASSIGN_OPTION(non_blocking_aio, bool);
ASSIGN_OPTION(cache, bool);
ASSIGN_OPTION(sparse_read_threshold_bytes, Option::size_t);
ASSIGN_OPTION(clone_copy_on_read, bool);
ASSIGN_OPTION(enable_alloc_hint, bool);
ASSIGN_OPTION(mirroring_replay_delay, uint64_t);
ASSIGN_OPTION(mtime_update_interval, uint64_t);
ASSIGN_OPTION(atime_update_interval, uint64_t);
ASSIGN_OPTION(skip_partial_discard, bool);
ASSIGN_OPTION(discard_granularity_bytes, uint64_t);
ASSIGN_OPTION(blkin_trace_all, bool);
auto cache_policy = config.get_val<std::string>("rbd_cache_policy");
if (cache_policy == "writethrough" || cache_policy == "writeback") {
ASSIGN_OPTION(readahead_max_bytes, Option::size_t);
ASSIGN_OPTION(readahead_disable_after_bytes, Option::size_t);
}
#undef ASSIGN_OPTION
if (sparse_read_threshold_bytes == 0) {
sparse_read_threshold_bytes = get_object_size();
}
bool dirty_cache = test_features(RBD_FEATURE_DIRTY_CACHE);
if (!skip_partial_discard || dirty_cache) {
discard_granularity_bytes = 0;
}
alloc_hint_flags = 0;
auto compression_hint = config.get_val<std::string>("rbd_compression_hint");
if (compression_hint == "compressible") {
alloc_hint_flags |= librados::ALLOC_HINT_FLAG_COMPRESSIBLE;
} else if (compression_hint == "incompressible") {
alloc_hint_flags |= librados::ALLOC_HINT_FLAG_INCOMPRESSIBLE;
}
librados::Rados rados(md_ctx);
int8_t require_osd_release;
int r = rados.get_min_compatible_osd(&require_osd_release);
if (r == 0 && require_osd_release >= CEPH_RELEASE_OCTOPUS) {
read_flags = 0;
auto read_policy = config.get_val<std::string>("rbd_read_from_replica_policy");
if (read_policy == "balance") {
read_flags |= librados::OPERATION_BALANCE_READS;
} else if (read_policy == "localize") {
read_flags |= librados::OPERATION_LOCALIZE_READS;
}
}
io_image_dispatcher->apply_qos_schedule_tick_min(
config.get_val<uint64_t>("rbd_qos_schedule_tick_min"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_iops_limit"),
config.get_val<uint64_t>("rbd_qos_iops_burst"),
config.get_val<uint64_t>("rbd_qos_iops_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_bps_limit"),
config.get_val<uint64_t>("rbd_qos_bps_burst"),
config.get_val<uint64_t>("rbd_qos_bps_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_read_iops_limit"),
config.get_val<uint64_t>("rbd_qos_read_iops_burst"),
config.get_val<uint64_t>("rbd_qos_read_iops_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_write_iops_limit"),
config.get_val<uint64_t>("rbd_qos_write_iops_burst"),
config.get_val<uint64_t>("rbd_qos_write_iops_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_read_bps_limit"),
config.get_val<uint64_t>("rbd_qos_read_bps_burst"),
config.get_val<uint64_t>("rbd_qos_read_bps_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_write_bps_limit"),
config.get_val<uint64_t>("rbd_qos_write_bps_burst"),
config.get_val<uint64_t>("rbd_qos_write_bps_burst_seconds"));
io_image_dispatcher->apply_qos_exclude_ops(
librbd::io::rbd_io_operations_from_string(
config.get_val<std::string>("rbd_qos_exclude_ops"), nullptr));
if (!disable_zero_copy &&
config.get_val<bool>("rbd_disable_zero_copy_writes")) {
ldout(cct, 5) << this << ": disabling zero-copy writes" << dendl;
disable_zero_copy = true;
}
}
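// Hedged usage sketch (rbd CLI invocation assumed, not defined here): the
// "conf_" metadata prefix handled above allows per-image overrides of rbd_*
// options, e.g.
//   rbd image-meta set mypool/myimage conf_rbd_qos_iops_limit 1000
// which apply_metadata() translates into the QoS limits applied to the
// image dispatcher.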
ExclusiveLock<ImageCtx> *ImageCtx::create_exclusive_lock() {
return new ExclusiveLock<ImageCtx>(*this);
}
ObjectMap<ImageCtx> *ImageCtx::create_object_map(uint64_t snap_id) {
return new ObjectMap<ImageCtx>(*this, snap_id);
}
Journal<ImageCtx> *ImageCtx::create_journal() {
return new Journal<ImageCtx>(*this);
}
void ImageCtx::set_image_name(const std::string &image_name) {
// update the name so rename can be invoked repeatedly
std::shared_lock owner_locker{owner_lock};
std::unique_lock image_locker{image_lock};
name = image_name;
if (old_format) {
header_oid = util::old_header_name(image_name);
}
}
void ImageCtx::notify_update() {
state->handle_update_notification();
ImageWatcher<>::notify_header_update(md_ctx, header_oid);
}
void ImageCtx::notify_update(Context *on_finish) {
state->handle_update_notification();
image_watcher->notify_header_update(on_finish);
}
exclusive_lock::Policy *ImageCtx::get_exclusive_lock_policy() const {
ceph_assert(ceph_mutex_is_locked(owner_lock));
ceph_assert(exclusive_lock_policy != nullptr);
return exclusive_lock_policy;
}
void ImageCtx::set_exclusive_lock_policy(exclusive_lock::Policy *policy) {
ceph_assert(ceph_mutex_is_wlocked(owner_lock));
ceph_assert(policy != nullptr);
delete exclusive_lock_policy;
exclusive_lock_policy = policy;
}
journal::Policy *ImageCtx::get_journal_policy() const {
ceph_assert(ceph_mutex_is_locked(image_lock));
ceph_assert(journal_policy != nullptr);
return journal_policy;
}
void ImageCtx::set_journal_policy(journal::Policy *policy) {
ceph_assert(ceph_mutex_is_wlocked(image_lock));
ceph_assert(policy != nullptr);
delete journal_policy;
journal_policy = policy;
}
void ImageCtx::rebuild_data_io_context() {
auto ctx = std::make_shared<neorados::IOContext>(
data_ctx.get_id(), data_ctx.get_namespace());
if (snap_id != CEPH_NOSNAP) {
ctx->read_snap(snap_id);
}
if (!snapc.snaps.empty()) {
ctx->write_snap_context(
{{snapc.seq, {snapc.snaps.begin(), snapc.snaps.end()}}});
}
if (data_ctx.get_pool_full_try()) {
ctx->full_try(true);
}
// atomically reset the data IOContext to new version
atomic_store(&data_io_context, ctx);
}
IOContext ImageCtx::get_data_io_context() const {
return atomic_load(&data_io_context);
}
IOContext ImageCtx::duplicate_data_io_context() const {
auto ctx = get_data_io_context();
return std::make_shared<neorados::IOContext>(*ctx);
}
void ImageCtx::get_timer_instance(CephContext *cct, SafeTimer **timer,
ceph::mutex **timer_lock) {
auto safe_timer_singleton =
&cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
"librbd::journal::safe_timer", false, cct);
*timer = safe_timer_singleton;
*timer_lock = &safe_timer_singleton->lock;
}
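// Hedged usage sketch (typical caller pattern, assumed): callers share the
// per-CephContext timer singleton and must hold its lock while scheduling:
//
//   SafeTimer *timer;
//   ceph::mutex *timer_lock;
//   ImageCtx::get_timer_instance(cct, &timer, &timer_lock);
//   {
//     std::lock_guard locker{*timer_lock};
//     timer->add_event_after(30, new LambdaContext([](int) { /* ... */ }));
//   }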
}
// ceph-main/src/librbd/ImageCtx.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGECTX_H
#define CEPH_LIBRBD_IMAGECTX_H
#include "include/int_types.h"
#include <atomic>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "common/Timer.h"
#include "common/ceph_mutex.h"
#include "common/config_proxy.h"
#include "common/event_socket.h"
#include "common/Readahead.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"
#include "include/common_fwd.h"
#include "include/buffer_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rbd_types.h"
#include "include/types.h"
#include "include/xlist.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsyncRequest.h"
#include "librbd/Types.h"
#include <boost/lockfree/policies.hpp>
#include <boost/lockfree/queue.hpp>
namespace neorados {
class IOContext;
class RADOS;
} // namespace neorados
namespace librbd {
struct AsioEngine;
template <typename> class ConfigWatcher;
template <typename> class ExclusiveLock;
template <typename> class ImageState;
template <typename> class ImageWatcher;
template <typename> class Journal;
class LibrbdAdminSocketHook;
template <typename> class ObjectMap;
template <typename> class Operations;
template <typename> class PluginRegistry;
namespace asio { struct ContextWQ; }
namespace crypto { template <typename> class EncryptionFormat; }
namespace exclusive_lock { struct Policy; }
namespace io {
class AioCompletion;
class AsyncOperation;
template <typename> class CopyupRequest;
enum class ImageArea;
struct ImageDispatcherInterface;
struct ObjectDispatcherInterface;
}
namespace journal { struct Policy; }
namespace operation {
template <typename> class ResizeRequest;
}
struct ImageCtx {
typedef std::pair<cls::rbd::SnapshotNamespace, std::string> SnapKey;
struct SnapKeyComparator {
inline bool operator()(const SnapKey& lhs, const SnapKey& rhs) const {
// only compare by namespace type and name
if (lhs.first.index() != rhs.first.index()) {
return lhs.first.index() < rhs.first.index();
}
return lhs.second < rhs.second;
}
};
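// Illustration (assumed ordering by the namespace variant index): two
// snapshots both named "s1", one in the user namespace and one in the trash
// namespace, compare by namespace index first and so form distinct keys in
// snap_ids; snapshot IDs themselves never participate in the comparison.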
static const std::string METADATA_CONF_PREFIX;
CephContext *cct;
ConfigProxy config;
std::set<std::string> config_overrides;
PerfCounters *perfcounter;
struct rbd_obj_header_ondisk header;
::SnapContext snapc;
std::vector<librados::snap_t> snaps; // this mirrors snapc.snaps, but is in
// a format librados can understand
std::map<librados::snap_t, SnapInfo> snap_info;
std::map<SnapKey, librados::snap_t, SnapKeyComparator> snap_ids;
uint64_t open_snap_id = CEPH_NOSNAP;
uint64_t snap_id;
bool snap_exists; // false if our snap_id was deleted
// whether the image was opened read-only. cannot be changed after opening
bool read_only;
uint32_t read_only_flags = 0U;
uint32_t read_only_mask = ~0U;
std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> lockers;
bool exclusive_locked;
std::string lock_tag;
std::string name;
cls::rbd::SnapshotNamespace snap_namespace;
std::string snap_name;
std::shared_ptr<AsioEngine> asio_engine;
// New ASIO-style RADOS API
neorados::RADOS& rados_api;
// Legacy RADOS API
librados::IoCtx data_ctx;
librados::IoCtx md_ctx;
ConfigWatcher<ImageCtx> *config_watcher = nullptr;
ImageWatcher<ImageCtx> *image_watcher;
Journal<ImageCtx> *journal;
/**
* Lock ordering:
*
* owner_lock, image_lock
* async_ops_lock, timestamp_lock
*/
ceph::shared_mutex owner_lock; // protects exclusive lock leadership updates
mutable ceph::shared_mutex image_lock; // protects snapshot-related member variables,
// features (and associated helper classes), and flags
// protects access to the mutable image metadata that
// isn't guarded by other locks below, and blocks writes
// when held exclusively, so snapshots can be consistent.
// Fields guarded include:
// total_bytes_read
// exclusive_locked
// lock_tag
// lockers
// object_map
// parent_md and parent
// encryption_format
ceph::shared_mutex timestamp_lock; // protects (create/access/modify)_timestamp
ceph::mutex async_ops_lock; // protects async_ops and async_requests
ceph::mutex copyup_list_lock; // protects copyup_waiting_list
unsigned extra_read_flags; // librados::OPERATION_*
bool old_format;
uint8_t order;
uint64_t size;
uint64_t features;
std::string object_prefix;
char *format_string;
std::string header_oid;
std::string id; // only used for new-format images
ParentImageInfo parent_md;
ImageCtx *parent;
ImageCtx *child = nullptr;
MigrationInfo migration_info;
cls::rbd::GroupSpec group_spec;
uint64_t stripe_unit, stripe_count;
uint64_t flags;
uint64_t op_features = 0;
bool operations_disabled = false;
utime_t create_timestamp;
utime_t access_timestamp;
utime_t modify_timestamp;
file_layout_t layout;
Readahead readahead;
std::atomic<uint64_t> total_bytes_read = {0};
std::map<uint64_t, io::CopyupRequest<ImageCtx>*> copyup_list;
xlist<io::AsyncOperation*> async_ops;
xlist<AsyncRequest<>*> async_requests;
std::list<Context*> async_requests_waiters;
ImageState<ImageCtx> *state;
Operations<ImageCtx> *operations;
ExclusiveLock<ImageCtx> *exclusive_lock;
ObjectMap<ImageCtx> *object_map;
xlist<operation::ResizeRequest<ImageCtx>*> resize_reqs;
io::ImageDispatcherInterface *io_image_dispatcher = nullptr;
io::ObjectDispatcherInterface *io_object_dispatcher = nullptr;
asio::ContextWQ *op_work_queue;
PluginRegistry<ImageCtx>* plugin_registry;
using Completions = boost::lockfree::queue<io::AioCompletion*>;
Completions event_socket_completions;
EventSocket event_socket;
bool ignore_migrating = false;
bool disable_zero_copy = false;
bool enable_sparse_copyup = false;
/// Cached latency-sensitive configuration settings
bool non_blocking_aio;
bool cache;
uint64_t sparse_read_threshold_bytes;
uint64_t readahead_max_bytes = 0;
uint64_t readahead_disable_after_bytes = 0;
bool clone_copy_on_read;
bool enable_alloc_hint;
uint32_t alloc_hint_flags = 0U;
uint32_t read_flags = 0U; // librados::OPERATION_*
uint32_t discard_granularity_bytes = 0;
bool blkin_trace_all;
uint64_t mirroring_replay_delay;
uint64_t mtime_update_interval;
uint64_t atime_update_interval;
LibrbdAdminSocketHook *asok_hook;
exclusive_lock::Policy *exclusive_lock_policy = nullptr;
journal::Policy *journal_policy = nullptr;
ZTracer::Endpoint trace_endpoint;
std::unique_ptr<crypto::EncryptionFormat<ImageCtx>> encryption_format;
// unit test mock helpers
static ImageCtx* create(const std::string &image_name,
const std::string &image_id,
const char *snap, IoCtx& p, bool read_only) {
return new ImageCtx(image_name, image_id, snap, p, read_only);
}
static ImageCtx* create(const std::string &image_name,
const std::string &image_id,
librados::snap_t snap_id, IoCtx& p,
bool read_only) {
return new ImageCtx(image_name, image_id, snap_id, p, read_only);
}
/**
* Either image_name or image_id must be set.
* If id is not known, pass the empty std::string,
* and init() will look it up.
*/
ImageCtx(const std::string &image_name, const std::string &image_id,
const char *snap, IoCtx& p, bool read_only);
ImageCtx(const std::string &image_name, const std::string &image_id,
librados::snap_t snap_id, IoCtx& p, bool read_only);
~ImageCtx();
void init();
void shutdown();
void init_layout(int64_t pool_id);
void perf_start(std::string name);
void perf_stop();
void set_read_flag(unsigned flag);
int get_read_flags(librados::snap_t snap_id);
int snap_set(uint64_t snap_id);
void snap_unset();
librados::snap_t get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace,
const std::string& in_snap_name) const;
const SnapInfo* get_snap_info(librados::snap_t in_snap_id) const;
int get_snap_name(librados::snap_t in_snap_id,
std::string *out_snap_name) const;
int get_snap_namespace(librados::snap_t in_snap_id,
cls::rbd::SnapshotNamespace *out_snap_namespace) const;
int get_parent_spec(librados::snap_t in_snap_id,
cls::rbd::ParentImageSpec *pspec) const;
int is_snap_protected(librados::snap_t in_snap_id,
bool *is_protected) const;
int is_snap_unprotected(librados::snap_t in_snap_id,
bool *is_unprotected) const;
uint64_t get_current_size() const;
uint64_t get_object_size() const;
std::string get_object_name(uint64_t num) const;
uint64_t get_stripe_unit() const;
uint64_t get_stripe_count() const;
uint64_t get_stripe_period() const;
utime_t get_create_timestamp() const;
utime_t get_access_timestamp() const;
utime_t get_modify_timestamp() const;
void set_access_timestamp(utime_t at);
void set_modify_timestamp(utime_t at);
void add_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
std::string in_snap_name,
librados::snap_t id,
uint64_t in_size, const ParentImageInfo &parent,
uint8_t protection_status, uint64_t flags, utime_t timestamp);
void rm_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
std::string in_snap_name,
librados::snap_t id);
uint64_t get_image_size(librados::snap_t in_snap_id) const;
uint64_t get_area_size(io::ImageArea area) const;
uint64_t get_object_count(librados::snap_t in_snap_id) const;
bool test_features(uint64_t test_features) const;
bool test_features(uint64_t test_features,
const ceph::shared_mutex &in_image_lock) const;
bool test_op_features(uint64_t op_features) const;
bool test_op_features(uint64_t op_features,
const ceph::shared_mutex &in_image_lock) const;
int get_flags(librados::snap_t in_snap_id, uint64_t *flags) const;
int test_flags(librados::snap_t in_snap_id,
uint64_t test_flags, bool *flags_set) const;
int test_flags(librados::snap_t in_snap_id,
uint64_t test_flags, const ceph::shared_mutex &in_image_lock,
bool *flags_set) const;
int update_flags(librados::snap_t in_snap_id, uint64_t flag, bool enabled);
const ParentImageInfo* get_parent_info(librados::snap_t in_snap_id) const;
int64_t get_parent_pool_id(librados::snap_t in_snap_id) const;
std::string get_parent_image_id(librados::snap_t in_snap_id) const;
uint64_t get_parent_snap_id(librados::snap_t in_snap_id) const;
int get_parent_overlap(librados::snap_t in_snap_id,
uint64_t* raw_overlap) const;
std::pair<uint64_t, io::ImageArea> reduce_parent_overlap(
uint64_t raw_overlap, bool migration_write) const;
uint64_t prune_parent_extents(
std::vector<std::pair<uint64_t, uint64_t>>& image_extents,
io::ImageArea area, uint64_t raw_overlap, bool migration_write) const;
void register_watch(Context *on_finish);
void cancel_async_requests();
void cancel_async_requests(Context *on_finish);
void apply_metadata(const std::map<std::string, bufferlist> &meta,
bool thread_safe);
ExclusiveLock<ImageCtx> *create_exclusive_lock();
ObjectMap<ImageCtx> *create_object_map(uint64_t snap_id);
Journal<ImageCtx> *create_journal();
void set_image_name(const std::string &name);
void notify_update();
void notify_update(Context *on_finish);
exclusive_lock::Policy *get_exclusive_lock_policy() const;
void set_exclusive_lock_policy(exclusive_lock::Policy *policy);
journal::Policy *get_journal_policy() const;
void set_journal_policy(journal::Policy *policy);
void rebuild_data_io_context();
IOContext get_data_io_context() const;
IOContext duplicate_data_io_context() const;
static void get_timer_instance(CephContext *cct, SafeTimer **timer,
ceph::mutex **timer_lock);
private:
std::shared_ptr<neorados::IOContext> data_io_context;
};
}
#endif
// ceph-main/src/librbd/ImageState.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ImageState.h"
#include "include/rbd/librbd.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "common/WorkQueue.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/TaskFinisher.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/image/CloseRequest.h"
#include "librbd/image/OpenRequest.h"
#include "librbd/image/RefreshRequest.h"
#include "librbd/image/SetSnapRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ImageState: " << this << " "
namespace librbd {
using util::create_async_context_callback;
using util::create_context_callback;
class ImageUpdateWatchers {
public:
explicit ImageUpdateWatchers(CephContext *cct) : m_cct(cct),
m_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageUpdateWatchers::m_lock", this))) {
}
~ImageUpdateWatchers() {
ceph_assert(m_watchers.empty());
ceph_assert(m_in_flight.empty());
ceph_assert(m_pending_unregister.empty());
ceph_assert(m_on_shut_down_finish == nullptr);
destroy_work_queue();
}
void flush(Context *on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
{
std::lock_guard locker{m_lock};
if (!m_in_flight.empty()) {
Context *ctx = new LambdaContext(
[this, on_finish](int r) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing flush" << dendl;
on_finish->complete(r);
});
m_work_queue->queue(ctx, 0);
return;
}
}
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing flush" << dendl;
on_finish->complete(0);
}
void shut_down(Context *on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down_finish == nullptr);
m_watchers.clear();
if (!m_in_flight.empty()) {
m_on_shut_down_finish = on_finish;
return;
}
}
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing shut down" << dendl;
on_finish->complete(0);
}
void register_watcher(UpdateWatchCtx *watcher, uint64_t *handle) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": watcher="
<< watcher << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down_finish == nullptr);
create_work_queue();
*handle = m_next_handle++;
m_watchers.insert(std::make_pair(*handle, watcher));
}
void unregister_watcher(uint64_t handle, Context *on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
<< handle << dendl;
int r = 0;
{
std::lock_guard locker{m_lock};
auto it = m_watchers.find(handle);
if (it == m_watchers.end()) {
r = -ENOENT;
} else {
if (m_in_flight.find(handle) != m_in_flight.end()) {
ceph_assert(m_pending_unregister.find(handle) == m_pending_unregister.end());
m_pending_unregister[handle] = on_finish;
on_finish = nullptr;
}
m_watchers.erase(it);
}
}
if (on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing unregister" << dendl;
on_finish->complete(r);
}
}
void notify() {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
std::lock_guard locker{m_lock};
for (auto it : m_watchers) {
send_notify(it.first, it.second);
}
}
void send_notify(uint64_t handle, UpdateWatchCtx *watcher) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
<< handle << ", watcher=" << watcher << dendl;
m_in_flight.insert(handle);
Context *ctx = new LambdaContext(
[this, handle, watcher](int r) {
handle_notify(handle, watcher);
});
m_work_queue->queue(ctx, 0);
}
void handle_notify(uint64_t handle, UpdateWatchCtx *watcher) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
<< handle << ", watcher=" << watcher << dendl;
watcher->handle_notify();
Context *on_unregister_finish = nullptr;
Context *on_shut_down_finish = nullptr;
{
std::lock_guard locker{m_lock};
auto in_flight_it = m_in_flight.find(handle);
ceph_assert(in_flight_it != m_in_flight.end());
m_in_flight.erase(in_flight_it);
// If there is no more in flight notifications for this watcher
// and it is pending unregister, complete it now.
if (m_in_flight.find(handle) == m_in_flight.end()) {
auto it = m_pending_unregister.find(handle);
if (it != m_pending_unregister.end()) {
on_unregister_finish = it->second;
m_pending_unregister.erase(it);
}
}
if (m_in_flight.empty()) {
ceph_assert(m_pending_unregister.empty());
if (m_on_shut_down_finish != nullptr) {
std::swap(m_on_shut_down_finish, on_shut_down_finish);
}
}
}
if (on_unregister_finish != nullptr) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing unregister" << dendl;
on_unregister_finish->complete(0);
}
if (on_shut_down_finish != nullptr) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing shut down" << dendl;
on_shut_down_finish->complete(0);
}
}
private:
class ThreadPoolSingleton : public ThreadPool {
public:
explicit ThreadPoolSingleton(CephContext *cct)
: ThreadPool(cct, "librbd::ImageUpdateWatchers::thread_pool", "tp_librbd",
1) {
start();
}
~ThreadPoolSingleton() override {
stop();
}
};
CephContext *m_cct;
ceph::mutex m_lock;
ContextWQ *m_work_queue = nullptr;
std::map<uint64_t, UpdateWatchCtx*> m_watchers;
uint64_t m_next_handle = 0;
std::multiset<uint64_t> m_in_flight;
std::map<uint64_t, Context*> m_pending_unregister;
Context *m_on_shut_down_finish = nullptr;
void create_work_queue() {
if (m_work_queue != nullptr) {
return;
}
auto& thread_pool = m_cct->lookup_or_create_singleton_object<
ThreadPoolSingleton>("librbd::ImageUpdateWatchers::thread_pool",
false, m_cct);
m_work_queue = new ContextWQ("librbd::ImageUpdateWatchers::work_queue",
ceph::make_timespan(
m_cct->_conf.get_val<uint64_t>("rbd_op_thread_timeout")),
&thread_pool);
}
void destroy_work_queue() {
if (m_work_queue == nullptr) {
return;
}
m_work_queue->drain();
delete m_work_queue;
}
};
class QuiesceWatchers {
public:
explicit QuiesceWatchers(CephContext *cct, asio::ContextWQ* work_queue)
: m_cct(cct),
m_work_queue(work_queue),
m_lock(ceph::make_mutex(util::unique_lock_name(
"librbd::QuiesceWatchers::m_lock", this))) {
}
~QuiesceWatchers() {
ceph_assert(m_pending_unregister.empty());
ceph_assert(m_on_notify == nullptr);
}
void register_watcher(QuiesceWatchCtx *watcher, uint64_t *handle) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << ": watcher="
<< watcher << dendl;
std::lock_guard locker{m_lock};
*handle = m_next_handle++;
m_watchers[*handle] = watcher;
}
void unregister_watcher(uint64_t handle, Context *on_finish) {
int r = 0;
{
std::lock_guard locker{m_lock};
auto it = m_watchers.find(handle);
if (it == m_watchers.end()) {
r = -ENOENT;
} else {
if (m_on_notify != nullptr) {
ceph_assert(!m_pending_unregister.count(handle));
m_pending_unregister[handle] = on_finish;
on_finish = nullptr;
}
m_watchers.erase(it);
}
}
if (on_finish) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__
<< ": completing unregister " << handle << dendl;
on_finish->complete(r);
}
}
void notify_quiesce(Context *on_finish) {
std::lock_guard locker{m_lock};
if (m_blocked) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << ": queue" << dendl;
m_pending_notify.push_back(on_finish);
return;
}
notify(QUIESCE, on_finish);
}
void notify_unquiesce(Context *on_finish) {
std::lock_guard locker{m_lock};
notify(UNQUIESCE, on_finish);
}
void quiesce_complete(uint64_t handle, int r) {
Context *on_notify = nullptr;
{
std::lock_guard locker{m_lock};
ceph_assert(m_on_notify != nullptr);
ceph_assert(m_handle_quiesce_cnt > 0);
m_handle_quiesce_cnt--;
if (r < 0) {
ldout(m_cct, 10) << "QuiesceWatchers::" << __func__ << ": watcher "
<< handle << " failed" << dendl;
m_failed_watchers.insert(handle);
m_ret_val = r;
}
if (m_handle_quiesce_cnt > 0) {
return;
}
std::swap(on_notify, m_on_notify);
r = m_ret_val;
}
on_notify->complete(r);
}
private:
enum EventType {QUIESCE, UNQUIESCE};
CephContext *m_cct;
asio::ContextWQ *m_work_queue;
ceph::mutex m_lock;
std::map<uint64_t, QuiesceWatchCtx*> m_watchers;
uint64_t m_next_handle = 0;
Context *m_on_notify = nullptr;
std::list<Context *> m_pending_notify;
std::map<uint64_t, Context*> m_pending_unregister;
uint64_t m_handle_quiesce_cnt = 0;
std::set<uint64_t> m_failed_watchers;
bool m_blocked = false;
int m_ret_val = 0;
void notify(EventType event_type, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_watchers.empty()) {
m_work_queue->queue(on_finish);
return;
}
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << " event: "
<< event_type << dendl;
Context *ctx = nullptr;
if (event_type == QUIESCE) {
ceph_assert(!m_blocked);
ceph_assert(m_handle_quiesce_cnt == 0);
m_blocked = true;
m_handle_quiesce_cnt = m_watchers.size();
m_failed_watchers.clear();
m_ret_val = 0;
} else {
ceph_assert(event_type == UNQUIESCE);
ceph_assert(m_blocked);
ctx = create_async_context_callback(
m_work_queue, create_context_callback<
QuiesceWatchers, &QuiesceWatchers::handle_notify_unquiesce>(this));
}
auto gather_ctx = new C_Gather(m_cct, ctx);
ceph_assert(m_on_notify == nullptr);
m_on_notify = on_finish;
for (auto &[handle, watcher] : m_watchers) {
send_notify(handle, watcher, event_type, gather_ctx->new_sub());
}
gather_ctx->activate();
}
void send_notify(uint64_t handle, QuiesceWatchCtx *watcher,
EventType event_type, Context *on_finish) {
auto ctx = new LambdaContext(
[this, handle, watcher, event_type, on_finish](int) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << ": handle="
<< handle << ", event_type=" << event_type << dendl;
switch (event_type) {
case QUIESCE:
watcher->handle_quiesce();
break;
case UNQUIESCE:
{
std::lock_guard locker{m_lock};
if (m_failed_watchers.count(handle)) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__
<< ": skip for failed watcher" << dendl;
break;
}
}
watcher->handle_unquiesce();
break;
default:
ceph_abort_msgf("invalid event_type %d", event_type);
}
on_finish->complete(0);
});
m_work_queue->queue(ctx);
}
void handle_notify_unquiesce(int r) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << ": r=" << r
<< dendl;
ceph_assert(r == 0);
std::unique_lock locker{m_lock};
if (!m_pending_unregister.empty()) {
std::map<uint64_t, Context*> pending_unregister;
std::swap(pending_unregister, m_pending_unregister);
locker.unlock();
for (auto &it : pending_unregister) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__
<< ": completing unregister " << it.first << dendl;
it.second->complete(0);
}
locker.lock();
}
Context *on_notify = nullptr;
std::swap(on_notify, m_on_notify);
ceph_assert(m_blocked);
m_blocked = false;
if (!m_pending_notify.empty()) {
auto on_finish = m_pending_notify.front();
m_pending_notify.pop_front();
notify(QUIESCE, on_finish);
}
locker.unlock();
on_notify->complete(0);
}
};
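// Hedged usage sketch (hypothetical watcher using the public librbd API;
// registration normally goes through Image::quiesce_watch()/quiesce_unwatch()):
// a quiesce watcher is expected to acknowledge each quiesce event, e.g.
//
//   struct MyQuiesceWatcher : public librbd::QuiesceWatchCtx {
//     librbd::Image& image;
//     uint64_t handle = 0;
//     explicit MyQuiesceWatcher(librbd::Image& image) : image(image) {}
//     void handle_quiesce() override {
//       // pause/flush guest I/O, then acknowledge:
//       image.quiesce_complete(handle, 0);
//     }
//     void handle_unquiesce() override { /* resume guest I/O */ }
//   };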
template <typename I>
ImageState<I>::ImageState(I *image_ctx)
: m_image_ctx(image_ctx), m_state(STATE_UNINITIALIZED),
m_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageState::m_lock", this))),
m_last_refresh(0), m_refresh_seq(0),
m_update_watchers(new ImageUpdateWatchers(image_ctx->cct)),
m_quiesce_watchers(new QuiesceWatchers(
image_ctx->cct, image_ctx->asio_engine->get_work_queue())) {
}
template <typename I>
ImageState<I>::~ImageState() {
ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED);
delete m_update_watchers;
delete m_quiesce_watchers;
}
template <typename I>
int ImageState<I>::open(uint64_t flags) {
C_SaferCond ctx;
open(flags, &ctx);
int r = ctx.wait();
return r;
}
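// Hedged usage sketch (caller assumed, not part of this file): librbd drives
// image opens through this state machine, e.g.
//
//   auto* ictx = new ImageCtx(name, "", nullptr, io_ctx, false);
//   int r = ictx->state->open(0);   // blocks on the C_SaferCond above
//   // on failure the state machine deletes ictx (see complete_action_unlock)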
template <typename I>
void ImageState<I>::open(uint64_t flags, Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_lock.lock();
ceph_assert(m_state == STATE_UNINITIALIZED);
m_open_flags = flags;
Action action(ACTION_TYPE_OPEN);
action.refresh_seq = m_refresh_seq;
execute_action_unlock(action, on_finish);
}
template <typename I>
int ImageState<I>::close() {
C_SaferCond ctx;
close(&ctx);
int r = ctx.wait();
return r;
}
template <typename I>
void ImageState<I>::close(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_lock.lock();
ceph_assert(!is_closed());
Action action(ACTION_TYPE_CLOSE);
action.refresh_seq = m_refresh_seq;
execute_action_unlock(action, on_finish);
}
template <typename I>
void ImageState<I>::handle_update_notification() {
std::lock_guard locker{m_lock};
++m_refresh_seq;
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": refresh_seq = " << m_refresh_seq << ", "
<< "last_refresh = " << m_last_refresh << dendl;
switch (m_state) {
case STATE_UNINITIALIZED:
case STATE_CLOSED:
case STATE_OPENING:
case STATE_CLOSING:
ldout(cct, 5) << "dropping update notification to watchers" << dendl;
return;
default:
break;
}
m_update_watchers->notify();
}
template <typename I>
bool ImageState<I>::is_refresh_required() const {
std::lock_guard locker{m_lock};
return (m_last_refresh != m_refresh_seq || find_pending_refresh() != nullptr);
}
template <typename I>
int ImageState<I>::refresh() {
C_SaferCond refresh_ctx;
refresh(&refresh_ctx);
return refresh_ctx.wait();
}
template <typename I>
void ImageState<I>::refresh(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_lock.lock();
if (is_closed()) {
m_lock.unlock();
on_finish->complete(-ESHUTDOWN);
return;
}
Action action(ACTION_TYPE_REFRESH);
action.refresh_seq = m_refresh_seq;
execute_action_unlock(action, on_finish);
}
template <typename I>
int ImageState<I>::refresh_if_required() {
C_SaferCond ctx;
{
m_lock.lock();
Action action(ACTION_TYPE_REFRESH);
action.refresh_seq = m_refresh_seq;
auto refresh_action = find_pending_refresh();
if (refresh_action != nullptr) {
// if a refresh is in-flight, delay until it is finished
action = *refresh_action;
} else if (m_last_refresh == m_refresh_seq) {
m_lock.unlock();
return 0;
} else if (is_closed()) {
m_lock.unlock();
return -ESHUTDOWN;
}
execute_action_unlock(action, &ctx);
}
return ctx.wait();
}
template <typename I>
const typename ImageState<I>::Action *
ImageState<I>::find_pending_refresh() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
auto it = std::find_if(m_actions_contexts.rbegin(),
m_actions_contexts.rend(),
[](const ActionContexts& action_contexts) {
return (action_contexts.first == ACTION_TYPE_REFRESH);
});
if (it != m_actions_contexts.rend()) {
return &it->first;
}
return nullptr;
}
template <typename I>
void ImageState<I>::snap_set(uint64_t snap_id, Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": snap_id=" << snap_id << dendl;
Action action(ACTION_TYPE_SET_SNAP);
action.snap_id = snap_id;
m_lock.lock();
execute_action_unlock(action, on_finish);
}
template <typename I>
void ImageState<I>::prepare_lock(Context *on_ready) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << dendl;
m_lock.lock();
if (is_closed()) {
m_lock.unlock();
on_ready->complete(-ESHUTDOWN);
return;
}
Action action(ACTION_TYPE_LOCK);
action.on_ready = on_ready;
execute_action_unlock(action, nullptr);
}
template <typename I>
void ImageState<I>::handle_prepare_lock_complete() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << dendl;
m_lock.lock();
if (m_state != STATE_PREPARING_LOCK) {
m_lock.unlock();
return;
}
complete_action_unlock(STATE_OPEN, 0);
}
template <typename I>
int ImageState<I>::register_update_watcher(UpdateWatchCtx *watcher,
uint64_t *handle) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_update_watchers->register_watcher(watcher, handle);
ldout(cct, 20) << __func__ << ": handle=" << *handle << dendl;
return 0;
}
template <typename I>
void ImageState<I>::unregister_update_watcher(uint64_t handle,
Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": handle=" << handle << dendl;
m_update_watchers->unregister_watcher(handle, on_finish);
}
template <typename I>
int ImageState<I>::unregister_update_watcher(uint64_t handle) {
C_SaferCond ctx;
unregister_update_watcher(handle, &ctx);
return ctx.wait();
}
template <typename I>
void ImageState<I>::flush_update_watchers(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_update_watchers->flush(on_finish);
}
template <typename I>
void ImageState<I>::shut_down_update_watchers(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_update_watchers->shut_down(on_finish);
}
template <typename I>
bool ImageState<I>::is_transition_state() const {
switch (m_state) {
case STATE_UNINITIALIZED:
case STATE_OPEN:
case STATE_CLOSED:
return false;
case STATE_OPENING:
case STATE_CLOSING:
case STATE_REFRESHING:
case STATE_SETTING_SNAP:
case STATE_PREPARING_LOCK:
break;
}
return true;
}
template <typename I>
bool ImageState<I>::is_closed() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return ((m_state == STATE_CLOSED) ||
(!m_actions_contexts.empty() &&
m_actions_contexts.back().first.action_type == ACTION_TYPE_CLOSE));
}
template <typename I>
void ImageState<I>::append_context(const Action &action, Context *context) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ActionContexts *action_contexts = nullptr;
for (auto &action_ctxs : m_actions_contexts) {
if (action == action_ctxs.first) {
action_contexts = &action_ctxs;
break;
}
}
if (action_contexts == nullptr) {
m_actions_contexts.push_back({action, {}});
action_contexts = &m_actions_contexts.back();
}
if (context != nullptr) {
action_contexts->second.push_back(context);
}
}
template <typename I>
void ImageState<I>::execute_next_action_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
switch (m_actions_contexts.front().first.action_type) {
case ACTION_TYPE_OPEN:
send_open_unlock();
return;
case ACTION_TYPE_CLOSE:
send_close_unlock();
return;
case ACTION_TYPE_REFRESH:
send_refresh_unlock();
return;
case ACTION_TYPE_SET_SNAP:
send_set_snap_unlock();
return;
case ACTION_TYPE_LOCK:
send_prepare_lock_unlock();
return;
}
ceph_abort();
}
template <typename I>
void ImageState<I>::execute_action_unlock(const Action &action,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_lock));
append_context(action, on_finish);
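  // if an action is already in flight, the new action stays queued and is
  // picked up later by complete_action_unlock()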
if (!is_transition_state()) {
execute_next_action_unlock();
} else {
m_lock.unlock();
}
}
template <typename I>
void ImageState<I>::complete_action_unlock(State next_state, int r) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
ActionContexts action_contexts(std::move(m_actions_contexts.front()));
m_actions_contexts.pop_front();
m_state = next_state;
m_lock.unlock();
if (next_state == STATE_CLOSED ||
(next_state == STATE_UNINITIALIZED && r < 0)) {
// the ImageCtx must be deleted outside the scope of its callback threads
auto ctx = new LambdaContext(
[image_ctx=m_image_ctx, contexts=std::move(action_contexts.second)]
(int r) {
delete image_ctx;
for (auto ctx : contexts) {
ctx->complete(r);
}
});
TaskFinisherSingleton::get_singleton(m_image_ctx->cct).queue(ctx, r);
} else {
for (auto ctx : action_contexts.second) {
if (next_state == STATE_OPEN) {
        // we couldn't originally wrap the open callback with an async
        // wrapper because the image might have failed to open
ctx = create_async_context_callback(*m_image_ctx, ctx);
}
ctx->complete(r);
}
m_lock.lock();
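    // start the next queued action now that this one has completed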
if (!is_transition_state() && !m_actions_contexts.empty()) {
execute_next_action_unlock();
} else {
m_lock.unlock();
}
}
}
template <typename I>
void ImageState<I>::send_open_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_state = STATE_OPENING;
Context *ctx = create_context_callback<
ImageState<I>, &ImageState<I>::handle_open>(this);
image::OpenRequest<I> *req = image::OpenRequest<I>::create(
m_image_ctx, m_open_flags, ctx);
m_lock.unlock();
req->send();
}
template <typename I>
void ImageState<I>::handle_open(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
}
m_lock.lock();
complete_action_unlock(r < 0 ? STATE_UNINITIALIZED : STATE_OPEN, r);
}
template <typename I>
void ImageState<I>::send_close_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_state = STATE_CLOSING;
Context *ctx = create_context_callback<
ImageState<I>, &ImageState<I>::handle_close>(this);
image::CloseRequest<I> *req = image::CloseRequest<I>::create(
m_image_ctx, ctx);
m_lock.unlock();
req->send();
}
template <typename I>
void ImageState<I>::handle_close(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0) {
lderr(cct) << "error occurred while closing image: " << cpp_strerror(r)
<< dendl;
}
m_lock.lock();
complete_action_unlock(STATE_CLOSED, r);
}
template <typename I>
void ImageState<I>::send_refresh_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_state = STATE_REFRESHING;
ceph_assert(!m_actions_contexts.empty());
auto &action_context = m_actions_contexts.front().first;
ceph_assert(action_context.action_type == ACTION_TYPE_REFRESH);
Context *ctx = create_async_context_callback(
*m_image_ctx, create_context_callback<
ImageState<I>, &ImageState<I>::handle_refresh>(this));
image::RefreshRequest<I> *req = image::RefreshRequest<I>::create(
*m_image_ctx, false, false, ctx);
m_lock.unlock();
req->send();
}
template <typename I>
void ImageState<I>::handle_refresh(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
m_lock.lock();
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
ceph_assert(action_contexts.first.action_type == ACTION_TYPE_REFRESH);
ceph_assert(m_last_refresh <= action_contexts.first.refresh_seq);
if (r == -ERESTART) {
ldout(cct, 5) << "incomplete refresh: not updating sequence" << dendl;
r = 0;
} else {
m_last_refresh = action_contexts.first.refresh_seq;
}
complete_action_unlock(STATE_OPEN, r);
}
template <typename I>
void ImageState<I>::send_set_snap_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = STATE_SETTING_SNAP;
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
ceph_assert(action_contexts.first.action_type == ACTION_TYPE_SET_SNAP);
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": "
<< "snap_id=" << action_contexts.first.snap_id << dendl;
Context *ctx = create_async_context_callback(
*m_image_ctx, create_context_callback<
ImageState<I>, &ImageState<I>::handle_set_snap>(this));
image::SetSnapRequest<I> *req = image::SetSnapRequest<I>::create(
*m_image_ctx, action_contexts.first.snap_id, ctx);
m_lock.unlock();
req->send();
}
template <typename I>
void ImageState<I>::handle_set_snap(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << " r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to set snapshot: " << cpp_strerror(r) << dendl;
}
m_lock.lock();
complete_action_unlock(STATE_OPEN, r);
}
template <typename I>
void ImageState<I>::send_prepare_lock_unlock() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = STATE_PREPARING_LOCK;
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
ceph_assert(action_contexts.first.action_type == ACTION_TYPE_LOCK);
Context *on_ready = action_contexts.first.on_ready;
m_lock.unlock();
if (on_ready == nullptr) {
complete_action_unlock(STATE_OPEN, 0);
return;
}
  // wake up the lock handler now that it's safe to proceed
on_ready->complete(0);
}
template <typename I>
int ImageState<I>::register_quiesce_watcher(QuiesceWatchCtx *watcher,
uint64_t *handle) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_quiesce_watchers->register_watcher(watcher, handle);
ldout(cct, 20) << __func__ << ": handle=" << *handle << dendl;
return 0;
}
template <typename I>
int ImageState<I>::unregister_quiesce_watcher(uint64_t handle) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": handle=" << handle << dendl;
C_SaferCond ctx;
m_quiesce_watchers->unregister_watcher(handle, &ctx);
return ctx.wait();
}
template <typename I>
void ImageState<I>::notify_quiesce(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_quiesce_watchers->notify_quiesce(on_finish);
}
template <typename I>
void ImageState<I>::notify_unquiesce(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_quiesce_watchers->notify_unquiesce(on_finish);
}
template <typename I>
void ImageState<I>::quiesce_complete(uint64_t handle, int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": handle=" << handle << " r=" << r << dendl;
m_quiesce_watchers->quiesce_complete(handle, r);
}
} // namespace librbd
template class librbd::ImageState<librbd::ImageCtx>;
| 28,604 | 26.478386 | 99 | cc |
null | ceph-main/src/librbd/ImageState.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_STATE_H
#define CEPH_LIBRBD_IMAGE_STATE_H
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include <list>
#include <string>
#include <utility>
#include "cls/rbd/cls_rbd_types.h"
class Context;
class RWLock;
namespace librbd {
class QuiesceWatchCtx;
class QuiesceWatchers;
class ImageCtx;
class ImageUpdateWatchers;
class UpdateWatchCtx;
template <typename ImageCtxT = ImageCtx>
class ImageState {
public:
ImageState(ImageCtxT *image_ctx);
~ImageState();
int open(uint64_t flags);
void open(uint64_t flags, Context *on_finish);
int close();
void close(Context *on_finish);
void handle_update_notification();
bool is_refresh_required() const;
int refresh();
int refresh_if_required();
void refresh(Context *on_finish);
void snap_set(uint64_t snap_id, Context *on_finish);
void prepare_lock(Context *on_ready);
void handle_prepare_lock_complete();
int register_update_watcher(UpdateWatchCtx *watcher, uint64_t *handle);
void unregister_update_watcher(uint64_t handle, Context *on_finish);
int unregister_update_watcher(uint64_t handle);
void flush_update_watchers(Context *on_finish);
void shut_down_update_watchers(Context *on_finish);
int register_quiesce_watcher(QuiesceWatchCtx *watcher, uint64_t *handle);
int unregister_quiesce_watcher(uint64_t handle);
void notify_quiesce(Context *on_finish);
void notify_unquiesce(Context *on_finish);
void quiesce_complete(uint64_t handle, int r);
private:
enum State {
STATE_UNINITIALIZED,
STATE_OPEN,
STATE_CLOSED,
STATE_OPENING,
STATE_CLOSING,
STATE_REFRESHING,
STATE_SETTING_SNAP,
STATE_PREPARING_LOCK
};
enum ActionType {
ACTION_TYPE_OPEN,
ACTION_TYPE_CLOSE,
ACTION_TYPE_REFRESH,
ACTION_TYPE_SET_SNAP,
ACTION_TYPE_LOCK
};
struct Action {
ActionType action_type;
uint64_t refresh_seq = 0;
uint64_t snap_id = CEPH_NOSNAP;
Context *on_ready = nullptr;
Action(ActionType action_type) : action_type(action_type) {
}
inline bool operator==(const Action &action) const {
if (action_type != action.action_type) {
return false;
}
switch (action_type) {
case ACTION_TYPE_REFRESH:
return (refresh_seq == action.refresh_seq);
case ACTION_TYPE_SET_SNAP:
return (snap_id == action.snap_id);
case ACTION_TYPE_LOCK:
return false;
default:
return true;
}
}
};
typedef std::list<Context *> Contexts;
typedef std::pair<Action, Contexts> ActionContexts;
typedef std::list<ActionContexts> ActionsContexts;
ImageCtxT *m_image_ctx;
State m_state;
mutable ceph::mutex m_lock;
ActionsContexts m_actions_contexts;
uint64_t m_last_refresh;
uint64_t m_refresh_seq;
ImageUpdateWatchers *m_update_watchers;
QuiesceWatchers *m_quiesce_watchers;
uint64_t m_open_flags;
bool is_transition_state() const;
bool is_closed() const;
const Action *find_pending_refresh() const;
void append_context(const Action &action, Context *context);
void execute_next_action_unlock();
void execute_action_unlock(const Action &action, Context *context);
void complete_action_unlock(State next_state, int r);
void send_open_unlock();
void handle_open(int r);
void send_close_unlock();
void handle_close(int r);
void send_refresh_unlock();
void handle_refresh(int r);
void send_set_snap_unlock();
void handle_set_snap(int r);
void send_prepare_lock_unlock();
};
} // namespace librbd
extern template class librbd::ImageState<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_STATE_H
| 3,735 | 22.948718 | 75 | h |
null | ceph-main/src/librbd/ImageWatcher.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ImageWatcher.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/TaskFinisher.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/image_watcher/NotifyLockOwner.h"
#include "librbd/io/AioCompletion.h"
#include "include/encoding.h"
#include "common/errno.h"
#include <boost/bind/bind.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ImageWatcher: "
namespace librbd {
using namespace image_watcher;
using namespace watch_notify;
using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;
using ceph::encode;
using ceph::decode;
using namespace boost::placeholders;
static const double RETRY_DELAY_SECONDS = 1.0;
template <typename I>
struct ImageWatcher<I>::C_ProcessPayload : public Context {
ImageWatcher *image_watcher;
uint64_t notify_id;
uint64_t handle;
std::unique_ptr<watch_notify::Payload> payload;
C_ProcessPayload(ImageWatcher *image_watcher, uint64_t notify_id,
uint64_t handle,
std::unique_ptr<watch_notify::Payload> &&payload)
: image_watcher(image_watcher), notify_id(notify_id), handle(handle),
payload(std::move(payload)) {
}
void finish(int r) override {
image_watcher->m_async_op_tracker.start_op();
if (image_watcher->notifications_blocked()) {
// requests are blocked -- just ack the notification
bufferlist bl;
image_watcher->acknowledge_notify(notify_id, handle, bl);
} else {
image_watcher->process_payload(notify_id, handle, payload.get());
}
image_watcher->m_async_op_tracker.finish_op();
}
};
template <typename I>
ImageWatcher<I>::ImageWatcher(I &image_ctx)
: Watcher(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid),
m_image_ctx(image_ctx),
m_task_finisher(new TaskFinisher<Task>(*m_image_ctx.cct)),
m_async_request_lock(ceph::make_shared_mutex(
util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this))),
m_owner_client_id_lock(ceph::make_mutex(
util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this)))
{
}
template <typename I>
ImageWatcher<I>::~ImageWatcher()
{
delete m_task_finisher;
}
template <typename I>
void ImageWatcher<I>::unregister_watch(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " unregistering image watcher" << dendl;
cancel_async_requests();
// flush the task finisher queue before completing
on_finish = create_async_context_callback(m_task_finisher, on_finish);
on_finish = new LambdaContext([this, on_finish](int r) {
cancel_quiesce_requests();
m_task_finisher->cancel_all();
m_async_op_tracker.wait_for_ops(on_finish);
});
Watcher::unregister_watch(on_finish);
}
template <typename I>
void ImageWatcher<I>::block_notifies(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
on_finish = new LambdaContext([this, on_finish](int r) {
cancel_async_requests();
on_finish->complete(r);
});
Watcher::block_notifies(on_finish);
}
template <typename I>
void ImageWatcher<I>::schedule_async_progress(const AsyncRequestId &request,
uint64_t offset, uint64_t total) {
auto ctx = new LambdaContext([this, request, offset, total](int r) {
if (r != -ECANCELED) {
notify_async_progress(request, offset, total);
}
});
m_task_finisher->queue(Task(TASK_CODE_ASYNC_PROGRESS, request), ctx);
}
template <typename I>
int ImageWatcher<I>::notify_async_progress(const AsyncRequestId &request,
uint64_t offset, uint64_t total) {
ldout(m_image_ctx.cct, 20) << this << " remote async request progress: "
<< request << " @ " << offset
<< "/" << total << dendl;
send_notify(new AsyncProgressPayload(request, offset, total));
return 0;
}
template <typename I>
void ImageWatcher<I>::schedule_async_complete(const AsyncRequestId &request,
int r) {
m_async_op_tracker.start_op();
auto ctx = new LambdaContext([this, request, ret_val=r](int r) {
if (r != -ECANCELED) {
notify_async_complete(request, ret_val);
}
});
m_task_finisher->queue(ctx);
}
template <typename I>
void ImageWatcher<I>::notify_async_complete(const AsyncRequestId &request,
int r) {
ldout(m_image_ctx.cct, 20) << this << " remote async request finished: "
<< request << "=" << r << dendl;
send_notify(new AsyncCompletePayload(request, r),
new LambdaContext(boost::bind(&ImageWatcher<I>::handle_async_complete,
this, request, r, _1)));
}
template <typename I>
void ImageWatcher<I>::handle_async_complete(const AsyncRequestId &request,
int r, int ret_val) {
ldout(m_image_ctx.cct, 20) << this << " " << __func__ << ": "
<< "request=" << request << ", r=" << ret_val
<< dendl;
if (ret_val < 0) {
lderr(m_image_ctx.cct) << this << " failed to notify async complete: "
<< cpp_strerror(ret_val) << dendl;
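    // a timed out notification is retried while the watch is still registered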
if (ret_val == -ETIMEDOUT && !is_unregistered()) {
schedule_async_complete(request, r);
m_async_op_tracker.finish_op();
return;
}
}
std::unique_lock async_request_locker{m_async_request_lock};
mark_async_request_complete(request, r);
m_async_op_tracker.finish_op();
}
template <typename I>
void ImageWatcher<I>::notify_flatten(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id, new FlattenPayload(async_request_id),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_resize(uint64_t request_id, uint64_t size,
bool allow_shrink,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new ResizePayload(async_request_id, size, allow_shrink),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_create(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
uint64_t flags,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new SnapCreatePayload(async_request_id, snap_namespace,
snap_name, flags),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_rename(uint64_t request_id,
const snapid_t &src_snap_id,
const std::string &dst_snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new SnapRenamePayload(async_request_id, src_snap_id, dst_snap_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_remove(
uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new SnapRemovePayload(async_request_id, snap_namespace, snap_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_protect(
uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new SnapProtectPayload(async_request_id, snap_namespace, snap_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_unprotect(
uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new SnapUnprotectPayload(async_request_id, snap_namespace, snap_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new RebuildObjectMapPayload(async_request_id),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_rename(uint64_t request_id,
const std::string &image_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new RenamePayload(async_request_id, image_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_update_features(uint64_t request_id,
uint64_t features, bool enabled,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new UpdateFeaturesPayload(async_request_id, features, enabled),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_migrate(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id, new MigratePayload(async_request_id),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_sparsify(uint64_t request_id, size_t sparse_size,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new SparsifyPayload(async_request_id, sparse_size),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_header_update(Context *on_finish) {
ldout(m_image_ctx.cct, 10) << this << ": " << __func__ << dendl;
// supports legacy (empty buffer) clients
send_notify(new HeaderUpdatePayload(), on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_header_update(librados::IoCtx &io_ctx,
const std::string &oid) {
// supports legacy (empty buffer) clients
bufferlist bl;
encode(NotifyMessage(new HeaderUpdatePayload()), bl);
io_ctx.notify2(oid, bl, watcher::Notifier::NOTIFY_TIMEOUT, nullptr);
}
template <typename I>
void ImageWatcher<I>::notify_quiesce(uint64_t *request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
*request_id = util::reserve_async_request_id();
ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": request_id="
                             << *request_id << dendl;
AsyncRequestId async_request_id(get_client_id(), *request_id);
auto total_attempts = m_image_ctx.config.template get_val<uint64_t>(
"rbd_quiesce_notification_attempts");
notify_quiesce(async_request_id, 1, total_attempts, prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_quiesce(const AsyncRequestId &async_request_id,
size_t attempt, size_t total_attempts,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(attempt <= total_attempts);
ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": async_request_id="
<< async_request_id << " attempts @ "
<< attempt << "/" << total_attempts << dendl;
auto notify_response = new watcher::NotifyResponse();
auto on_notify = new LambdaContext(
[notify_response=std::unique_ptr<watcher::NotifyResponse>(notify_response),
this, async_request_id, attempt, total_attempts, &prog_ctx,
on_finish](int r) {
prog_ctx.update_progress(attempt, total_attempts);
if (r == -ETIMEDOUT) {
ldout(m_image_ctx.cct, 10) << this << " " << __func__
<< ": async_request_id=" << async_request_id
<< " timed out" << dendl;
if (attempt < total_attempts) {
notify_quiesce(async_request_id, attempt + 1, total_attempts,
prog_ctx, on_finish);
return;
}
} else if (r == 0) {
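        // an error decoded from any ack (other than legacy -EOPNOTSUPP)
        // fails the quiesce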
for (auto &[client_id, bl] : notify_response->acks) {
if (bl.length() == 0) {
continue;
}
try {
auto iter = bl.cbegin();
ResponseMessage response_message;
using ceph::decode;
decode(response_message, iter);
if (response_message.result != -EOPNOTSUPP) {
r = response_message.result;
}
} catch (const buffer::error &err) {
r = -EINVAL;
}
if (r < 0) {
break;
}
}
}
if (r < 0) {
lderr(m_image_ctx.cct) << this << " failed to notify quiesce: "
<< cpp_strerror(r) << dendl;
}
on_finish->complete(r);
});
bufferlist bl;
encode(NotifyMessage(new QuiescePayload(async_request_id)), bl);
Watcher::send_notify(bl, notify_response, on_notify);
}
template <typename I>
void ImageWatcher<I>::notify_unquiesce(uint64_t request_id, Context *on_finish) {
ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": request_id="
<< request_id << dendl;
AsyncRequestId async_request_id(get_client_id(), request_id);
send_notify(new UnquiescePayload(async_request_id), on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_metadata_set(uint64_t request_id,
const std::string &key,
const std::string &value,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new MetadataUpdatePayload(async_request_id, key,
std::optional<std::string>{value}),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_metadata_remove(uint64_t request_id,
const std::string &key,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new MetadataUpdatePayload(async_request_id, key, std::nullopt),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::schedule_cancel_async_requests() {
auto ctx = new LambdaContext([this](int r) {
if (r != -ECANCELED) {
cancel_async_requests();
}
});
m_task_finisher->queue(TASK_CODE_CANCEL_ASYNC_REQUESTS, ctx);
}
template <typename I>
void ImageWatcher<I>::cancel_async_requests() {
std::unique_lock l{m_async_request_lock};
for (auto iter = m_async_requests.begin(); iter != m_async_requests.end(); ) {
if (iter->second.second == nullptr) {
// Quiesce notify request. Skip.
iter++;
} else {
iter->second.first->complete(-ERESTART);
iter = m_async_requests.erase(iter);
}
}
}
template <typename I>
void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
ceph_assert(ceph_mutex_is_locked(m_owner_client_id_lock));
m_owner_client_id = client_id;
ldout(m_image_ctx.cct, 10) << this << " current lock owner: "
<< m_owner_client_id << dendl;
}
template <typename I>
ClientId ImageWatcher<I>::get_client_id() {
std::shared_lock l{this->m_watch_lock};
return ClientId(m_image_ctx.md_ctx.get_instance_id(), this->m_watch_handle);
}
template <typename I>
void ImageWatcher<I>::notify_acquired_lock() {
ldout(m_image_ctx.cct, 10) << this << " notify acquired lock" << dendl;
ClientId client_id = get_client_id();
{
std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
set_owner_client_id(client_id);
}
send_notify(new AcquiredLockPayload(client_id));
}
template <typename I>
void ImageWatcher<I>::notify_released_lock() {
ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl;
{
std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
set_owner_client_id(ClientId());
}
send_notify(new ReleasedLockPayload(get_client_id()));
}
template <typename I>
void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
// see notify_request_lock()
if (m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner()) {
return;
}
std::shared_lock watch_locker{this->m_watch_lock};
if (this->is_registered(this->m_watch_lock)) {
ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl;
auto ctx = new LambdaContext([this](int r) {
if (r != -ECANCELED) {
notify_request_lock();
}
});
if (use_timer) {
if (timer_delay < 0) {
timer_delay = RETRY_DELAY_SECONDS;
}
m_task_finisher->add_event_after(TASK_CODE_REQUEST_LOCK,
timer_delay, ctx);
} else {
m_task_finisher->queue(TASK_CODE_REQUEST_LOCK, ctx);
}
}
}
template <typename I>
void ImageWatcher<I>::notify_request_lock() {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::shared_lock image_locker{m_image_ctx.image_lock};
// ExclusiveLock state machine can be dynamically disabled or
// race with task cancel
if (m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner()) {
return;
}
ldout(m_image_ctx.cct, 10) << this << " notify request lock" << dendl;
notify_lock_owner(new RequestLockPayload(get_client_id(), false),
create_context_callback<
ImageWatcher, &ImageWatcher<I>::handle_request_lock>(this));
}
template <typename I>
void ImageWatcher<I>::handle_request_lock(int r) {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::shared_lock image_locker{m_image_ctx.image_lock};
// ExclusiveLock state machine cannot transition -- but can be
// dynamically disabled
if (m_image_ctx.exclusive_lock == nullptr) {
return;
}
if (r == -ETIMEDOUT) {
ldout(m_image_ctx.cct, 5) << this << " timed out requesting lock: retrying"
<< dendl;
    // treat this as a dead client -- so retest acquiring the lock
m_image_ctx.exclusive_lock->handle_peer_notification(0);
} else if (r == -EROFS) {
ldout(m_image_ctx.cct, 5) << this << " peer will not release lock" << dendl;
m_image_ctx.exclusive_lock->handle_peer_notification(r);
} else if (r < 0) {
lderr(m_image_ctx.cct) << this << " error requesting lock: "
<< cpp_strerror(r) << dendl;
schedule_request_lock(true);
} else {
// lock owner acked -- but resend if we don't see them release the lock
int retry_timeout = m_image_ctx.cct->_conf.template get_val<int64_t>(
"client_notify_timeout");
ldout(m_image_ctx.cct, 15) << this << " will retry in " << retry_timeout
<< " seconds" << dendl;
schedule_request_lock(true, retry_timeout);
}
}
template <typename I>
void ImageWatcher<I>::notify_lock_owner(Payload *payload, Context *on_finish) {
ceph_assert(on_finish != nullptr);
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bufferlist bl;
encode(NotifyMessage(payload), bl);
NotifyLockOwner *notify_lock_owner = NotifyLockOwner::create(
m_image_ctx, this->m_notifier, std::move(bl), on_finish);
notify_lock_owner->send();
}
template <typename I>
bool ImageWatcher<I>::is_new_request(const AsyncRequestId &id) const {
ceph_assert(ceph_mutex_is_locked(m_async_request_lock));
return m_async_pending.count(id) == 0 && m_async_complete.count(id) == 0;
}
template <typename I>
bool ImageWatcher<I>::mark_async_request_complete(const AsyncRequestId &id,
int r) {
ceph_assert(ceph_mutex_is_locked(m_async_request_lock));
bool found = m_async_pending.erase(id);
auto now = ceph_clock_now();
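  // prune completed-request entries whose retention window has expired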
auto it = m_async_complete_expiration.begin();
while (it != m_async_complete_expiration.end() && it->first < now) {
m_async_complete.erase(it->second);
it = m_async_complete_expiration.erase(it);
}
if (!m_async_complete.insert({id, r}).second) {
for (it = m_async_complete_expiration.begin();
it != m_async_complete_expiration.end(); it++) {
if (it->second == id) {
m_async_complete_expiration.erase(it);
break;
}
}
}
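  // remember the result for 10 minutes so duplicate notifications can be
  // answered without re-executing the request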
auto expiration_time = now;
expiration_time += 600;
m_async_complete_expiration.insert({expiration_time, id});
return found;
}
template <typename I>
Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id) {
std::unique_lock async_request_locker{m_async_request_lock};
return remove_async_request(id, m_async_request_lock);
}
template <typename I>
Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id,
ceph::shared_mutex &lock) {
ceph_assert(ceph_mutex_is_locked(lock));
ldout(m_image_ctx.cct, 20) << __func__ << ": " << id << dendl;
auto it = m_async_requests.find(id);
if (it != m_async_requests.end()) {
Context *on_complete = it->second.first;
m_async_requests.erase(it);
return on_complete;
}
return nullptr;
}
template <typename I>
void ImageWatcher<I>::schedule_async_request_timed_out(const AsyncRequestId &id) {
ldout(m_image_ctx.cct, 20) << "scheduling async request time out: " << id
<< dendl;
auto ctx = new LambdaContext([this, id](int r) {
if (r != -ECANCELED) {
async_request_timed_out(id);
}
});
Task task(TASK_CODE_ASYNC_REQUEST, id);
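  // cancel any pending timer so the timeout window restarts from now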
m_task_finisher->cancel(task);
m_task_finisher->add_event_after(
task, m_image_ctx.config.template get_val<uint64_t>("rbd_request_timed_out_seconds"),
ctx);
}
template <typename I>
void ImageWatcher<I>::async_request_timed_out(const AsyncRequestId &id) {
Context *on_complete = remove_async_request(id);
if (on_complete != nullptr) {
ldout(m_image_ctx.cct, 5) << "async request timed out: " << id << dendl;
m_image_ctx.op_work_queue->queue(on_complete, -ETIMEDOUT);
}
}
template <typename I>
void ImageWatcher<I>::notify_async_request(
const AsyncRequestId &async_request_id, Payload *payload,
ProgressContext& prog_ctx, Context *on_finish) {
ceph_assert(on_finish != nullptr);
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id
<< dendl;
Context *on_notify = new LambdaContext([this, async_request_id](int r) {
if (r < 0) {
// notification failed -- don't expect updates
Context *on_complete = remove_async_request(async_request_id);
if (on_complete != nullptr) {
on_complete->complete(r);
}
}
});
Context *on_complete = new LambdaContext(
[this, async_request_id, on_finish](int r) {
m_task_finisher->cancel(Task(TASK_CODE_ASYNC_REQUEST, async_request_id));
on_finish->complete(r);
});
{
std::unique_lock async_request_locker{m_async_request_lock};
m_async_requests[async_request_id] = AsyncRequest(on_complete, &prog_ctx);
}
schedule_async_request_timed_out(async_request_id);
notify_lock_owner(payload, on_notify);
}
template <typename I>
int ImageWatcher<I>::prepare_async_request(const AsyncRequestId& async_request_id,
bool* new_request, Context** ctx,
ProgressContext** prog_ctx) {
if (async_request_id.client_id == get_client_id()) {
return -ERESTART;
} else {
std::unique_lock l{m_async_request_lock};
if (is_new_request(async_request_id)) {
m_async_pending.insert(async_request_id);
*new_request = true;
*prog_ctx = new RemoteProgressContext(*this, async_request_id);
*ctx = new RemoteContext(*this, async_request_id, *prog_ctx);
} else {
*new_request = false;
auto it = m_async_complete.find(async_request_id);
if (it != m_async_complete.end()) {
int r = it->second;
// reset complete request expiration time
mark_async_request_complete(async_request_id, r);
return r;
}
}
}
return 0;
}
template <typename I>
Context *ImageWatcher<I>::prepare_quiesce_request(
const AsyncRequestId &request, C_NotifyAck *ack_ctx) {
std::unique_lock locker{m_async_request_lock};
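  // hold the quiesced state for up to twice the notify timeout (in seconds)
  // before automatically unquiescing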
auto timeout = 2 * watcher::Notifier::NOTIFY_TIMEOUT / 1000;
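  // duplicate notification: swap in the new ack context, or replay the
  // cached result if the request already completed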
if (!is_new_request(request)) {
auto it = m_async_requests.find(request);
if (it != m_async_requests.end()) {
delete it->second.first;
it->second.first = ack_ctx;
} else {
auto it = m_async_complete.find(request);
ceph_assert(it != m_async_complete.end());
m_task_finisher->queue(new C_ResponseMessage(ack_ctx), it->second);
// reset complete request expiration time
mark_async_request_complete(request, it->second);
}
locker.unlock();
m_task_finisher->reschedule_event_after(Task(TASK_CODE_QUIESCE, request),
timeout);
return nullptr;
}
m_async_pending.insert(request);
m_async_requests[request] = AsyncRequest(ack_ctx, nullptr);
m_async_op_tracker.start_op();
return new LambdaContext(
[this, request, timeout](int r) {
auto unquiesce_ctx = new LambdaContext(
[this, request](int r) {
if (r == 0) {
ldout(m_image_ctx.cct, 10) << this << " quiesce request "
<< request << " timed out" << dendl;
}
auto on_finish = new LambdaContext(
[this](int r) {
m_async_op_tracker.finish_op();
});
m_image_ctx.state->notify_unquiesce(on_finish);
});
m_task_finisher->add_event_after(Task(TASK_CODE_QUIESCE, request),
timeout, unquiesce_ctx);
std::unique_lock async_request_locker{m_async_request_lock};
mark_async_request_complete(request, r);
auto ctx = remove_async_request(request, m_async_request_lock);
async_request_locker.unlock();
if (ctx != nullptr) {
ctx = new C_ResponseMessage(static_cast<C_NotifyAck *>(ctx));
ctx->complete(r);
} else {
m_task_finisher->cancel(Task(TASK_CODE_QUIESCE, request));
}
});
}
template <typename I>
void ImageWatcher<I>::prepare_unquiesce_request(const AsyncRequestId &request) {
{
std::unique_lock async_request_locker{m_async_request_lock};
auto it = m_async_complete.find(request);
if (it == m_async_complete.end()) {
ldout(m_image_ctx.cct, 20) << this << " " << request
<< ": not found in complete" << dendl;
return;
}
// reset complete request expiration time
mark_async_request_complete(request, it->second);
}
bool canceled = m_task_finisher->cancel(Task(TASK_CODE_QUIESCE, request));
if (!canceled) {
ldout(m_image_ctx.cct, 20) << this << " " << request
<< ": timer task not found" << dendl;
}
}
template <typename I>
void ImageWatcher<I>::cancel_quiesce_requests() {
std::unique_lock l{m_async_request_lock};
for (auto it = m_async_requests.begin(); it != m_async_requests.end(); ) {
if (it->second.second == nullptr) {
// Quiesce notify request.
mark_async_request_complete(it->first, 0);
delete it->second.first;
it = m_async_requests.erase(it);
} else {
it++;
}
}
}
template <typename I>
bool ImageWatcher<I>::handle_operation_request(
const AsyncRequestId& async_request_id,
exclusive_lock::OperationRequestType request_type, Operation operation,
std::function<void(ProgressContext &prog_ctx, Context*)> execute,
C_NotifyAck *ack_ctx) {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r = 0;
if (m_image_ctx.exclusive_lock->accept_request(request_type, &r)) {
bool new_request;
Context *ctx;
ProgressContext *prog_ctx;
bool complete;
if (async_request_id) {
r = prepare_async_request(async_request_id, &new_request, &ctx,
&prog_ctx);
encode(ResponseMessage(r), ack_ctx->out);
complete = true;
} else {
new_request = true;
ctx = new C_ResponseMessage(ack_ctx);
prog_ctx = &m_no_op_prog_ctx;
complete = false;
}
if (r == 0 && new_request) {
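        // finish the tracked operation on completion, and start executing the
        // request under the owner lock once start_op() allows it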
ctx = new LambdaContext(
[this, operation, ctx](int r) {
m_image_ctx.operations->finish_op(operation, r);
ctx->complete(r);
});
ctx = new LambdaContext(
[this, execute, prog_ctx, ctx](int r) {
if (r < 0) {
ctx->complete(r);
return;
}
std::shared_lock l{m_image_ctx.owner_lock};
execute(*prog_ctx, ctx);
});
m_image_ctx.operations->start_op(operation, ctx);
}
return complete;
} else if (r < 0) {
encode(ResponseMessage(r), ack_ctx->out);
}
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const HeaderUpdatePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " image header updated" << dendl;
m_image_ctx.state->handle_update_notification();
m_image_ctx.perfcounter->inc(l_librbd_notify);
if (ack_ctx != nullptr) {
m_image_ctx.state->flush_update_watchers(new C_ResponseMessage(ack_ctx));
return false;
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const AcquiredLockPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " image exclusively locked announcement"
<< dendl;
bool cancel_async_requests = true;
if (payload.client_id.is_valid()) {
std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
if (payload.client_id == m_owner_client_id) {
cancel_async_requests = false;
}
set_owner_client_id(payload.client_id);
}
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
// potentially wake up the exclusive lock state machine now that
// a lock owner has advertised itself
m_image_ctx.exclusive_lock->handle_peer_notification(0);
}
if (cancel_async_requests &&
(m_image_ctx.exclusive_lock == nullptr ||
!m_image_ctx.exclusive_lock->is_lock_owner())) {
schedule_cancel_async_requests();
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const ReleasedLockPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " exclusive lock released" << dendl;
bool cancel_async_requests = true;
if (payload.client_id.is_valid()) {
std::lock_guard l{m_owner_client_id_lock};
if (payload.client_id != m_owner_client_id) {
ldout(m_image_ctx.cct, 10) << this << " unexpected owner: "
<< payload.client_id << " != "
<< m_owner_client_id << dendl;
cancel_async_requests = false;
} else {
set_owner_client_id(ClientId());
}
}
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (cancel_async_requests &&
(m_image_ctx.exclusive_lock == nullptr ||
!m_image_ctx.exclusive_lock->is_lock_owner())) {
schedule_cancel_async_requests();
}
// alert the exclusive lock state machine that the lock is available
if (m_image_ctx.exclusive_lock != nullptr &&
!m_image_ctx.exclusive_lock->is_lock_owner()) {
m_task_finisher->cancel(TASK_CODE_REQUEST_LOCK);
m_image_ctx.exclusive_lock->handle_peer_notification(0);
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " exclusive lock requested" << dendl;
if (payload.client_id == get_client_id()) {
return true;
}
std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr &&
m_image_ctx.exclusive_lock->is_lock_owner()) {
int r = 0;
bool accept_request = m_image_ctx.exclusive_lock->accept_request(
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, &r);
if (accept_request) {
ceph_assert(r == 0);
std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
if (!m_owner_client_id.is_valid()) {
return true;
}
ldout(m_image_ctx.cct, 10) << this << " queuing release of exclusive lock"
<< dendl;
r = m_image_ctx.get_exclusive_lock_policy()->lock_requested(
payload.force);
}
encode(ResponseMessage(r), ack_ctx->out);
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncProgressPayload &payload,
C_NotifyAck *ack_ctx) {
std::shared_lock l{m_async_request_lock};
std::map<AsyncRequestId, AsyncRequest>::iterator req_it =
m_async_requests.find(payload.async_request_id);
if (req_it != m_async_requests.end()) {
ldout(m_image_ctx.cct, 20) << this << " request progress: "
<< payload.async_request_id << " @ "
<< payload.offset << "/" << payload.total
<< dendl;
schedule_async_request_timed_out(payload.async_request_id);
req_it->second.second->update_progress(payload.offset, payload.total);
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncCompletePayload &payload,
C_NotifyAck *ack_ctx) {
Context *on_complete = remove_async_request(payload.async_request_id);
if (on_complete != nullptr) {
ldout(m_image_ctx.cct, 10) << this << " request finished: "
<< payload.async_request_id << "="
<< payload.result << dendl;
on_complete->complete(payload.result);
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const FlattenPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote flatten request: "
<< payload.async_request_id << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_FLATTEN, std::bind(&Operations<I>::execute_flatten,
m_image_ctx.operations,
std::placeholders::_1,
std::placeholders::_2),
ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const ResizePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote resize request: "
<< payload.async_request_id << " "
<< payload.size << " "
<< payload.allow_shrink << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_RESIZE, std::bind(&Operations<I>::execute_resize,
m_image_ctx.operations, payload.size,
payload.allow_shrink, std::placeholders::_1,
std::placeholders::_2, 0), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapCreatePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_create request: "
<< payload.async_request_id << " "
<< payload.snap_namespace << " "
<< payload.snap_name << " "
<< payload.flags << dendl;
auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;
// rbd-mirror needs to accept forced promotion orphan snap create requests
auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&payload.snap_namespace);
if (mirror_ns != nullptr && mirror_ns->is_orphan()) {
request_type = exclusive_lock::OPERATION_REQUEST_TYPE_FORCE_PROMOTION;
}
return handle_operation_request(
payload.async_request_id, request_type,
OPERATION_SNAP_CREATE, std::bind(&Operations<I>::execute_snap_create,
m_image_ctx.operations,
payload.snap_namespace,
payload.snap_name, std::placeholders::_2,
0, payload.flags, std::placeholders::_1),
ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRenamePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_rename request: "
<< payload.async_request_id << " "
<< payload.snap_id << " to "
<< payload.snap_name << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_SNAP_RENAME, std::bind(&Operations<I>::execute_snap_rename,
m_image_ctx.operations, payload.snap_id,
payload.snap_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRemovePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_remove request: "
<< payload.snap_name << dendl;
auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;
if (cls::rbd::get_snap_namespace_type(payload.snap_namespace) ==
cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
request_type = exclusive_lock::OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE;
}
return handle_operation_request(
payload.async_request_id, request_type, OPERATION_SNAP_REMOVE,
std::bind(&Operations<I>::execute_snap_remove, m_image_ctx.operations,
payload.snap_namespace, payload.snap_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapProtectPayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_protect request: "
<< payload.async_request_id << " "
<< payload.snap_name << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_SNAP_PROTECT, std::bind(&Operations<I>::execute_snap_protect,
m_image_ctx.operations,
payload.snap_namespace,
payload.snap_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapUnprotectPayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_unprotect request: "
<< payload.async_request_id << " "
<< payload.snap_name << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_SNAP_UNPROTECT, std::bind(&Operations<I>::execute_snap_unprotect,
m_image_ctx.operations,
payload.snap_namespace,
payload.snap_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const RebuildObjectMapPayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote rebuild object map request: "
<< payload.async_request_id << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_REBUILD_OBJECT_MAP,
std::bind(&Operations<I>::execute_rebuild_object_map,
m_image_ctx.operations, std::placeholders::_1,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const RenamePayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote rename request: "
<< payload.async_request_id << " "
<< payload.image_name << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_RENAME, std::bind(&Operations<I>::execute_rename,
m_image_ctx.operations, payload.image_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const UpdateFeaturesPayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote update_features request: "
<< payload.async_request_id << " "
<< payload.features << " "
<< (payload.enabled ? "enabled" : "disabled")
<< dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_UPDATE_FEATURES,
std::bind(&Operations<I>::execute_update_features, m_image_ctx.operations,
payload.features, payload.enabled, std::placeholders::_2, 0),
ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const MigratePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote migrate request: "
<< payload.async_request_id << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_MIGRATE, std::bind(&Operations<I>::execute_migrate,
m_image_ctx.operations,
std::placeholders::_1,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SparsifyPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote sparsify request: "
<< payload.async_request_id << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_SPARSIFY, std::bind(&Operations<I>::execute_sparsify,
m_image_ctx.operations,
payload.sparse_size, std::placeholders::_1,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const MetadataUpdatePayload &payload,
C_NotifyAck *ack_ctx) {
if (payload.value) {
ldout(m_image_ctx.cct, 10) << this << " remote metadata_set request: "
<< payload.async_request_id << " "
<< "key=" << payload.key << ", value="
<< *payload.value << dendl;
return handle_operation_request(
payload.async_request_id,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_METADATA_UPDATE,
std::bind(&Operations<I>::execute_metadata_set,
m_image_ctx.operations, payload.key, *payload.value,
std::placeholders::_2),
ack_ctx);
} else {
ldout(m_image_ctx.cct, 10) << this << " remote metadata_remove request: "
<< payload.async_request_id << " "
<< "key=" << payload.key << dendl;
return handle_operation_request(
payload.async_request_id,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_METADATA_UPDATE,
std::bind(&Operations<I>::execute_metadata_remove,
m_image_ctx.operations, payload.key, std::placeholders::_2),
ack_ctx);
}
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const QuiescePayload &payload,
C_NotifyAck *ack_ctx) {
auto on_finish = prepare_quiesce_request(payload.async_request_id, ack_ctx);
if (on_finish == nullptr) {
ldout(m_image_ctx.cct, 10) << this << " duplicate quiesce request: "
<< payload.async_request_id << dendl;
return false;
}
ldout(m_image_ctx.cct, 10) << this << " quiesce request: "
<< payload.async_request_id << dendl;
m_image_ctx.state->notify_quiesce(on_finish);
return false;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const UnquiescePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " unquiesce request: "
<< payload.async_request_id << dendl;
prepare_unquiesce_request(payload.async_request_id);
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const UnknownPayload &payload,
C_NotifyAck *ack_ctx) {
std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_request(
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, &r) || r < 0) {
encode(ResponseMessage(-EOPNOTSUPP), ack_ctx->out);
}
}
return true;
}
template <typename I>
void ImageWatcher<I>::process_payload(uint64_t notify_id, uint64_t handle,
Payload *payload) {
auto ctx = new Watcher::C_NotifyAck(this, notify_id, handle);
bool complete;
switch (payload->get_notify_op()) {
case NOTIFY_OP_ACQUIRED_LOCK:
complete = handle_payload(*(static_cast<AcquiredLockPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_RELEASED_LOCK:
complete = handle_payload(*(static_cast<ReleasedLockPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_REQUEST_LOCK:
complete = handle_payload(*(static_cast<RequestLockPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_HEADER_UPDATE:
complete = handle_payload(*(static_cast<HeaderUpdatePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_ASYNC_PROGRESS:
complete = handle_payload(*(static_cast<AsyncProgressPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_ASYNC_COMPLETE:
complete = handle_payload(*(static_cast<AsyncCompletePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_FLATTEN:
complete = handle_payload(*(static_cast<FlattenPayload *>(payload)), ctx);
break;
case NOTIFY_OP_RESIZE:
complete = handle_payload(*(static_cast<ResizePayload *>(payload)), ctx);
break;
case NOTIFY_OP_SNAP_CREATE:
complete = handle_payload(*(static_cast<SnapCreatePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_SNAP_REMOVE:
complete = handle_payload(*(static_cast<SnapRemovePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_SNAP_RENAME:
complete = handle_payload(*(static_cast<SnapRenamePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_SNAP_PROTECT:
complete = handle_payload(*(static_cast<SnapProtectPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_SNAP_UNPROTECT:
complete = handle_payload(*(static_cast<SnapUnprotectPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_REBUILD_OBJECT_MAP:
complete = handle_payload(*(static_cast<RebuildObjectMapPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_RENAME:
complete = handle_payload(*(static_cast<RenamePayload *>(payload)), ctx);
break;
case NOTIFY_OP_UPDATE_FEATURES:
complete = handle_payload(*(static_cast<UpdateFeaturesPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_MIGRATE:
complete = handle_payload(*(static_cast<MigratePayload *>(payload)), ctx);
break;
case NOTIFY_OP_SPARSIFY:
complete = handle_payload(*(static_cast<SparsifyPayload *>(payload)), ctx);
break;
case NOTIFY_OP_QUIESCE:
complete = handle_payload(*(static_cast<QuiescePayload *>(payload)), ctx);
break;
case NOTIFY_OP_UNQUIESCE:
complete = handle_payload(*(static_cast<UnquiescePayload *>(payload)), ctx);
break;
case NOTIFY_OP_METADATA_UPDATE:
complete = handle_payload(*(static_cast<MetadataUpdatePayload *>(payload)), ctx);
break;
default:
ceph_assert(payload->get_notify_op() == static_cast<NotifyOp>(-1));
complete = handle_payload(*(static_cast<UnknownPayload *>(payload)), ctx);
}
if (complete) {
ctx->complete(0);
}
}
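// watch/notify entry point: an empty payload is treated as a legacy header
// update, otherwise the message is decoded and, if it requires an up-to-date
// image, the image is refreshed before the payload is processed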
template <typename I>
void ImageWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) {
NotifyMessage notify_message;
if (bl.length() == 0) {
// legacy notification for header updates
notify_message = NotifyMessage(new HeaderUpdatePayload());
} else {
try {
auto iter = bl.cbegin();
decode(notify_message, iter);
} catch (const buffer::error &err) {
lderr(m_image_ctx.cct) << this << " error decoding image notification: "
<< err.what() << dendl;
return;
}
}
// if an image refresh is required, refresh before processing the request
if (notify_message.check_for_refresh() &&
m_image_ctx.state->is_refresh_required()) {
m_image_ctx.state->refresh(
new C_ProcessPayload(this, notify_id, handle,
std::move(notify_message.payload)));
} else {
process_payload(notify_id, handle, notify_message.payload.get());
}
}
template <typename I>
void ImageWatcher<I>::handle_error(uint64_t handle, int err) {
lderr(m_image_ctx.cct) << this << " image watch failed: " << handle << ", "
<< cpp_strerror(err) << dendl;
{
std::lock_guard l{m_owner_client_id_lock};
set_owner_client_id(ClientId());
}
Watcher::handle_error(handle, err);
}
template <typename I>
void ImageWatcher<I>::handle_rewatch_complete(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
{
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
// update the lock cookie with the new watch handle
m_image_ctx.exclusive_lock->reacquire_lock(nullptr);
}
}
// image might have been updated while we didn't have active watch
handle_payload(HeaderUpdatePayload(), nullptr);
}
template <typename I>
void ImageWatcher<I>::send_notify(Payload *payload, Context *ctx) {
bufferlist bl;
encode(NotifyMessage(payload), bl);
Watcher::send_notify(bl, nullptr, ctx);
}
template <typename I>
void ImageWatcher<I>::RemoteContext::finish(int r) {
m_image_watcher.schedule_async_complete(m_async_request_id, r);
}
template <typename I>
void ImageWatcher<I>::C_ResponseMessage::finish(int r) {
CephContext *cct = notify_ack->cct;
ldout(cct, 10) << this << " C_ResponseMessage: r=" << r << dendl;
encode(ResponseMessage(r), notify_ack->out);
notify_ack->complete(0);
}
} // namespace librbd
template class librbd::ImageWatcher<librbd::ImageCtx>;
| 56,631 | 35.395887 | 91 | cc |
null | ceph-main/src/librbd/ImageWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_WATCHER_H
#define CEPH_LIBRBD_IMAGE_WATCHER_H
#include "cls/rbd/cls_rbd_types.h"
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rbd/librbd.hpp"
#include "librbd/Operations.h"
#include "librbd/Watcher.h"
#include "librbd/WatchNotifyTypes.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/internal.h"
#include <functional>
#include <set>
#include <string>
#include <utility>
class entity_name_t;
namespace librbd {
class ImageCtx;
template <typename> class TaskFinisher;
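// Watches the RBD image header object and uses watch/notify to coordinate
// clients: broadcasting lock acquired/released/request messages and header
// updates, and proxying maintenance operations (resize, snapshot, flatten,
// rebuild object map, ...) to whichever client owns the exclusive lock.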
template <typename ImageCtxT = ImageCtx>
class ImageWatcher : public Watcher {
public:
ImageWatcher(ImageCtxT& image_ctx);
~ImageWatcher() override;
void unregister_watch(Context *on_finish) override;
void block_notifies(Context *on_finish) override;
void notify_flatten(uint64_t request_id, ProgressContext &prog_ctx,
Context *on_finish);
void notify_resize(uint64_t request_id, uint64_t size, bool allow_shrink,
ProgressContext &prog_ctx, Context *on_finish);
void notify_snap_create(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
uint64_t flags,
ProgressContext &prog_ctx,
Context *on_finish);
void notify_snap_rename(uint64_t request_id,
const snapid_t &src_snap_id,
const std::string &dst_snap_name,
Context *on_finish);
void notify_snap_remove(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish);
void notify_snap_protect(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish);
void notify_snap_unprotect(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish);
void notify_rebuild_object_map(uint64_t request_id,
ProgressContext &prog_ctx, Context *on_finish);
void notify_rename(uint64_t request_id,
const std::string &image_name, Context *on_finish);
void notify_update_features(uint64_t request_id,
uint64_t features, bool enabled,
Context *on_finish);
void notify_migrate(uint64_t request_id, ProgressContext &prog_ctx,
Context *on_finish);
void notify_sparsify(uint64_t request_id, size_t sparse_size,
ProgressContext &prog_ctx, Context *on_finish);
void notify_acquired_lock();
void notify_released_lock();
void notify_request_lock();
void notify_header_update(Context *on_finish);
static void notify_header_update(librados::IoCtx &io_ctx,
const std::string &oid);
void notify_quiesce(uint64_t *request_id, ProgressContext &prog_ctx,
Context *on_finish);
void notify_unquiesce(uint64_t request_id, Context *on_finish);
void notify_metadata_set(uint64_t request_id,
const std::string &key, const std::string &value,
Context *on_finish);
void notify_metadata_remove(uint64_t request_id,
const std::string &key, Context *on_finish);
private:
enum TaskCode {
TASK_CODE_REQUEST_LOCK,
TASK_CODE_CANCEL_ASYNC_REQUESTS,
TASK_CODE_REREGISTER_WATCH,
TASK_CODE_ASYNC_REQUEST,
TASK_CODE_ASYNC_PROGRESS,
TASK_CODE_QUIESCE,
};
typedef std::pair<Context *, ProgressContext *> AsyncRequest;
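  // key used by the TaskFinisher to schedule, de-duplicate and cancel
  // pending work; async request/progress/quiesce tasks are additionally
  // keyed by their async request id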
class Task {
public:
Task(TaskCode task_code) : m_task_code(task_code) {}
Task(TaskCode task_code, const watch_notify::AsyncRequestId &id)
: m_task_code(task_code), m_async_request_id(id) {}
inline bool operator<(const Task& rhs) const {
if (m_task_code != rhs.m_task_code) {
return m_task_code < rhs.m_task_code;
} else if ((m_task_code == TASK_CODE_ASYNC_REQUEST ||
m_task_code == TASK_CODE_ASYNC_PROGRESS ||
m_task_code == TASK_CODE_QUIESCE) &&
m_async_request_id != rhs.m_async_request_id) {
return m_async_request_id < rhs.m_async_request_id;
}
return false;
}
private:
TaskCode m_task_code;
watch_notify::AsyncRequestId m_async_request_id;
};
class RemoteProgressContext : public ProgressContext {
public:
RemoteProgressContext(ImageWatcher &image_watcher,
const watch_notify::AsyncRequestId &id)
: m_image_watcher(image_watcher), m_async_request_id(id)
{
}
int update_progress(uint64_t offset, uint64_t total) override {
m_image_watcher.schedule_async_progress(m_async_request_id, offset,
total);
return 0;
}
private:
ImageWatcher &m_image_watcher;
watch_notify::AsyncRequestId m_async_request_id;
};
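  // completion for a locally executed, remotely requested operation; routes
  // the final return code back to the requester via an async complete
  // notification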
class RemoteContext : public Context {
public:
RemoteContext(ImageWatcher &image_watcher,
const watch_notify::AsyncRequestId &id,
ProgressContext *prog_ctx)
: m_image_watcher(image_watcher), m_async_request_id(id),
m_prog_ctx(prog_ctx)
{
}
~RemoteContext() override {
delete m_prog_ctx;
}
void finish(int r) override;
private:
ImageWatcher &m_image_watcher;
watch_notify::AsyncRequestId m_async_request_id;
ProgressContext *m_prog_ctx;
};
struct C_ProcessPayload;
struct C_ResponseMessage : public Context {
C_NotifyAck *notify_ack;
C_ResponseMessage(C_NotifyAck *notify_ack) : notify_ack(notify_ack) {
}
void finish(int r) override;
};
ImageCtxT &m_image_ctx;
TaskFinisher<Task> *m_task_finisher;
ceph::shared_mutex m_async_request_lock;
std::map<watch_notify::AsyncRequestId, AsyncRequest> m_async_requests;
std::set<watch_notify::AsyncRequestId> m_async_pending;
std::map<watch_notify::AsyncRequestId, int> m_async_complete;
std::set<std::pair<utime_t,
watch_notify::AsyncRequestId>> m_async_complete_expiration;
ceph::mutex m_owner_client_id_lock;
watch_notify::ClientId m_owner_client_id;
AsyncOpTracker m_async_op_tracker;
NoOpProgressContext m_no_op_prog_ctx;
void handle_register_watch(int r);
void schedule_cancel_async_requests();
void cancel_async_requests();
void set_owner_client_id(const watch_notify::ClientId &client_id);
watch_notify::ClientId get_client_id();
void handle_request_lock(int r);
void schedule_request_lock(bool use_timer, int timer_delay = -1);
void notify_lock_owner(watch_notify::Payload *payload, Context *on_finish);
bool is_new_request(const watch_notify::AsyncRequestId &id) const;
bool mark_async_request_complete(const watch_notify::AsyncRequestId &id,
int r);
Context *remove_async_request(const watch_notify::AsyncRequestId &id);
Context *remove_async_request(const watch_notify::AsyncRequestId &id,
ceph::shared_mutex &lock);
void schedule_async_request_timed_out(const watch_notify::AsyncRequestId &id);
void async_request_timed_out(const watch_notify::AsyncRequestId &id);
void notify_async_request(const watch_notify::AsyncRequestId &id,
watch_notify::Payload *payload,
ProgressContext& prog_ctx,
Context *on_finish);
void schedule_async_progress(const watch_notify::AsyncRequestId &id,
uint64_t offset, uint64_t total);
int notify_async_progress(const watch_notify::AsyncRequestId &id,
uint64_t offset, uint64_t total);
void schedule_async_complete(const watch_notify::AsyncRequestId &id, int r);
void notify_async_complete(const watch_notify::AsyncRequestId &id, int r);
void handle_async_complete(const watch_notify::AsyncRequestId &request, int r,
int ret_val);
int prepare_async_request(const watch_notify::AsyncRequestId& id,
bool* new_request, Context** ctx,
ProgressContext** prog_ctx);
Context *prepare_quiesce_request(const watch_notify::AsyncRequestId &request,
C_NotifyAck *ack_ctx);
void prepare_unquiesce_request(const watch_notify::AsyncRequestId &request);
void cancel_quiesce_requests();
void notify_quiesce(const watch_notify::AsyncRequestId &async_request_id,
size_t attempt, size_t total_attempts,
ProgressContext &prog_ctx, Context *on_finish);
bool handle_operation_request(
const watch_notify::AsyncRequestId& async_request_id,
exclusive_lock::OperationRequestType request_type, Operation operation,
std::function<void(ProgressContext &prog_ctx, Context*)> execute,
C_NotifyAck *ack_ctx);
bool handle_payload(const watch_notify::HeaderUpdatePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::AcquiredLockPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::ReleasedLockPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::RequestLockPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::AsyncProgressPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::AsyncCompletePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::FlattenPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::ResizePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapCreatePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapRenamePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapRemovePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapProtectPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapUnprotectPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::RebuildObjectMapPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::RenamePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::UpdateFeaturesPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::MigratePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SparsifyPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::QuiescePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::UnquiescePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::MetadataUpdatePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::UnknownPayload& payload,
C_NotifyAck *ctx);
void process_payload(uint64_t notify_id, uint64_t handle,
watch_notify::Payload *payload);
void handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) override;
void handle_error(uint64_t cookie, int err) override;
void handle_rewatch_complete(int r) override;
void send_notify(watch_notify::Payload *payload, Context *ctx = nullptr);
};
} // namespace librbd
extern template class librbd::ImageWatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_WATCHER_H
| 12,304 | 38.187898 | 80 | h |
null | ceph-main/src/librbd/Journal.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/Journal.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "cls/journal/cls_journal_types.h"
#include "journal/Journaler.h"
#include "journal/Policy.h"
#include "journal/ReplayEntry.h"
#include "journal/Settings.h"
#include "journal/Utils.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/journal/CreateRequest.h"
#include "librbd/journal/DemoteRequest.h"
#include "librbd/journal/ObjectDispatch.h"
#include "librbd/journal/OpenRequest.h"
#include "librbd/journal/RemoveRequest.h"
#include "librbd/journal/ResetRequest.h"
#include "librbd/journal/Replay.h"
#include "librbd/journal/PromoteRequest.h"
#include <boost/scope_exit.hpp>
#include <utility>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Journal: "
namespace librbd {
using util::create_async_context_callback;
using util::create_context_callback;
using journal::util::C_DecodeTag;
using journal::util::C_DecodeTags;
namespace {
// TODO: once journaler is 100% async and converted to ASIO, remove separate
// threads and reuse librbd's AsioEngine
class ThreadPoolSingleton : public ThreadPool {
public:
ContextWQ *work_queue;
explicit ThreadPoolSingleton(CephContext *cct)
: ThreadPool(cct, "librbd::Journal", "tp_librbd_journ", 1),
work_queue(new ContextWQ("librbd::journal::work_queue",
ceph::make_timespan(
cct->_conf.get_val<uint64_t>("rbd_op_thread_timeout")),
this)) {
start();
}
~ThreadPoolSingleton() override {
work_queue->drain();
delete work_queue;
stop();
}
};
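// opens a temporary journaler to inspect the most recent journal tag and
// determine whether the local image is the tag owner (i.e. primary)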
template <typename I>
struct C_IsTagOwner : public Context {
librados::IoCtx &io_ctx;
std::string image_id;
bool *is_tag_owner;
asio::ContextWQ *op_work_queue;
Context *on_finish;
CephContext *cct = nullptr;
Journaler *journaler;
cls::journal::Client client;
journal::ImageClientMeta client_meta;
uint64_t tag_tid = 0;
journal::TagData tag_data;
C_IsTagOwner(librados::IoCtx &io_ctx, const std::string &image_id,
bool *is_tag_owner, asio::ContextWQ *op_work_queue,
Context *on_finish)
: io_ctx(io_ctx), image_id(image_id), is_tag_owner(is_tag_owner),
op_work_queue(op_work_queue), on_finish(on_finish),
cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
journaler(new Journaler(io_ctx, image_id, Journal<>::IMAGE_CLIENT_ID,
{}, nullptr)) {
}
void finish(int r) override {
ldout(cct, 20) << this << " C_IsTagOwner::" << __func__ << ": r=" << r
<< dendl;
if (r < 0) {
lderr(cct) << this << " C_IsTagOwner::" << __func__ << ": "
<< "failed to get tag owner: " << cpp_strerror(r) << dendl;
} else {
*is_tag_owner = (tag_data.mirror_uuid == Journal<>::LOCAL_MIRROR_UUID);
}
Journaler *journaler = this->journaler;
Context *on_finish = this->on_finish;
auto ctx = new LambdaContext(
[journaler, on_finish](int r) {
on_finish->complete(r);
delete journaler;
});
op_work_queue->queue(ctx, r);
}
};
struct C_GetTagOwner : public Context {
std::string *mirror_uuid;
Context *on_finish;
Journaler journaler;
cls::journal::Client client;
journal::ImageClientMeta client_meta;
uint64_t tag_tid = 0;
journal::TagData tag_data;
C_GetTagOwner(librados::IoCtx &io_ctx, const std::string &image_id,
std::string *mirror_uuid, Context *on_finish)
: mirror_uuid(mirror_uuid), on_finish(on_finish),
journaler(io_ctx, image_id, Journal<>::IMAGE_CLIENT_ID, {}, nullptr) {
}
virtual void finish(int r) {
if (r >= 0) {
*mirror_uuid = tag_data.mirror_uuid;
}
on_finish->complete(r);
}
};
template <typename J>
struct GetTagsRequest {
CephContext *cct;
J *journaler;
cls::journal::Client *client;
journal::ImageClientMeta *client_meta;
uint64_t *tag_tid;
journal::TagData *tag_data;
Context *on_finish;
ceph::mutex lock = ceph::make_mutex("lock");
GetTagsRequest(CephContext *cct, J *journaler, cls::journal::Client *client,
journal::ImageClientMeta *client_meta, uint64_t *tag_tid,
journal::TagData *tag_data, Context *on_finish)
: cct(cct), journaler(journaler), client(client), client_meta(client_meta),
tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish) {
}
/**
* @verbatim
*
* <start>
* |
* v
* GET_CLIENT * * * * * * * * * * * *
* | *
* v *
* GET_TAGS * * * * * * * * * * * * * (error)
* | *
* v *
* <finish> * * * * * * * * * * * * *
*
* @endverbatim
*/
void send() {
send_get_client();
}
void send_get_client() {
ldout(cct, 20) << __func__ << dendl;
auto ctx = new LambdaContext(
[this](int r) {
handle_get_client(r);
});
journaler->get_client(Journal<ImageCtx>::IMAGE_CLIENT_ID, client, ctx);
}
void handle_get_client(int r) {
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
if (r < 0) {
complete(r);
return;
}
librbd::journal::ClientData client_data;
auto bl_it = client->data.cbegin();
try {
decode(client_data, bl_it);
} catch (const buffer::error &err) {
lderr(cct) << this << " OpenJournalerRequest::" << __func__ << ": "
<< "failed to decode client data" << dendl;
complete(-EBADMSG);
return;
}
journal::ImageClientMeta *image_client_meta =
boost::get<journal::ImageClientMeta>(&client_data.client_meta);
if (image_client_meta == nullptr) {
lderr(cct) << this << " OpenJournalerRequest::" << __func__ << ": "
<< "failed to get client meta" << dendl;
complete(-EINVAL);
return;
}
*client_meta = *image_client_meta;
send_get_tags();
}
void send_get_tags() {
ldout(cct, 20) << __func__ << dendl;
auto ctx = new LambdaContext(
[this](int r) {
handle_get_tags(r);
});
C_DecodeTags *tags_ctx = new C_DecodeTags(cct, &lock, tag_tid, tag_data,
ctx);
journaler->get_tags(client_meta->tag_class, &tags_ctx->tags, tags_ctx);
}
void handle_get_tags(int r) {
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
complete(r);
}
void complete(int r) {
on_finish->complete(r);
delete this;
}
};
template <typename J>
void get_tags(CephContext *cct, J *journaler,
cls::journal::Client *client,
journal::ImageClientMeta *client_meta,
uint64_t *tag_tid, journal::TagData *tag_data,
Context *on_finish) {
ldout(cct, 20) << __func__ << dendl;
GetTagsRequest<J> *req =
new GetTagsRequest<J>(cct, journaler, client, client_meta, tag_tid,
tag_data, on_finish);
req->send();
}
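// synchronously allocate a new journal tag in the given tag class, encoding
// the supplied mirror uuid and predecessor into the tag data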
template <typename J>
int allocate_journaler_tag(CephContext *cct, J *journaler,
uint64_t tag_class,
const journal::TagPredecessor &predecessor,
const std::string &mirror_uuid,
cls::journal::Tag *new_tag) {
journal::TagData tag_data;
tag_data.mirror_uuid = mirror_uuid;
tag_data.predecessor = predecessor;
bufferlist tag_bl;
encode(tag_data, tag_bl);
C_SaferCond allocate_tag_ctx;
journaler->allocate_tag(tag_class, tag_bl, new_tag, &allocate_tag_ctx);
int r = allocate_tag_ctx.wait();
if (r < 0) {
lderr(cct) << __func__ << ": "
<< "failed to allocate tag: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
} // anonymous namespace
// client id for local image
template <typename I>
const std::string Journal<I>::IMAGE_CLIENT_ID("");
// mirror uuid to use for local images
template <typename I>
const std::string Journal<I>::LOCAL_MIRROR_UUID("");
// mirror uuid to use for orphaned (demoted) images
template <typename I>
const std::string Journal<I>::ORPHAN_MIRROR_UUID("<orphan>");
template <typename I>
std::ostream &operator<<(std::ostream &os,
const typename Journal<I>::State &state) {
switch (state) {
case Journal<I>::STATE_UNINITIALIZED:
os << "Uninitialized";
break;
case Journal<I>::STATE_INITIALIZING:
os << "Initializing";
break;
case Journal<I>::STATE_REPLAYING:
os << "Replaying";
break;
case Journal<I>::STATE_FLUSHING_RESTART:
os << "FlushingRestart";
break;
case Journal<I>::STATE_RESTARTING_REPLAY:
os << "RestartingReplay";
break;
case Journal<I>::STATE_FLUSHING_REPLAY:
os << "FlushingReplay";
break;
case Journal<I>::STATE_READY:
os << "Ready";
break;
case Journal<I>::STATE_STOPPING:
os << "Stopping";
break;
case Journal<I>::STATE_CLOSING:
os << "Closing";
break;
case Journal<I>::STATE_CLOSED:
os << "Closed";
break;
default:
os << "Unknown (" << static_cast<uint32_t>(state) << ")";
break;
}
return os;
}
template <typename I>
void Journal<I>::MetadataListener::handle_update(::journal::JournalMetadata *) {
auto ctx = new LambdaContext([this](int r) {
journal->handle_metadata_updated();
});
journal->m_work_queue->queue(ctx, 0);
}
template <typename I>
void Journal<I>::get_work_queue(CephContext *cct, ContextWQ **work_queue) {
auto thread_pool_singleton =
&cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
"librbd::journal::thread_pool", false, cct);
*work_queue = thread_pool_singleton->work_queue;
}
template <typename I>
Journal<I>::Journal(I &image_ctx)
: RefCountedObject(image_ctx.cct),
m_image_ctx(image_ctx), m_journaler(NULL),
m_state(STATE_UNINITIALIZED),
m_error_result(0), m_replay_handler(this), m_close_pending(false),
m_event_tid(0),
m_blocking_writes(false), m_journal_replay(NULL),
m_metadata_listener(this) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << ": ictx=" << &m_image_ctx << dendl;
get_work_queue(cct, &m_work_queue);
ImageCtx::get_timer_instance(cct, &m_timer, &m_timer_lock);
}
template <typename I>
Journal<I>::~Journal() {
if (m_work_queue != nullptr) {
m_work_queue->drain();
}
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED);
ceph_assert(m_journaler == NULL);
ceph_assert(m_journal_replay == NULL);
ceph_assert(m_wait_for_state_contexts.empty());
}
template <typename I>
bool Journal<I>::is_journal_supported(I &image_ctx) {
ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
return ((image_ctx.features & RBD_FEATURE_JOURNALING) &&
!image_ctx.read_only && image_ctx.snap_id == CEPH_NOSNAP);
}
template <typename I>
int Journal<I>::create(librados::IoCtx &io_ctx, const std::string &image_id,
uint8_t order, uint8_t splay_width,
const std::string &object_pool) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 5) << __func__ << ": image=" << image_id << dendl;
ContextWQ *work_queue;
get_work_queue(cct, &work_queue);
C_SaferCond cond;
journal::TagData tag_data(LOCAL_MIRROR_UUID);
journal::CreateRequest<I> *req = journal::CreateRequest<I>::create(
io_ctx, image_id, order, splay_width, object_pool, cls::journal::Tag::TAG_CLASS_NEW,
tag_data, IMAGE_CLIENT_ID, work_queue, &cond);
req->send();
return cond.wait();
}
template <typename I>
int Journal<I>::remove(librados::IoCtx &io_ctx, const std::string &image_id) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 5) << __func__ << ": image=" << image_id << dendl;
ContextWQ *work_queue;
get_work_queue(cct, &work_queue);
C_SaferCond cond;
journal::RemoveRequest<I> *req = journal::RemoveRequest<I>::create(
io_ctx, image_id, IMAGE_CLIENT_ID, work_queue, &cond);
req->send();
return cond.wait();
}
template <typename I>
int Journal<I>::reset(librados::IoCtx &io_ctx, const std::string &image_id) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 5) << __func__ << ": image=" << image_id << dendl;
ContextWQ *work_queue;
get_work_queue(cct, &work_queue);
C_SaferCond cond;
auto req = journal::ResetRequest<I>::create(io_ctx, image_id, IMAGE_CLIENT_ID,
Journal<>::LOCAL_MIRROR_UUID,
work_queue, &cond);
req->send();
return cond.wait();
}
template <typename I>
void Journal<I>::is_tag_owner(I *image_ctx, bool *owner,
Context *on_finish) {
Journal<I>::is_tag_owner(image_ctx->md_ctx, image_ctx->id, owner,
image_ctx->op_work_queue, on_finish);
}
template <typename I>
void Journal<I>::is_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
bool *is_tag_owner,
asio::ContextWQ *op_work_queue,
Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << __func__ << dendl;
C_IsTagOwner<I> *is_tag_owner_ctx = new C_IsTagOwner<I>(
io_ctx, image_id, is_tag_owner, op_work_queue, on_finish);
get_tags(cct, is_tag_owner_ctx->journaler, &is_tag_owner_ctx->client,
&is_tag_owner_ctx->client_meta, &is_tag_owner_ctx->tag_tid,
&is_tag_owner_ctx->tag_data, is_tag_owner_ctx);
}
template <typename I>
void Journal<I>::get_tag_owner(IoCtx& io_ctx, std::string& image_id,
std::string *mirror_uuid,
asio::ContextWQ *op_work_queue,
Context *on_finish) {
CephContext *cct = static_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << __func__ << dendl;
auto ctx = new C_GetTagOwner(io_ctx, image_id, mirror_uuid, on_finish);
get_tags(cct, &ctx->journaler, &ctx->client, &ctx->client_meta, &ctx->tag_tid,
&ctx->tag_data, create_async_context_callback(op_work_queue, ctx));
}
template <typename I>
int Journal<I>::request_resync(I *image_ctx) {
CephContext *cct = image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
Journaler journaler(image_ctx->md_ctx, image_ctx->id, IMAGE_CLIENT_ID, {},
nullptr);
ceph::mutex lock = ceph::make_mutex("lock");
journal::ImageClientMeta client_meta;
uint64_t tag_tid;
journal::TagData tag_data;
C_SaferCond open_ctx;
auto open_req = journal::OpenRequest<I>::create(image_ctx, &journaler, &lock,
&client_meta, &tag_tid,
&tag_data, &open_ctx);
open_req->send();
BOOST_SCOPE_EXIT_ALL(&journaler) {
journaler.shut_down();
};
int r = open_ctx.wait();
if (r < 0) {
return r;
}
client_meta.resync_requested = true;
journal::ClientData client_data(client_meta);
bufferlist client_data_bl;
encode(client_data, client_data_bl);
C_SaferCond update_client_ctx;
journaler.update_client(client_data_bl, &update_client_ctx);
r = update_client_ctx.wait();
if (r < 0) {
lderr(cct) << __func__ << ": "
<< "failed to update client: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
void Journal<I>::promote(I *image_ctx, Context *on_finish) {
CephContext *cct = image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
auto promote_req = journal::PromoteRequest<I>::create(image_ctx, false,
on_finish);
promote_req->send();
}
template <typename I>
void Journal<I>::demote(I *image_ctx, Context *on_finish) {
CephContext *cct = image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
auto req = journal::DemoteRequest<I>::create(*image_ctx, on_finish);
req->send();
}
template <typename I>
bool Journal<I>::is_journal_ready() const {
std::lock_guard locker{m_lock};
return (m_state == STATE_READY);
}
template <typename I>
bool Journal<I>::is_journal_replaying() const {
std::lock_guard locker{m_lock};
return is_journal_replaying(m_lock);
}
template <typename I>
bool Journal<I>::is_journal_replaying(const ceph::mutex &) const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return (m_state == STATE_REPLAYING ||
m_state == STATE_FLUSHING_REPLAY ||
m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_RESTARTING_REPLAY);
}
template <typename I>
bool Journal<I>::is_journal_appending() const {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
std::lock_guard locker{m_lock};
return (m_state == STATE_READY &&
!m_image_ctx.get_journal_policy()->append_disabled());
}
template <typename I>
void Journal<I>::wait_for_journal_ready(Context *on_ready) {
on_ready = create_async_context_callback(m_image_ctx, on_ready);
std::lock_guard locker{m_lock};
if (m_state == STATE_READY) {
on_ready->complete(m_error_result);
} else {
wait_for_steady_state(on_ready);
}
}
template <typename I>
void Journal<I>::open(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
on_finish = create_context_callback<Context>(on_finish, this);
on_finish = create_async_context_callback(m_image_ctx, on_finish);
// inject our handler into the object dispatcher chain
m_image_ctx.io_object_dispatcher->register_dispatch(
journal::ObjectDispatch<I>::create(&m_image_ctx, this));
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_UNINITIALIZED);
wait_for_steady_state(on_finish);
create_journaler();
}
template <typename I>
void Journal<I>::close(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
on_finish = create_context_callback<Context>(on_finish, this);
on_finish = new LambdaContext([this, on_finish](int r) {
// remove our handler from object dispatcher chain - preserve error
auto ctx = new LambdaContext([on_finish, r](int _) {
on_finish->complete(r);
});
m_image_ctx.io_object_dispatcher->shut_down_dispatch(
io::OBJECT_DISPATCH_LAYER_JOURNAL, ctx);
});
on_finish = create_async_context_callback(m_image_ctx, on_finish);
std::unique_lock locker{m_lock};
m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
Listeners listeners(m_listeners);
m_listener_notify = true;
locker.unlock();
for (auto listener : listeners) {
listener->handle_close();
}
locker.lock();
m_listener_notify = false;
m_listener_cond.notify_all();
ceph_assert(m_state != STATE_UNINITIALIZED);
if (m_state == STATE_CLOSED) {
on_finish->complete(m_error_result);
return;
}
if (m_state == STATE_READY) {
stop_recording();
}
m_close_pending = true;
wait_for_steady_state(on_finish);
}
template <typename I>
bool Journal<I>::is_tag_owner() const {
std::lock_guard locker{m_lock};
return is_tag_owner(m_lock);
}
template <typename I>
bool Journal<I>::is_tag_owner(const ceph::mutex &) const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return (m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
}
template <typename I>
uint64_t Journal<I>::get_tag_tid() const {
std::lock_guard locker{m_lock};
return m_tag_tid;
}
template <typename I>
journal::TagData Journal<I>::get_tag_data() const {
std::lock_guard locker{m_lock};
return m_tag_data;
}
template <typename I>
void Journal<I>::allocate_local_tag(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
journal::TagPredecessor predecessor;
predecessor.mirror_uuid = LOCAL_MIRROR_UUID;
{
std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr && is_tag_owner(m_lock));
cls::journal::Client client;
int r = m_journaler->get_cached_client(IMAGE_CLIENT_ID, &client);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to retrieve client: " << cpp_strerror(r) << dendl;
m_image_ctx.op_work_queue->queue(on_finish, r);
return;
}
// since we are primary, populate the predecessor with our known commit
// position
ceph_assert(m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
if (!client.commit_position.object_positions.empty()) {
auto position = client.commit_position.object_positions.front();
predecessor.commit_valid = true;
predecessor.tag_tid = position.tag_tid;
predecessor.entry_tid = position.entry_tid;
}
}
allocate_tag(LOCAL_MIRROR_UUID, predecessor, on_finish);
}
template <typename I>
void Journal<I>::allocate_tag(const std::string &mirror_uuid,
const journal::TagPredecessor &predecessor,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": mirror_uuid=" << mirror_uuid
<< dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr);
journal::TagData tag_data;
tag_data.mirror_uuid = mirror_uuid;
tag_data.predecessor = predecessor;
bufferlist tag_bl;
encode(tag_data, tag_bl);
C_DecodeTag *decode_tag_ctx = new C_DecodeTag(cct, &m_lock, &m_tag_tid,
&m_tag_data, on_finish);
m_journaler->allocate_tag(m_tag_class, tag_bl, &decode_tag_ctx->tag,
decode_tag_ctx);
}
template <typename I>
void Journal<I>::flush_commit_position(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr);
m_journaler->flush_commit_position(on_finish);
}
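// invoked on the first user-requested flush: when
// rbd_journal_object_writethrough_until_flush is enabled, switch the
// journaler from writethrough mode to the configured batched append options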
template <typename I>
void Journal<I>::user_flushed() {
if (m_state == STATE_READY && !m_user_flushed.exchange(true) &&
m_image_ctx.config.template get_val<bool>("rbd_journal_object_writethrough_until_flush")) {
std::lock_guard locker{m_lock};
if (m_state == STATE_READY) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
ceph_assert(m_journaler != nullptr);
m_journaler->set_append_batch_options(
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_flush_interval"),
m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_object_flush_bytes"),
m_image_ctx.config.template get_val<double>("rbd_journal_object_flush_age"));
} else {
m_user_flushed = false;
}
}
}
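// journal a write by splitting the payload into one or more AioWriteEvent
// entries, each sized to fit within the journaler's maximum append size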
template <typename I>
uint64_t Journal<I>::append_write_event(uint64_t offset, size_t length,
const bufferlist &bl,
bool flush_entry) {
ceph_assert(m_max_append_size > journal::AioWriteEvent::get_fixed_size());
uint64_t max_write_data_size =
m_max_append_size - journal::AioWriteEvent::get_fixed_size();
// ensure that the write event fits within the journal entry
Bufferlists bufferlists;
uint64_t bytes_remaining = length;
uint64_t event_offset = 0;
do {
uint64_t event_length = std::min(bytes_remaining, max_write_data_size);
bufferlist event_bl;
event_bl.substr_of(bl, event_offset, event_length);
journal::EventEntry event_entry(journal::AioWriteEvent(offset + event_offset,
event_length,
event_bl),
ceph_clock_now());
bufferlists.emplace_back();
encode(event_entry, bufferlists.back());
event_offset += event_length;
bytes_remaining -= event_length;
} while (bytes_remaining > 0);
return append_io_events(journal::EVENT_TYPE_AIO_WRITE, bufferlists, offset,
length, flush_entry, 0);
}
template <typename I>
uint64_t Journal<I>::append_compare_and_write_event(uint64_t offset,
size_t length,
const bufferlist &cmp_bl,
const bufferlist &write_bl,
bool flush_entry) {
ceph_assert(
m_max_append_size > journal::AioCompareAndWriteEvent::get_fixed_size());
uint64_t max_compare_and_write_data_size =
m_max_append_size - journal::AioCompareAndWriteEvent::get_fixed_size();
// we need double the size because we store cmp and write buffers
max_compare_and_write_data_size /= 2;
// ensure that the compare and write event fits within the journal entry
Bufferlists bufferlists;
uint64_t bytes_remaining = length;
uint64_t event_offset = 0;
do {
uint64_t event_length = std::min(bytes_remaining,
max_compare_and_write_data_size);
bufferlist event_cmp_bl;
event_cmp_bl.substr_of(cmp_bl, event_offset, event_length);
bufferlist event_write_bl;
event_write_bl.substr_of(write_bl, event_offset, event_length);
journal::EventEntry event_entry(
journal::AioCompareAndWriteEvent(offset + event_offset,
event_length,
event_cmp_bl,
event_write_bl),
ceph_clock_now());
bufferlists.emplace_back();
encode(event_entry, bufferlists.back());
event_offset += event_length;
bytes_remaining -= event_length;
} while (bytes_remaining > 0);
return append_io_events(journal::EVENT_TYPE_AIO_COMPARE_AND_WRITE,
bufferlists, offset, length, flush_entry, -EILSEQ);
}
template <typename I>
uint64_t Journal<I>::append_io_event(journal::EventEntry &&event_entry,
uint64_t offset, size_t length,
bool flush_entry, int filter_ret_val) {
bufferlist bl;
event_entry.timestamp = ceph_clock_now();
encode(event_entry, bl);
return append_io_events(event_entry.get_event_type(), {bl}, offset, length,
flush_entry, filter_ret_val);
}
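// append one journal entry per bufferlist under a single event tid; the
// event is tracked until the journal append is safe and the associated
// image IO has been committed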
template <typename I>
uint64_t Journal<I>::append_io_events(journal::EventType event_type,
const Bufferlists &bufferlists,
uint64_t offset, size_t length,
bool flush_entry, int filter_ret_val) {
ceph_assert(!bufferlists.empty());
uint64_t tid;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
tid = ++m_event_tid;
ceph_assert(tid != 0);
}
Futures futures;
for (auto &bl : bufferlists) {
ceph_assert(bl.length() <= m_max_append_size);
futures.push_back(m_journaler->append(m_tag_tid, bl));
}
{
std::lock_guard event_locker{m_event_lock};
m_events[tid] = Event(futures, offset, length, filter_ret_val);
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": "
<< "event=" << event_type << ", "
<< "offset=" << offset << ", "
<< "length=" << length << ", "
<< "flush=" << flush_entry << ", tid=" << tid << dendl;
Context *on_safe = create_async_context_callback(
m_image_ctx, new C_IOEventSafe(this, tid));
if (flush_entry) {
futures.back().flush(on_safe);
} else {
futures.back().wait(on_safe);
}
return tid;
}
template <typename I>
void Journal<I>::commit_io_event(uint64_t tid, int r) {
CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
                 << "r=" << r << dendl;
std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
if (it == m_events.end()) {
return;
}
complete_event(it, r);
}
template <typename I>
void Journal<I>::commit_io_event_extent(uint64_t tid, uint64_t offset,
uint64_t length, int r) {
ceph_assert(length > 0);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
<< "offset=" << offset << ", "
<< "length=" << length << ", "
<< "r=" << r << dendl;
std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
if (it == m_events.end()) {
return;
}
Event &event = it->second;
if (event.ret_val == 0 && r < 0) {
event.ret_val = r;
}
ExtentInterval extent;
extent.insert(offset, length);
ExtentInterval intersect;
intersect.intersection_of(extent, event.pending_extents);
event.pending_extents.subtract(intersect);
if (!event.pending_extents.empty()) {
ldout(cct, 20) << this << " " << __func__ << ": "
<< "pending extents: " << event.pending_extents << dendl;
return;
}
complete_event(it, event.ret_val);
}
template <typename I>
void Journal<I>::append_op_event(uint64_t op_tid,
journal::EventEntry &&event_entry,
Context *on_safe) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bufferlist bl;
event_entry.timestamp = ceph_clock_now();
encode(event_entry, bl);
Future future;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
future = m_journaler->append(m_tag_tid, bl);
// delay committing op event to ensure consistent replay
ceph_assert(m_op_futures.count(op_tid) == 0);
m_op_futures[op_tid] = future;
}
on_safe = create_async_context_callback(m_image_ctx, on_safe);
on_safe = new LambdaContext([this, on_safe](int r) {
// ensure all committed IO before this op is committed
m_journaler->flush_commit_position(on_safe);
});
future.flush(on_safe);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": "
<< "op_tid=" << op_tid << ", "
<< "event=" << event_entry.get_event_type() << dendl;
}
template <typename I>
void Journal<I>::commit_op_event(uint64_t op_tid, int r, Context *on_safe) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": op_tid=" << op_tid << ", "
<< "r=" << r << dendl;
journal::EventEntry event_entry((journal::OpFinishEvent(op_tid, r)),
ceph_clock_now());
bufferlist bl;
encode(event_entry, bl);
Future op_start_future;
Future op_finish_future;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
// ready to commit op event
auto it = m_op_futures.find(op_tid);
ceph_assert(it != m_op_futures.end());
op_start_future = it->second;
m_op_futures.erase(it);
op_finish_future = m_journaler->append(m_tag_tid, bl);
}
op_finish_future.flush(create_async_context_callback(
m_image_ctx, new C_OpEventSafe(this, op_tid, op_start_future,
op_finish_future, on_safe)));
}
template <typename I>
void Journal<I>::replay_op_ready(uint64_t op_tid, Context *on_resume) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": op_tid=" << op_tid << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_journal_replay != nullptr);
m_journal_replay->replay_op_ready(op_tid, on_resume);
}
}
template <typename I>
void Journal<I>::flush_event(uint64_t tid, Context *on_safe) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
<< "on_safe=" << on_safe << dendl;
on_safe = create_context_callback<Context>(on_safe, this);
Future future;
{
std::lock_guard event_locker{m_event_lock};
future = wait_event(m_lock, tid, on_safe);
}
if (future.is_valid()) {
future.flush(nullptr);
}
}
template <typename I>
void Journal<I>::wait_event(uint64_t tid, Context *on_safe) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
<< "on_safe=" << on_safe << dendl;
on_safe = create_context_callback<Context>(on_safe, this);
std::lock_guard event_locker{m_event_lock};
wait_event(m_lock, tid, on_safe);
}
template <typename I>
typename Journal<I>::Future Journal<I>::wait_event(ceph::mutex &lock, uint64_t tid,
Context *on_safe) {
ceph_assert(ceph_mutex_is_locked(m_event_lock));
CephContext *cct = m_image_ctx.cct;
typename Events::iterator it = m_events.find(tid);
ceph_assert(it != m_events.end());
Event &event = it->second;
if (event.safe) {
// journal entry already safe
ldout(cct, 20) << this << " " << __func__ << ": "
<< "journal entry already safe" << dendl;
m_image_ctx.op_work_queue->queue(on_safe, event.ret_val);
return Future();
}
event.on_safe_contexts.push_back(create_async_context_callback(m_image_ctx,
on_safe));
return event.futures.back();
}
template <typename I>
void Journal<I>::start_external_replay(journal::Replay<I> **journal_replay,
Context *on_start) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
ceph_assert(m_journal_replay == nullptr);
on_start = util::create_async_context_callback(m_image_ctx, on_start);
on_start = new LambdaContext(
[this, journal_replay, on_start](int r) {
handle_start_external_replay(r, journal_replay, on_start);
});
// safely flush all in-flight events before starting external replay
m_journaler->stop_append(util::create_async_context_callback(m_image_ctx,
on_start));
}
template <typename I>
void Journal<I>::handle_start_external_replay(int r,
journal::Replay<I> **journal_replay,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
ceph_assert(m_journal_replay == nullptr);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to stop recording: " << cpp_strerror(r) << dendl;
*journal_replay = nullptr;
    // get back to a sane state
start_append();
on_finish->complete(r);
return;
}
transition_state(STATE_REPLAYING, 0);
m_journal_replay = journal::Replay<I>::create(m_image_ctx);
*journal_replay = m_journal_replay;
on_finish->complete(0);
}
template <typename I>
void Journal<I>::stop_external_replay() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_journal_replay != nullptr);
ceph_assert(m_state == STATE_REPLAYING);
delete m_journal_replay;
m_journal_replay = nullptr;
if (m_close_pending) {
destroy_journaler(0);
return;
}
start_append();
}
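// (re)create the journaler and start the open sequence: fetch the client
// metadata and tags, then begin replaying any uncommitted journal entries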
template <typename I>
void Journal<I>::create_journaler() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY);
ceph_assert(m_journaler == NULL);
transition_state(STATE_INITIALIZING, 0);
::journal::Settings settings;
settings.commit_interval =
m_image_ctx.config.template get_val<double>("rbd_journal_commit_age");
settings.max_payload_bytes =
m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_max_payload_bytes");
settings.max_concurrent_object_sets =
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_max_concurrent_object_sets");
// TODO: a configurable filter to exclude certain peers from being
// disconnected.
settings.ignored_laggy_clients = {IMAGE_CLIENT_ID};
m_journaler = new Journaler(m_work_queue, m_timer, m_timer_lock,
m_image_ctx.md_ctx, m_image_ctx.id,
IMAGE_CLIENT_ID, settings, nullptr);
m_journaler->add_listener(&m_metadata_listener);
Context *ctx = create_async_context_callback(
m_image_ctx, create_context_callback<
Journal<I>, &Journal<I>::handle_open>(this));
auto open_req = journal::OpenRequest<I>::create(&m_image_ctx, m_journaler,
&m_lock, &m_client_meta,
&m_tag_tid, &m_tag_data, ctx);
open_req->send();
}
template <typename I>
void Journal<I>::destroy_journaler(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
delete m_journal_replay;
m_journal_replay = NULL;
m_journaler->remove_listener(&m_metadata_listener);
transition_state(STATE_CLOSING, r);
Context *ctx = create_async_context_callback(
m_image_ctx, create_context_callback<
Journal<I>, &Journal<I>::handle_journal_destroyed>(this));
ctx = new LambdaContext(
[this, ctx](int r) {
std::lock_guard locker{m_lock};
m_journaler->shut_down(ctx);
});
ctx = create_async_context_callback(m_image_ctx, ctx);
m_async_journal_op_tracker.wait_for_ops(ctx);
}
template <typename I>
void Journal<I>::recreate_journaler(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
delete m_journal_replay;
m_journal_replay = NULL;
m_journaler->remove_listener(&m_metadata_listener);
transition_state(STATE_RESTARTING_REPLAY, r);
m_journaler->shut_down(create_async_context_callback(
m_image_ctx, create_context_callback<
Journal<I>, &Journal<I>::handle_journal_destroyed>(this)));
}
template <typename I>
void Journal<I>::complete_event(typename Events::iterator it, int r) {
ceph_assert(ceph_mutex_is_locked(m_event_lock));
ceph_assert(m_state == STATE_READY);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << it->first << " "
<< "r=" << r << dendl;
Event &event = it->second;
if (r < 0 && r == event.filter_ret_val) {
// ignore allowed error codes
r = 0;
}
if (r < 0) {
// event recorded to journal but failed to update disk, we cannot
// commit this IO event. this event must be replayed.
ceph_assert(event.safe);
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit IO to disk, replay required: "
<< cpp_strerror(r) << dendl;
}
event.committed_io = true;
if (event.safe) {
if (r >= 0) {
for (auto &future : event.futures) {
m_journaler->committed(future);
}
}
m_events.erase(it);
}
}
template <typename I>
void Journal<I>::start_append() {
ceph_assert(ceph_mutex_is_locked(m_lock));
m_journaler->start_append(
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_max_in_flight_appends"));
if (!m_image_ctx.config.template get_val<bool>("rbd_journal_object_writethrough_until_flush")) {
m_journaler->set_append_batch_options(
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_flush_interval"),
m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_object_flush_bytes"),
m_image_ctx.config.template get_val<double>("rbd_journal_object_flush_age"));
}
transition_state(STATE_READY, 0);
}
template <typename I>
void Journal<I>::handle_open(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_INITIALIZING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to initialize journal: " << cpp_strerror(r)
<< dendl;
destroy_journaler(r);
return;
}
m_tag_class = m_client_meta.tag_class;
m_max_append_size = m_journaler->get_max_append_size();
ldout(cct, 20) << this << " " << __func__ << ": "
<< "tag_class=" << m_tag_class << ", "
<< "max_append_size=" << m_max_append_size << dendl;
transition_state(STATE_REPLAYING, 0);
m_journal_replay = journal::Replay<I>::create(m_image_ctx);
m_journaler->start_replay(&m_replay_handler);
}
template <typename I>
void Journal<I>::handle_replay_ready() {
CephContext *cct = m_image_ctx.cct;
ReplayEntry replay_entry;
{
std::lock_guard locker{m_lock};
if (m_state != STATE_REPLAYING) {
return;
}
ldout(cct, 20) << this << " " << __func__ << dendl;
if (!m_journaler->try_pop_front(&replay_entry)) {
return;
}
// only one entry should be in-flight at a time
ceph_assert(!m_processing_entry);
m_processing_entry = true;
}
m_async_journal_op_tracker.start_op();
bufferlist data = replay_entry.get_data();
auto it = data.cbegin();
journal::EventEntry event_entry;
int r = m_journal_replay->decode(&it, &event_entry);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to decode journal event entry" << dendl;
handle_replay_process_safe(replay_entry, r);
return;
}
Context *on_ready = create_context_callback<
Journal<I>, &Journal<I>::handle_replay_process_ready>(this);
Context *on_commit = new C_ReplayProcessSafe(this, std::move(replay_entry));
m_journal_replay->process(event_entry, on_ready, on_commit);
}
template <typename I>
void Journal<I>::handle_replay_complete(int r) {
CephContext *cct = m_image_ctx.cct;
bool cancel_ops = false;
{
std::lock_guard locker{m_lock};
if (m_state != STATE_REPLAYING) {
return;
}
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0) {
cancel_ops = true;
transition_state(STATE_FLUSHING_RESTART, r);
} else {
// state might change back to FLUSHING_RESTART on flush error
transition_state(STATE_FLUSHING_REPLAY, 0);
}
}
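  // the contexts below are chained in reverse execution order: stop replay,
  // shut down the replay handler, wait for in-flight ops, flush the commit
  // position and finally restart (on error) or finish the replay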
Context *ctx = new LambdaContext([this, cct](int r) {
ldout(cct, 20) << this << " handle_replay_complete: "
<< "handle shut down replay" << dendl;
State state;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
state = m_state;
}
if (state == STATE_FLUSHING_RESTART) {
handle_flushing_restart(0);
} else {
handle_flushing_replay();
}
});
ctx = new LambdaContext([this, ctx](int r) {
// ensure the commit position is flushed to disk
m_journaler->flush_commit_position(ctx);
});
ctx = create_async_context_callback(m_image_ctx, ctx);
ctx = new LambdaContext([this, ctx](int r) {
m_async_journal_op_tracker.wait_for_ops(ctx);
});
ctx = new LambdaContext([this, cct, cancel_ops, ctx](int r) {
ldout(cct, 20) << this << " handle_replay_complete: "
<< "shut down replay" << dendl;
m_journal_replay->shut_down(cancel_ops, ctx);
});
m_journaler->stop_replay(ctx);
}
template <typename I>
void Journal<I>::handle_replay_process_ready(int r) {
// journal::Replay is ready for more events -- attempt to pop another
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(r == 0);
{
std::lock_guard locker{m_lock};
ceph_assert(m_processing_entry);
m_processing_entry = false;
}
handle_replay_ready();
}
template <typename I>
void Journal<I>::handle_replay_process_safe(ReplayEntry replay_entry, int r) {
CephContext *cct = m_image_ctx.cct;
std::unique_lock locker{m_lock};
ceph_assert(m_state == STATE_REPLAYING ||
m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0) {
if (r != -ECANCELED) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit journal event to disk: "
<< cpp_strerror(r) << dendl;
}
if (m_state == STATE_REPLAYING) {
// abort the replay if we have an error
transition_state(STATE_FLUSHING_RESTART, r);
locker.unlock();
// stop replay, shut down, and restart
Context* ctx = create_context_callback<
Journal<I>, &Journal<I>::handle_flushing_restart>(this);
ctx = new LambdaContext([this, ctx](int r) {
// ensure the commit position is flushed to disk
m_journaler->flush_commit_position(ctx);
});
ctx = new LambdaContext([this, cct, ctx](int r) {
ldout(cct, 20) << this << " handle_replay_process_safe: "
<< "shut down replay" << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_FLUSHING_RESTART);
}
m_journal_replay->shut_down(true, ctx);
});
m_journaler->stop_replay(ctx);
m_async_journal_op_tracker.finish_op();
return;
} else if (m_state == STATE_FLUSHING_REPLAY) {
// end-of-replay flush in-progress -- we need to restart replay
transition_state(STATE_FLUSHING_RESTART, r);
locker.unlock();
m_async_journal_op_tracker.finish_op();
return;
}
} else {
// only commit the entry if written successfully
m_journaler->committed(replay_entry);
}
locker.unlock();
m_async_journal_op_tracker.finish_op();
}
template <typename I>
void Journal<I>::handle_flushing_restart(int r) {
std::lock_guard locker{m_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(r == 0);
ceph_assert(m_state == STATE_FLUSHING_RESTART);
if (m_close_pending) {
destroy_journaler(r);
return;
}
recreate_journaler(r);
}
template <typename I>
void Journal<I>::handle_flushing_replay() {
std::lock_guard locker{m_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(m_state == STATE_FLUSHING_REPLAY ||
m_state == STATE_FLUSHING_RESTART);
if (m_close_pending) {
destroy_journaler(0);
return;
} else if (m_state == STATE_FLUSHING_RESTART) {
// failed to replay one-or-more events -- restart
recreate_journaler(0);
return;
}
delete m_journal_replay;
m_journal_replay = NULL;
m_error_result = 0;
start_append();
}
template <typename I>
void Journal<I>::handle_recording_stopped(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_STOPPING);
destroy_journaler(r);
}
template <typename I>
void Journal<I>::handle_journal_destroyed(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0) {
    lderr(cct) << this << " " << __func__ << ": "
               << "error detected while closing journal: " << cpp_strerror(r)
<< dendl;
}
std::lock_guard locker{m_lock};
delete m_journaler;
m_journaler = nullptr;
ceph_assert(m_state == STATE_CLOSING || m_state == STATE_RESTARTING_REPLAY);
if (m_state == STATE_RESTARTING_REPLAY) {
create_journaler();
return;
}
transition_state(STATE_CLOSED, r);
}
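// called once the journal append for an IO event is safe on disk: completes
// any contexts waiting on the event and releases it if the corresponding
// image IO has already been committed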
template <typename I>
void Journal<I>::handle_io_event_safe(int r, uint64_t tid) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << ", "
<< "tid=" << tid << dendl;
// journal will be flushed before closing
ceph_assert(m_state == STATE_READY || m_state == STATE_STOPPING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit IO event: " << cpp_strerror(r) << dendl;
}
Contexts on_safe_contexts;
{
std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
ceph_assert(it != m_events.end());
Event &event = it->second;
on_safe_contexts.swap(event.on_safe_contexts);
if (r < 0 || event.committed_io) {
// failed journal write so IO won't be sent -- or IO extent was
// overwritten by future IO operations so this was a no-op IO event
event.ret_val = r;
for (auto &future : event.futures) {
m_journaler->committed(future);
}
}
if (event.committed_io) {
m_events.erase(it);
} else {
event.safe = true;
}
}
ldout(cct, 20) << this << " " << __func__ << ": "
<< "completing tid=" << tid << dendl;
// alert the cache about the journal event status
for (Contexts::iterator it = on_safe_contexts.begin();
it != on_safe_contexts.end(); ++it) {
(*it)->complete(r);
}
}
template <typename I>
void Journal<I>::handle_op_event_safe(int r, uint64_t tid,
const Future &op_start_future,
const Future &op_finish_future,
Context *on_safe) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << ", "
<< "tid=" << tid << dendl;
// journal will be flushed before closing
ceph_assert(m_state == STATE_READY || m_state == STATE_STOPPING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit op event: " << cpp_strerror(r) << dendl;
}
m_journaler->committed(op_start_future);
m_journaler->committed(op_finish_future);
// reduce the replay window after committing an op event
m_journaler->flush_commit_position(on_safe);
}
template <typename I>
void Journal<I>::stop_recording() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_journaler != NULL);
ceph_assert(m_state == STATE_READY);
transition_state(STATE_STOPPING, 0);
m_journaler->stop_append(util::create_async_context_callback(
m_image_ctx, create_context_callback<
Journal<I>, &Journal<I>::handle_recording_stopped>(this)));
}
template <typename I>
void Journal<I>::transition_state(State state, int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": new state=" << state << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = state;
if (m_error_result == 0 && r < 0) {
m_error_result = r;
}
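  // once a steady state is reached, wake any wait_for_steady_state() waiters
  // with the first recorded error (if any)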
if (is_steady_state()) {
auto wait_for_state_contexts(std::move(m_wait_for_state_contexts));
m_wait_for_state_contexts.clear();
for (auto ctx : wait_for_state_contexts) {
ctx->complete(m_error_result);
}
}
}
template <typename I>
bool Journal<I>::is_steady_state() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
switch (m_state) {
case STATE_READY:
case STATE_CLOSED:
return true;
case STATE_UNINITIALIZED:
case STATE_INITIALIZING:
case STATE_REPLAYING:
case STATE_FLUSHING_RESTART:
case STATE_RESTARTING_REPLAY:
case STATE_FLUSHING_REPLAY:
case STATE_STOPPING:
case STATE_CLOSING:
break;
}
return false;
}
template <typename I>
void Journal<I>::wait_for_steady_state(Context *on_state) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!is_steady_state());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": on_state=" << on_state
<< dendl;
m_wait_for_state_contexts.push_back(on_state);
}
template <typename I>
int Journal<I>::is_resync_requested(bool *do_resync) {
std::lock_guard l{m_lock};
return check_resync_requested(do_resync);
}
template <typename I>
int Journal<I>::check_resync_requested(bool *do_resync) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(do_resync != nullptr);
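  // the resync request flag is stored in the image client's metadata within
  // the journal (see request_resync()); read it from the cached client
  // registration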
cls::journal::Client client;
int r = m_journaler->get_cached_client(IMAGE_CLIENT_ID, &client);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to retrieve client: " << cpp_strerror(r) << dendl;
return r;
}
librbd::journal::ClientData client_data;
auto bl_it = client.data.cbegin();
try {
decode(client_data, bl_it);
} catch (const buffer::error &err) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to decode client data: " << err.what() << dendl;
return -EINVAL;
}
journal::ImageClientMeta *image_client_meta =
boost::get<journal::ImageClientMeta>(&client_data.client_meta);
if (image_client_meta == nullptr) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to access image client meta struct" << dendl;
return -EINVAL;
}
*do_resync = image_client_meta->resync_requested;
return 0;
}
struct C_RefreshTags : public Context {
AsyncOpTracker &async_op_tracker;
Context *on_finish = nullptr;
ceph::mutex lock =
ceph::make_mutex("librbd::Journal::C_RefreshTags::lock");
uint64_t tag_tid = 0;
journal::TagData tag_data;
explicit C_RefreshTags(AsyncOpTracker &async_op_tracker)
: async_op_tracker(async_op_tracker) {
async_op_tracker.start_op();
}
~C_RefreshTags() override {
async_op_tracker.finish_op();
}
void finish(int r) override {
on_finish->complete(r);
}
};
template <typename I>
void Journal<I>::handle_metadata_updated() {
CephContext *cct = m_image_ctx.cct;
std::lock_guard locker{m_lock};
if (m_state != STATE_READY && !is_journal_replaying(m_lock)) {
return;
} else if (is_tag_owner(m_lock)) {
ldout(cct, 20) << this << " " << __func__ << ": primary image" << dendl;
return;
} else if (m_listeners.empty()) {
ldout(cct, 20) << this << " " << __func__ << ": no listeners" << dendl;
return;
}
uint64_t refresh_sequence = ++m_refresh_sequence;
ldout(cct, 20) << this << " " << __func__ << ": "
<< "refresh_sequence=" << refresh_sequence << dendl;
// pull the most recent tags from the journal, decode, and
// update the internal tag state
C_RefreshTags *refresh_ctx = new C_RefreshTags(m_async_journal_op_tracker);
refresh_ctx->on_finish = new LambdaContext(
[this, refresh_sequence, refresh_ctx](int r) {
handle_refresh_metadata(refresh_sequence, refresh_ctx->tag_tid,
refresh_ctx->tag_data, r);
});
C_DecodeTags *decode_tags_ctx = new C_DecodeTags(
cct, &refresh_ctx->lock, &refresh_ctx->tag_tid,
&refresh_ctx->tag_data, refresh_ctx);
m_journaler->get_tags(m_tag_tid == 0 ? 0 : m_tag_tid - 1, m_tag_class,
&decode_tags_ctx->tags, decode_tags_ctx);
}
template <typename I>
void Journal<I>::handle_refresh_metadata(uint64_t refresh_sequence,
uint64_t tag_tid,
journal::TagData tag_data, int r) {
CephContext *cct = m_image_ctx.cct;
std::unique_lock locker{m_lock};
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": failed to refresh metadata: "
<< cpp_strerror(r) << dendl;
return;
} else if (m_state != STATE_READY && !is_journal_replaying(m_lock)) {
return;
} else if (refresh_sequence != m_refresh_sequence) {
// another, more up-to-date refresh is in-flight
return;
}
ldout(cct, 20) << this << " " << __func__ << ": "
<< "refresh_sequence=" << refresh_sequence << ", "
<< "tag_tid=" << tag_tid << ", "
<< "tag_data=" << tag_data << dendl;
m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
bool was_tag_owner = is_tag_owner(m_lock);
if (m_tag_tid < tag_tid) {
m_tag_tid = tag_tid;
m_tag_data = tag_data;
}
bool promoted_to_primary = (!was_tag_owner && is_tag_owner(m_lock));
bool resync_requested = false;
r = check_resync_requested(&resync_requested);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to check if a resync was requested" << dendl;
return;
}
Listeners listeners(m_listeners);
m_listener_notify = true;
locker.unlock();
if (promoted_to_primary) {
for (auto listener : listeners) {
listener->handle_promoted();
}
} else if (resync_requested) {
for (auto listener : listeners) {
listener->handle_resync();
}
}
locker.lock();
m_listener_notify = false;
m_listener_cond.notify_all();
}
template <typename I>
void Journal<I>::add_listener(journal::Listener *listener) {
std::lock_guard locker{m_lock};
m_listeners.insert(listener);
}
template <typename I>
void Journal<I>::remove_listener(journal::Listener *listener) {
std::unique_lock locker{m_lock};
m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
m_listeners.erase(listener);
}
} // namespace librbd
#ifndef TEST_F
template class librbd::Journal<librbd::ImageCtx>;
#endif
| 57,986 | 30.125604 | 98 | cc |
null | ceph-main/src/librbd/Journal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_H
#define CEPH_LIBRBD_JOURNAL_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/interval_set.h"
#include "include/rados/librados_fwd.hpp"
#include "common/AsyncOpTracker.h"
#include "common/Cond.h"
#include "common/Timer.h"
#include "common/RefCountedObj.h"
#include "journal/Future.h"
#include "journal/JournalMetadataListener.h"
#include "journal/ReplayEntry.h"
#include "journal/ReplayHandler.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"
#include <algorithm>
#include <list>
#include <string>
#include <atomic>
#include <unordered_map>
class ContextWQ;
namespace journal { class Journaler; }
namespace librbd {
class ImageCtx;
namespace journal { template <typename> class Replay; }
template <typename ImageCtxT = ImageCtx>
class Journal : public RefCountedObject {
public:
/**
* @verbatim
*
* <start>
* |
* v
* UNINITIALIZED ---> INITIALIZING ---> REPLAYING ------> FLUSHING ---> READY
* | * . ^ * . * |
* | * . | * . * |
* | * . | (error) * . . . . . . . * |
* | * . | * . * |
* | * . | v . * |
* | * . | FLUSHING_RESTART . * |
* | * . | | . * |
* | * . | | . * |
* | * . | v . * v
* | * . | RESTARTING < * * * * * STOPPING
* | * . | | . |
* | * . | | . |
* | * * * * * * . \-------------/ . |
* | * (error) . . |
* | * . . . . . . . . . . . . . . . . |
* | * . . |
* | v v v |
* | CLOSED <----- CLOSING <---------------------------------------/
* | |
* | v
* \---> <finish>
*
* @endverbatim
*/
enum State {
STATE_UNINITIALIZED,
STATE_INITIALIZING,
STATE_REPLAYING,
STATE_FLUSHING_RESTART,
STATE_RESTARTING_REPLAY,
STATE_FLUSHING_REPLAY,
STATE_READY,
STATE_STOPPING,
STATE_CLOSING,
STATE_CLOSED
};
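  // a successful open() typically walks UNINITIALIZED -> INITIALIZING ->
  // REPLAYING -> FLUSHING_REPLAY -> READY, while close() walks READY ->
  // STOPPING -> CLOSING -> CLOSED (see the diagram above)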
static const std::string IMAGE_CLIENT_ID;
static const std::string LOCAL_MIRROR_UUID;
static const std::string ORPHAN_MIRROR_UUID;
Journal(ImageCtxT &image_ctx);
~Journal();
static void get_work_queue(CephContext *cct, ContextWQ **work_queue);
static bool is_journal_supported(ImageCtxT &image_ctx);
static int create(librados::IoCtx &io_ctx, const std::string &image_id,
uint8_t order, uint8_t splay_width,
const std::string &object_pool);
static int remove(librados::IoCtx &io_ctx, const std::string &image_id);
static int reset(librados::IoCtx &io_ctx, const std::string &image_id);
static void is_tag_owner(ImageCtxT *image_ctx, bool *is_tag_owner,
Context *on_finish);
static void is_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
bool *is_tag_owner, asio::ContextWQ *op_work_queue,
Context *on_finish);
static void get_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
std::string *mirror_uuid,
asio::ContextWQ *op_work_queue, Context *on_finish);
static int request_resync(ImageCtxT *image_ctx);
static void promote(ImageCtxT *image_ctx, Context *on_finish);
static void demote(ImageCtxT *image_ctx, Context *on_finish);
bool is_journal_ready() const;
bool is_journal_replaying() const;
bool is_journal_appending() const;
void wait_for_journal_ready(Context *on_ready);
void open(Context *on_finish);
void close(Context *on_finish);
bool is_tag_owner() const;
uint64_t get_tag_tid() const;
journal::TagData get_tag_data() const;
void allocate_local_tag(Context *on_finish);
void allocate_tag(const std::string &mirror_uuid,
const journal::TagPredecessor &predecessor,
Context *on_finish);
void flush_commit_position(Context *on_finish);
void user_flushed();
uint64_t append_write_event(uint64_t offset, size_t length,
const bufferlist &bl,
bool flush_entry);
uint64_t append_compare_and_write_event(uint64_t offset,
size_t length,
const bufferlist &cmp_bl,
const bufferlist &write_bl,
bool flush_entry);
uint64_t append_io_event(journal::EventEntry &&event_entry,
uint64_t offset, size_t length,
bool flush_entry, int filter_ret_val);
void commit_io_event(uint64_t tid, int r);
void commit_io_event_extent(uint64_t tid, uint64_t offset, uint64_t length,
int r);
void append_op_event(uint64_t op_tid, journal::EventEntry &&event_entry,
Context *on_safe);
void commit_op_event(uint64_t tid, int r, Context *on_safe);
void replay_op_ready(uint64_t op_tid, Context *on_resume);
void flush_event(uint64_t tid, Context *on_safe);
void wait_event(uint64_t tid, Context *on_safe);
uint64_t allocate_op_tid() {
uint64_t op_tid = ++m_op_tid;
ceph_assert(op_tid != 0);
return op_tid;
}
void start_external_replay(journal::Replay<ImageCtxT> **journal_replay,
Context *on_start);
void stop_external_replay();
void add_listener(journal::Listener *listener);
void remove_listener(journal::Listener *listener);
int is_resync_requested(bool *do_resync);
inline ContextWQ *get_work_queue() {
return m_work_queue;
}
private:
ImageCtxT &m_image_ctx;
// mock unit testing support
typedef journal::TypeTraits<ImageCtxT> TypeTraits;
typedef typename TypeTraits::Journaler Journaler;
typedef typename TypeTraits::Future Future;
typedef typename TypeTraits::ReplayEntry ReplayEntry;
typedef std::list<bufferlist> Bufferlists;
typedef std::list<Context *> Contexts;
typedef std::list<Future> Futures;
typedef interval_set<uint64_t> ExtentInterval;
struct Event {
Futures futures;
Contexts on_safe_contexts;
ExtentInterval pending_extents;
int filter_ret_val = 0;
bool committed_io = false;
bool safe = false;
int ret_val = 0;
Event() {
}
Event(const Futures &_futures, uint64_t offset, size_t length,
int filter_ret_val)
: futures(_futures), filter_ret_val(filter_ret_val) {
if (length > 0) {
pending_extents.insert(offset, length);
}
}
};
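  // an Event is retained until its journal entry is safe on disk and the
  // associated image IO has been committed (see commit_io_event() and
  // handle_io_event_safe())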
typedef std::unordered_map<uint64_t, Event> Events;
typedef std::unordered_map<uint64_t, Future> TidToFutures;
struct C_IOEventSafe : public Context {
Journal *journal;
uint64_t tid;
C_IOEventSafe(Journal *_journal, uint64_t _tid)
: journal(_journal), tid(_tid) {
}
void finish(int r) override {
journal->handle_io_event_safe(r, tid);
}
};
struct C_OpEventSafe : public Context {
Journal *journal;
uint64_t tid;
Future op_start_future;
Future op_finish_future;
Context *on_safe;
C_OpEventSafe(Journal *journal, uint64_t tid, const Future &op_start_future,
const Future &op_finish_future, Context *on_safe)
: journal(journal), tid(tid), op_start_future(op_start_future),
op_finish_future(op_finish_future), on_safe(on_safe) {
}
void finish(int r) override {
journal->handle_op_event_safe(r, tid, op_start_future, op_finish_future,
on_safe);
}
};
struct C_ReplayProcessSafe : public Context {
Journal *journal;
ReplayEntry replay_entry;
C_ReplayProcessSafe(Journal *journal, ReplayEntry &&replay_entry) :
journal(journal), replay_entry(std::move(replay_entry)) {
}
void finish(int r) override {
journal->handle_replay_process_safe(replay_entry, r);
}
};
struct ReplayHandler : public ::journal::ReplayHandler {
Journal *journal;
ReplayHandler(Journal *_journal) : journal(_journal) {
}
void handle_entries_available() override {
journal->handle_replay_ready();
}
void handle_complete(int r) override {
journal->handle_replay_complete(r);
}
};
ContextWQ *m_work_queue = nullptr;
SafeTimer *m_timer = nullptr;
ceph::mutex *m_timer_lock = nullptr;
Journaler *m_journaler;
mutable ceph::mutex m_lock = ceph::make_mutex("Journal<I>::m_lock");
State m_state;
uint64_t m_max_append_size = 0;
uint64_t m_tag_class = 0;
uint64_t m_tag_tid = 0;
journal::ImageClientMeta m_client_meta;
journal::TagData m_tag_data;
int m_error_result;
Contexts m_wait_for_state_contexts;
ReplayHandler m_replay_handler;
bool m_close_pending;
ceph::mutex m_event_lock = ceph::make_mutex("Journal<I>::m_event_lock");
uint64_t m_event_tid;
Events m_events;
std::atomic<bool> m_user_flushed = false;
std::atomic<uint64_t> m_op_tid = { 0 };
TidToFutures m_op_futures;
bool m_processing_entry = false;
bool m_blocking_writes;
journal::Replay<ImageCtxT> *m_journal_replay;
AsyncOpTracker m_async_journal_op_tracker;
struct MetadataListener : public ::journal::JournalMetadataListener {
Journal<ImageCtxT> *journal;
MetadataListener(Journal<ImageCtxT> *journal) : journal(journal) { }
void handle_update(::journal::JournalMetadata *) override;
} m_metadata_listener;
typedef std::set<journal::Listener *> Listeners;
Listeners m_listeners;
ceph::condition_variable m_listener_cond;
bool m_listener_notify = false;
uint64_t m_refresh_sequence = 0;
bool is_journal_replaying(const ceph::mutex &) const;
bool is_tag_owner(const ceph::mutex &) const;
uint64_t append_io_events(journal::EventType event_type,
const Bufferlists &bufferlists,
uint64_t offset, size_t length, bool flush_entry,
int filter_ret_val);
Future wait_event(ceph::mutex &lock, uint64_t tid, Context *on_safe);
void create_journaler();
void destroy_journaler(int r);
void recreate_journaler(int r);
void complete_event(typename Events::iterator it, int r);
void start_append();
void handle_open(int r);
void handle_replay_ready();
void handle_replay_complete(int r);
void handle_replay_process_ready(int r);
void handle_replay_process_safe(ReplayEntry replay_entry, int r);
void handle_start_external_replay(int r,
journal::Replay<ImageCtxT> **journal_replay,
Context *on_finish);
void handle_flushing_restart(int r);
void handle_flushing_replay();
void handle_recording_stopped(int r);
void handle_journal_destroyed(int r);
void handle_io_event_safe(int r, uint64_t tid);
void handle_op_event_safe(int r, uint64_t tid, const Future &op_start_future,
const Future &op_finish_future, Context *on_safe);
void stop_recording();
void transition_state(State state, int r);
bool is_steady_state() const;
void wait_for_steady_state(Context *on_state);
int check_resync_requested(bool *do_resync);
void handle_metadata_updated();
void handle_refresh_metadata(uint64_t refresh_sequence, uint64_t tag_tid,
journal::TagData tag_data, int r);
};
} // namespace librbd
extern template class librbd::Journal<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_H
| 12,384 | 31.506562 | 80 | h |
null | ceph-main/src/librbd/LibrbdAdminSocketHook.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/LibrbdAdminSocketHook.h"
#include "librbd/internal.h"
#include "librbd/api/Io.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbdadminsocket: "
namespace librbd {
class LibrbdAdminSocketCommand {
public:
virtual ~LibrbdAdminSocketCommand() {}
virtual int call(Formatter *f) = 0;
};
class FlushCacheCommand : public LibrbdAdminSocketCommand {
public:
explicit FlushCacheCommand(ImageCtx *ictx) : ictx(ictx) {}
int call(Formatter *f) override {
return api::Io<>::flush(*ictx);
}
private:
ImageCtx *ictx;
};
struct InvalidateCacheCommand : public LibrbdAdminSocketCommand {
public:
explicit InvalidateCacheCommand(ImageCtx *ictx) : ictx(ictx) {}
int call(Formatter *f) override {
return invalidate_cache(ictx);
}
private:
ImageCtx *ictx;
};
LibrbdAdminSocketHook::LibrbdAdminSocketHook(ImageCtx *ictx) :
admin_socket(ictx->cct->get_admin_socket()) {
std::string command;
std::string imagename;
int r;
imagename = ictx->md_ctx.get_pool_name() + "/" + ictx->name;
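  // the commands registered below can later be invoked through the client's
  // admin socket, along the lines of (socket path is environment-specific):
  //   ceph --admin-daemon /path/to/client.asok rbd cache flush <pool>/<image>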
command = "rbd cache flush " + imagename;
r = admin_socket->register_command(command, this,
"flush rbd image " + imagename +
" cache");
if (r == 0) {
commands[command] = new FlushCacheCommand(ictx);
}
command = "rbd cache invalidate " + imagename;
r = admin_socket->register_command(command, this,
"invalidate rbd image " + imagename +
" cache");
if (r == 0) {
commands[command] = new InvalidateCacheCommand(ictx);
}
}
LibrbdAdminSocketHook::~LibrbdAdminSocketHook() {
(void)admin_socket->unregister_commands(this);
for (Commands::const_iterator i = commands.begin(); i != commands.end();
++i) {
delete i->second;
}
}
int LibrbdAdminSocketHook::call(std::string_view command,
const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f,
std::ostream& errss,
bufferlist& out) {
Commands::const_iterator i = commands.find(command);
ceph_assert(i != commands.end());
return i->second->call(f);
}
} // namespace librbd
| 2,245 | 23.150538 | 74 | cc |
null | ceph-main/src/librbd/LibrbdAdminSocketHook.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_LIBRBDADMINSOCKETHOOK_H
#define CEPH_LIBRBD_LIBRBDADMINSOCKETHOOK_H
#include <map>
#include "common/admin_socket.h"
namespace librbd {
struct ImageCtx;
class LibrbdAdminSocketCommand;
class LibrbdAdminSocketHook : public AdminSocketHook {
public:
LibrbdAdminSocketHook(ImageCtx *ictx);
~LibrbdAdminSocketHook() override;
int call(std::string_view command, const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f,
std::ostream& errss,
bufferlist& out) override;
private:
typedef std::map<std::string,LibrbdAdminSocketCommand*,
std::less<>> Commands;
AdminSocket *admin_socket;
Commands commands;
};
}
#endif
| 801 | 21.277778 | 70 | h |
null | ceph-main/src/librbd/ManagedLock.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ManagedLock.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/managed_lock/AcquireRequest.h"
#include "librbd/managed_lock/BreakRequest.h"
#include "librbd/managed_lock/GetLockerRequest.h"
#include "librbd/managed_lock/ReleaseRequest.h"
#include "librbd/managed_lock/ReacquireRequest.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/managed_lock/Utils.h"
#include "cls/lock/cls_lock_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ManagedLock: " << this << " " \
<< __func__ << ": "
namespace librbd {
using std::string;
using namespace managed_lock;
namespace {
template <typename R>
struct C_SendLockRequest : public Context {
R* request;
explicit C_SendLockRequest(R* request) : request(request) {
}
void finish(int r) override {
request->send();
}
};
struct C_Tracked : public Context {
AsyncOpTracker &tracker;
Context *ctx;
C_Tracked(AsyncOpTracker &tracker, Context *ctx)
: tracker(tracker), ctx(ctx) {
tracker.start_op();
}
~C_Tracked() override {
tracker.finish_op();
}
void finish(int r) override {
ctx->complete(r);
}
};
} // anonymous namespace
using librbd::util::create_context_callback;
using librbd::util::unique_lock_name;
using managed_lock::util::decode_lock_cookie;
using managed_lock::util::encode_lock_cookie;
template <typename I>
ManagedLock<I>::ManagedLock(librados::IoCtx &ioctx, AsioEngine& asio_engine,
const string& oid, Watcher *watcher, Mode mode,
bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds)
: m_lock(ceph::make_mutex(unique_lock_name("librbd::ManagedLock<I>::m_lock", this))),
m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
m_asio_engine(asio_engine),
m_work_queue(asio_engine.get_work_queue()),
m_oid(oid),
m_watcher(watcher),
m_mode(mode),
m_blocklist_on_break_lock(blocklist_on_break_lock),
m_blocklist_expire_seconds(blocklist_expire_seconds),
m_state(STATE_UNLOCKED) {
}
template <typename I>
ManagedLock<I>::~ManagedLock() {
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
m_state == STATE_UNINITIALIZED);
if (m_state == STATE_UNINITIALIZED) {
// never initialized -- ensure any in-flight ops are complete
// since we wouldn't expect shut_down to be invoked
C_SaferCond ctx;
m_async_op_tracker.wait_for_ops(&ctx);
ctx.wait();
}
ceph_assert(m_async_op_tracker.empty());
}
template <typename I>
bool ManagedLock<I>::is_lock_owner() const {
std::lock_guard locker{m_lock};
return is_lock_owner(m_lock);
}
template <typename I>
bool ManagedLock<I>::is_lock_owner(ceph::mutex &lock) const {
ceph_assert(ceph_mutex_is_locked(m_lock));
bool lock_owner;
switch (m_state) {
case STATE_LOCKED:
case STATE_REACQUIRING:
case STATE_PRE_SHUTTING_DOWN:
case STATE_POST_ACQUIRING:
case STATE_PRE_RELEASING:
lock_owner = true;
break;
default:
lock_owner = false;
break;
}
ldout(m_cct, 20) << lock_owner << dendl;
return lock_owner;
}
template <typename I>
void ManagedLock<I>::shut_down(Context *on_shut_down) {
ldout(m_cct, 10) << dendl;
std::lock_guard locker{m_lock};
ceph_assert(!is_state_shutdown());
if (m_state == STATE_WAITING_FOR_REGISTER) {
// abort stalled acquire lock state
ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
Action active_action = get_active_action();
ceph_assert(active_action == ACTION_TRY_LOCK ||
active_action == ACTION_ACQUIRE_LOCK);
complete_active_action(STATE_UNLOCKED, -ERESTART);
}
execute_action(ACTION_SHUT_DOWN, on_shut_down);
}
template <typename I>
void ManagedLock<I>::acquire_lock(Context *on_acquired) {
int r = 0;
{
std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
ldout(m_cct, 10) << dendl;
execute_action(ACTION_ACQUIRE_LOCK, on_acquired);
return;
}
}
if (on_acquired != nullptr) {
on_acquired->complete(r);
}
}
template <typename I>
void ManagedLock<I>::try_acquire_lock(Context *on_acquired) {
int r = 0;
{
std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
ldout(m_cct, 10) << dendl;
execute_action(ACTION_TRY_LOCK, on_acquired);
return;
}
}
if (on_acquired != nullptr) {
on_acquired->complete(r);
}
}
template <typename I>
void ManagedLock<I>::release_lock(Context *on_released) {
int r = 0;
{
std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
ldout(m_cct, 10) << dendl;
execute_action(ACTION_RELEASE_LOCK, on_released);
return;
}
}
if (on_released != nullptr) {
on_released->complete(r);
}
}
template <typename I>
void ManagedLock<I>::reacquire_lock(Context *on_reacquired) {
{
std::lock_guard locker{m_lock};
if (m_state == STATE_WAITING_FOR_REGISTER) {
// restart the acquire lock process now that watch is valid
ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
Action active_action = get_active_action();
ceph_assert(active_action == ACTION_TRY_LOCK ||
active_action == ACTION_ACQUIRE_LOCK);
execute_next_action();
} else if (!is_state_shutdown() &&
(m_state == STATE_LOCKED ||
m_state == STATE_ACQUIRING ||
m_state == STATE_POST_ACQUIRING ||
m_state == STATE_WAITING_FOR_LOCK)) {
// interlock the lock operation with other state ops
ldout(m_cct, 10) << dendl;
execute_action(ACTION_REACQUIRE_LOCK, on_reacquired);
return;
}
}
  // ignore the request if shut down or not in a lock-related state
if (on_reacquired != nullptr) {
on_reacquired->complete(0);
}
}
template <typename I>
void ManagedLock<I>::get_locker(managed_lock::Locker *locker,
Context *on_finish) {
ldout(m_cct, 10) << dendl;
int r;
{
std::lock_guard l{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else {
on_finish = new C_Tracked(m_async_op_tracker, on_finish);
auto req = managed_lock::GetLockerRequest<I>::create(
m_ioctx, m_oid, m_mode == EXCLUSIVE, locker, on_finish);
req->send();
return;
}
}
on_finish->complete(r);
}
template <typename I>
void ManagedLock<I>::break_lock(const managed_lock::Locker &locker,
bool force_break_lock, Context *on_finish) {
ldout(m_cct, 10) << dendl;
int r;
{
std::lock_guard l{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else if (is_lock_owner(m_lock)) {
r = -EBUSY;
} else {
on_finish = new C_Tracked(m_async_op_tracker, on_finish);
auto req = managed_lock::BreakRequest<I>::create(
m_ioctx, m_asio_engine, m_oid, locker, m_mode == EXCLUSIVE,
m_blocklist_on_break_lock, m_blocklist_expire_seconds, force_break_lock,
on_finish);
req->send();
return;
}
}
on_finish->complete(r);
}
template <typename I>
int ManagedLock<I>::assert_header_locked() {
ldout(m_cct, 10) << dendl;
librados::ObjectReadOperation op;
{
std::lock_guard locker{m_lock};
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME,
(m_mode == EXCLUSIVE ? ClsLockType::EXCLUSIVE :
ClsLockType::SHARED),
m_cookie,
managed_lock::util::get_watcher_lock_tag());
}
int r = m_ioctx.operate(m_oid, &op, nullptr);
if (r < 0) {
if (r == -EBLOCKLISTED) {
ldout(m_cct, 5) << "client is not lock owner -- client blocklisted"
<< dendl;
} else if (r == -ENOENT) {
ldout(m_cct, 5) << "client is not lock owner -- no lock detected"
<< dendl;
} else if (r == -EBUSY) {
ldout(m_cct, 5) << "client is not lock owner -- owned by different client"
<< dendl;
} else {
lderr(m_cct) << "failed to verify lock ownership: " << cpp_strerror(r)
<< dendl;
}
return r;
}
return 0;
}
template <typename I>
void ManagedLock<I>::shutdown_handler(int r, Context *on_finish) {
on_finish->complete(r);
}
template <typename I>
void ManagedLock<I>::pre_acquire_lock_handler(Context *on_finish) {
on_finish->complete(0);
}
template <typename I>
void ManagedLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
on_finish->complete(r);
}
template <typename I>
void ManagedLock<I>::pre_release_lock_handler(bool shutting_down,
Context *on_finish) {
on_finish->complete(0);
}
template <typename I>
void ManagedLock<I>::post_release_lock_handler(bool shutting_down, int r,
Context *on_finish) {
on_finish->complete(r);
}
template <typename I>
void ManagedLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
on_finish->complete(r);
}
template <typename I>
bool ManagedLock<I>::is_transition_state() const {
switch (m_state) {
case STATE_ACQUIRING:
case STATE_WAITING_FOR_REGISTER:
case STATE_REACQUIRING:
case STATE_RELEASING:
case STATE_PRE_SHUTTING_DOWN:
case STATE_SHUTTING_DOWN:
case STATE_INITIALIZING:
case STATE_WAITING_FOR_LOCK:
case STATE_POST_ACQUIRING:
case STATE_PRE_RELEASING:
return true;
case STATE_UNLOCKED:
case STATE_LOCKED:
case STATE_SHUTDOWN:
case STATE_UNINITIALIZED:
break;
}
return false;
}
template <typename I>
void ManagedLock<I>::append_context(Action action, Context *ctx) {
ceph_assert(ceph_mutex_is_locked(m_lock));
for (auto &action_ctxs : m_actions_contexts) {
if (action == action_ctxs.first) {
if (ctx != nullptr) {
action_ctxs.second.push_back(ctx);
}
return;
}
}
Contexts contexts;
if (ctx != nullptr) {
contexts.push_back(ctx);
}
m_actions_contexts.push_back({action, std::move(contexts)});
}
template <typename I>
void ManagedLock<I>::execute_action(Action action, Context *ctx) {
ceph_assert(ceph_mutex_is_locked(m_lock));
append_context(action, ctx);
if (!is_transition_state()) {
execute_next_action();
}
}
template <typename I>
void ManagedLock<I>::execute_next_action() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
switch (get_active_action()) {
case ACTION_ACQUIRE_LOCK:
case ACTION_TRY_LOCK:
send_acquire_lock();
break;
case ACTION_REACQUIRE_LOCK:
send_reacquire_lock();
break;
case ACTION_RELEASE_LOCK:
send_release_lock();
break;
case ACTION_SHUT_DOWN:
send_shutdown();
break;
default:
ceph_abort();
break;
}
}
template <typename I>
typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
return m_actions_contexts.front().first;
}
template <typename I>
void ManagedLock<I>::complete_active_action(State next_state, int r) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
ActionContexts action_contexts(std::move(m_actions_contexts.front()));
m_actions_contexts.pop_front();
m_state = next_state;
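  // drop the lock while firing completion contexts so callbacks can safely
  // re-enter the lock machinery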
m_lock.unlock();
for (auto ctx : action_contexts.second) {
ctx->complete(r);
}
m_lock.lock();
if (!is_transition_state() && !m_actions_contexts.empty()) {
execute_next_action();
}
}
template <typename I>
bool ManagedLock<I>::is_state_shutdown() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
switch (m_state) {
case STATE_PRE_SHUTTING_DOWN:
case STATE_SHUTTING_DOWN:
case STATE_SHUTDOWN:
return true;
default:
break;
}
return (!m_actions_contexts.empty() &&
m_actions_contexts.back().first == ACTION_SHUT_DOWN);
}
template <typename I>
void ManagedLock<I>::send_acquire_lock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_LOCKED) {
complete_active_action(STATE_LOCKED, 0);
return;
}
ldout(m_cct, 10) << dendl;
uint64_t watch_handle = m_watcher->get_watch_handle();
if (watch_handle == 0) {
if (m_watcher->is_blocklisted()) {
lderr(m_cct) << "watcher not registered - client blocklisted" << dendl;
complete_active_action(STATE_UNLOCKED, -EBLOCKLISTED);
} else {
lderr(m_cct) << "watcher not registered - delaying request" << dendl;
m_state = STATE_WAITING_FOR_REGISTER;
// shut down might race w/ release/re-acquire of the lock
if (is_state_shutdown()) {
complete_active_action(STATE_UNLOCKED, -ERESTART);
}
}
return;
}
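  // embed the watch handle in the lock cookie so that a watch
  // re-registration (new handle) can later be detected and the lock
  // reacquired with an updated cookie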
m_state = STATE_ACQUIRING;
m_cookie = encode_lock_cookie(watch_handle);
m_work_queue->queue(new LambdaContext([this](int r) {
pre_acquire_lock_handler(create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_pre_acquire_lock>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_pre_acquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
handle_acquire_lock(r);
return;
}
using managed_lock::AcquireRequest;
AcquireRequest<I>* req = AcquireRequest<I>::create(
m_ioctx, m_watcher, m_asio_engine, m_oid, m_cookie, m_mode == EXCLUSIVE,
m_blocklist_on_break_lock, m_blocklist_expire_seconds,
create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_acquire_lock>(this));
m_work_queue->queue(new C_SendLockRequest<AcquireRequest<I>>(req), 0);
}
template <typename I>
void ManagedLock<I>::handle_acquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r == -EBUSY || r == -EAGAIN || r == -EROFS) {
ldout(m_cct, 5) << "unable to acquire exclusive lock" << dendl;
} else if (r < 0) {
lderr(m_cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
<< dendl;
} else {
ldout(m_cct, 5) << "successfully acquired exclusive lock" << dendl;
}
m_post_next_state = (r < 0 ? STATE_UNLOCKED : STATE_LOCKED);
m_work_queue->queue(new LambdaContext([this, r](int ret) {
post_acquire_lock_handler(r, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_post_acquire_lock>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_post_acquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
if (r < 0 && m_post_next_state == STATE_LOCKED) {
// release_lock without calling pre and post handlers
revert_to_unlock_state(r);
} else if (r != -ECANCELED) {
// fail the lock request
complete_active_action(m_post_next_state, r);
}
}
template <typename I>
void ManagedLock<I>::revert_to_unlock_state(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
using managed_lock::ReleaseRequest;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
m_work_queue, m_oid, m_cookie,
new LambdaContext([this, r](int ret) {
std::lock_guard locker{m_lock};
ceph_assert(ret == 0);
complete_active_action(STATE_UNLOCKED, r);
}));
m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req));
}
template <typename I>
void ManagedLock<I>::send_reacquire_lock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state != STATE_LOCKED) {
complete_active_action(m_state, 0);
return;
}
ldout(m_cct, 10) << dendl;
m_state = STATE_REACQUIRING;
uint64_t watch_handle = m_watcher->get_watch_handle();
if (watch_handle == 0) {
// watch (re)failed while recovering
lderr(m_cct) << "aborting reacquire due to invalid watch handle"
<< dendl;
// treat double-watch failure as a lost lock and invoke the
// release/acquire handlers
release_acquire_lock();
complete_active_action(STATE_LOCKED, 0);
return;
}
m_new_cookie = encode_lock_cookie(watch_handle);
if (m_cookie == m_new_cookie && m_blocklist_on_break_lock) {
ldout(m_cct, 10) << "skipping reacquire since cookie still valid"
<< dendl;
auto ctx = create_context_callback<
ManagedLock, &ManagedLock<I>::handle_no_op_reacquire_lock>(this);
post_reacquire_lock_handler(0, ctx);
return;
}
auto ctx = create_context_callback<
ManagedLock, &ManagedLock<I>::handle_reacquire_lock>(this);
ctx = new LambdaContext([this, ctx](int r) {
post_reacquire_lock_handler(r, ctx);
});
using managed_lock::ReacquireRequest;
ReacquireRequest<I>* req = ReacquireRequest<I>::create(m_ioctx, m_oid,
m_cookie, m_new_cookie, m_mode == EXCLUSIVE, ctx);
m_work_queue->queue(new C_SendLockRequest<ReacquireRequest<I>>(req));
}
template <typename I>
void ManagedLock<I>::handle_reacquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_REACQUIRING);
if (r < 0) {
if (r == -EOPNOTSUPP) {
ldout(m_cct, 10) << "updating lock is not supported" << dendl;
} else {
lderr(m_cct) << "failed to update lock cookie: " << cpp_strerror(r)
<< dendl;
}
release_acquire_lock();
} else {
m_cookie = m_new_cookie;
}
complete_active_action(STATE_LOCKED, 0);
}
template <typename I>
void ManagedLock<I>::handle_no_op_reacquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
ceph_assert(m_state == STATE_REACQUIRING);
ceph_assert(r >= 0);
complete_active_action(STATE_LOCKED, 0);
}
template <typename I>
void ManagedLock<I>::release_acquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
if (!is_state_shutdown()) {
// queue a release and re-acquire of the lock since cookie cannot
// be updated on older OSDs
execute_action(ACTION_RELEASE_LOCK, nullptr);
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
// reacquire completes when the request lock completes
Contexts contexts;
std::swap(contexts, action_contexts.second);
if (contexts.empty()) {
execute_action(ACTION_ACQUIRE_LOCK, nullptr);
} else {
for (auto ctx : contexts) {
execute_action(ACTION_ACQUIRE_LOCK, ctx);
}
}
}
}
template <typename I>
void ManagedLock<I>::send_release_lock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_UNLOCKED) {
complete_active_action(STATE_UNLOCKED, 0);
return;
}
ldout(m_cct, 10) << dendl;
m_state = STATE_PRE_RELEASING;
m_work_queue->queue(new LambdaContext([this](int r) {
pre_release_lock_handler(false, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_pre_release_lock>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_pre_release_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_PRE_RELEASING);
m_state = STATE_RELEASING;
}
if (r < 0) {
handle_release_lock(r);
return;
}
using managed_lock::ReleaseRequest;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
m_work_queue, m_oid, m_cookie,
create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_release_lock>(this));
m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req), 0);
}
template <typename I>
void ManagedLock<I>::handle_release_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_RELEASING);
if (r >= 0 || r == -EBLOCKLISTED || r == -ENOENT) {
m_cookie = "";
m_post_next_state = STATE_UNLOCKED;
} else {
m_post_next_state = STATE_LOCKED;
}
m_work_queue->queue(new LambdaContext([this, r](int ret) {
post_release_lock_handler(false, r, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_post_release_lock>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_post_release_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
complete_active_action(m_post_next_state, r);
}
template <typename I>
void ManagedLock<I>::send_shutdown() {
ldout(m_cct, 10) << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_UNLOCKED) {
m_state = STATE_SHUTTING_DOWN;
m_work_queue->queue(new LambdaContext([this](int r) {
shutdown_handler(r, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_shutdown>(this));
}));
return;
}
ceph_assert(m_state == STATE_LOCKED);
m_state = STATE_PRE_SHUTTING_DOWN;
m_lock.unlock();
m_work_queue->queue(new C_ShutDownRelease(this), 0);
m_lock.lock();
}
template <typename I>
void ManagedLock<I>::handle_shutdown(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
wait_for_tracked_ops(r);
}
template <typename I>
void ManagedLock<I>::send_shutdown_release() {
ldout(m_cct, 10) << dendl;
std::lock_guard locker{m_lock};
m_work_queue->queue(new LambdaContext([this](int r) {
pre_release_lock_handler(true, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_shutdown_pre_release>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_shutdown_pre_release(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::string cookie;
{
std::lock_guard locker{m_lock};
cookie = m_cookie;
ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN);
m_state = STATE_SHUTTING_DOWN;
}
using managed_lock::ReleaseRequest;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
m_work_queue, m_oid, cookie,
new LambdaContext([this, r](int l) {
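      // prefer an error from the pre-release handler over the release result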
int rst = r < 0 ? r : l;
post_release_lock_handler(true, rst, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_shutdown_post_release>(this));
}));
req->send();
}
template <typename I>
void ManagedLock<I>::handle_shutdown_post_release(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
wait_for_tracked_ops(r);
}
template <typename I>
void ManagedLock<I>::wait_for_tracked_ops(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
Context *ctx = new LambdaContext([this, r](int ret) {
complete_shutdown(r);
});
m_async_op_tracker.wait_for_ops(ctx);
}
template <typename I>
void ManagedLock<I>::complete_shutdown(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to shut down lock: " << cpp_strerror(r)
<< dendl;
}
ActionContexts action_contexts;
{
std::lock_guard locker{m_lock};
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_actions_contexts.size() == 1);
action_contexts = std::move(m_actions_contexts.front());
m_actions_contexts.pop_front();
m_state = STATE_SHUTDOWN;
}
// expect to be destroyed after firing callback
for (auto ctx : action_contexts.second) {
ctx->complete(r);
}
}
} // namespace librbd
template class librbd::ManagedLock<librbd::ImageCtx>;
| 23,787 | 26.660465 | 87 | cc |
null | ceph-main/src/librbd/ManagedLock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_H
#define CEPH_LIBRBD_MANAGED_LOCK_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "cls/lock/cls_lock_types.h"
#include "librbd/watcher/Types.h"
#include "librbd/managed_lock/Types.h"
#include <list>
#include <string>
#include <utility>
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace asio { struct ContextWQ; }
namespace managed_lock { struct Locker; }
template <typename ImageCtxT = librbd::ImageCtx>
class ManagedLock {
private:
typedef watcher::Traits<ImageCtxT> TypeTraits;
typedef typename TypeTraits::Watcher Watcher;
public:
static ManagedLock *create(librados::IoCtx& ioctx,
AsioEngine& asio_engine,
const std::string& oid, Watcher *watcher,
managed_lock::Mode mode,
bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds) {
return new ManagedLock(ioctx, asio_engine, oid, watcher, mode,
blocklist_on_break_lock, blocklist_expire_seconds);
}
void destroy() {
delete this;
}
ManagedLock(librados::IoCtx& ioctx, AsioEngine& asio_engine,
const std::string& oid, Watcher *watcher,
managed_lock::Mode mode, bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds);
virtual ~ManagedLock();
bool is_lock_owner() const;
void shut_down(Context *on_shutdown);
void acquire_lock(Context *on_acquired);
void try_acquire_lock(Context *on_acquired);
void release_lock(Context *on_released);
void reacquire_lock(Context *on_reacquired);
void get_locker(managed_lock::Locker *locker, Context *on_finish);
void break_lock(const managed_lock::Locker &locker, bool force_break_lock,
Context *on_finish);
int assert_header_locked();
bool is_shutdown() const {
std::lock_guard l{m_lock};
return is_state_shutdown();
}
protected:
mutable ceph::mutex m_lock;
inline void set_state_uninitialized() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNLOCKED);
m_state = STATE_UNINITIALIZED;
}
inline void set_state_initializing() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNINITIALIZED);
m_state = STATE_INITIALIZING;
}
inline void set_state_unlocked() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING);
m_state = STATE_UNLOCKED;
}
inline void set_state_waiting_for_lock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_ACQUIRING);
m_state = STATE_WAITING_FOR_LOCK;
}
inline void set_state_post_acquiring() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_ACQUIRING);
m_state = STATE_POST_ACQUIRING;
}
bool is_state_shutdown() const;
inline bool is_state_acquiring() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_ACQUIRING;
}
inline bool is_state_post_acquiring() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_POST_ACQUIRING;
}
inline bool is_state_releasing() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_RELEASING;
}
inline bool is_state_pre_releasing() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_PRE_RELEASING;
}
inline bool is_state_locked() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_LOCKED;
}
inline bool is_state_waiting_for_lock() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_WAITING_FOR_LOCK;
}
inline bool is_action_acquire_lock() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return get_active_action() == ACTION_ACQUIRE_LOCK;
}
virtual void shutdown_handler(int r, Context *on_finish);
virtual void pre_acquire_lock_handler(Context *on_finish);
virtual void post_acquire_lock_handler(int r, Context *on_finish);
virtual void pre_release_lock_handler(bool shutting_down,
Context *on_finish);
virtual void post_release_lock_handler(bool shutting_down, int r,
Context *on_finish);
virtual void post_reacquire_lock_handler(int r, Context *on_finish);
void execute_next_action();
private:
/**
* @verbatim
*
* <start>
* |
* |
* v (acquire_lock)
* UNLOCKED -----------------------------------------> ACQUIRING
* ^ |
* | |
* RELEASING |
* | |
* | |
* | (release_lock) v
* PRE_RELEASING <----------------------------------------- LOCKED
*
* <LOCKED state>
* |
* v
* REACQUIRING -------------------------------------> <finish>
* . ^
* . |
* . . . > <RELEASE action> ---> <ACQUIRE action> ---/
*
* <UNLOCKED/LOCKED states>
* |
* |
* v
* PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
*
* @endverbatim
*/
enum State {
STATE_UNINITIALIZED,
STATE_INITIALIZING,
STATE_UNLOCKED,
STATE_LOCKED,
STATE_ACQUIRING,
STATE_POST_ACQUIRING,
STATE_WAITING_FOR_REGISTER,
STATE_WAITING_FOR_LOCK,
STATE_REACQUIRING,
STATE_PRE_RELEASING,
STATE_RELEASING,
STATE_PRE_SHUTTING_DOWN,
STATE_SHUTTING_DOWN,
STATE_SHUTDOWN,
};
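  // e.g. acquire_lock() typically walks UNLOCKED -> ACQUIRING -> LOCKED and
  // release_lock() walks LOCKED -> PRE_RELEASING -> RELEASING -> UNLOCKED
  // (see the diagram above)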
enum Action {
ACTION_TRY_LOCK,
ACTION_ACQUIRE_LOCK,
ACTION_REACQUIRE_LOCK,
ACTION_RELEASE_LOCK,
ACTION_SHUT_DOWN
};
typedef std::list<Context *> Contexts;
typedef std::pair<Action, Contexts> ActionContexts;
typedef std::list<ActionContexts> ActionsContexts;
struct C_ShutDownRelease : public Context {
ManagedLock *lock;
C_ShutDownRelease(ManagedLock *lock)
: lock(lock) {
}
void finish(int r) override {
lock->send_shutdown_release();
}
};
librados::IoCtx& m_ioctx;
CephContext *m_cct;
AsioEngine& m_asio_engine;
asio::ContextWQ* m_work_queue;
std::string m_oid;
Watcher *m_watcher;
managed_lock::Mode m_mode;
bool m_blocklist_on_break_lock;
uint32_t m_blocklist_expire_seconds;
std::string m_cookie;
std::string m_new_cookie;
State m_state;
State m_post_next_state;
ActionsContexts m_actions_contexts;
AsyncOpTracker m_async_op_tracker;
bool is_lock_owner(ceph::mutex &lock) const;
bool is_transition_state() const;
void append_context(Action action, Context *ctx);
void execute_action(Action action, Context *ctx);
Action get_active_action() const;
void complete_active_action(State next_state, int r);
void send_acquire_lock();
void handle_pre_acquire_lock(int r);
void handle_acquire_lock(int r);
void handle_no_op_reacquire_lock(int r);
void handle_post_acquire_lock(int r);
void revert_to_unlock_state(int r);
void send_reacquire_lock();
void handle_reacquire_lock(int r);
void release_acquire_lock();
void send_release_lock();
void handle_pre_release_lock(int r);
void handle_release_lock(int r);
void handle_post_release_lock(int r);
void send_shutdown();
void handle_shutdown(int r);
void send_shutdown_release();
void handle_shutdown_pre_release(int r);
void handle_shutdown_post_release(int r);
void wait_for_tracked_ops(int r);
void complete_shutdown(int r);
};
} // namespace librbd
extern template class librbd::ManagedLock<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MANAGED_LOCK_H
| 8,221 | 29.339483 | 78 | h |
null | ceph-main/src/librbd/MirroringWatcher.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/MirroringWatcher.h"
#include "include/rbd_types.h"
#include "include/rados/librados.hpp"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/Utils.h"
#include "librbd/watcher/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::MirroringWatcher: "
namespace librbd {
using namespace mirroring_watcher;
using namespace watcher;
using librbd::util::create_rados_callback;
namespace {
static const uint64_t NOTIFY_TIMEOUT_MS = 5000;
} // anonymous namespace
template <typename I>
MirroringWatcher<I>::MirroringWatcher(librados::IoCtx &io_ctx,
asio::ContextWQ *work_queue)
: Watcher(io_ctx, work_queue, RBD_MIRRORING) {
}
template <typename I>
int MirroringWatcher<I>::notify_mode_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorMode mirror_mode) {
C_SaferCond ctx;
notify_mode_updated(io_ctx, mirror_mode, &ctx);
return ctx.wait();
}
template <typename I>
void MirroringWatcher<I>::notify_mode_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorMode mirror_mode,
Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << dendl;
bufferlist bl;
encode(NotifyMessage{ModeUpdatedPayload{mirror_mode}}, bl);
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_MIRRORING, comp, bl, NOTIFY_TIMEOUT_MS,
nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
int MirroringWatcher<I>::notify_image_updated(
librados::IoCtx &io_ctx, cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id, const std::string &global_image_id) {
C_SaferCond ctx;
notify_image_updated(io_ctx, mirror_image_state, image_id, global_image_id,
&ctx);
return ctx.wait();
}
template <typename I>
void MirroringWatcher<I>::notify_image_updated(
librados::IoCtx &io_ctx, cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id, const std::string &global_image_id,
Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << dendl;
bufferlist bl;
encode(NotifyMessage{ImageUpdatedPayload{
mirror_image_state, image_id, global_image_id}}, bl);
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_MIRRORING, comp, bl, NOTIFY_TIMEOUT_MS,
nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void MirroringWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) {
CephContext *cct = this->m_cct;
ldout(cct, 15) << ": notify_id=" << notify_id << ", "
<< "handle=" << handle << dendl;
NotifyMessage notify_message;
try {
auto iter = bl.cbegin();
decode(notify_message, iter);
} catch (const buffer::error &err) {
lderr(cct) << ": error decoding image notification: " << err.what()
<< dendl;
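    // ack the undecodable notification anyway so the notifier is not left
    // waiting for the notify timeout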
Context *ctx = new C_NotifyAck(this, notify_id, handle);
ctx->complete(0);
return;
}
apply_visitor(watcher::util::HandlePayloadVisitor<MirroringWatcher<I>>(
this, notify_id, handle), notify_message.payload);
}
template <typename I>
bool MirroringWatcher<I>::handle_payload(const ModeUpdatedPayload &payload,
Context *on_notify_ack) {
CephContext *cct = this->m_cct;
ldout(cct, 20) << ": mode updated: " << payload.mirror_mode << dendl;
handle_mode_updated(payload.mirror_mode);
return true;
}
template <typename I>
bool MirroringWatcher<I>::handle_payload(const ImageUpdatedPayload &payload,
Context *on_notify_ack) {
CephContext *cct = this->m_cct;
ldout(cct, 20) << ": image state updated" << dendl;
handle_image_updated(payload.mirror_image_state, payload.image_id,
payload.global_image_id);
return true;
}
template <typename I>
bool MirroringWatcher<I>::handle_payload(const UnknownPayload &payload,
Context *on_notify_ack) {
return true;
}
} // namespace librbd
template class librbd::MirroringWatcher<librbd::ImageCtx>;
| 4,595 | 31.13986 | 81 | cc |
null | ceph-main/src/librbd/MirroringWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRRORING_WATCHER_H
#define CEPH_LIBRBD_MIRRORING_WATCHER_H
#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/mirroring_watcher/Types.h"
namespace librbd {
namespace asio { struct ContextWQ; }
namespace watcher {
namespace util {
template <typename> struct HandlePayloadVisitor;
}
}
template <typename ImageCtxT = librbd::ImageCtx>
class MirroringWatcher : public Watcher {
friend struct watcher::util::HandlePayloadVisitor<MirroringWatcher<ImageCtxT>>;
public:
MirroringWatcher(librados::IoCtx &io_ctx, asio::ContextWQ *work_queue);
static int notify_mode_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorMode mirror_mode);
static void notify_mode_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorMode mirror_mode,
Context *on_finish);
static int notify_image_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id,
const std::string &global_image_id);
static void notify_image_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id,
const std::string &global_image_id,
Context *on_finish);
virtual void handle_mode_updated(cls::rbd::MirrorMode mirror_mode) = 0;
virtual void handle_image_updated(cls::rbd::MirrorImageState state,
const std::string &image_id,
const std::string &global_image_id) = 0;
private:
bool handle_payload(const mirroring_watcher::ModeUpdatedPayload &payload,
Context *on_notify_ack);
bool handle_payload(const mirroring_watcher::ImageUpdatedPayload &payload,
Context *on_notify_ack);
bool handle_payload(const mirroring_watcher::UnknownPayload &payload,
Context *on_notify_ack);
void handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) override;
};
} // namespace librbd
extern template class librbd::MirroringWatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRRORING_WATCHER_H
| 2,638 | 37.808824 | 81 | h |
null | ceph-main/src/librbd/ObjectMap.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ObjectMap.h"
#include "librbd/BlockGuard.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/object_map/RefreshRequest.h"
#include "librbd/object_map/ResizeRequest.h"
#include "librbd/object_map/SnapshotCreateRequest.h"
#include "librbd/object_map/SnapshotRemoveRequest.h"
#include "librbd/object_map/SnapshotRollbackRequest.h"
#include "librbd/object_map/UnlockRequest.h"
#include "librbd/object_map/UpdateRequest.h"
#include "librbd/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/rados/librados.hpp"
#include "cls/lock/cls_lock_client.h"
#include "cls/rbd/cls_rbd_types.h"
#include "include/stringify.h"
#include "osdc/Striper.h"
#include <sstream>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ObjectMap: " << this << " " << __func__ \
<< ": "
namespace librbd {
using librbd::util::create_context_callback;
template <typename I>
ObjectMap<I>::ObjectMap(I &image_ctx, uint64_t snap_id)
: RefCountedObject(image_ctx.cct),
m_image_ctx(image_ctx), m_snap_id(snap_id),
m_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ObjectMap::lock", this))),
m_update_guard(new UpdateGuard(m_image_ctx.cct)) {
}
template <typename I>
ObjectMap<I>::~ObjectMap() {
delete m_update_guard;
}
template <typename I>
int ObjectMap<I>::aio_remove(librados::IoCtx &io_ctx, const std::string &image_id,
librados::AioCompletion *c) {
return io_ctx.aio_remove(object_map_name(image_id, CEPH_NOSNAP), c);
}
template <typename I>
std::string ObjectMap<I>::object_map_name(const std::string &image_id,
uint64_t snap_id) {
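  // HEAD object map: RBD_OBJECT_MAP_PREFIX + image id; snapshot object maps
  // additionally carry a '.' plus a 16-digit zero-padded hex snap id suffix
  // (e.g. ".000000000000000a" for snap id 10)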
std::string oid(RBD_OBJECT_MAP_PREFIX + image_id);
if (snap_id != CEPH_NOSNAP) {
std::stringstream snap_suffix;
snap_suffix << "." << std::setfill('0') << std::setw(16) << std::hex
<< snap_id;
oid += snap_suffix.str();
}
return oid;
}
template <typename I>
bool ObjectMap<I>::is_compatible(const file_layout_t& layout, uint64_t size) {
uint64_t object_count = Striper::get_num_objects(layout, size);
return (object_count <= cls::rbd::MAX_OBJECT_MAP_OBJECT_COUNT);
}
template <typename I>
uint8_t ObjectMap<I>::operator[](uint64_t object_no) const
{
std::shared_lock locker{m_lock};
ceph_assert(object_no < m_object_map.size());
return m_object_map[object_no];
}
template <typename I>
bool ObjectMap<I>::object_may_exist(uint64_t object_no) const
{
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
// Fall back to default logic if object map is disabled or invalid
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock)) {
return true;
}
bool flags_set;
int r = m_image_ctx.test_flags(m_image_ctx.snap_id,
RBD_FLAG_OBJECT_MAP_INVALID,
m_image_ctx.image_lock, &flags_set);
if (r < 0 || flags_set) {
return true;
}
uint8_t state = (*this)[object_no];
bool exists = (state != OBJECT_NONEXISTENT);
ldout(m_image_ctx.cct, 20) << "object_no=" << object_no << " r=" << exists
<< dendl;
return exists;
}
template <typename I>
bool ObjectMap<I>::object_may_not_exist(uint64_t object_no) const
{
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
// Fall back to default logic if object map is disabled or invalid
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock)) {
return true;
}
bool flags_set;
int r = m_image_ctx.test_flags(m_image_ctx.snap_id,
RBD_FLAG_OBJECT_MAP_INVALID,
m_image_ctx.image_lock, &flags_set);
if (r < 0 || flags_set) {
return true;
}
uint8_t state = (*this)[object_no];
bool nonexistent = (state != OBJECT_EXISTS && state != OBJECT_EXISTS_CLEAN);
ldout(m_image_ctx.cct, 20) << "object_no=" << object_no << " r="
<< nonexistent << dendl;
return nonexistent;
}
template <typename I>
bool ObjectMap<I>::update_required(const ceph::BitVector<2>::Iterator& it,
uint8_t new_state) {
ceph_assert(ceph_mutex_is_locked(m_lock));
uint8_t state = *it;
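  // no update is required if the state is unchanged, if a PENDING mark is
  // requested for an object that never existed, or if the object is being
  // marked NONEXISTENT without first having been marked PENDING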
if ((state == new_state) ||
(new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
(new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING)) {
return false;
}
return true;
}
template <typename I>
void ObjectMap<I>::open(Context *on_finish) {
Context *ctx = create_context_callback<Context>(on_finish, this);
auto req = object_map::RefreshRequest<I>::create(
m_image_ctx, &m_lock, &m_object_map, m_snap_id, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::close(Context *on_finish) {
Context *ctx = create_context_callback<Context>(on_finish, this);
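  // snapshot object maps are read-only and not locked, so closing them only
  // requires completing the callback; the HEAD object map must drain
  // in-flight updates and then release its exclusive lock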
if (m_snap_id != CEPH_NOSNAP) {
m_image_ctx.op_work_queue->queue(ctx, 0);
return;
}
ctx = new LambdaContext([this, ctx](int r) {
auto req = object_map::UnlockRequest<I>::create(m_image_ctx, ctx);
req->send();
});
// ensure the block guard for aio updates is empty before unlocking
// the object map
m_async_op_tracker.wait_for_ops(ctx);
}
template <typename I>
bool ObjectMap<I>::set_object_map(ceph::BitVector<2> &target_object_map) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
std::unique_lock locker{m_lock};
m_object_map = target_object_map;
return true;
}
template <typename I>
void ObjectMap<I>::rollback(uint64_t snap_id, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
std::unique_lock locker{m_lock};
Context *ctx = create_context_callback<Context>(on_finish, this);
object_map::SnapshotRollbackRequest *req =
new object_map::SnapshotRollbackRequest(m_image_ctx, snap_id, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::snapshot_add(uint64_t snap_id, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(snap_id != CEPH_NOSNAP);
Context *ctx = create_context_callback<Context>(on_finish, this);
object_map::SnapshotCreateRequest *req =
new object_map::SnapshotCreateRequest(m_image_ctx, &m_lock, &m_object_map,
snap_id, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::snapshot_remove(uint64_t snap_id, Context *on_finish) {
ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(snap_id != CEPH_NOSNAP);
Context *ctx = create_context_callback<Context>(on_finish, this);
object_map::SnapshotRemoveRequest *req =
new object_map::SnapshotRemoveRequest(m_image_ctx, &m_lock, &m_object_map,
snap_id, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::aio_save(Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
std::shared_lock locker{m_lock};
librados::ObjectWriteOperation op;
if (m_snap_id == CEPH_NOSNAP) {
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "");
}
cls_client::object_map_save(&op, m_object_map);
Context *ctx = create_context_callback<Context>(on_finish, this);
std::string oid(object_map_name(m_image_ctx.id, m_snap_id));
librados::AioCompletion *comp = util::create_rados_callback(ctx);
int r = m_image_ctx.md_ctx.aio_operate(oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void ObjectMap<I>::aio_resize(uint64_t new_size, uint8_t default_object_state,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
ceph_assert(m_image_ctx.image_watcher != NULL);
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
Context *ctx = create_context_callback<Context>(on_finish, this);
object_map::ResizeRequest *req = new object_map::ResizeRequest(
m_image_ctx, &m_lock, &m_object_map, m_snap_id, new_size,
default_object_state, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::detained_aio_update(UpdateOperation &&op) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(ceph_mutex_is_wlocked(m_lock));
BlockGuardCell *cell;
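  // detain the object range so overlapping in-flight updates are serialized:
  // r > 0 means this update was queued behind an in-flight update and will be
  // re-dispatched once that update's cell is released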
int r = m_update_guard->detain({op.start_object_no, op.end_object_no},
&op, &cell);
if (r < 0) {
lderr(cct) << "failed to detain object map update: " << cpp_strerror(r)
<< dendl;
m_image_ctx.op_work_queue->queue(op.on_finish, r);
m_async_op_tracker.finish_op();
return;
} else if (r > 0) {
ldout(cct, 20) << "detaining object map update due to in-flight update: "
<< "start=" << op.start_object_no << ", "
<< "end=" << op.end_object_no << ", "
<< (op.current_state ?
stringify(static_cast<uint32_t>(*op.current_state)) :
"")
<< "->" << static_cast<uint32_t>(op.new_state) << dendl;
return;
}
ldout(cct, 20) << "in-flight update cell: " << cell << dendl;
Context *on_finish = op.on_finish;
Context *ctx = new LambdaContext([this, cell, on_finish](int r) {
handle_detained_aio_update(cell, r, on_finish);
});
aio_update(CEPH_NOSNAP, op.start_object_no, op.end_object_no, op.new_state,
op.current_state, op.parent_trace, op.ignore_enoent, ctx);
}
template <typename I>
void ObjectMap<I>::handle_detained_aio_update(BlockGuardCell *cell, int r,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "cell=" << cell << ", r=" << r << dendl;
typename UpdateGuard::BlockOperations block_ops;
m_update_guard->release(cell, &block_ops);
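  // re-dispatch any updates that were queued behind the released cell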
{
std::shared_lock image_locker{m_image_ctx.image_lock};
std::unique_lock locker{m_lock};
for (auto &op : block_ops) {
detained_aio_update(std::move(op));
}
}
on_finish->complete(r);
m_async_op_tracker.finish_op();
}
template <typename I>
void ObjectMap<I>::aio_update(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace,
bool ignore_enoent, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(m_image_ctx.image_watcher != nullptr);
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
ceph_assert(start_object_no < end_object_no);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "start=" << start_object_no << ", "
<< "end=" << end_object_no << ", "
<< (current_state ?
stringify(static_cast<uint32_t>(*current_state)) : "")
<< "->" << static_cast<uint32_t>(new_state) << dendl;
if (snap_id == CEPH_NOSNAP) {
ceph_assert(ceph_mutex_is_wlocked(m_lock));
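    // in-memory HEAD object map: clip the range to the current map size and
    // skip the request entirely if no object's state would actually change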
end_object_no = std::min(end_object_no, m_object_map.size());
if (start_object_no >= end_object_no) {
ldout(cct, 20) << "skipping update of invalid object map" << dendl;
m_image_ctx.op_work_queue->queue(on_finish, 0);
return;
}
auto it = m_object_map.begin() + start_object_no;
auto end_it = m_object_map.begin() + end_object_no;
for (; it != end_it; ++it) {
if (update_required(it, new_state)) {
break;
}
}
if (it == end_it) {
ldout(cct, 20) << "object map update not required" << dendl;
m_image_ctx.op_work_queue->queue(on_finish, 0);
return;
}
}
auto req = object_map::UpdateRequest<I>::create(
m_image_ctx, &m_lock, &m_object_map, snap_id, start_object_no,
end_object_no, new_state, current_state, parent_trace, ignore_enoent,
on_finish);
req->send();
}
} // namespace librbd
template class librbd::ObjectMap<librbd::ImageCtx>;
| 13,119 | 33.435696 | 93 | cc |
null | ceph-main/src/librbd/ObjectMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_H
#define CEPH_LIBRBD_OBJECT_MAP_H
#include "include/int_types.h"
#include "include/fs_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/object_map_types.h"
#include "common/AsyncOpTracker.h"
#include "common/bit_vector.hpp"
#include "common/RefCountedObj.h"
#include "librbd/Utils.h"
#include <boost/optional.hpp>
class Context;
namespace ZTracer { struct Trace; }
namespace librbd {
template <typename Op> class BlockGuard;
struct BlockGuardCell;
class ImageCtx;
template <typename ImageCtxT = ImageCtx>
class ObjectMap : public RefCountedObject {
public:
static ObjectMap *create(ImageCtxT &image_ctx, uint64_t snap_id) {
return new ObjectMap(image_ctx, snap_id);
}
ObjectMap(ImageCtxT &image_ctx, uint64_t snap_id);
~ObjectMap();
static int aio_remove(librados::IoCtx &io_ctx, const std::string &image_id, librados::AioCompletion *c);
static std::string object_map_name(const std::string &image_id,
uint64_t snap_id);
static bool is_compatible(const file_layout_t& layout, uint64_t size);
uint8_t operator[](uint64_t object_no) const;
inline uint64_t size() const {
std::shared_lock locker{m_lock};
return m_object_map.size();
}
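  // updates only the in-memory map (the on-disk object map is modified via
  // aio_update); the update is dropped if the current state no longer matches
  // the expected value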
inline void set_state(uint64_t object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state) {
std::unique_lock locker{m_lock};
ceph_assert(object_no < m_object_map.size());
if (current_state && m_object_map[object_no] != *current_state) {
return;
}
m_object_map[object_no] = new_state;
}
void open(Context *on_finish);
void close(Context *on_finish);
bool set_object_map(ceph::BitVector<2> &target_object_map);
bool object_may_exist(uint64_t object_no) const;
bool object_may_not_exist(uint64_t object_no) const;
void aio_save(Context *on_finish);
void aio_resize(uint64_t new_size, uint8_t default_object_state,
Context *on_finish);
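  // single-object convenience wrapper: updates the range
  // [start_object_no, start_object_no + 1)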
template <typename T, void(T::*MF)(int) = &T::complete>
bool aio_update(uint64_t snap_id, uint64_t start_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
T *callback_object) {
return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
new_state, current_state, parent_trace,
ignore_enoent, callback_object);
}
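  // returns false if no update is required (the callback is not invoked in
  // that case) and true once an asynchronous update has been dispatched,
  // e.g. (names illustrative):
  //   object_map->aio_update<Context>(CEPH_NOSNAP, object_no, OBJECT_EXISTS,
  //                                   {}, trace, false, on_finish);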
template <typename T, void(T::*MF)(int) = &T::complete>
bool aio_update(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
T *callback_object) {
ceph_assert(start_object_no < end_object_no);
std::unique_lock locker{m_lock};
if (snap_id == CEPH_NOSNAP) {
end_object_no = std::min(end_object_no, m_object_map.size());
if (start_object_no >= end_object_no) {
return false;
}
auto it = m_object_map.begin() + start_object_no;
auto end_it = m_object_map.begin() + end_object_no;
for (; it != end_it; ++it) {
if (update_required(it, new_state)) {
break;
}
}
if (it == end_it) {
return false;
}
m_async_op_tracker.start_op();
UpdateOperation update_operation(start_object_no, end_object_no,
new_state, current_state, parent_trace,
ignore_enoent,
util::create_context_callback<T, MF>(
callback_object));
detained_aio_update(std::move(update_operation));
} else {
aio_update(snap_id, start_object_no, end_object_no, new_state,
current_state, parent_trace, ignore_enoent,
util::create_context_callback<T, MF>(callback_object));
}
return true;
}
void rollback(uint64_t snap_id, Context *on_finish);
void snapshot_add(uint64_t snap_id, Context *on_finish);
void snapshot_remove(uint64_t snap_id, Context *on_finish);
private:
struct UpdateOperation {
uint64_t start_object_no;
uint64_t end_object_no;
uint8_t new_state;
boost::optional<uint8_t> current_state;
ZTracer::Trace parent_trace;
bool ignore_enoent;
Context *on_finish;
UpdateOperation(uint64_t start_object_no, uint64_t end_object_no,
uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace,
bool ignore_enoent, Context *on_finish)
: start_object_no(start_object_no), end_object_no(end_object_no),
new_state(new_state), current_state(current_state),
parent_trace(parent_trace), ignore_enoent(ignore_enoent),
on_finish(on_finish) {
}
};
typedef BlockGuard<UpdateOperation> UpdateGuard;
ImageCtxT &m_image_ctx;
uint64_t m_snap_id;
mutable ceph::shared_mutex m_lock;
ceph::BitVector<2> m_object_map;
AsyncOpTracker m_async_op_tracker;
UpdateGuard *m_update_guard = nullptr;
void detained_aio_update(UpdateOperation &&update_operation);
void handle_detained_aio_update(BlockGuardCell *cell, int r,
Context *on_finish);
void aio_update(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
Context *on_finish);
bool update_required(const ceph::BitVector<2>::Iterator &it,
uint8_t new_state);
};
} // namespace librbd
extern template class librbd::ObjectMap<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OBJECT_MAP_H
| 6,104 | 33.6875 | 106 | h |
null | ceph-main/src/librbd/Operations.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/Operations.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "osdc/Striper.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ObjectMap.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/Utils.h"
#include "librbd/journal/DisabledPolicy.h"
#include "librbd/journal/StandardPolicy.h"
#include "librbd/operation/DisableFeaturesRequest.h"
#include "librbd/operation/EnableFeaturesRequest.h"
#include "librbd/operation/FlattenRequest.h"
#include "librbd/operation/MetadataRemoveRequest.h"
#include "librbd/operation/MetadataSetRequest.h"
#include "librbd/operation/MigrateRequest.h"
#include "librbd/operation/ObjectMapIterate.h"
#include "librbd/operation/RebuildObjectMapRequest.h"
#include "librbd/operation/RenameRequest.h"
#include "librbd/operation/ResizeRequest.h"
#include "librbd/operation/SnapshotCreateRequest.h"
#include "librbd/operation/SnapshotProtectRequest.h"
#include "librbd/operation/SnapshotRemoveRequest.h"
#include "librbd/operation/SnapshotRenameRequest.h"
#include "librbd/operation/SnapshotRollbackRequest.h"
#include "librbd/operation/SnapshotUnprotectRequest.h"
#include "librbd/operation/SnapshotLimitRequest.h"
#include "librbd/operation/SparsifyRequest.h"
#include <set>
#include <boost/bind/bind.hpp>
#include <boost/scope_exit.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Operations: "
namespace librbd {
using namespace boost::placeholders;
namespace {
std::ostream &operator<<(std::ostream &out, const Operation &op) {
switch (op) {
case OPERATION_CHECK_OBJECT_MAP:
out << "check object map";
break;
case OPERATION_FLATTEN:
out << "flatten";
break;
case OPERATION_METADATA_UPDATE:
out << "metadata update";
break;
case OPERATION_MIGRATE:
out << "migrate";
break;
case OPERATION_REBUILD_OBJECT_MAP:
out << "rebuild object map";
break;
case OPERATION_RENAME:
out << "rename";
break;
case OPERATION_RESIZE:
out << "resize";
break;
case OPERATION_SNAP_CREATE:
out << "snap create";
break;
case OPERATION_SNAP_PROTECT:
out << "snap protect";
break;
case OPERATION_SNAP_REMOVE:
out << "snap remove";
break;
case OPERATION_SNAP_RENAME:
out << "snap rename";
break;
case OPERATION_SNAP_ROLLBACK:
out << "snap rollback";
break;
case OPERATION_SNAP_UNPROTECT:
out << "snap unprotect";
break;
case OPERATION_SPARSIFY:
out << "sparsify";
break;
case OPERATION_UPDATE_FEATURES:
out << "update features";
break;
default:
ceph_abort();
break;
}
return out;
}
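// completion wrapper that, on success, sends a header update notification to
// watchers before invoking the wrapped callback; a notification timeout or a
// missing header object is not treated as a failure of the operation itself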
template <typename I>
struct C_NotifyUpdate : public Context {
I &image_ctx;
Context *on_finish;
bool notified = false;
C_NotifyUpdate(I &image_ctx, Context *on_finish)
: image_ctx(image_ctx), on_finish(on_finish) {
}
void complete(int r) override {
CephContext *cct = image_ctx.cct;
if (notified) {
if (r == -ETIMEDOUT) {
// don't fail the op if a peer fails to get the update notification
lderr(cct) << "update notification timed-out" << dendl;
r = 0;
} else if (r == -ENOENT) {
// don't fail if header is missing (e.g. v1 image rename)
ldout(cct, 5) << "update notification on missing header" << dendl;
r = 0;
} else if (r < 0) {
lderr(cct) << "update notification failed: " << cpp_strerror(r)
<< dendl;
}
Context::complete(r);
return;
}
if (r < 0) {
// op failed -- no need to send update notification
Context::complete(r);
return;
}
notified = true;
image_ctx.notify_update(this);
}
void finish(int r) override {
on_finish->complete(r);
}
};
template <typename I>
struct C_InvokeAsyncRequest : public Context {
/**
* @verbatim
*
   *               <start>
   *                  |
   *    . . . . . .   |   . . . . . . . . . . . . . . . . . .
   *    .         .   |   .                                 .
   *    .         v   v   v                                 .
   *    .       REFRESH_IMAGE (skip if not needed)          .
   *    .             |                                     .
   *    .             v                                     .
   *    .       ACQUIRE_LOCK (skip if exclusive lock        .
   *    .             |          disabled or has lock)      .
   *    .             |                                     .
   *    .   /--------/ \--------\   . . . . . . . . . . . . .
   *    .   |                   |   .
   *    .   v                   v   .
   *    . LOCAL_REQUEST   REMOTE_REQUEST
   *        |                   |
   *        |                   |
   *        \--------\ /--------/
   *                 |
   *                 v
   *             <finish>
*
* @endverbatim
*/
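  // a remote request that fails with -EOPNOTSUPP, -ETIMEDOUT or -ERESTART, or
  // a local request that loses the exclusive lock (-ERESTART), loops back to
  // REFRESH_IMAGE and is retried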
I &image_ctx;
Operation operation;
exclusive_lock::OperationRequestType request_type;
bool permit_snapshot;
boost::function<void(Context*)> local;
boost::function<void(Context*)> remote;
std::set<int> filter_error_codes;
Context *on_finish;
bool request_lock = false;
C_InvokeAsyncRequest(I &image_ctx, Operation operation,
exclusive_lock::OperationRequestType request_type,
bool permit_snapshot,
const boost::function<void(Context*)>& local,
const boost::function<void(Context*)>& remote,
const std::set<int> &filter_error_codes,
Context *on_finish)
: image_ctx(image_ctx), operation(operation), request_type(request_type),
permit_snapshot(permit_snapshot), local(local), remote(remote),
filter_error_codes(filter_error_codes), on_finish(on_finish) {
}
void send() {
send_refresh_image();
}
void send_refresh_image() {
if (!image_ctx.state->is_refresh_required()) {
send_acquire_exclusive_lock();
return;
}
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = util::create_context_callback<
C_InvokeAsyncRequest<I>,
&C_InvokeAsyncRequest<I>::handle_refresh_image>(this);
image_ctx.state->refresh(ctx);
}
void handle_refresh_image(int r) {
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl;
complete(r);
return;
}
send_acquire_exclusive_lock();
}
void send_acquire_exclusive_lock() {
// context can complete before owner_lock is unlocked
ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
owner_lock.lock_shared();
image_ctx.image_lock.lock_shared();
if (image_ctx.read_only ||
(!permit_snapshot && image_ctx.snap_id != CEPH_NOSNAP)) {
image_ctx.image_lock.unlock_shared();
owner_lock.unlock_shared();
complete(-EROFS);
return;
}
image_ctx.image_lock.unlock_shared();
if (image_ctx.exclusive_lock == nullptr) {
send_local_request();
owner_lock.unlock_shared();
return;
} else if (image_ctx.image_watcher == nullptr) {
owner_lock.unlock_shared();
complete(-EROFS);
return;
}
if (image_ctx.exclusive_lock->is_lock_owner() &&
image_ctx.exclusive_lock->accept_request(request_type, nullptr)) {
send_local_request();
owner_lock.unlock_shared();
return;
}
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = util::create_async_context_callback(
image_ctx, util::create_context_callback<
C_InvokeAsyncRequest<I>,
&C_InvokeAsyncRequest<I>::handle_acquire_exclusive_lock>(
this, image_ctx.exclusive_lock));
if (request_lock) {
// current lock owner doesn't support op -- try to perform
// the action locally
request_lock = false;
image_ctx.exclusive_lock->acquire_lock(ctx);
} else {
image_ctx.exclusive_lock->try_acquire_lock(ctx);
}
owner_lock.unlock_shared();
}
void handle_acquire_exclusive_lock(int r) {
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
if (r < 0) {
complete(r == -EBLOCKLISTED ? -EBLOCKLISTED : -EROFS);
return;
}
// context can complete before owner_lock is unlocked
ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
owner_lock.lock_shared();
if (image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner()) {
send_local_request();
owner_lock.unlock_shared();
return;
}
send_remote_request();
owner_lock.unlock_shared();
}
void send_remote_request() {
ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = util::create_async_context_callback(
image_ctx, util::create_context_callback<
C_InvokeAsyncRequest<I>,
&C_InvokeAsyncRequest<I>::handle_remote_request>(this));
remote(ctx);
}
void handle_remote_request(int r) {
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
if (r == -EOPNOTSUPP) {
ldout(cct, 5) << operation << " not supported by current lock owner"
<< dendl;
request_lock = true;
send_refresh_image();
return;
} else if (r != -ETIMEDOUT && r != -ERESTART) {
image_ctx.state->handle_update_notification();
complete(r);
return;
}
ldout(cct, 5) << operation << " timed out notifying lock owner" << dendl;
send_refresh_image();
}
void send_local_request() {
auto ctx = new LambdaContext(
[this](int r) {
if (r == -ERESTART) {
image_ctx.operations->finish_op(operation, r);
send_refresh_image();
return;
}
execute_local_request();
});
image_ctx.operations->start_op(operation, ctx);
}
void execute_local_request() {
std::shared_lock owner_locker{image_ctx.owner_lock};
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = util::create_async_context_callback(
image_ctx, util::create_context_callback<
C_InvokeAsyncRequest<I>,
&C_InvokeAsyncRequest<I>::handle_local_request>(this));
local(ctx);
}
void handle_local_request(int r) {
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
image_ctx.operations->finish_op(operation, r);
if (r == -ERESTART) {
send_refresh_image();
return;
}
complete(r);
}
void finish(int r) override {
if (filter_error_codes.count(r) != 0) {
r = 0;
}
on_finish->complete(r);
}
};
template <typename I>
bool needs_invalidate(I& image_ctx, uint64_t object_no,
uint8_t current_state, uint8_t new_state) {
if ( (current_state == OBJECT_EXISTS ||
current_state == OBJECT_EXISTS_CLEAN) &&
(new_state == OBJECT_NONEXISTENT ||
new_state == OBJECT_PENDING)) {
return false;
}
return true;
}
} // anonymous namespace
template <typename I>
Operations<I>::Operations(I &image_ctx)
: m_image_ctx(image_ctx),
m_queue_lock(ceph::make_mutex(
util::unique_lock_name("librbd::Operations::m_queue_lock",
this))) {
}
template <typename I>
void Operations<I>::start_op(Operation op, Context *ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << __func__ << ": " << op << " " << ctx << dendl;
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bool requires_lock = m_image_ctx.exclusive_lock != nullptr;
ctx = util::create_async_context_callback(
m_image_ctx, new LambdaContext(
[this, op, requires_lock, ctx](int r) {
Context *finish_op_ctx = nullptr;
if (requires_lock && r == 0) {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::shared_lock image_locker{m_image_ctx.image_lock};
auto exclusive_lock = m_image_ctx.exclusive_lock;
if (exclusive_lock == nullptr ||
(finish_op_ctx = exclusive_lock->start_op(&r)) == nullptr) {
ldout(m_image_ctx.cct, 20) << "lock owner lost, restarting"
<< dendl;
r = -ERESTART;
}
}
ldout(m_image_ctx.cct, 20) << "start " << op << " " << ctx << dendl;
ctx->complete(r);
if (finish_op_ctx != nullptr) {
finish_op_ctx->complete(0);
}
}));
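  // only one operation of each type may be in flight at a time: duplicates
  // are queued here and are kicked off by finish_op() once the in-flight
  // operation of that type completes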
std::unique_lock locker{m_queue_lock};
if (!m_in_flight_ops.insert(op).second) {
ldout(cct, 20) << __func__ << ": " << op << " in flight" << dendl;
m_queued_ops[op].push_back(ctx);
return;
}
ctx->complete(0);
}
template <typename I>
void Operations<I>::finish_op(Operation op, int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << __func__ << ": " << op << " r=" << r << dendl;
std::unique_lock locker{m_queue_lock};
auto &queue = m_queued_ops[op];
if (queue.empty()) {
m_in_flight_ops.erase(op);
return;
}
auto ctx = queue.front();
queue.pop_front();
  // propagate -ERESTART through the entire queue
ctx->complete(r == -ERESTART ? r : 0);
}
template <typename I>
int Operations<I>::flatten(ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "flatten" << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_image_ctx.parent_md.spec.pool_id == -1) {
lderr(cct) << "image has no parent" << dendl;
return -EINVAL;
}
}
uint64_t request_id = util::reserve_async_request_id();
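  // reserve an id so the exclusive lock owner can correlate progress and
  // completion notifications for this asynchronous request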
r = invoke_async_request(OPERATION_FLATTEN,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_flatten, this,
boost::ref(prog_ctx), _1),
boost::bind(&ImageWatcher<I>::notify_flatten,
m_image_ctx.image_watcher, request_id,
boost::ref(prog_ctx), _1));
if (r < 0 && r != -EINVAL) {
return r;
}
ldout(cct, 20) << "flatten finished" << dendl;
return 0;
}
template <typename I>
void Operations<I>::execute_flatten(ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "flatten" << dendl;
if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
// can't flatten a non-clone
if (m_image_ctx.parent_md.spec.pool_id == -1) {
lderr(cct) << "image has no parent" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
if (m_image_ctx.snap_id != CEPH_NOSNAP) {
lderr(cct) << "snapshots cannot be flattened" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
}
uint64_t crypto_header_objects = Striper::get_num_objects(
m_image_ctx.layout,
m_image_ctx.get_area_size(io::ImageArea::CRYPTO_HEADER));
uint64_t raw_overlap;
int r = m_image_ctx.get_parent_overlap(CEPH_NOSNAP, &raw_overlap);
ceph_assert(r == 0);
auto overlap = m_image_ctx.reduce_parent_overlap(raw_overlap, false);
uint64_t data_overlap_objects = Striper::get_num_objects(
m_image_ctx.layout,
(overlap.second == io::ImageArea::DATA ? overlap.first : 0));
m_image_ctx.image_lock.unlock_shared();
// leave encryption header flattening to format-specific handler
operation::FlattenRequest<I> *req = new operation::FlattenRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
crypto_header_objects, data_overlap_objects, prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::rebuild_object_map(ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << "rebuild_object_map" << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_REBUILD_OBJECT_MAP,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true,
boost::bind(&Operations<I>::execute_rebuild_object_map,
this, boost::ref(prog_ctx), _1),
boost::bind(&ImageWatcher<I>::notify_rebuild_object_map,
m_image_ctx.image_watcher, request_id,
boost::ref(prog_ctx), _1));
ldout(cct, 10) << "rebuild object map finished" << dendl;
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Operations<I>::execute_rebuild_object_map(ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
lderr(cct) << "image must support object-map feature" << dendl;
on_finish->complete(-EINVAL);
return;
}
operation::RebuildObjectMapRequest<I> *req =
new operation::RebuildObjectMapRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::check_object_map(ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
r = invoke_async_request(OPERATION_CHECK_OBJECT_MAP,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true,
boost::bind(&Operations<I>::check_object_map, this,
boost::ref(prog_ctx), _1),
[this](Context *c) {
m_image_ctx.op_work_queue->queue(c, -EOPNOTSUPP);
});
return r;
}
template <typename I>
void Operations<I>::object_map_iterate(ProgressContext &prog_ctx,
operation::ObjectIterateWork<I> handle_mismatch,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
on_finish->complete(-EINVAL);
return;
}
operation::ObjectMapIterateRequest<I> *req =
new operation::ObjectMapIterateRequest<I>(m_image_ctx, on_finish,
prog_ctx, handle_mismatch);
req->send();
}
template <typename I>
void Operations<I>::check_object_map(ProgressContext &prog_ctx,
Context *on_finish) {
object_map_iterate(prog_ctx, needs_invalidate, on_finish);
}
template <typename I>
int Operations<I>::rename(const char *dstname) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dstname
<< dendl;
int r = librbd::detect_format(m_image_ctx.md_ctx, dstname, NULL, NULL);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error checking for existing image called "
<< dstname << ":" << cpp_strerror(r) << dendl;
return r;
}
if (r == 0) {
lderr(cct) << "rbd image " << dstname << " already exists" << dendl;
return -EEXIST;
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_RENAME,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true,
boost::bind(&Operations<I>::execute_rename, this,
dstname, _1),
boost::bind(&ImageWatcher<I>::notify_rename,
m_image_ctx.image_watcher, request_id,
dstname, _1));
if (r < 0 && r != -EEXIST) {
return r;
}
m_image_ctx.set_image_name(dstname);
return 0;
}
template <typename I>
void Operations<I>::execute_rename(const std::string &dest_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dest_name
<< dendl;
if (m_image_ctx.old_format) {
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.name == dest_name) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
m_image_ctx.image_lock.unlock_shared();
    // unregister the watch before the rename and re-register it afterwards
on_finish = new C_NotifyUpdate<I>(m_image_ctx, on_finish);
on_finish = new LambdaContext([this, on_finish](int r) {
if (m_image_ctx.old_format) {
m_image_ctx.image_watcher->set_oid(m_image_ctx.header_oid);
}
m_image_ctx.image_watcher->register_watch(on_finish);
});
on_finish = new LambdaContext([this, dest_name, on_finish](int r) {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
operation::RenameRequest<I> *req = new operation::RenameRequest<I>(
m_image_ctx, on_finish, dest_name);
req->send();
});
m_image_ctx.image_watcher->unregister_watch(on_finish);
return;
}
operation::RenameRequest<I> *req = new operation::RenameRequest<I>(
m_image_ctx, on_finish, dest_name);
req->send();
}
template <typename I>
int Operations<I>::resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx) {
CephContext *cct = m_image_ctx.cct;
m_image_ctx.image_lock.lock_shared();
uint64_t raw_size = io::util::area_to_raw_offset(m_image_ctx, size,
io::ImageArea::DATA);
ldout(cct, 5) << this << " " << __func__
<< ": size=" << size
<< " raw_size=" << m_image_ctx.size
<< " new_raw_size=" << raw_size << dendl;
m_image_ctx.image_lock.unlock_shared();
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP) &&
!ObjectMap<>::is_compatible(m_image_ctx.layout, raw_size)) {
lderr(cct) << "New size not compatible with object map" << dendl;
return -EINVAL;
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_RESIZE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_resize, this,
size, allow_shrink, boost::ref(prog_ctx), _1, 0),
boost::bind(&ImageWatcher<I>::notify_resize,
m_image_ctx.image_watcher, request_id,
size, allow_shrink, boost::ref(prog_ctx), _1));
m_image_ctx.perfcounter->inc(l_librbd_resize);
ldout(cct, 2) << "resize finished" << dendl;
return r;
}
template <typename I>
void Operations<I>::execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx,
Context *on_finish,
uint64_t journal_op_tid) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
m_image_ctx.image_lock.lock_shared();
uint64_t raw_size = io::util::area_to_raw_offset(m_image_ctx, size,
io::ImageArea::DATA);
ldout(cct, 5) << this << " " << __func__
<< ": size=" << size
<< " raw_size=" << m_image_ctx.size
<< " new_raw_size=" << raw_size << dendl;
if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only ||
m_image_ctx.operations_disabled) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
} else if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock) &&
!ObjectMap<>::is_compatible(m_image_ctx.layout, raw_size)) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
m_image_ctx.image_lock.unlock_shared();
operation::ResizeRequest<I> *req = new operation::ResizeRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), raw_size,
allow_shrink, prog_ctx, journal_op_tid, false);
req->send();
}
template <typename I>
int Operations<I>::snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string& snap_name, uint64_t flags,
ProgressContext &prog_ctx) {
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond ctx;
snap_create(snap_namespace, snap_name, flags, prog_ctx, &ctx);
r = ctx.wait();
if (r < 0) {
return r;
}
m_image_ctx.perfcounter->inc(l_librbd_snap_create);
return r;
}
template <typename I>
void Operations<I>::snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string& snap_name, uint64_t flags,
ProgressContext &prog_ctx, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.read_only) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
m_image_ctx.image_lock.unlock_shared();
uint64_t request_id = util::reserve_async_request_id();
C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
m_image_ctx, OPERATION_SNAP_CREATE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true,
boost::bind(&Operations<I>::execute_snap_create, this, snap_namespace, snap_name,
_1, 0, flags, boost::ref(prog_ctx)),
boost::bind(&ImageWatcher<I>::notify_snap_create, m_image_ctx.image_watcher,
request_id, snap_namespace, snap_name, flags,
boost::ref(prog_ctx), _1),
{-EEXIST}, on_finish);
req->send();
}
template <typename I>
void Operations<I>::execute_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish,
uint64_t journal_op_tid,
uint64_t flags,
ProgressContext &prog_ctx) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
m_image_ctx.image_lock.unlock_shared();
operation::SnapshotCreateRequest<I> *req =
new operation::SnapshotCreateRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
snap_namespace, snap_name, journal_op_tid, flags, prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name,
ProgressContext& prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0)
return r;
C_SaferCond cond_ctx;
{
std::shared_lock owner_locker{m_image_ctx.owner_lock};
{
// need to drop image_lock before invalidating cache
std::shared_lock image_locker{m_image_ctx.image_lock};
if (!m_image_ctx.snap_exists) {
return -ENOENT;
}
if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only) {
return -EROFS;
}
uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
lderr(cct) << "No such snapshot found." << dendl;
return -ENOENT;
}
}
r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false);
if (r < 0) {
return r;
}
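    // run the rollback as a tracked operation: finish_op() is invoked before
    // the waiting caller is signalled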
Context *ctx = new LambdaContext(
[this, ctx=&cond_ctx](int r) {
m_image_ctx.operations->finish_op(OPERATION_SNAP_ROLLBACK, r);
ctx->complete(r);
});
ctx = new LambdaContext(
[this, snap_namespace, snap_name, &prog_ctx, ctx](int r) {
if (r < 0) {
ctx->complete(r);
return;
}
std::shared_lock l{m_image_ctx.owner_lock};
execute_snap_rollback(snap_namespace, snap_name, prog_ctx, ctx);
});
m_image_ctx.operations->start_op(OPERATION_SNAP_ROLLBACK, ctx);
}
r = cond_ctx.wait();
if (r < 0) {
return r;
}
m_image_ctx.perfcounter->inc(l_librbd_snap_rollback);
return r;
}
template <typename I>
void Operations<I>::execute_snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
ProgressContext& prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
lderr(cct) << "No such snapshot found." << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
uint64_t new_size = m_image_ctx.get_image_size(snap_id);
m_image_ctx.image_lock.unlock_shared();
// async mode used for journal replay
operation::SnapshotRollbackRequest<I> *request =
new operation::SnapshotRollbackRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name,
snap_id, new_size, prog_ctx);
request->send();
}
template <typename I>
int Operations<I>::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name) {
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond ctx;
snap_remove(snap_namespace, snap_name, &ctx);
r = ctx.wait();
if (r < 0) {
return r;
}
m_image_ctx.perfcounter->inc(l_librbd_snap_remove);
return 0;
}
template <typename I>
void Operations<I>::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.read_only) {
on_finish->complete(-EROFS);
return;
}
// quickly filter out duplicate ops
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) == CEPH_NOSNAP) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
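  // fast-diff and journaling both require the snapshot removal to be
  // coordinated by the exclusive lock owner, so proxy the request in that case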
bool proxy_op = ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0 ||
(m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0);
m_image_ctx.image_lock.unlock_shared();
if (proxy_op) {
uint64_t request_id = util::reserve_async_request_id();
auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;
if (cls::rbd::get_snap_namespace_type(snap_namespace) ==
cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
request_type = exclusive_lock::OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE;
}
C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
m_image_ctx, OPERATION_SNAP_REMOVE, request_type, true,
boost::bind(&Operations<I>::execute_snap_remove, this, snap_namespace,
snap_name, _1),
boost::bind(&ImageWatcher<I>::notify_snap_remove,
m_image_ctx.image_watcher, request_id, snap_namespace,
snap_name, _1),
{-ENOENT}, on_finish);
req->send();
} else {
std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_remove(snap_namespace, snap_name, on_finish);
}
}
template <typename I>
void Operations<I>::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
{
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
lderr(m_image_ctx.cct) << "No such snapshot found." << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
bool is_protected;
int r = m_image_ctx.is_snap_protected(snap_id, &is_protected);
if (r < 0) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_protected) {
lderr(m_image_ctx.cct) << "snapshot is protected" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EBUSY);
return;
}
m_image_ctx.image_lock.unlock_shared();
operation::SnapshotRemoveRequest<I> *req =
new operation::SnapshotRemoveRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
snap_namespace, snap_name, snap_id);
req->send();
}
template <typename I>
int Operations<I>::snap_rename(const char *srcname, const char *dstname) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": "
<< "snap_name=" << srcname << ", "
<< "new_snap_name=" << dstname << dendl;
snapid_t snap_id;
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0)
return r;
{
std::shared_lock l{m_image_ctx.image_lock};
snap_id = m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), srcname);
if (snap_id == CEPH_NOSNAP) {
return -ENOENT;
}
if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), dstname) != CEPH_NOSNAP) {
return -EEXIST;
}
}
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_SNAP_RENAME,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true,
boost::bind(&Operations<I>::execute_snap_rename,
this, snap_id, dstname, _1),
boost::bind(&ImageWatcher<I>::notify_snap_rename,
m_image_ctx.image_watcher, request_id,
snap_id, dstname, _1));
if (r < 0 && r != -EEXIST) {
return r;
}
} else {
C_SaferCond cond_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_rename(snap_id, dstname, &cond_ctx);
}
r = cond_ctx.wait();
if (r < 0) {
return r;
}
}
m_image_ctx.perfcounter->inc(l_librbd_snap_rename);
return 0;
}
template <typename I>
void Operations<I>::execute_snap_rename(const uint64_t src_snap_id,
const std::string &dest_snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if ((m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(),
dest_snap_name) != CEPH_NOSNAP) {
// Renaming is supported for snapshots from user namespace only.
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": "
<< "snap_id=" << src_snap_id << ", "
<< "new_snap_name=" << dest_snap_name << dendl;
operation::SnapshotRenameRequest<I> *req =
new operation::SnapshotRenameRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), src_snap_id,
dest_snap_name);
req->send();
}
template <typename I>
int Operations<I>::snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.read_only) {
return -EROFS;
}
if (!m_image_ctx.test_features(RBD_FEATURE_LAYERING)) {
lderr(cct) << "image must support layering" << dendl;
return -ENOSYS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
bool is_protected;
r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_protected);
if (r < 0) {
return r;
}
if (is_protected) {
return -EBUSY;
}
}
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_SNAP_PROTECT,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true,
boost::bind(&Operations<I>::execute_snap_protect,
this, snap_namespace, snap_name, _1),
boost::bind(&ImageWatcher<I>::notify_snap_protect,
m_image_ctx.image_watcher, request_id,
snap_namespace, snap_name, _1));
if (r < 0 && r != -EBUSY) {
return r;
}
} else {
C_SaferCond cond_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_protect(snap_namespace, snap_name, &cond_ctx);
}
r = cond_ctx.wait();
if (r < 0) {
return r;
}
}
return 0;
}
template <typename I>
void Operations<I>::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
bool is_protected;
int r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_protected);
if (r < 0) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_protected) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EBUSY);
return;
}
m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
operation::SnapshotProtectRequest<I> *request =
new operation::SnapshotProtectRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name);
request->send();
}
template <typename I>
int Operations<I>::snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
bool is_unprotected;
r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_unprotected);
if (r < 0) {
return r;
}
if (is_unprotected) {
return -EINVAL;
}
}
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_SNAP_UNPROTECT,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true,
boost::bind(&Operations<I>::execute_snap_unprotect,
this, snap_namespace, snap_name, _1),
boost::bind(&ImageWatcher<I>::notify_snap_unprotect,
m_image_ctx.image_watcher, request_id,
snap_namespace, snap_name, _1));
if (r < 0 && r != -EINVAL) {
return r;
}
} else {
C_SaferCond cond_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_unprotect(snap_namespace, snap_name, &cond_ctx);
}
r = cond_ctx.wait();
if (r < 0) {
return r;
}
}
return 0;
}
template <typename I>
void Operations<I>::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
bool is_unprotected;
int r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_unprotected);
if (r < 0) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_unprotected) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
operation::SnapshotUnprotectRequest<I> *request =
new operation::SnapshotUnprotectRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name);
request->send();
}
template <typename I>
int Operations<I>::snap_set_limit(uint64_t limit) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit << dendl;
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond limit_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true);
if (r < 0) {
return r;
}
execute_snap_set_limit(limit, &limit_ctx);
}
r = limit_ctx.wait();
return r;
}
template <typename I>
void Operations<I>::execute_snap_set_limit(const uint64_t limit,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit
<< dendl;
operation::SnapshotLimitRequest<I> *request =
new operation::SnapshotLimitRequest<I>(m_image_ctx, on_finish, limit);
request->send();
}
template <typename I>
int Operations<I>::update_features(uint64_t features, bool enabled) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": features=" << features
<< ", enabled=" << enabled << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
} else if (m_image_ctx.old_format) {
lderr(cct) << "old-format images do not support features" << dendl;
return -EINVAL;
}
uint64_t disable_mask = (RBD_FEATURES_MUTABLE |
RBD_FEATURES_DISABLE_ONLY);
if ((enabled && (features & RBD_FEATURES_MUTABLE) != features) ||
(!enabled && (features & disable_mask) != features) ||
((features & ~RBD_FEATURES_MUTABLE_INTERNAL) != features)) {
lderr(cct) << "cannot update immutable features" << dendl;
return -EINVAL;
}
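  // fast-diff depends on object-map: enabling either feature pulls in the
  // other if it is missing, and disabling object-map also disables fast-diff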
bool set_object_map = (features & RBD_FEATURE_OBJECT_MAP) == RBD_FEATURE_OBJECT_MAP;
bool set_fast_diff = (features & RBD_FEATURE_FAST_DIFF) == RBD_FEATURE_FAST_DIFF;
bool exist_fast_diff = (m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0;
bool exist_object_map = (m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0;
if ((enabled && ((set_object_map && !exist_fast_diff) || (set_fast_diff && !exist_object_map)))
|| (!enabled && (set_object_map && exist_fast_diff))) {
features |= (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF);
}
if (features == 0) {
lderr(cct) << "update requires at least one feature" << dendl;
return -EINVAL;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
if (enabled && (features & m_image_ctx.features) != 0) {
lderr(cct) << "one or more requested features are already enabled"
<< dendl;
return -EINVAL;
}
if (!enabled && (features & ~m_image_ctx.features) != 0) {
lderr(cct) << "one or more requested features are already disabled"
<< dendl;
return -EINVAL;
}
}
// if disabling journaling, avoid attempting to open the journal
// when acquiring the exclusive lock in case the journal is corrupt
bool disabling_journal = false;
if (!enabled && ((features & RBD_FEATURE_JOURNALING) != 0)) {
std::unique_lock image_locker{m_image_ctx.image_lock};
m_image_ctx.set_journal_policy(new journal::DisabledPolicy());
disabling_journal = true;
}
BOOST_SCOPE_EXIT_ALL( (this)(disabling_journal) ) {
if (disabling_journal) {
std::unique_lock image_locker{m_image_ctx.image_lock};
m_image_ctx.set_journal_policy(
new journal::StandardPolicy<I>(&m_image_ctx));
}
};
// The journal options are not passed to the lock owner in the
// update features request. Therefore, if journaling is being
// enabled, the lock should be locally acquired instead of
// attempting to send the request to the peer.
if (enabled && (features & RBD_FEATURE_JOURNALING) != 0) {
C_SaferCond cond_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true);
if (r < 0) {
return r;
}
execute_update_features(features, enabled, &cond_ctx, 0);
}
r = cond_ctx.wait();
} else {
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_UPDATE_FEATURES,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_update_features,
this, features, enabled, _1, 0),
boost::bind(&ImageWatcher<I>::notify_update_features,
m_image_ctx.image_watcher, request_id,
features, enabled, _1));
}
ldout(cct, 2) << "update_features finished" << dendl;
return r;
}
template <typename I>
void Operations<I>::execute_update_features(uint64_t features, bool enabled,
Context *on_finish,
uint64_t journal_op_tid) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": features=" << features
<< ", enabled=" << enabled << dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
if (enabled) {
operation::EnableFeaturesRequest<I> *req =
new operation::EnableFeaturesRequest<I>(
m_image_ctx, on_finish, journal_op_tid, features);
req->send();
} else {
operation::DisableFeaturesRequest<I> *req =
new operation::DisableFeaturesRequest<I>(
m_image_ctx, on_finish, journal_op_tid, features, false);
req->send();
}
}
template <typename I>
int Operations<I>::metadata_set(const std::string &key,
const std::string &value) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
<< value << dendl;
std::string config_key;
bool config_override = util::is_metadata_config_override(key, &config_key);
if (config_override) {
// validate config setting
if (!librbd::api::Config<I>::is_option_name(&m_image_ctx, config_key)) {
lderr(cct) << "validation for " << key
<< " failed: not allowed image level override" << dendl;
return -EINVAL;
}
int r = ConfigProxy{false}.set_val(config_key.c_str(), value);
if (r < 0) {
return r;
}
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_METADATA_UPDATE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_metadata_set,
this, key, value, _1),
boost::bind(&ImageWatcher<I>::notify_metadata_set,
m_image_ctx.image_watcher, request_id,
key, value, _1));
if (config_override && r >= 0) {
// apply new config key immediately
r = m_image_ctx.state->refresh_if_required();
}
ldout(cct, 20) << "metadata_set finished" << dendl;
return r;
}
template <typename I>
void Operations<I>::execute_metadata_set(const std::string &key,
const std::string &value,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
<< value << dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
operation::MetadataSetRequest<I> *request =
new operation::MetadataSetRequest<I>(m_image_ctx,
new C_NotifyUpdate<I>(m_image_ctx, on_finish),
key, value);
request->send();
}
template <typename I>
int Operations<I>::metadata_remove(const std::string &key) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
}
std::string value;
r = cls_client::metadata_get(&m_image_ctx.md_ctx, m_image_ctx.header_oid, key, &value);
  if (r < 0) {
    return r;
  }
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_METADATA_UPDATE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_metadata_remove,
this, key, _1),
boost::bind(&ImageWatcher<I>::notify_metadata_remove,
m_image_ctx.image_watcher, request_id,
key, _1));
if (r == -ENOENT) {
r = 0;
}
std::string config_key;
if (util::is_metadata_config_override(key, &config_key) && r >= 0) {
// apply new config key immediately
r = m_image_ctx.state->refresh_if_required();
}
ldout(cct, 20) << "metadata_remove finished" << dendl;
return r;
}
template <typename I>
void Operations<I>::execute_metadata_remove(const std::string &key,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
operation::MetadataRemoveRequest<I> *request =
new operation::MetadataRemoveRequest<I>(
m_image_ctx,
new C_NotifyUpdate<I>(m_image_ctx, on_finish), key);
request->send();
}
template <typename I>
int Operations<I>::migrate(ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "migrate" << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_image_ctx.migration_info.empty()) {
lderr(cct) << "image has no migrating parent" << dendl;
return -EINVAL;
}
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_MIGRATE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_migrate, this,
boost::ref(prog_ctx), _1),
boost::bind(&ImageWatcher<I>::notify_migrate,
m_image_ctx.image_watcher, request_id,
boost::ref(prog_ctx), _1));
if (r < 0 && r != -EINVAL) {
return r;
}
ldout(cct, 20) << "migrate finished" << dendl;
return 0;
}
template <typename I>
void Operations<I>::execute_migrate(ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "migrate" << dendl;
if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.migration_info.empty()) {
lderr(cct) << "image has no migrating parent" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
if (m_image_ctx.snap_id != CEPH_NOSNAP) {
lderr(cct) << "snapshots cannot be migrated" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.unlock_shared();
operation::MigrateRequest<I> *req = new operation::MigrateRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::sparsify(size_t sparse_size, ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "sparsify" << dendl;
if (sparse_size < 4096 || sparse_size > m_image_ctx.get_object_size() ||
(sparse_size & (sparse_size - 1)) != 0) {
lderr(cct) << "sparse size should be power of two not less than 4096"
<< " and not larger image object size" << dendl;
return -EINVAL;
}
uint64_t request_id = util::reserve_async_request_id();
int r = invoke_async_request(OPERATION_SPARSIFY,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_sparsify,
this, sparse_size,
boost::ref(prog_ctx), _1),
boost::bind(&ImageWatcher<I>::notify_sparsify,
m_image_ctx.image_watcher,
request_id, sparse_size,
boost::ref(prog_ctx), _1));
if (r < 0 && r != -EINVAL) {
return r;
}
ldout(cct, 20) << "resparsify finished" << dendl;
return 0;
}
template <typename I>
void Operations<I>::execute_sparsify(size_t sparse_size,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "sparsify" << dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
auto req = new operation::SparsifyRequest<I>(
m_image_ctx, sparse_size, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::prepare_image_update(
exclusive_lock::OperationRequestType request_type, bool request_lock) {
ceph_assert(ceph_mutex_is_rlocked(m_image_ctx.owner_lock));
if (m_image_ctx.image_watcher == nullptr) {
return -EROFS;
}
// need to upgrade to a write lock
C_SaferCond ctx;
m_image_ctx.owner_lock.unlock_shared();
bool attempting_lock = false;
{
std::unique_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr &&
(!m_image_ctx.exclusive_lock->is_lock_owner() ||
!m_image_ctx.exclusive_lock->accept_request(request_type, nullptr))) {
attempting_lock = true;
m_image_ctx.exclusive_lock->block_requests(0);
if (request_lock) {
m_image_ctx.exclusive_lock->acquire_lock(&ctx);
} else {
m_image_ctx.exclusive_lock->try_acquire_lock(&ctx);
}
}
}
int r = 0;
if (attempting_lock) {
r = ctx.wait();
}
m_image_ctx.owner_lock.lock_shared();
if (attempting_lock && m_image_ctx.exclusive_lock != nullptr) {
m_image_ctx.exclusive_lock->unblock_requests();
}
if (r == -EAGAIN || r == -EBUSY) {
r = 0;
}
if (r < 0) {
return r;
} else if (m_image_ctx.exclusive_lock != nullptr &&
!m_image_ctx.exclusive_lock->is_lock_owner()) {
return m_image_ctx.exclusive_lock->get_unlocked_op_error();
}
return 0;
}
template <typename I>
int Operations<I>::invoke_async_request(
Operation op, exclusive_lock::OperationRequestType request_type,
bool permit_snapshot, const boost::function<void(Context*)>& local_request,
const boost::function<void(Context*)>& remote_request) {
C_SaferCond ctx;
C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(m_image_ctx, op,
request_type,
permit_snapshot,
local_request,
remote_request,
{}, &ctx);
req->send();
return ctx.wait();
}
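// Illustrative sketch (not part of the upstream flow): both functors passed
// to invoke_async_request() receive a Context* that must be completed exactly
// once with the operation result. Broadly, the "local" functor runs when this
// client owns (or can acquire) the exclusive lock, while the "remote" functor
// asks the current lock owner to perform the work, e.g.:
//
//   int r = invoke_async_request(
//     OPERATION_METADATA_UPDATE,
//     exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, false,
//     [](Context* ctx) { ctx->complete(0); },            // local path
//     [](Context* ctx) { ctx->complete(-EOPNOTSUPP); }); // remote path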
} // namespace librbd
template class librbd::Operations<librbd::ImageCtx>;

// file: ceph-main/src/librbd/Operations.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATIONS_H
#define CEPH_LIBRBD_OPERATIONS_H
#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/operation/ObjectMapIterate.h"
#include <atomic>
#include <string>
#include <list>
#include <map>
#include <set>
#include <boost/function.hpp>
class Context;
namespace librbd {
class ImageCtx;
class ProgressContext;
enum Operation {
OPERATION_CHECK_OBJECT_MAP,
OPERATION_FLATTEN,
OPERATION_METADATA_UPDATE,
OPERATION_MIGRATE,
OPERATION_REBUILD_OBJECT_MAP,
OPERATION_RENAME,
OPERATION_RESIZE,
OPERATION_SNAP_CREATE,
OPERATION_SNAP_PROTECT,
OPERATION_SNAP_REMOVE,
OPERATION_SNAP_RENAME,
OPERATION_SNAP_ROLLBACK,
OPERATION_SNAP_UNPROTECT,
OPERATION_SPARSIFY,
OPERATION_UPDATE_FEATURES,
};
template <typename ImageCtxT = ImageCtx>
class Operations {
public:
Operations(ImageCtxT &image_ctx);
void start_op(enum Operation op, Context *ctx);
void finish_op(enum Operation op, int r);
int flatten(ProgressContext &prog_ctx);
void execute_flatten(ProgressContext &prog_ctx, Context *on_finish);
int rebuild_object_map(ProgressContext &prog_ctx);
void execute_rebuild_object_map(ProgressContext &prog_ctx,
Context *on_finish);
int check_object_map(ProgressContext &prog_ctx);
void check_object_map(ProgressContext &prog_ctx, Context *on_finish);
void object_map_iterate(ProgressContext &prog_ctx,
operation::ObjectIterateWork<ImageCtxT> handle_mismatch,
Context* on_finish);
int rename(const char *dstname);
void execute_rename(const std::string &dest_name, Context *on_finish);
int resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx);
void execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx,
Context *on_finish, uint64_t journal_op_tid);
int snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string& snap_name, uint64_t flags,
ProgressContext& prog_ctx);
void snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string& snap_name, uint64_t flags,
ProgressContext& prog_ctx, Context *on_finish);
void execute_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name, Context *on_finish,
uint64_t journal_op_tid, uint64_t flags,
ProgressContext &prog_ctx);
int snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name,
ProgressContext& prog_ctx);
void execute_snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
ProgressContext& prog_ctx, Context *on_finish);
int snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name);
void snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name,
Context *on_finish);
void execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish);
int snap_rename(const char *srcname, const char *dstname);
void execute_snap_rename(const uint64_t src_snap_id,
const std::string &dest_snap_name,
Context *on_finish);
int snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name);
void execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish);
int snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name);
void execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish);
int snap_set_limit(uint64_t limit);
void execute_snap_set_limit(uint64_t limit, Context *on_finish);
int update_features(uint64_t features, bool enabled);
void execute_update_features(uint64_t features, bool enabled,
Context *on_finish, uint64_t journal_op_tid);
int metadata_set(const std::string &key, const std::string &value);
void execute_metadata_set(const std::string &key, const std::string &value,
Context *on_finish);
int metadata_remove(const std::string &key);
void execute_metadata_remove(const std::string &key, Context *on_finish);
int migrate(ProgressContext &prog_ctx);
void execute_migrate(ProgressContext &prog_ctx, Context *on_finish);
int sparsify(size_t sparse_size, ProgressContext &prog_ctx);
void execute_sparsify(size_t sparse_size, ProgressContext &prog_ctx,
Context *on_finish);
int prepare_image_update(exclusive_lock::OperationRequestType request_type,
bool request_lock);
private:
ImageCtxT &m_image_ctx;
mutable ceph::mutex m_queue_lock;
std::set<Operation> m_in_flight_ops;
std::map<Operation, std::list<Context *>> m_queued_ops;
int invoke_async_request(Operation op,
exclusive_lock::OperationRequestType request_type,
bool permit_snapshot,
const boost::function<void(Context*)>& local,
const boost::function<void(Context*)>& remote);
};
} // namespace librbd
extern template class librbd::Operations<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATIONS_H

// file: ceph-main/src/librbd/PluginRegistry.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/PluginRegistry.h"
#include "include/Context.h"
#include "common/dout.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/ImageCtx.h"
#include "librbd/plugin/Api.h"
#include <boost/tokenizer.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::PluginRegistry: " \
<< this << " " << __func__ << ": "
namespace librbd {
template <typename I>
PluginRegistry<I>::PluginRegistry(I* image_ctx)
: m_image_ctx(image_ctx), m_plugin_api(std::make_unique<plugin::Api<I>>()),
m_image_writeback(std::make_unique<cache::ImageWriteback<I>>(*image_ctx)) {
}
template <typename I>
PluginRegistry<I>::~PluginRegistry() {
}
template <typename I>
void PluginRegistry<I>::init(const std::string& plugins, Context* on_finish) {
auto cct = m_image_ctx->cct;
auto plugin_registry = cct->get_plugin_registry();
auto gather_ctx = new C_Gather(cct, on_finish);
boost::tokenizer<boost::escaped_list_separator<char>> tokenizer(plugins);
for (auto token : tokenizer) {
ldout(cct, 5) << "attempting to load plugin: " << token << dendl;
auto ctx = gather_ctx->new_sub();
auto plugin = dynamic_cast<plugin::Interface<I>*>(
plugin_registry->get_with_load("librbd", "librbd_" + token));
if (plugin == nullptr) {
lderr(cct) << "failed to load plugin: " << token << dendl;
ctx->complete(-ENOSYS);
break;
}
plugin->init(
m_image_ctx, *m_plugin_api, *m_image_writeback, m_plugin_hook_points, ctx);
}
gather_ctx->activate();
}
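// Illustrative usage sketch (plugin name is hypothetical): callers pass a
// comma-separated list of plugin names; each entry is resolved to a
// dynamically loaded "librbd_<name>" plugin:
//
//   C_SaferCond ctx;
//   plugin_registry->init("my_cache_plugin", &ctx);
//   int r = ctx.wait();   // non-zero (e.g. -ENOSYS) if a plugin failed to load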
template <typename I>
void PluginRegistry<I>::acquired_exclusive_lock(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto &hook : m_plugin_hook_points) {
auto ctx = gather_ctx->new_sub();
hook->acquired_exclusive_lock(ctx);
}
gather_ctx->activate();
}
template <typename I>
void PluginRegistry<I>::prerelease_exclusive_lock(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto &hook : m_plugin_hook_points) {
auto ctx = gather_ctx->new_sub();
hook->prerelease_exclusive_lock(ctx);
}
gather_ctx->activate();
}
template <typename I>
void PluginRegistry<I>::discard(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto &hook : m_plugin_hook_points) {
auto ctx = gather_ctx->new_sub();
hook->discard(ctx);
}
gather_ctx->activate();
}
} // namespace librbd
template class librbd::PluginRegistry<librbd::ImageCtx>;

// file: ceph-main/src/librbd/PluginRegistry.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_REGISTRY_H
#define CEPH_LIBRBD_PLUGIN_REGISTRY_H
#include "librbd/plugin/Types.h"
#include <memory>
#include <string>
#include <list>
struct Context;
namespace librbd {
struct ImageCtx;
namespace cache {
class ImageWritebackInterface;
}
namespace plugin { template <typename> struct Api; }
template <typename ImageCtxT>
class PluginRegistry {
public:
PluginRegistry(ImageCtxT* image_ctx);
~PluginRegistry();
void init(const std::string& plugins, Context* on_finish);
void acquired_exclusive_lock(Context* on_finish);
void prerelease_exclusive_lock(Context* on_finish);
void discard(Context* on_finish);
private:
ImageCtxT* m_image_ctx;
std::unique_ptr<plugin::Api<ImageCtxT>> m_plugin_api;
std::unique_ptr<cache::ImageWritebackInterface> m_image_writeback;
std::string m_plugins;
plugin::PluginHookPoints m_plugin_hook_points;
};
} // namespace librbd
extern template class librbd::PluginRegistry<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_PLUGIN_REGISTRY_H

// file: ceph-main/src/librbd/TaskFinisher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_TASK_FINISHER_H
#define LIBRBD_TASK_FINISHER_H
#include "include/common_fwd.h"
#include "include/Context.h"
#include "common/ceph_context.h"
#include "common/Finisher.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include <map>
#include <utility>
namespace librbd {
struct TaskFinisherSingleton {
ceph::mutex m_lock = ceph::make_mutex("librbd::TaskFinisher::m_lock");
SafeTimer *m_safe_timer;
Finisher *m_finisher;
static TaskFinisherSingleton& get_singleton(CephContext* cct) {
return cct->lookup_or_create_singleton_object<
TaskFinisherSingleton>("librbd::TaskFinisherSingleton", false, cct);
}
explicit TaskFinisherSingleton(CephContext *cct) {
m_safe_timer = new SafeTimer(cct, m_lock, false);
m_safe_timer->init();
m_finisher = new Finisher(cct, "librbd::TaskFinisher::m_finisher", "taskfin_librbd");
m_finisher->start();
}
virtual ~TaskFinisherSingleton() {
{
std::lock_guard l{m_lock};
m_safe_timer->shutdown();
delete m_safe_timer;
}
m_finisher->wait_for_empty();
m_finisher->stop();
delete m_finisher;
}
void queue(Context* ctx, int r) {
m_finisher->queue(ctx, r);
}
};
template <typename Task>
class TaskFinisher {
public:
TaskFinisher(CephContext &cct) : m_cct(cct) {
auto& singleton = TaskFinisherSingleton::get_singleton(&cct);
m_lock = &singleton.m_lock;
m_safe_timer = singleton.m_safe_timer;
m_finisher = singleton.m_finisher;
}
bool cancel(const Task& task) {
std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it == m_task_contexts.end()) {
return false;
}
it->second.first->complete(-ECANCELED);
m_safe_timer->cancel_event(it->second.second);
m_task_contexts.erase(it);
return true;
}
void cancel_all() {
std::lock_guard l{*m_lock};
for (auto &[task, pair] : m_task_contexts) {
pair.first->complete(-ECANCELED);
m_safe_timer->cancel_event(pair.second);
}
m_task_contexts.clear();
}
bool add_event_after(const Task& task, double seconds, Context *ctx) {
std::lock_guard l{*m_lock};
if (m_task_contexts.count(task) != 0) {
// task already scheduled on finisher or timer
delete ctx;
return false;
}
C_Task *timer_ctx = new C_Task(this, task);
m_task_contexts[task] = std::make_pair(ctx, timer_ctx);
m_safe_timer->add_event_after(seconds, timer_ctx);
return true;
}
bool reschedule_event_after(const Task& task, double seconds) {
std::lock_guard l{*m_lock};
auto it = m_task_contexts.find(task);
if (it == m_task_contexts.end()) {
return false;
}
bool canceled = m_safe_timer->cancel_event(it->second.second);
if (!canceled) {
return false;
}
auto timer_ctx = new C_Task(this, task);
it->second.second = timer_ctx;
m_safe_timer->add_event_after(seconds, timer_ctx);
return true;
}
void queue(Context *ctx, int r = 0) {
m_finisher->queue(ctx, r);
}
bool queue(const Task& task, Context *ctx) {
std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it != m_task_contexts.end()) {
if (it->second.second != NULL &&
m_safe_timer->cancel_event(it->second.second)) {
it->second.first->complete(-ECANCELED);
} else {
// task already scheduled on the finisher
ctx->complete(-ECANCELED);
return false;
}
}
m_task_contexts[task] = std::make_pair(ctx, reinterpret_cast<Context *>(0));
m_finisher->queue(new C_Task(this, task));
return true;
}
private:
class C_Task : public Context {
public:
C_Task(TaskFinisher *task_finisher, const Task& task)
: m_task_finisher(task_finisher), m_task(task)
{
}
protected:
void finish(int r) override {
m_task_finisher->complete(m_task);
}
private:
TaskFinisher *m_task_finisher;
Task m_task;
};
CephContext &m_cct;
ceph::mutex *m_lock;
Finisher *m_finisher;
SafeTimer *m_safe_timer;
typedef std::map<Task, std::pair<Context *, Context *> > TaskContexts;
TaskContexts m_task_contexts;
void complete(const Task& task) {
Context *ctx = NULL;
{
std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it != m_task_contexts.end()) {
ctx = it->second.first;
m_task_contexts.erase(it);
}
}
if (ctx != NULL) {
ctx->complete(0);
}
}
};
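// Illustrative usage sketch (task type and timeout are hypothetical): callers
// key delayed or deferred work on a comparable task value, e.g.
//
//   enum class Task { TIMER_CHECK };
//   TaskFinisher<Task> task_finisher(*cct);   // cct is a CephContext*
//   task_finisher.add_event_after(
//     Task::TIMER_CHECK, 30.0,
//     new LambdaContext([](int r) { /* r == -ECANCELED if canceled */ }));
//   task_finisher.cancel(Task::TIMER_CHECK);  // completes ctx with -ECANCELED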
} // namespace librbd
#endif // LIBRBD_TASK_FINISHER

// file: ceph-main/src/librbd/TrashWatcher.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/TrashWatcher.h"
#include "include/rbd_types.h"
#include "include/rados/librados.hpp"
#include "common/errno.h"
#include "librbd/Utils.h"
#include "librbd/watcher/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::TrashWatcher: " << __func__ << ": "
namespace librbd {
using namespace trash_watcher;
using namespace watcher;
using librbd::util::create_rados_callback;
namespace {
static const uint64_t NOTIFY_TIMEOUT_MS = 5000;
} // anonymous namespace
template <typename I>
TrashWatcher<I>::TrashWatcher(librados::IoCtx &io_ctx,
asio::ContextWQ *work_queue)
: Watcher(io_ctx, work_queue, RBD_TRASH) {
}
template <typename I>
void TrashWatcher<I>::notify_image_added(
librados::IoCtx &io_ctx, const std::string& image_id,
const cls::rbd::TrashImageSpec& trash_image_spec, Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << dendl;
bufferlist bl;
encode(NotifyMessage{ImageAddedPayload{image_id, trash_image_spec}}, bl);
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_TRASH, comp, bl, NOTIFY_TIMEOUT_MS, nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void TrashWatcher<I>::notify_image_removed(librados::IoCtx &io_ctx,
const std::string& image_id,
Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << dendl;
bufferlist bl;
encode(NotifyMessage{ImageRemovedPayload{image_id}}, bl);
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_TRASH, comp, bl, NOTIFY_TIMEOUT_MS, nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void TrashWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) {
CephContext *cct = this->m_cct;
ldout(cct, 15) << "notify_id=" << notify_id << ", "
<< "handle=" << handle << dendl;
NotifyMessage notify_message;
try {
auto iter = bl.cbegin();
decode(notify_message, iter);
} catch (const buffer::error &err) {
lderr(cct) << "error decoding image notification: " << err.what()
<< dendl;
Context *ctx = new C_NotifyAck(this, notify_id, handle);
ctx->complete(0);
return;
}
apply_visitor(watcher::util::HandlePayloadVisitor<TrashWatcher<I>>(
this, notify_id, handle), notify_message.payload);
}
template <typename I>
bool TrashWatcher<I>::handle_payload(const ImageAddedPayload &payload,
Context *on_notify_ack) {
CephContext *cct = this->m_cct;
ldout(cct, 20) << dendl;
handle_image_added(payload.image_id, payload.trash_image_spec);
return true;
}
template <typename I>
bool TrashWatcher<I>::handle_payload(const ImageRemovedPayload &payload,
Context *on_notify_ack) {
CephContext *cct = this->m_cct;
ldout(cct, 20) << dendl;
handle_image_removed(payload.image_id);
return true;
}
template <typename I>
bool TrashWatcher<I>::handle_payload(const UnknownPayload &payload,
Context *on_notify_ack) {
return true;
}
} // namespace librbd
template class librbd::TrashWatcher<librbd::ImageCtx>;

// file: ceph-main/src/librbd/TrashWatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_TRASH_WATCHER_H
#define CEPH_LIBRBD_TRASH_WATCHER_H
#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/trash_watcher/Types.h"
namespace librbd {
namespace asio { struct ContextWQ; }
namespace watcher {
namespace util {
template <typename> struct HandlePayloadVisitor;
} // namespace util
} // namespace watcher
template <typename ImageCtxT = librbd::ImageCtx>
class TrashWatcher : public Watcher {
friend struct watcher::util::HandlePayloadVisitor<TrashWatcher<ImageCtxT>>;
public:
TrashWatcher(librados::IoCtx &io_ctx, asio::ContextWQ *work_queue);
static void notify_image_added(librados::IoCtx &io_ctx,
const std::string& image_id,
const cls::rbd::TrashImageSpec& spec,
Context *on_finish);
static void notify_image_removed(librados::IoCtx &io_ctx,
const std::string& image_id,
Context *on_finish);
protected:
virtual void handle_image_added(const std::string &image_id,
const cls::rbd::TrashImageSpec& spec) = 0;
virtual void handle_image_removed(const std::string &image_id) = 0;
private:
void handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) override;
bool handle_payload(const trash_watcher::ImageAddedPayload &payload,
Context *on_notify_ack);
bool handle_payload(const trash_watcher::ImageRemovedPayload &payload,
Context *on_notify_ack);
bool handle_payload(const trash_watcher::UnknownPayload &payload,
Context *on_notify_ack);
};
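// Illustrative sketch (subclass name is hypothetical): consumers derive from
// TrashWatcher and implement the pure virtual handlers to react to trash
// add/remove notifications:
//
//   struct MyTrashWatcher : public TrashWatcher<> {
//     using TrashWatcher<>::TrashWatcher;
//     void handle_image_added(const std::string &image_id,
//                             const cls::rbd::TrashImageSpec& spec) override {
//     }
//     void handle_image_removed(const std::string &image_id) override {
//     }
//   };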
} // namespace librbd
extern template class librbd::TrashWatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_TRASH_WATCHER_H

// file: ceph-main/src/librbd/Types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_TYPES_H
#define LIBRBD_TYPES_H
#include "include/types.h"
#include "cls/rbd/cls_rbd_types.h"
#include "deep_copy/Types.h"
#include <map>
#include <memory>
#include <string>
namespace neorados { class IOContext; }
namespace librbd {
// Performance counters
enum {
l_librbd_first = 26000,
l_librbd_rd, // read ops
l_librbd_rd_bytes, // bytes read
l_librbd_rd_latency, // average latency
l_librbd_wr,
l_librbd_wr_bytes,
l_librbd_wr_latency,
l_librbd_discard,
l_librbd_discard_bytes,
l_librbd_discard_latency,
l_librbd_flush,
l_librbd_flush_latency,
l_librbd_ws,
l_librbd_ws_bytes,
l_librbd_ws_latency,
l_librbd_cmp,
l_librbd_cmp_bytes,
l_librbd_cmp_latency,
l_librbd_snap_create,
l_librbd_snap_remove,
l_librbd_snap_rollback,
l_librbd_snap_rename,
l_librbd_notify,
l_librbd_resize,
l_librbd_readahead,
l_librbd_readahead_bytes,
l_librbd_invalidate_cache,
l_librbd_opened_time,
l_librbd_lock_acquired_time,
l_librbd_last,
};
typedef std::shared_ptr<neorados::IOContext> IOContext;
typedef std::map<uint64_t, uint64_t> SnapSeqs;
/// Full information about an image's parent.
struct ParentImageInfo {
/// Identification of the parent.
cls::rbd::ParentImageSpec spec;
/** @brief Where the portion of data shared with the child image ends.
* Since images can be resized multiple times, the portion of data shared
* with the child image is not necessarily min(parent size, child size).
* If the child image is first shrunk and then enlarged, the common portion
* will be shorter. */
uint64_t overlap = 0;
};
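// Worked example for the overlap comment above (sizes are hypothetical): if a
// child cloned from a 10 GiB parent snapshot is shrunk to 4 GiB and later
// resized back to 10 GiB, the shrink discarded everything past 4 GiB, so the
// overlap remains 4 GiB even though both images report 10 GiB again.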
struct SnapInfo {
std::string name;
cls::rbd::SnapshotNamespace snap_namespace;
uint64_t size;
ParentImageInfo parent;
uint8_t protection_status;
uint64_t flags;
utime_t timestamp;
SnapInfo(std::string _name,
const cls::rbd::SnapshotNamespace &_snap_namespace,
uint64_t _size, const ParentImageInfo &_parent,
uint8_t _protection_status, uint64_t _flags, utime_t _timestamp)
: name(_name), snap_namespace(_snap_namespace), size(_size),
parent(_parent), protection_status(_protection_status), flags(_flags),
timestamp(_timestamp) {
}
};
enum {
OPEN_FLAG_SKIP_OPEN_PARENT = 1 << 0,
OPEN_FLAG_OLD_FORMAT = 1 << 1,
OPEN_FLAG_IGNORE_MIGRATING = 1 << 2
};
enum ImageReadOnlyFlag {
IMAGE_READ_ONLY_FLAG_USER = 1 << 0,
IMAGE_READ_ONLY_FLAG_NON_PRIMARY = 1 << 1,
};
enum SnapCreateFlag {
SNAP_CREATE_FLAG_SKIP_OBJECT_MAP = 1 << 0,
SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE = 1 << 1,
SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR = 1 << 2,
};
struct MigrationInfo {
int64_t pool_id = -1;
std::string pool_namespace;
std::string image_name;
std::string image_id;
std::string source_spec;
deep_copy::SnapMap snap_map;
uint64_t overlap = 0;
bool flatten = false;
MigrationInfo() {
}
MigrationInfo(int64_t pool_id, const std::string& pool_namespace,
const std::string& image_name, const std::string& image_id,
const std::string& source_spec,
const deep_copy::SnapMap &snap_map, uint64_t overlap,
bool flatten)
: pool_id(pool_id), pool_namespace(pool_namespace), image_name(image_name),
image_id(image_id), source_spec(source_spec), snap_map(snap_map),
overlap(overlap), flatten(flatten) {
}
bool empty() const {
return (pool_id == -1 && source_spec.empty());
}
};
} // namespace librbd
#endif // LIBRBD_TYPES_H

// file: ceph-main/src/librbd/Utils.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include "librbd/Utils.h"
#include "include/random.h"
#include "include/rbd_types.h"
#include "include/stringify.h"
#include "include/neorados/RADOS.hpp"
#include "include/rbd/features.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Features.h"
#include <boost/algorithm/string/predicate.hpp>
#include <bitset>
#include <random>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::util::" << __func__ << ": "
namespace librbd {
namespace util {
namespace {
const std::string CONFIG_KEY_URI_PREFIX{"config://"};
} // anonymous namespace
const std::string group_header_name(const std::string &group_id)
{
return RBD_GROUP_HEADER_PREFIX + group_id;
}
const std::string id_obj_name(const std::string &name)
{
return RBD_ID_PREFIX + name;
}
const std::string header_name(const std::string &image_id)
{
return RBD_HEADER_PREFIX + image_id;
}
const std::string old_header_name(const std::string &image_name)
{
return image_name + RBD_SUFFIX;
}
std::string unique_lock_name(const std::string &name, void *address) {
return name + " (" + stringify(address) + ")";
}
librados::AioCompletion *create_rados_callback(Context *on_finish) {
return create_rados_callback<Context, &Context::complete>(on_finish);
}
std::string generate_image_id(librados::IoCtx &ioctx) {
librados::Rados rados(ioctx);
uint64_t bid = rados.get_instance_id();
std::mt19937 generator{random_device_t{}()};
std::uniform_int_distribution<uint32_t> distribution{0, 0xFFFFFFFF};
uint32_t extra = distribution(generator);
std::ostringstream bid_ss;
bid_ss << std::hex << bid << std::hex << extra;
std::string id = bid_ss.str();
// ensure the image id won't overflow the fixed block name size
if (id.length() > RBD_MAX_IMAGE_ID_LENGTH) {
id = id.substr(id.length() - RBD_MAX_IMAGE_ID_LENGTH);
}
return id;
}
uint64_t get_rbd_default_features(CephContext* cct)
{
auto value = cct->_conf.get_val<std::string>("rbd_default_features");
return librbd::rbd_features_from_string(value, nullptr);
}
bool calc_sparse_extent(const bufferptr &bp,
size_t sparse_size,
uint64_t length,
size_t *write_offset,
size_t *write_length,
size_t *offset) {
size_t extent_size;
if (*offset + sparse_size > length) {
extent_size = length - *offset;
} else {
extent_size = sparse_size;
}
bufferptr extent(bp, *offset, extent_size);
*offset += extent_size;
bool extent_is_zero = extent.is_zero();
if (!extent_is_zero) {
*write_length += extent_size;
}
if (extent_is_zero && *write_length == 0) {
*write_offset += extent_size;
}
if ((extent_is_zero || *offset == length) && *write_length != 0) {
return true;
}
return false;
}
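// Illustrative usage sketch (buffer contents are hypothetical): callers walk
// a buffer in sparse_size steps and flush one write extent every time the
// helper returns true, e.g.
//
//   size_t write_offset = 0, write_length = 0, offset = 0;
//   while (offset < length) {
//     if (calc_sparse_extent(bp, sparse_size, length,
//                            &write_offset, &write_length, &offset)) {
//       // issue a write for [write_offset, write_offset + write_length)
//       write_offset = offset;
//       write_length = 0;
//     }
//   }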
bool is_metadata_config_override(const std::string& metadata_key,
std::string* config_key) {
size_t prefix_len = librbd::ImageCtx::METADATA_CONF_PREFIX.size();
if (metadata_key.size() > prefix_len &&
metadata_key.compare(0, prefix_len,
librbd::ImageCtx::METADATA_CONF_PREFIX) == 0) {
*config_key = metadata_key.substr(prefix_len,
metadata_key.size() - prefix_len);
return true;
}
return false;
}
int create_ioctx(librados::IoCtx& src_io_ctx, const std::string& pool_desc,
int64_t pool_id,
const std::optional<std::string>& pool_namespace,
librados::IoCtx* dst_io_ctx) {
auto cct = (CephContext *)src_io_ctx.cct();
librados::Rados rados(src_io_ctx);
int r = rados.ioctx_create2(pool_id, *dst_io_ctx);
if (r == -ENOENT) {
ldout(cct, 1) << pool_desc << " pool " << pool_id << " no longer exists"
<< dendl;
return r;
} else if (r < 0) {
lderr(cct) << "error accessing " << pool_desc << " pool " << pool_id
<< dendl;
return r;
}
dst_io_ctx->set_namespace(
pool_namespace ? *pool_namespace : src_io_ctx.get_namespace());
if (src_io_ctx.get_pool_full_try()) {
dst_io_ctx->set_pool_full_try();
}
return 0;
}
int snap_create_flags_api_to_internal(CephContext *cct, uint32_t api_flags,
uint64_t *internal_flags) {
*internal_flags = 0;
if (api_flags & RBD_SNAP_CREATE_SKIP_QUIESCE) {
*internal_flags |= SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE;
api_flags &= ~RBD_SNAP_CREATE_SKIP_QUIESCE;
} else if (api_flags & RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR) {
*internal_flags |= SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR;
api_flags &= ~RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR;
}
if (api_flags != 0) {
lderr(cct) << "invalid snap create flags: "
<< std::bitset<32>(api_flags) << dendl;
return -EINVAL;
}
return 0;
}
uint32_t get_default_snap_create_flags(ImageCtx *ictx) {
auto mode = ictx->config.get_val<std::string>(
"rbd_default_snapshot_quiesce_mode");
if (mode == "required") {
return 0;
} else if (mode == "ignore-error") {
return RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR;
} else if (mode == "skip") {
return RBD_SNAP_CREATE_SKIP_QUIESCE;
} else {
ceph_abort_msg("invalid rbd_default_snapshot_quiesce_mode");
}
}
SnapContext get_snap_context(
const std::optional<
std::pair<std::uint64_t,
std::vector<std::uint64_t>>>& write_snap_context) {
SnapContext snapc;
if (write_snap_context) {
snapc = SnapContext{write_snap_context->first,
{write_snap_context->second.begin(),
write_snap_context->second.end()}};
}
return snapc;
}
uint64_t reserve_async_request_id() {
static std::atomic<uint64_t> async_request_seq = 0;
return ++async_request_seq;
}
bool is_config_key_uri(const std::string& uri) {
return boost::starts_with(uri, CONFIG_KEY_URI_PREFIX);
}
int get_config_key(librados::Rados& rados, const std::string& uri,
std::string* value) {
auto cct = reinterpret_cast<CephContext*>(rados.cct());
if (!is_config_key_uri(uri)) {
return -EINVAL;
}
std::string key = uri.substr(CONFIG_KEY_URI_PREFIX.size());
std::string cmd =
"{"
"\"prefix\": \"config-key get\", "
"\"key\": \"" + key + "\""
"}";
bufferlist in_bl;
bufferlist out_bl;
int r = rados.mon_command(cmd, in_bl, &out_bl, nullptr);
if (r < 0) {
lderr(cct) << "failed to retrieve MON config key " << key << ": "
<< cpp_strerror(r) << dendl;
return r;
}
*value = std::string(out_bl.c_str(), out_bl.length());
return 0;
}
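// Illustrative usage sketch (key name is hypothetical): secrets can be pulled
// indirectly from the MON config-key store via a "config://" URI, e.g.
//
//   std::string secret;
//   if (librbd::util::is_config_key_uri("config://rbd/my/secret")) {
//     int r = librbd::util::get_config_key(rados, "config://rbd/my/secret",
//                                          &secret);
//   }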
} // namespace util
} // namespace librbd

// file: ceph-main/src/librbd/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_UTILS_H
#define CEPH_LIBRBD_UTILS_H
#include "include/rados/librados.hpp"
#include "include/rbd_types.h"
#include "include/ceph_assert.h"
#include "include/Context.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"
#include "common/RefCountedObj.h"
#include <atomic>
#include <optional>
#include <type_traits>
#include <utility>
#include <vector>
#include <stdio.h>
namespace librbd {
class ImageCtx;
namespace util {
namespace detail {
template <typename T>
void rados_callback(rados_completion_t c, void *arg) {
reinterpret_cast<T*>(arg)->complete(rados_aio_get_return_value(c));
}
template <typename T, void(T::*MF)(int)>
void rados_callback(rados_completion_t c, void *arg) {
T *obj = reinterpret_cast<T*>(arg);
int r = rados_aio_get_return_value(c);
(obj->*MF)(r);
}
template <typename T, Context*(T::*MF)(int*), bool destroy>
void rados_state_callback(rados_completion_t c, void *arg) {
T *obj = reinterpret_cast<T*>(arg);
int r = rados_aio_get_return_value(c);
Context *on_finish = (obj->*MF)(&r);
if (on_finish != nullptr) {
on_finish->complete(r);
if (destroy) {
delete obj;
}
}
}
template <typename T, void (T::*MF)(int)>
class C_CallbackAdapter : public Context {
T *obj;
public:
C_CallbackAdapter(T *obj) : obj(obj) {
}
protected:
void finish(int r) override {
(obj->*MF)(r);
}
};
template <typename T, void (T::*MF)(int)>
class C_RefCallbackAdapter : public Context {
RefCountedPtr refptr;
Context *on_finish;
public:
C_RefCallbackAdapter(T *obj, RefCountedPtr refptr)
: refptr(std::move(refptr)),
on_finish(new C_CallbackAdapter<T, MF>(obj)) {
}
protected:
void finish(int r) override {
on_finish->complete(r);
}
};
template <typename T, Context*(T::*MF)(int*), bool destroy>
class C_StateCallbackAdapter : public Context {
T *obj;
public:
C_StateCallbackAdapter(T *obj) : obj(obj){
}
protected:
void complete(int r) override {
Context *on_finish = (obj->*MF)(&r);
if (on_finish != nullptr) {
on_finish->complete(r);
if (destroy) {
delete obj;
}
}
Context::complete(r);
}
void finish(int r) override {
}
};
template <typename T, Context*(T::*MF)(int*)>
class C_RefStateCallbackAdapter : public Context {
RefCountedPtr refptr;
Context *on_finish;
public:
C_RefStateCallbackAdapter(T *obj, RefCountedPtr refptr)
: refptr(std::move(refptr)),
on_finish(new C_StateCallbackAdapter<T, MF, true>(obj)) {
}
protected:
void finish(int r) override {
on_finish->complete(r);
}
};
template <typename WQ>
struct C_AsyncCallback : public Context {
WQ *op_work_queue;
Context *on_finish;
C_AsyncCallback(WQ *op_work_queue, Context *on_finish)
: op_work_queue(op_work_queue), on_finish(on_finish) {
}
~C_AsyncCallback() override {
delete on_finish;
}
void finish(int r) override {
op_work_queue->queue(on_finish, r);
on_finish = nullptr;
}
};
} // namespace detail
std::string generate_image_id(librados::IoCtx &ioctx);
template <typename T>
inline std::string generate_image_id(librados::IoCtx &ioctx) {
return generate_image_id(ioctx);
}
const std::string group_header_name(const std::string &group_id);
const std::string id_obj_name(const std::string &name);
const std::string header_name(const std::string &image_id);
const std::string old_header_name(const std::string &image_name);
std::string unique_lock_name(const std::string &name, void *address);
template <typename I>
std::string data_object_name(I* image_ctx, uint64_t object_no) {
char buf[RBD_MAX_OBJ_NAME_SIZE];
size_t length = snprintf(buf, RBD_MAX_OBJ_NAME_SIZE,
image_ctx->format_string, object_no);
ceph_assert(length < RBD_MAX_OBJ_NAME_SIZE);
std::string oid;
oid.reserve(RBD_MAX_OBJ_NAME_SIZE);
oid.append(buf, length);
return oid;
}
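// Illustrative example (image id is hypothetical): with a format string such
// as "rbd_data.10074b0dc51d.%016llx", data_object_name(image_ctx, 5) yields
// "rbd_data.10074b0dc51d.0000000000000005".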
librados::AioCompletion *create_rados_callback(Context *on_finish);
template <typename T>
librados::AioCompletion *create_rados_callback(T *obj) {
return librados::Rados::aio_create_completion(
obj, &detail::rados_callback<T>);
}
template <typename T, void(T::*MF)(int)>
librados::AioCompletion *create_rados_callback(T *obj) {
return librados::Rados::aio_create_completion(
obj, &detail::rados_callback<T, MF>);
}
template <typename T, Context*(T::*MF)(int*), bool destroy=true>
librados::AioCompletion *create_rados_callback(T *obj) {
return librados::Rados::aio_create_completion(
obj, &detail::rados_state_callback<T, MF, destroy>);
}
template <typename T, void(T::*MF)(int) = &T::complete>
Context *create_context_callback(T *obj) {
return new detail::C_CallbackAdapter<T, MF>(obj);
}
template <typename T, Context*(T::*MF)(int*), bool destroy=true>
Context *create_context_callback(T *obj) {
return new detail::C_StateCallbackAdapter<T, MF, destroy>(obj);
}
//for reference counting objects
template <typename T, void(T::*MF)(int) = &T::complete>
Context *create_context_callback(T *obj, RefCountedPtr refptr) {
return new detail::C_RefCallbackAdapter<T, MF>(obj, refptr);
}
template <typename T, Context*(T::*MF)(int*)>
Context *create_context_callback(T *obj, RefCountedPtr refptr) {
return new detail::C_RefStateCallbackAdapter<T, MF>(obj, refptr);
}
//for objects that don't inherit from RefCountedObj, to handle unit tests
template <typename T, void(T::*MF)(int) = &T::complete, typename R>
typename std::enable_if<not std::is_base_of<RefCountedPtr, R>::value, Context*>::type
create_context_callback(T *obj, R *refptr) {
return new detail::C_CallbackAdapter<T, MF>(obj);
}
template <typename T, Context*(T::*MF)(int*), typename R, bool destroy=true>
typename std::enable_if<not std::is_base_of<RefCountedPtr, R>::value, Context*>::type
create_context_callback(T *obj, R *refptr) {
return new detail::C_StateCallbackAdapter<T, MF, destroy>(obj);
}
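// Illustrative usage sketch (request type and handler name are hypothetical):
// the adapters above route librados and Context completions into member
// functions of request state machines, e.g.
//
//   struct MyRequest {
//     void handle_op(int r);
//   };
//
//   MyRequest *req = ...;
//   librados::AioCompletion *comp = librbd::util::create_rados_callback<
//     MyRequest, &MyRequest::handle_op>(req);   // completes into handle_op(r)
//   Context *ctx = librbd::util::create_context_callback<
//     MyRequest, &MyRequest::handle_op>(req);   // same, for Context consumers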
template <typename I>
Context *create_async_context_callback(I &image_ctx, Context *on_finish) {
// use async callback to acquire a clean lock context
return new detail::C_AsyncCallback<
typename std::decay<decltype(*image_ctx.op_work_queue)>::type>(
image_ctx.op_work_queue, on_finish);
}
template <typename WQ>
Context *create_async_context_callback(WQ *work_queue, Context *on_finish) {
// use async callback to acquire a clean lock context
return new detail::C_AsyncCallback<WQ>(work_queue, on_finish);
}
// TODO: temporary until AioCompletion supports templated ImageCtx
inline ImageCtx *get_image_ctx(ImageCtx *image_ctx) {
return image_ctx;
}
uint64_t get_rbd_default_features(CephContext* cct);
bool calc_sparse_extent(const bufferptr &bp,
size_t sparse_size,
uint64_t length,
size_t *write_offset,
size_t *write_length,
size_t *offset);
template <typename I>
inline ZTracer::Trace create_trace(const I &image_ctx, const char *trace_name,
const ZTracer::Trace &parent_trace) {
if (parent_trace.valid()) {
return ZTracer::Trace(trace_name, &image_ctx.trace_endpoint, &parent_trace);
}
return ZTracer::Trace();
}
bool is_metadata_config_override(const std::string& metadata_key,
std::string* config_key);
int create_ioctx(librados::IoCtx& src_io_ctx, const std::string& pool_desc,
int64_t pool_id,
const std::optional<std::string>& pool_namespace,
librados::IoCtx* dst_io_ctx);
int snap_create_flags_api_to_internal(CephContext *cct, uint32_t api_flags,
uint64_t *internal_flags);
uint32_t get_default_snap_create_flags(ImageCtx *ictx);
SnapContext get_snap_context(
const std::optional<
std::pair<std::uint64_t,
std::vector<std::uint64_t>>>& write_snap_context);
uint64_t reserve_async_request_id();
bool is_config_key_uri(const std::string& uri);
int get_config_key(librados::Rados& rados, const std::string& uri,
std::string* value);
} // namespace util
} // namespace librbd
#endif // CEPH_LIBRBD_UTILS_H

// file: ceph-main/src/librbd/WatchNotifyTypes.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/rbd/cls_rbd_types.h"
#include "common/Formatter.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "librbd/WatchNotifyTypes.h"
namespace librbd {
namespace watch_notify {
void AsyncRequestId::encode(bufferlist &bl) const {
using ceph::encode;
encode(client_id, bl);
encode(request_id, bl);
}
void AsyncRequestId::decode(bufferlist::const_iterator &iter) {
using ceph::decode;
decode(client_id, iter);
decode(request_id, iter);
}
void AsyncRequestId::dump(Formatter *f) const {
f->open_object_section("client_id");
client_id.dump(f);
f->close_section();
f->dump_unsigned("request_id", request_id);
}
void AcquiredLockPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(client_id, bl);
}
void AcquiredLockPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
if (version >= 2) {
decode(client_id, iter);
}
}
void AcquiredLockPayload::dump(Formatter *f) const {
f->open_object_section("client_id");
client_id.dump(f);
f->close_section();
}
void ReleasedLockPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(client_id, bl);
}
void ReleasedLockPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
if (version >= 2) {
decode(client_id, iter);
}
}
void ReleasedLockPayload::dump(Formatter *f) const {
f->open_object_section("client_id");
client_id.dump(f);
f->close_section();
}
void RequestLockPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(client_id, bl);
encode(force, bl);
}
void RequestLockPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
if (version >= 2) {
decode(client_id, iter);
}
if (version >= 3) {
decode(force, iter);
}
}
void RequestLockPayload::dump(Formatter *f) const {
f->open_object_section("client_id");
client_id.dump(f);
f->close_section();
f->dump_bool("force", force);
}
void HeaderUpdatePayload::encode(bufferlist &bl) const {
}
void HeaderUpdatePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void HeaderUpdatePayload::dump(Formatter *f) const {
}
void AsyncRequestPayloadBase::encode(bufferlist &bl) const {
using ceph::encode;
encode(async_request_id, bl);
}
void AsyncRequestPayloadBase::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(async_request_id, iter);
}
void AsyncRequestPayloadBase::dump(Formatter *f) const {
f->open_object_section("async_request_id");
async_request_id.dump(f);
f->close_section();
}
void AsyncProgressPayload::encode(bufferlist &bl) const {
using ceph::encode;
AsyncRequestPayloadBase::encode(bl);
encode(offset, bl);
encode(total, bl);
}
void AsyncProgressPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
AsyncRequestPayloadBase::decode(version, iter);
decode(offset, iter);
decode(total, iter);
}
void AsyncProgressPayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_unsigned("offset", offset);
f->dump_unsigned("total", total);
}
void AsyncCompletePayload::encode(bufferlist &bl) const {
using ceph::encode;
AsyncRequestPayloadBase::encode(bl);
encode(result, bl);
}
void AsyncCompletePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
AsyncRequestPayloadBase::decode(version, iter);
decode(result, iter);
}
void AsyncCompletePayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_int("result", result);
}
void ResizePayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(size, bl);
AsyncRequestPayloadBase::encode(bl);
encode(allow_shrink, bl);
}
void ResizePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(size, iter);
AsyncRequestPayloadBase::decode(version, iter);
if (version >= 4) {
decode(allow_shrink, iter);
}
}
void ResizePayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_unsigned("size", size);
f->dump_bool("allow_shrink", allow_shrink);
}
void SnapPayloadBase::encode(bufferlist &bl) const {
using ceph::encode;
encode(snap_name, bl);
encode(snap_namespace, bl);
encode(async_request_id, bl);
}
void SnapPayloadBase::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(snap_name, iter);
if (version >= 6) {
decode(snap_namespace, iter);
}
if (version >= 7) {
decode(async_request_id, iter);
}
}
void SnapPayloadBase::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_string("snap_name", snap_name);
snap_namespace.dump(f);
}
void SnapCreatePayload::encode(bufferlist &bl) const {
using ceph::encode;
SnapPayloadBase::encode(bl);
encode(flags, bl);
}
void SnapCreatePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
SnapPayloadBase::decode(version, iter);
if (version == 5) {
decode(snap_namespace, iter);
}
if (version >= 7) {
decode(flags, iter);
}
}
void SnapCreatePayload::dump(Formatter *f) const {
SnapPayloadBase::dump(f);
f->dump_unsigned("flags", flags);
}
void SnapRenamePayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(snap_id, bl);
SnapPayloadBase::encode(bl);
}
void SnapRenamePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(snap_id, iter);
SnapPayloadBase::decode(version, iter);
}
void SnapRenamePayload::dump(Formatter *f) const {
SnapPayloadBase::dump(f);
f->dump_unsigned("src_snap_id", snap_id);
}
void RenamePayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(image_name, bl);
encode(async_request_id, bl);
}
void RenamePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(image_name, iter);
if (version >= 7) {
decode(async_request_id, iter);
}
}
void RenamePayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_string("image_name", image_name);
}
void UpdateFeaturesPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(features, bl);
encode(enabled, bl);
encode(async_request_id, bl);
}
void UpdateFeaturesPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(features, iter);
decode(enabled, iter);
if (version >= 7) {
decode(async_request_id, iter);
}
}
void UpdateFeaturesPayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_unsigned("features", features);
f->dump_bool("enabled", enabled);
}
void SparsifyPayload::encode(bufferlist &bl) const {
using ceph::encode;
AsyncRequestPayloadBase::encode(bl);
encode(sparse_size, bl);
}
void SparsifyPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
AsyncRequestPayloadBase::decode(version, iter);
decode(sparse_size, iter);
}
void SparsifyPayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_unsigned("sparse_size", sparse_size);
}
void MetadataUpdatePayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(key, bl);
encode(value, bl);
encode(async_request_id, bl);
}
void MetadataUpdatePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(key, iter);
decode(value, iter);
if (version >= 7) {
decode(async_request_id, iter);
}
}
void MetadataUpdatePayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_string("key", key);
f->dump_string("value", *value);
}
void UnknownPayload::encode(bufferlist &bl) const {
ceph_abort();
}
void UnknownPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void UnknownPayload::dump(Formatter *f) const {
}
bool NotifyMessage::check_for_refresh() const {
return payload->check_for_refresh();
}
void NotifyMessage::encode(bufferlist& bl) const {
ENCODE_START(7, 1, bl);
encode(static_cast<uint32_t>(payload->get_notify_op()), bl);
payload->encode(bl);
ENCODE_FINISH(bl);
}
void NotifyMessage::decode(bufferlist::const_iterator& iter) {
DECODE_START(1, iter);
uint32_t notify_op;
decode(notify_op, iter);
// select the correct payload variant based upon the encoded op
switch (notify_op) {
case NOTIFY_OP_ACQUIRED_LOCK:
payload.reset(new AcquiredLockPayload());
break;
case NOTIFY_OP_RELEASED_LOCK:
payload.reset(new ReleasedLockPayload());
break;
case NOTIFY_OP_REQUEST_LOCK:
payload.reset(new RequestLockPayload());
break;
case NOTIFY_OP_HEADER_UPDATE:
payload.reset(new HeaderUpdatePayload());
break;
case NOTIFY_OP_ASYNC_PROGRESS:
payload.reset(new AsyncProgressPayload());
break;
case NOTIFY_OP_ASYNC_COMPLETE:
payload.reset(new AsyncCompletePayload());
break;
case NOTIFY_OP_FLATTEN:
payload.reset(new FlattenPayload());
break;
case NOTIFY_OP_RESIZE:
payload.reset(new ResizePayload());
break;
case NOTIFY_OP_SNAP_CREATE:
payload.reset(new SnapCreatePayload());
break;
case NOTIFY_OP_SNAP_REMOVE:
payload.reset(new SnapRemovePayload());
break;
case NOTIFY_OP_SNAP_RENAME:
payload.reset(new SnapRenamePayload());
break;
case NOTIFY_OP_SNAP_PROTECT:
payload.reset(new SnapProtectPayload());
break;
case NOTIFY_OP_SNAP_UNPROTECT:
payload.reset(new SnapUnprotectPayload());
break;
case NOTIFY_OP_REBUILD_OBJECT_MAP:
payload.reset(new RebuildObjectMapPayload());
break;
case NOTIFY_OP_RENAME:
payload.reset(new RenamePayload());
break;
case NOTIFY_OP_UPDATE_FEATURES:
payload.reset(new UpdateFeaturesPayload());
break;
case NOTIFY_OP_MIGRATE:
payload.reset(new MigratePayload());
break;
case NOTIFY_OP_SPARSIFY:
payload.reset(new SparsifyPayload());
break;
case NOTIFY_OP_QUIESCE:
payload.reset(new QuiescePayload());
break;
case NOTIFY_OP_UNQUIESCE:
payload.reset(new UnquiescePayload());
break;
case NOTIFY_OP_METADATA_UPDATE:
payload.reset(new MetadataUpdatePayload());
break;
  default:
    payload.reset(new UnknownPayload());
    break;
  }
payload->decode(struct_v, iter);
DECODE_FINISH(iter);
}
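// Illustrative round-trip sketch (payload choice is arbitrary): senders
// encode a NotifyMessage and receivers re-materialize the payload type from
// the encoded op, e.g.
//
//   NotifyMessage out_msg(new HeaderUpdatePayload());
//   bufferlist bl;
//   out_msg.encode(bl);
//
//   NotifyMessage in_msg(new UnknownPayload());
//   auto it = bl.cbegin();
//   in_msg.decode(it);   // in_msg.get_notify_op() == NOTIFY_OP_HEADER_UPDATE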
void NotifyMessage::dump(Formatter *f) const {
payload->dump(f);
}
NotifyOp NotifyMessage::get_notify_op() const {
return payload->get_notify_op();
}
void NotifyMessage::generate_test_instances(std::list<NotifyMessage *> &o) {
o.push_back(new NotifyMessage(new AcquiredLockPayload(ClientId(1, 2))));
o.push_back(new NotifyMessage(new ReleasedLockPayload(ClientId(1, 2))));
o.push_back(new NotifyMessage(new RequestLockPayload(ClientId(1, 2), true)));
o.push_back(new NotifyMessage(new HeaderUpdatePayload()));
o.push_back(new NotifyMessage(new AsyncProgressPayload(AsyncRequestId(ClientId(0, 1), 2), 3, 4)));
o.push_back(new NotifyMessage(new AsyncCompletePayload(AsyncRequestId(ClientId(0, 1), 2), 3)));
o.push_back(new NotifyMessage(new FlattenPayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new ResizePayload(AsyncRequestId(ClientId(0, 1), 2), 123, true)));
o.push_back(new NotifyMessage(new SnapCreatePayload(AsyncRequestId(ClientId(0, 1), 2),
cls::rbd::UserSnapshotNamespace(),
"foo", 1)));
o.push_back(new NotifyMessage(new SnapRemovePayload(AsyncRequestId(ClientId(0, 1), 2),
cls::rbd::UserSnapshotNamespace(), "foo")));
o.push_back(new NotifyMessage(new SnapProtectPayload(AsyncRequestId(ClientId(0, 1), 2),
cls::rbd::UserSnapshotNamespace(), "foo")));
o.push_back(new NotifyMessage(new SnapUnprotectPayload(AsyncRequestId(ClientId(0, 1), 2),
cls::rbd::UserSnapshotNamespace(), "foo")));
o.push_back(new NotifyMessage(new RebuildObjectMapPayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new RenamePayload(AsyncRequestId(ClientId(0, 1), 2), "foo")));
o.push_back(new NotifyMessage(new UpdateFeaturesPayload(AsyncRequestId(ClientId(0, 1), 2),
1, true)));
o.push_back(new NotifyMessage(new MigratePayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new SparsifyPayload(AsyncRequestId(ClientId(0, 1), 2), 1)));
o.push_back(new NotifyMessage(new QuiescePayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new UnquiescePayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new MetadataUpdatePayload(AsyncRequestId(ClientId(0, 1), 2),
"foo", std::optional<std::string>{"xyz"})));
}
void ResponseMessage::encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(result, bl);
ENCODE_FINISH(bl);
}
void ResponseMessage::decode(bufferlist::const_iterator& iter) {
DECODE_START(1, iter);
decode(result, iter);
DECODE_FINISH(iter);
}
void ResponseMessage::dump(Formatter *f) const {
f->dump_int("result", result);
}
void ResponseMessage::generate_test_instances(std::list<ResponseMessage *> &o) {
o.push_back(new ResponseMessage(1));
}
std::ostream &operator<<(std::ostream &out,
const librbd::watch_notify::NotifyOp &op) {
using namespace librbd::watch_notify;
switch (op) {
case NOTIFY_OP_ACQUIRED_LOCK:
out << "AcquiredLock";
break;
case NOTIFY_OP_RELEASED_LOCK:
out << "ReleasedLock";
break;
case NOTIFY_OP_REQUEST_LOCK:
out << "RequestLock";
break;
case NOTIFY_OP_HEADER_UPDATE:
out << "HeaderUpdate";
break;
case NOTIFY_OP_ASYNC_PROGRESS:
out << "AsyncProgress";
break;
case NOTIFY_OP_ASYNC_COMPLETE:
out << "AsyncComplete";
break;
case NOTIFY_OP_FLATTEN:
out << "Flatten";
break;
case NOTIFY_OP_RESIZE:
out << "Resize";
break;
case NOTIFY_OP_SNAP_CREATE:
out << "SnapCreate";
break;
case NOTIFY_OP_SNAP_REMOVE:
out << "SnapRemove";
break;
case NOTIFY_OP_SNAP_RENAME:
out << "SnapRename";
break;
case NOTIFY_OP_SNAP_PROTECT:
out << "SnapProtect";
break;
case NOTIFY_OP_SNAP_UNPROTECT:
out << "SnapUnprotect";
break;
case NOTIFY_OP_REBUILD_OBJECT_MAP:
out << "RebuildObjectMap";
break;
case NOTIFY_OP_RENAME:
out << "Rename";
break;
case NOTIFY_OP_UPDATE_FEATURES:
out << "UpdateFeatures";
break;
case NOTIFY_OP_MIGRATE:
out << "Migrate";
break;
case NOTIFY_OP_SPARSIFY:
out << "Sparsify";
break;
case NOTIFY_OP_QUIESCE:
out << "Quiesce";
break;
case NOTIFY_OP_UNQUIESCE:
out << "Unquiesce";
break;
case NOTIFY_OP_METADATA_UPDATE:
out << "MetadataUpdate";
break;
default:
out << "Unknown (" << static_cast<uint32_t>(op) << ")";
break;
}
return out;
}
std::ostream &operator<<(std::ostream &out,
const librbd::watch_notify::AsyncRequestId &request) {
out << "[" << request.client_id.gid << "," << request.client_id.handle << ","
<< request.request_id << "]";
return out;
}
} // namespace watch_notify
} // namespace librbd
| 15,460 | 26.707885 | 102 | cc |
null | ceph-main/src/librbd/WatchNotifyTypes.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_WATCH_NOTIFY_TYPES_H
#define LIBRBD_WATCH_NOTIFY_TYPES_H
#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "librbd/watcher/Types.h"
#include <iosfwd>
#include <list>
#include <memory>
#include <string>
#include <boost/variant.hpp>
namespace ceph {
class Formatter;
}
namespace librbd {
namespace watch_notify {
using librbd::watcher::ClientId;
WRITE_CLASS_ENCODER(ClientId);
struct AsyncRequestId {
ClientId client_id;
uint64_t request_id;
AsyncRequestId() : request_id() {}
AsyncRequestId(const ClientId &client_id_, uint64_t request_id_)
: client_id(client_id_), request_id(request_id_) {}
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& it);
void dump(Formatter *f) const;
inline bool operator<(const AsyncRequestId &rhs) const {
if (client_id != rhs.client_id) {
return client_id < rhs.client_id;
} else {
return request_id < rhs.request_id;
}
}
inline bool operator!=(const AsyncRequestId &rhs) const {
return (client_id != rhs.client_id || request_id != rhs.request_id);
}
inline operator bool() const {
return (*this != AsyncRequestId());
}
};
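// Illustrative note (added comment, not part of the original header):
// AsyncRequestId is usable as an ordered map/set key via operator<, and a
// default-constructed id acts as a "null" sentinel through operator bool().
// A minimal sketch, with illustrative values only:
//
//   AsyncRequestId null_id;
//   AsyncRequestId id(ClientId(1, 2), 3);
//   ceph_assert(!null_id && static_cast<bool>(id));
//   std::map<AsyncRequestId, int> pending;
//   pending[id] = 0;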
enum NotifyOp {
NOTIFY_OP_ACQUIRED_LOCK = 0,
NOTIFY_OP_RELEASED_LOCK = 1,
NOTIFY_OP_REQUEST_LOCK = 2,
NOTIFY_OP_HEADER_UPDATE = 3,
NOTIFY_OP_ASYNC_PROGRESS = 4,
NOTIFY_OP_ASYNC_COMPLETE = 5,
NOTIFY_OP_FLATTEN = 6,
NOTIFY_OP_RESIZE = 7,
NOTIFY_OP_SNAP_CREATE = 8,
NOTIFY_OP_SNAP_REMOVE = 9,
NOTIFY_OP_REBUILD_OBJECT_MAP = 10,
NOTIFY_OP_SNAP_RENAME = 11,
NOTIFY_OP_SNAP_PROTECT = 12,
NOTIFY_OP_SNAP_UNPROTECT = 13,
NOTIFY_OP_RENAME = 14,
NOTIFY_OP_UPDATE_FEATURES = 15,
NOTIFY_OP_MIGRATE = 16,
NOTIFY_OP_SPARSIFY = 17,
NOTIFY_OP_QUIESCE = 18,
NOTIFY_OP_UNQUIESCE = 19,
NOTIFY_OP_METADATA_UPDATE = 20,
};
struct Payload {
virtual ~Payload() {}
virtual NotifyOp get_notify_op() const = 0;
virtual bool check_for_refresh() const = 0;
virtual void encode(bufferlist &bl) const = 0;
virtual void decode(__u8 version, bufferlist::const_iterator &iter) = 0;
virtual void dump(Formatter *f) const = 0;
};
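// Illustrative note (added comment, not part of the original header): each
// concrete payload below implements the pure-virtual interface above --
// get_notify_op() maps it to a NotifyOp value, check_for_refresh() tells the
// watcher whether the image must be refreshed before handling it, and
// encode()/decode()/dump() handle the wire format.  Adding a new notification
// therefore means adding a NotifyOp value, a payload struct here, and a
// matching case in NotifyMessage::decode().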
struct AcquiredLockPayload : public Payload {
ClientId client_id;
AcquiredLockPayload() {}
AcquiredLockPayload(const ClientId &client_id) : client_id(client_id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_ACQUIRED_LOCK;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct ReleasedLockPayload : public Payload {
ClientId client_id;
ReleasedLockPayload() {}
ReleasedLockPayload(const ClientId &client_id) : client_id(client_id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_RELEASED_LOCK;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct RequestLockPayload : public Payload {
ClientId client_id;
bool force = false;
RequestLockPayload() {}
RequestLockPayload(const ClientId &client_id, bool force)
: client_id(client_id), force(force) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_REQUEST_LOCK;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct HeaderUpdatePayload : public Payload {
NotifyOp get_notify_op() const override {
return NOTIFY_OP_HEADER_UPDATE;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct AsyncRequestPayloadBase : public Payload {
public:
AsyncRequestId async_request_id;
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
protected:
AsyncRequestPayloadBase() {}
AsyncRequestPayloadBase(const AsyncRequestId &id) : async_request_id(id) {}
};
struct AsyncProgressPayload : public AsyncRequestPayloadBase {
uint64_t offset = 0;
uint64_t total = 0;
AsyncProgressPayload() {}
AsyncProgressPayload(const AsyncRequestId &id, uint64_t offset, uint64_t total)
: AsyncRequestPayloadBase(id), offset(offset), total(total) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_ASYNC_PROGRESS;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct AsyncCompletePayload : public AsyncRequestPayloadBase {
int result = 0;
AsyncCompletePayload() {}
AsyncCompletePayload(const AsyncRequestId &id, int r)
: AsyncRequestPayloadBase(id), result(r) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_ASYNC_COMPLETE;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct FlattenPayload : public AsyncRequestPayloadBase {
FlattenPayload() {}
FlattenPayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_FLATTEN;
}
bool check_for_refresh() const override {
return true;
}
};
struct ResizePayload : public AsyncRequestPayloadBase {
uint64_t size = 0;
bool allow_shrink = true;
ResizePayload() {}
ResizePayload(const AsyncRequestId &id, uint64_t size, bool allow_shrink)
: AsyncRequestPayloadBase(id), size(size), allow_shrink(allow_shrink) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_RESIZE;
}
bool check_for_refresh() const override {
return true;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct SnapPayloadBase : public AsyncRequestPayloadBase {
public:
cls::rbd::SnapshotNamespace snap_namespace;
std::string snap_name;
bool check_for_refresh() const override {
return true;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
protected:
SnapPayloadBase() {}
SnapPayloadBase(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &name)
: AsyncRequestPayloadBase(id), snap_namespace(snap_namespace),
snap_name(name) {
}
};
struct SnapCreatePayload : public SnapPayloadBase {
uint64_t flags = 0;
SnapCreatePayload() {}
SnapCreatePayload(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &name, uint64_t flags)
: SnapPayloadBase(id, snap_namespace, name), flags(flags) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_CREATE;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct SnapRenamePayload : public SnapPayloadBase {
uint64_t snap_id = 0;
SnapRenamePayload() {}
SnapRenamePayload(const AsyncRequestId &id,
const uint64_t &src_snap_id,
const std::string &dst_name)
: SnapPayloadBase(id, cls::rbd::UserSnapshotNamespace(), dst_name),
snap_id(src_snap_id) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_RENAME;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct SnapRemovePayload : public SnapPayloadBase {
SnapRemovePayload() {}
SnapRemovePayload(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &name)
: SnapPayloadBase(id, snap_namespace, name) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_REMOVE;
}
};
struct SnapProtectPayload : public SnapPayloadBase {
SnapProtectPayload() {}
SnapProtectPayload(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &name)
: SnapPayloadBase(id, snap_namespace, name) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_PROTECT;
}
};
struct SnapUnprotectPayload : public SnapPayloadBase {
SnapUnprotectPayload() {}
SnapUnprotectPayload(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &name)
: SnapPayloadBase(id, snap_namespace, name) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_UNPROTECT;
}
};
struct RebuildObjectMapPayload : public AsyncRequestPayloadBase {
RebuildObjectMapPayload() {}
RebuildObjectMapPayload(const AsyncRequestId &id)
: AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_REBUILD_OBJECT_MAP;
}
bool check_for_refresh() const override {
return true;
}
};
struct RenamePayload : public AsyncRequestPayloadBase {
std::string image_name;
RenamePayload() {}
RenamePayload(const AsyncRequestId &id, const std::string _image_name)
: AsyncRequestPayloadBase(id), image_name(_image_name) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_RENAME;
}
bool check_for_refresh() const override {
return true;
}
  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};
struct UpdateFeaturesPayload : public AsyncRequestPayloadBase {
uint64_t features = 0;
bool enabled = false;
UpdateFeaturesPayload() {}
UpdateFeaturesPayload(const AsyncRequestId &id, uint64_t features,
bool enabled)
: AsyncRequestPayloadBase(id), features(features), enabled(enabled) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_UPDATE_FEATURES;
}
bool check_for_refresh() const override {
return true;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct MigratePayload : public AsyncRequestPayloadBase {
MigratePayload() {}
MigratePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_MIGRATE;
}
bool check_for_refresh() const override {
return true;
}
};
struct SparsifyPayload : public AsyncRequestPayloadBase {
uint64_t sparse_size = 0;
SparsifyPayload() {}
SparsifyPayload(const AsyncRequestId &id, uint64_t sparse_size)
: AsyncRequestPayloadBase(id), sparse_size(sparse_size) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SPARSIFY;
}
bool check_for_refresh() const override {
return true;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct QuiescePayload : public AsyncRequestPayloadBase {
QuiescePayload() {}
QuiescePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_QUIESCE;
}
bool check_for_refresh() const override {
return false;
}
};
struct UnquiescePayload : public AsyncRequestPayloadBase {
UnquiescePayload() {}
UnquiescePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_UNQUIESCE;
}
bool check_for_refresh() const override {
return false;
}
};
struct MetadataUpdatePayload : public AsyncRequestPayloadBase {
std::string key;
std::optional<std::string> value;
MetadataUpdatePayload() {}
MetadataUpdatePayload(const AsyncRequestId &id, std::string key,
std::optional<std::string> value)
: AsyncRequestPayloadBase(id), key(key), value(value) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_METADATA_UPDATE;
}
bool check_for_refresh() const override {
return false;
}
  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};
struct UnknownPayload : public Payload {
NotifyOp get_notify_op() const override {
return static_cast<NotifyOp>(-1);
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct NotifyMessage {
NotifyMessage() : payload(new UnknownPayload()) {}
NotifyMessage(Payload *payload) : payload(payload) {}
std::unique_ptr<Payload> payload;
bool check_for_refresh() const;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& it);
void dump(Formatter *f) const;
NotifyOp get_notify_op() const;
static void generate_test_instances(std::list<NotifyMessage *> &o);
};
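// Illustrative sketch (added comment, not part of the original header):
// NotifyMessage takes ownership of the raw Payload pointer handed to it, so
// callers typically allocate the payload inline and pass it straight over:
//
//   NotifyMessage msg(new AcquiredLockPayload(ClientId(1, 2)));
//   bufferlist bl;
//   encode(msg, bl);   // via WRITE_CLASS_ENCODER(NotifyMessage) below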
struct ResponseMessage {
ResponseMessage() : result(0) {}
ResponseMessage(int result_) : result(result_) {}
int result;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& it);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<ResponseMessage *> &o);
};
std::ostream &operator<<(std::ostream &out,
const NotifyOp &op);
std::ostream &operator<<(std::ostream &out,
const AsyncRequestId &request);
WRITE_CLASS_ENCODER(AsyncRequestId);
WRITE_CLASS_ENCODER(NotifyMessage);
WRITE_CLASS_ENCODER(ResponseMessage);
} // namespace watch_notify
} // namespace librbd
#endif // LIBRBD_WATCH_NOTIFY_TYPES_H
| 14,766 | 26.705441 | 81 | h |
null | ceph-main/src/librbd/Watcher.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/Watcher.h"
#include "librbd/watcher/RewatchRequest.h"
#include "librbd/Utils.h"
#include "librbd/TaskFinisher.h"
#include "librbd/asio/ContextWQ.h"
#include "include/encoding.h"
#include "common/errno.h"
#include <boost/bind/bind.hpp>
// re-include our assert to clobber the system one; fix dout:
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
namespace librbd {
using namespace boost::placeholders;
using namespace watcher;
using util::create_context_callback;
using util::create_rados_callback;
using std::string;
namespace {
struct C_UnwatchAndFlush : public Context {
librados::Rados rados;
Context *on_finish;
bool flushing = false;
int ret_val = 0;
C_UnwatchAndFlush(librados::IoCtx &io_ctx, Context *on_finish)
: rados(io_ctx), on_finish(on_finish) {
}
void complete(int r) override {
if (ret_val == 0 && r < 0) {
ret_val = r;
}
if (!flushing) {
flushing = true;
librados::AioCompletion *aio_comp = create_rados_callback(this);
r = rados.aio_watch_flush(aio_comp);
ceph_assert(r == 0);
aio_comp->release();
return;
}
// ensure our reference to the RadosClient is released prior
// to completing the callback to avoid racing an explicit
// librados shutdown
Context *ctx = on_finish;
r = ret_val;
delete this;
ctx->complete(r);
}
void finish(int r) override {
}
};
} // anonymous namespace
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Watcher::C_NotifyAck " << this << " " \
<< __func__ << ": "
Watcher::C_NotifyAck::C_NotifyAck(Watcher *watcher, uint64_t notify_id,
uint64_t handle)
: watcher(watcher), cct(watcher->m_cct), notify_id(notify_id),
handle(handle) {
ldout(cct, 10) << "id=" << notify_id << ", " << "handle=" << handle << dendl;
}
void Watcher::C_NotifyAck::finish(int r) {
ldout(cct, 10) << "r=" << r << dendl;
ceph_assert(r == 0);
watcher->acknowledge_notify(notify_id, handle, out);
}
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Watcher: " << this << " " << __func__ \
<< ": "
Watcher::Watcher(librados::IoCtx& ioctx, asio::ContextWQ *work_queue,
const string& oid)
: m_ioctx(ioctx), m_work_queue(work_queue), m_oid(oid),
m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
m_watch_lock(ceph::make_shared_mutex(
util::unique_lock_name("librbd::Watcher::m_watch_lock", this))),
m_watch_handle(0), m_notifier(work_queue, ioctx, oid),
m_watch_state(WATCH_STATE_IDLE), m_watch_ctx(*this) {
}
Watcher::~Watcher() {
std::shared_lock l{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
}
void Watcher::register_watch(Context *on_finish) {
ldout(m_cct, 10) << dendl;
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
m_watch_state = WATCH_STATE_REGISTERING;
m_watch_blocklisted = false;
librados::AioCompletion *aio_comp = create_rados_callback(
new C_RegisterWatch(this, on_finish));
int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_watch_handle, &m_watch_ctx);
ceph_assert(r == 0);
aio_comp->release();
}
void Watcher::handle_register_watch(int r, Context *on_finish) {
ldout(m_cct, 10) << "r=" << r << dendl;
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REGISTERING);
m_watch_state = WATCH_STATE_IDLE;
if (r < 0) {
lderr(m_cct) << "failed to register watch: " << cpp_strerror(r)
<< dendl;
m_watch_handle = 0;
}
if (m_unregister_watch_ctx != nullptr) {
std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
} else if (r == 0 && m_watch_error) {
lderr(m_cct) << "re-registering watch after error" << dendl;
m_watch_state = WATCH_STATE_REWATCHING;
watch_error = true;
} else {
m_watch_blocklisted = (r == -EBLOCKLISTED);
}
}
on_finish->complete(r);
if (unregister_watch_ctx != nullptr) {
unregister_watch_ctx->complete(0);
} else if (watch_error) {
rewatch();
}
}
void Watcher::unregister_watch(Context *on_finish) {
ldout(m_cct, 10) << dendl;
{
std::unique_lock watch_locker{m_watch_lock};
if (m_watch_state != WATCH_STATE_IDLE) {
ldout(m_cct, 10) << "delaying unregister until register completed"
<< dendl;
ceph_assert(m_unregister_watch_ctx == nullptr);
m_unregister_watch_ctx = new LambdaContext([this, on_finish](int r) {
unregister_watch(on_finish);
});
return;
} else if (is_registered(m_watch_lock)) {
librados::AioCompletion *aio_comp = create_rados_callback(
new C_UnwatchAndFlush(m_ioctx, on_finish));
int r = m_ioctx.aio_unwatch(m_watch_handle, aio_comp);
ceph_assert(r == 0);
aio_comp->release();
m_watch_handle = 0;
m_watch_blocklisted = false;
return;
}
}
on_finish->complete(0);
}
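// Hedged usage sketch (added comment, not part of the original source):
// callers normally drive register/unregister with a C_SaferCond and wait for
// completion, e.g.
//
//   C_SaferCond unregister_ctx;
//   watcher->unregister_watch(&unregister_ctx);
//   int r = unregister_ctx.wait();   // 0 on success
//
// If a registration or rewatch is still in flight, the request above is queued
// in m_unregister_watch_ctx and replayed once the state returns to
// WATCH_STATE_IDLE (see handle_register_watch() / handle_rewatch_callback()).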
bool Watcher::notifications_blocked() const {
std::shared_lock locker{m_watch_lock};
bool blocked = (m_blocked_count > 0);
ldout(m_cct, 5) << "blocked=" << blocked << dendl;
return blocked;
}
void Watcher::block_notifies(Context *on_finish) {
{
std::unique_lock locker{m_watch_lock};
++m_blocked_count;
ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
}
m_async_op_tracker.wait_for_ops(on_finish);
}
void Watcher::unblock_notifies() {
std::unique_lock locker{m_watch_lock};
ceph_assert(m_blocked_count > 0);
--m_blocked_count;
ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
}
void Watcher::flush(Context *on_finish) {
m_notifier.flush(on_finish);
}
std::string Watcher::get_oid() const {
std::shared_lock locker{m_watch_lock};
return m_oid;
}
void Watcher::set_oid(const string& oid) {
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
m_oid = oid;
}
void Watcher::handle_error(uint64_t handle, int err) {
lderr(m_cct) << "handle=" << handle << ": " << cpp_strerror(err) << dendl;
std::unique_lock watch_locker{m_watch_lock};
m_watch_error = true;
if (is_registered(m_watch_lock)) {
m_watch_state = WATCH_STATE_REWATCHING;
if (err == -EBLOCKLISTED) {
m_watch_blocklisted = true;
}
auto ctx = new LambdaContext(
boost::bind(&Watcher::rewatch, this));
m_work_queue->queue(ctx);
}
}
void Watcher::acknowledge_notify(uint64_t notify_id, uint64_t handle,
bufferlist &out) {
m_ioctx.notify_ack(m_oid, notify_id, handle, out);
}
void Watcher::rewatch() {
ldout(m_cct, 10) << dendl;
Context *unregister_watch_ctx = nullptr;
{
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
m_watch_state = WATCH_STATE_IDLE;
std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
} else {
m_watch_error = false;
auto ctx = create_context_callback<
Watcher, &Watcher::handle_rewatch>(this);
auto req = RewatchRequest::create(m_ioctx, m_oid, m_watch_lock,
&m_watch_ctx, &m_watch_handle, ctx);
req->send();
return;
}
}
unregister_watch_ctx->complete(0);
}
void Watcher::handle_rewatch(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
m_watch_blocklisted = false;
if (m_unregister_watch_ctx != nullptr) {
ldout(m_cct, 10) << "image is closing, skip rewatch" << dendl;
m_watch_state = WATCH_STATE_IDLE;
std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
} else if (r == -EBLOCKLISTED) {
lderr(m_cct) << "client blocklisted" << dendl;
m_watch_blocklisted = true;
} else if (r == -ENOENT) {
ldout(m_cct, 5) << "object does not exist" << dendl;
} else if (r < 0) {
lderr(m_cct) << "failed to rewatch: " << cpp_strerror(r) << dendl;
watch_error = true;
} else if (m_watch_error) {
lderr(m_cct) << "re-registering watch after error" << dendl;
watch_error = true;
}
}
if (unregister_watch_ctx != nullptr) {
unregister_watch_ctx->complete(0);
return;
} else if (watch_error) {
rewatch();
return;
}
auto ctx = create_context_callback<
Watcher, &Watcher::handle_rewatch_callback>(this);
m_work_queue->queue(ctx, r);
}
void Watcher::handle_rewatch_callback(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
handle_rewatch_complete(r);
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
m_watch_state = WATCH_STATE_IDLE;
std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
} else if (r == -EBLOCKLISTED || r == -ENOENT) {
m_watch_state = WATCH_STATE_IDLE;
} else if (r < 0 || m_watch_error) {
watch_error = true;
} else {
m_watch_state = WATCH_STATE_IDLE;
}
}
if (unregister_watch_ctx != nullptr) {
unregister_watch_ctx->complete(0);
} else if (watch_error) {
rewatch();
}
}
void Watcher::send_notify(bufferlist& payload,
watcher::NotifyResponse *response,
Context *on_finish) {
m_notifier.notify(payload, response, on_finish);
}
void Watcher::WatchCtx::handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist& bl) {
// if notifications are blocked, finish the notification w/o
// bubbling the notification up to the derived class
watcher.m_async_op_tracker.start_op();
if (watcher.notifications_blocked()) {
bufferlist bl;
watcher.acknowledge_notify(notify_id, handle, bl);
} else {
watcher.handle_notify(notify_id, handle, notifier_id, bl);
}
watcher.m_async_op_tracker.finish_op();
}
void Watcher::WatchCtx::handle_error(uint64_t handle, int err) {
watcher.handle_error(handle, err);
}
} // namespace librbd
| 10,531 | 27.38814 | 79 | cc |
null | ceph-main/src/librbd/Watcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_H
#define CEPH_LIBRBD_WATCHER_H
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "common/RWLock.h"
#include "include/rados/librados.hpp"
#include "librbd/watcher/Notifier.h"
#include "librbd/watcher/Types.h"
#include <string>
#include <utility>
namespace librbd {
namespace asio { struct ContextWQ; }
namespace watcher { struct NotifyResponse; }
class Watcher {
public:
struct C_NotifyAck : public Context {
Watcher *watcher;
CephContext *cct;
uint64_t notify_id;
uint64_t handle;
bufferlist out;
C_NotifyAck(Watcher *watcher, uint64_t notify_id, uint64_t handle);
void finish(int r) override;
};
Watcher(librados::IoCtx& ioctx, asio::ContextWQ *work_queue,
const std::string& oid);
virtual ~Watcher();
void register_watch(Context *on_finish);
virtual void unregister_watch(Context *on_finish);
void flush(Context *on_finish);
bool notifications_blocked() const;
virtual void block_notifies(Context *on_finish);
void unblock_notifies();
std::string get_oid() const;
void set_oid(const std::string& oid);
uint64_t get_watch_handle() const {
std::shared_lock watch_locker{m_watch_lock};
return m_watch_handle;
}
bool is_registered() const {
std::shared_lock locker{m_watch_lock};
return is_registered(m_watch_lock);
}
bool is_unregistered() const {
std::shared_lock locker{m_watch_lock};
return is_unregistered(m_watch_lock);
}
bool is_blocklisted() const {
std::shared_lock locker{m_watch_lock};
return m_watch_blocklisted;
}
protected:
enum WatchState {
WATCH_STATE_IDLE,
WATCH_STATE_REGISTERING,
WATCH_STATE_REWATCHING
};
librados::IoCtx& m_ioctx;
asio::ContextWQ *m_work_queue;
std::string m_oid;
CephContext *m_cct;
mutable ceph::shared_mutex m_watch_lock;
uint64_t m_watch_handle;
watcher::Notifier m_notifier;
WatchState m_watch_state;
bool m_watch_blocklisted = false;
AsyncOpTracker m_async_op_tracker;
bool is_registered(const ceph::shared_mutex&) const {
return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle != 0);
}
bool is_unregistered(const ceph::shared_mutex&) const {
return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle == 0);
}
void send_notify(bufferlist &payload,
watcher::NotifyResponse *response = nullptr,
Context *on_finish = nullptr);
virtual void handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) = 0;
virtual void handle_error(uint64_t cookie, int err);
void acknowledge_notify(uint64_t notify_id, uint64_t handle,
bufferlist &out);
virtual void handle_rewatch_complete(int r) { }
private:
/**
* @verbatim
*
* <start>
* |
* v
* UNREGISTERED
* |
* | (register_watch)
* |
* REGISTERING
* |
* v (watch error)
* REGISTERED * * * * * * * > ERROR
* | ^ |
* | | | (rewatch)
* | | v
* | | REWATCHING
* | | |
* | | |
* | \---------------------/
* |
* | (unregister_watch)
* |
* v
* UNREGISTERED
* |
* v
* <finish>
*
* @endverbatim
*/
struct WatchCtx : public librados::WatchCtx2 {
Watcher &watcher;
WatchCtx(Watcher &parent) : watcher(parent) {}
void handle_notify(uint64_t notify_id,
uint64_t handle,
uint64_t notifier_id,
bufferlist& bl) override;
void handle_error(uint64_t handle, int err) override;
};
struct C_RegisterWatch : public Context {
Watcher *watcher;
Context *on_finish;
C_RegisterWatch(Watcher *watcher, Context *on_finish)
: watcher(watcher), on_finish(on_finish) {
}
void finish(int r) override {
watcher->handle_register_watch(r, on_finish);
}
};
WatchCtx m_watch_ctx;
Context *m_unregister_watch_ctx = nullptr;
bool m_watch_error = false;
uint32_t m_blocked_count = 0;
void handle_register_watch(int r, Context *on_finish);
void rewatch();
void handle_rewatch(int r);
void handle_rewatch_callback(int r);
};
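// Illustrative sketch (added comment, not part of the original header):
// concrete watchers subclass Watcher, implement handle_notify(), and
// acknowledge each notification.  The names below (MyWatcher, io_ctx,
// work_queue, "some_object") are hypothetical:
//
//   struct MyWatcher : public librbd::Watcher {
//     using librbd::Watcher::Watcher;
//     void handle_notify(uint64_t notify_id, uint64_t handle,
//                        uint64_t notifier_id, bufferlist &bl) override {
//       // inspect bl, then ack so the notifier is not left waiting
//       bufferlist out;
//       acknowledge_notify(notify_id, handle, out);
//     }
//   };
//
//   C_SaferCond on_register;
//   MyWatcher watcher(io_ctx, work_queue, "some_object");
//   watcher.register_watch(&on_register);
//   int r = on_register.wait();
//   // ... and unregister_watch() must complete before the watcher is destroyed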
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_H
| 4,543 | 23.695652 | 71 | h |
null | ceph-main/src/librbd/internal.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/int_types.h"
#include <errno.h>
#include <limits.h>
#include "include/types.h"
#include "include/uuid.h"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Throttle.h"
#include "common/event_socket.h"
#include "common/perf_counters.h"
#include "osdc/Striper.h"
#include "include/stringify.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/rbd/cls_rbd.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "cls/journal/cls_journal_types.h"
#include "cls/journal/cls_journal_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Operations.h"
#include "librbd/PluginRegistry.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/Image.h"
#include "librbd/api/Io.h"
#include "librbd/cache/Utils.h"
#include "librbd/exclusive_lock/AutomaticPolicy.h"
#include "librbd/exclusive_lock/StandardPolicy.h"
#include "librbd/deep_copy/MetadataCopyRequest.h"
#include "librbd/image/CloneRequest.h"
#include "librbd/image/CreateRequest.h"
#include "librbd/image/GetMetadataRequest.h"
#include "librbd/image/Types.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ObjectRequest.h"
#include "librbd/io/ReadResult.h"
#include "librbd/journal/Types.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/mirror/EnableRequest.h"
#include "librbd/operation/TrimRequest.h"
#include "journal/Journaler.h"
#include <boost/scope_exit.hpp>
#include <boost/variant.hpp>
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd: "
#define rbd_howmany(x, y) (((x) + (y) - 1) / (y))
using std::istringstream;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
// list binds to list() here, so std::list is explicitly used below
using ceph::bufferlist;
using librados::snap_t;
using librados::IoCtx;
using librados::Rados;
namespace librbd {
namespace {
int validate_pool(IoCtx &io_ctx, CephContext *cct) {
if (!cct->_conf.get_val<bool>("rbd_validate_pool")) {
return 0;
}
int r = io_ctx.stat(RBD_DIRECTORY, NULL, NULL);
if (r == 0) {
return 0;
} else if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to stat RBD directory: " << cpp_strerror(r) << dendl;
return r;
}
// allocate a self-managed snapshot id if this a new pool to force
// self-managed snapshot mode
uint64_t snap_id;
r = io_ctx.selfmanaged_snap_create(&snap_id);
if (r == -EINVAL) {
lderr(cct) << "pool not configured for self-managed RBD snapshot support"
<< dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to allocate self-managed snapshot: "
<< cpp_strerror(r) << dendl;
return r;
}
r = io_ctx.selfmanaged_snap_remove(snap_id);
if (r < 0) {
lderr(cct) << "failed to release self-managed snapshot " << snap_id
<< ": " << cpp_strerror(r) << dendl;
}
return 0;
}
} // anonymous namespace
int detect_format(IoCtx &io_ctx, const string &name,
bool *old_format, uint64_t *size)
{
CephContext *cct = (CephContext *)io_ctx.cct();
if (old_format)
*old_format = true;
int r = io_ctx.stat(util::old_header_name(name), size, NULL);
if (r == -ENOENT) {
if (old_format)
*old_format = false;
r = io_ctx.stat(util::id_obj_name(name), size, NULL);
if (r < 0)
return r;
} else if (r < 0) {
return r;
}
ldout(cct, 20) << "detect format of " << name << " : "
<< (old_format ? (*old_format ? "old" : "new") :
"don't care") << dendl;
return 0;
}
bool has_parent(int64_t parent_pool_id, uint64_t off, uint64_t overlap)
{
return (parent_pool_id != -1 && off <= overlap);
}
void init_rbd_header(struct rbd_obj_header_ondisk& ondisk,
uint64_t size, int order, uint64_t bid)
{
uint32_t hi = bid >> 32;
uint32_t lo = bid & 0xFFFFFFFF;
uint32_t extra = rand() % 0xFFFFFFFF;
// FIPS zeroization audit 20191117: this memset is not security related.
memset(&ondisk, 0, sizeof(ondisk));
memcpy(&ondisk.text, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT));
memcpy(&ondisk.signature, RBD_HEADER_SIGNATURE,
sizeof(RBD_HEADER_SIGNATURE));
memcpy(&ondisk.version, RBD_HEADER_VERSION, sizeof(RBD_HEADER_VERSION));
snprintf(ondisk.block_name, sizeof(ondisk.block_name), "rb.%x.%x.%x",
hi, lo, extra);
ondisk.image_size = size;
ondisk.options.order = order;
ondisk.options.crypt_type = RBD_CRYPT_NONE;
ondisk.options.comp_type = RBD_COMP_NONE;
ondisk.snap_seq = 0;
ondisk.snap_count = 0;
ondisk.reserved = 0;
ondisk.snap_names_len = 0;
}
void image_info(ImageCtx *ictx, image_info_t& info, size_t infosize)
{
int obj_order = ictx->order;
{
std::shared_lock locker{ictx->image_lock};
info.size = ictx->get_area_size(io::ImageArea::DATA);
}
info.obj_size = 1ULL << obj_order;
info.num_objs = Striper::get_num_objects(ictx->layout, info.size);
info.order = obj_order;
strncpy(info.block_name_prefix, ictx->object_prefix.c_str(),
RBD_MAX_BLOCK_NAME_SIZE);
info.block_name_prefix[RBD_MAX_BLOCK_NAME_SIZE - 1] = '\0';
// clear deprecated fields
info.parent_pool = -1L;
info.parent_name[0] = '\0';
}
uint64_t oid_to_object_no(const string& oid, const string& object_prefix)
{
istringstream iss(oid);
// skip object prefix and separator
iss.ignore(object_prefix.length() + 1);
uint64_t num;
iss >> std::hex >> num;
return num;
}
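// Illustrative example (added comment, not in the original source): with an
// object prefix of "rb.0.1021.6b8b4567", an oid such as
// "rb.0.1021.6b8b4567.000000003a2f" parses to object number 0x3a2f -- the
// prefix plus the '.' separator is skipped and the remainder is read as hex.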
int read_header_bl(IoCtx& io_ctx, const string& header_oid,
bufferlist& header, uint64_t *ver)
{
int r;
uint64_t off = 0;
#define READ_SIZE 4096
do {
bufferlist bl;
r = io_ctx.read(header_oid, bl, READ_SIZE, off);
if (r < 0)
return r;
header.claim_append(bl);
off += r;
} while (r == READ_SIZE);
static_assert(sizeof(RBD_HEADER_TEXT) == sizeof(RBD_MIGRATE_HEADER_TEXT),
"length of rbd headers must be the same");
if (header.length() < sizeof(RBD_HEADER_TEXT) ||
(memcmp(RBD_HEADER_TEXT, header.c_str(),
sizeof(RBD_HEADER_TEXT)) != 0 &&
memcmp(RBD_MIGRATE_HEADER_TEXT, header.c_str(),
sizeof(RBD_MIGRATE_HEADER_TEXT)) != 0)) {
CephContext *cct = (CephContext *)io_ctx.cct();
lderr(cct) << "unrecognized header format" << dendl;
return -ENXIO;
}
if (ver)
*ver = io_ctx.get_last_version();
return 0;
}
int read_header(IoCtx& io_ctx, const string& header_oid,
struct rbd_obj_header_ondisk *header, uint64_t *ver)
{
bufferlist header_bl;
int r = read_header_bl(io_ctx, header_oid, header_bl, ver);
if (r < 0)
return r;
if (header_bl.length() < (int)sizeof(*header))
return -EIO;
memcpy(header, header_bl.c_str(), sizeof(*header));
return 0;
}
int tmap_set(IoCtx& io_ctx, const string& imgname)
{
bufferlist cmdbl, emptybl;
__u8 c = CEPH_OSD_TMAP_SET;
encode(c, cmdbl);
encode(imgname, cmdbl);
encode(emptybl, cmdbl);
return io_ctx.tmap_update(RBD_DIRECTORY, cmdbl);
}
int tmap_rm(IoCtx& io_ctx, const string& imgname)
{
bufferlist cmdbl;
__u8 c = CEPH_OSD_TMAP_RM;
encode(c, cmdbl);
encode(imgname, cmdbl);
return io_ctx.tmap_update(RBD_DIRECTORY, cmdbl);
}
typedef boost::variant<std::string,uint64_t> image_option_value_t;
typedef std::map<int,image_option_value_t> image_options_t;
typedef std::shared_ptr<image_options_t> image_options_ref;
enum image_option_type_t {
STR,
UINT64,
};
const std::map<int, image_option_type_t> IMAGE_OPTIONS_TYPE_MAPPING = {
{RBD_IMAGE_OPTION_FORMAT, UINT64},
{RBD_IMAGE_OPTION_FEATURES, UINT64},
{RBD_IMAGE_OPTION_ORDER, UINT64},
{RBD_IMAGE_OPTION_STRIPE_UNIT, UINT64},
{RBD_IMAGE_OPTION_STRIPE_COUNT, UINT64},
{RBD_IMAGE_OPTION_JOURNAL_ORDER, UINT64},
{RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH, UINT64},
{RBD_IMAGE_OPTION_JOURNAL_POOL, STR},
{RBD_IMAGE_OPTION_FEATURES_SET, UINT64},
{RBD_IMAGE_OPTION_FEATURES_CLEAR, UINT64},
{RBD_IMAGE_OPTION_DATA_POOL, STR},
{RBD_IMAGE_OPTION_FLATTEN, UINT64},
{RBD_IMAGE_OPTION_CLONE_FORMAT, UINT64},
{RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE, UINT64},
};
std::string image_option_name(int optname) {
switch (optname) {
case RBD_IMAGE_OPTION_FORMAT:
return "format";
case RBD_IMAGE_OPTION_FEATURES:
return "features";
case RBD_IMAGE_OPTION_ORDER:
return "order";
case RBD_IMAGE_OPTION_STRIPE_UNIT:
return "stripe_unit";
case RBD_IMAGE_OPTION_STRIPE_COUNT:
return "stripe_count";
case RBD_IMAGE_OPTION_JOURNAL_ORDER:
return "journal_order";
case RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH:
return "journal_splay_width";
case RBD_IMAGE_OPTION_JOURNAL_POOL:
return "journal_pool";
case RBD_IMAGE_OPTION_FEATURES_SET:
return "features_set";
case RBD_IMAGE_OPTION_FEATURES_CLEAR:
return "features_clear";
case RBD_IMAGE_OPTION_DATA_POOL:
return "data_pool";
case RBD_IMAGE_OPTION_FLATTEN:
return "flatten";
case RBD_IMAGE_OPTION_CLONE_FORMAT:
return "clone_format";
case RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE:
return "mirror_image_mode";
default:
return "unknown (" + stringify(optname) + ")";
}
}
void image_options_create(rbd_image_options_t* opts)
{
image_options_ref* opts_ = new image_options_ref(new image_options_t());
*opts = static_cast<rbd_image_options_t>(opts_);
}
void image_options_create_ref(rbd_image_options_t* opts,
rbd_image_options_t orig)
{
image_options_ref* orig_ = static_cast<image_options_ref*>(orig);
image_options_ref* opts_ = new image_options_ref(*orig_);
*opts = static_cast<rbd_image_options_t>(opts_);
}
void image_options_copy(rbd_image_options_t* opts,
const ImageOptions &orig)
{
image_options_ref* opts_ = new image_options_ref(new image_options_t());
*opts = static_cast<rbd_image_options_t>(opts_);
std::string str_val;
uint64_t uint64_val;
for (auto &i : IMAGE_OPTIONS_TYPE_MAPPING) {
switch (i.second) {
case STR:
if (orig.get(i.first, &str_val) == 0) {
image_options_set(*opts, i.first, str_val);
}
continue;
case UINT64:
if (orig.get(i.first, &uint64_val) == 0) {
image_options_set(*opts, i.first, uint64_val);
}
continue;
}
}
}
void image_options_destroy(rbd_image_options_t opts)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
delete opts_;
}
int image_options_set(rbd_image_options_t opts, int optname,
const std::string& optval)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end() || i->second != STR) {
return -EINVAL;
}
(*opts_->get())[optname] = optval;
return 0;
}
int image_options_set(rbd_image_options_t opts, int optname, uint64_t optval)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end() || i->second != UINT64) {
return -EINVAL;
}
(*opts_->get())[optname] = optval;
return 0;
}
int image_options_get(rbd_image_options_t opts, int optname,
std::string* optval)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end() || i->second != STR) {
return -EINVAL;
}
image_options_t::const_iterator j = (*opts_)->find(optname);
if (j == (*opts_)->end()) {
return -ENOENT;
}
*optval = boost::get<std::string>(j->second);
return 0;
}
int image_options_get(rbd_image_options_t opts, int optname, uint64_t* optval)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end() || i->second != UINT64) {
return -EINVAL;
}
image_options_t::const_iterator j = (*opts_)->find(optname);
if (j == (*opts_)->end()) {
return -ENOENT;
}
*optval = boost::get<uint64_t>(j->second);
return 0;
}
int image_options_is_set(rbd_image_options_t opts, int optname,
bool* is_set)
{
if (IMAGE_OPTIONS_TYPE_MAPPING.find(optname) ==
IMAGE_OPTIONS_TYPE_MAPPING.end()) {
return -EINVAL;
}
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
*is_set = ((*opts_)->find(optname) != (*opts_)->end());
return 0;
}
int image_options_unset(rbd_image_options_t opts, int optname)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end()) {
ceph_assert((*opts_)->find(optname) == (*opts_)->end());
return -EINVAL;
}
image_options_t::const_iterator j = (*opts_)->find(optname);
if (j == (*opts_)->end()) {
return -ENOENT;
}
(*opts_)->erase(j);
return 0;
}
void image_options_clear(rbd_image_options_t opts)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
(*opts_)->clear();
}
bool image_options_is_empty(rbd_image_options_t opts)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
return (*opts_)->empty();
}
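// Hedged usage sketch (added comment, not part of the original source): the
// image_options_* helpers above back the rbd_image_options_t C API.  A minimal
// round trip:
//
//   rbd_image_options_t opts;
//   image_options_create(&opts);
//   image_options_set(opts, RBD_IMAGE_OPTION_ORDER, static_cast<uint64_t>(22));
//   uint64_t order = 0;
//   int r = image_options_get(opts, RBD_IMAGE_OPTION_ORDER, &order);  // r == 0
//   image_options_destroy(opts);
//
// Setting an option with a mismatched type (e.g. a string for ORDER) returns
// -EINVAL, and reading an unset option returns -ENOENT, as implemented above.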
int create_v1(IoCtx& io_ctx, const char *imgname, uint64_t size, int order)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << __func__ << " " << &io_ctx << " name = " << imgname
<< " size = " << size << " order = " << order << dendl;
int r = validate_pool(io_ctx, cct);
if (r < 0) {
return r;
}
if (!io_ctx.get_namespace().empty()) {
lderr(cct) << "attempting to add v1 image to namespace" << dendl;
return -EINVAL;
}
ldout(cct, 2) << "adding rbd image to directory..." << dendl;
r = tmap_set(io_ctx, imgname);
if (r < 0) {
lderr(cct) << "error adding image to directory: " << cpp_strerror(r)
<< dendl;
return r;
}
Rados rados(io_ctx);
uint64_t bid = rados.get_instance_id();
ldout(cct, 2) << "creating rbd image..." << dendl;
struct rbd_obj_header_ondisk header;
init_rbd_header(header, size, order, bid);
bufferlist bl;
bl.append((const char *)&header, sizeof(header));
string header_oid = util::old_header_name(imgname);
r = io_ctx.write(header_oid, bl, bl.length(), 0);
if (r < 0) {
lderr(cct) << "Error writing image header: " << cpp_strerror(r)
<< dendl;
int remove_r = tmap_rm(io_ctx, imgname);
if (remove_r < 0) {
lderr(cct) << "Could not remove image from directory after "
<< "header creation failed: "
<< cpp_strerror(remove_r) << dendl;
}
return r;
}
ldout(cct, 2) << "done." << dendl;
return 0;
}
int create(librados::IoCtx& io_ctx, const char *imgname, uint64_t size,
int *order)
{
uint64_t order_ = *order;
ImageOptions opts;
int r = opts.set(RBD_IMAGE_OPTION_ORDER, order_);
ceph_assert(r == 0);
r = create(io_ctx, imgname, "", size, opts, "", "", false);
int r1 = opts.get(RBD_IMAGE_OPTION_ORDER, &order_);
ceph_assert(r1 == 0);
*order = order_;
return r;
}
int create(IoCtx& io_ctx, const char *imgname, uint64_t size,
bool old_format, uint64_t features, int *order,
uint64_t stripe_unit, uint64_t stripe_count)
{
if (!order)
return -EINVAL;
uint64_t order_ = *order;
uint64_t format = old_format ? 1 : 2;
ImageOptions opts;
int r;
r = opts.set(RBD_IMAGE_OPTION_FORMAT, format);
ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_FEATURES, features);
ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_ORDER, order_);
ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
ceph_assert(r == 0);
r = create(io_ctx, imgname, "", size, opts, "", "", false);
int r1 = opts.get(RBD_IMAGE_OPTION_ORDER, &order_);
ceph_assert(r1 == 0);
*order = order_;
return r;
}
int create(IoCtx& io_ctx, const std::string &image_name,
const std::string &image_id, uint64_t size,
ImageOptions& opts,
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid,
bool skip_mirror_enable)
{
std::string id(image_id);
if (id.empty()) {
id = util::generate_image_id(io_ctx);
}
CephContext *cct = (CephContext *)io_ctx.cct();
uint64_t option;
if (opts.get(RBD_IMAGE_OPTION_FLATTEN, &option) == 0) {
lderr(cct) << "create does not support 'flatten' image option" << dendl;
return -EINVAL;
}
if (opts.get(RBD_IMAGE_OPTION_CLONE_FORMAT, &option) == 0) {
lderr(cct) << "create does not support 'clone_format' image option"
<< dendl;
return -EINVAL;
}
ldout(cct, 10) << __func__ << " name=" << image_name << ", "
<< "id= " << id << ", "
<< "size=" << size << ", opts=" << opts << dendl;
uint64_t format;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0)
format = cct->_conf.get_val<uint64_t>("rbd_default_format");
bool old_format = format == 1;
// make sure it doesn't already exist, in either format
int r = detect_format(io_ctx, image_name, NULL, NULL);
if (r != -ENOENT) {
if (r) {
lderr(cct) << "Could not tell if " << image_name << " already exists"
<< dendl;
return r;
}
lderr(cct) << "rbd image " << image_name << " already exists" << dendl;
return -EEXIST;
}
uint64_t order = 0;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0 || order == 0) {
order = cct->_conf.get_val<uint64_t>("rbd_default_order");
}
r = image::CreateRequest<>::validate_order(cct, order);
if (r < 0) {
return r;
}
if (old_format) {
if ( !getenv("RBD_FORCE_ALLOW_V1") ) {
lderr(cct) << "Format 1 image creation unsupported. " << dendl;
return -EINVAL;
}
lderr(cct) << "Forced V1 image creation. " << dendl;
r = create_v1(io_ctx, image_name.c_str(), size, order);
} else {
AsioEngine asio_engine(io_ctx);
ConfigProxy config{cct->_conf};
api::Config<>::apply_pool_overrides(io_ctx, &config);
uint32_t create_flags = 0U;
uint64_t mirror_image_mode = RBD_MIRROR_IMAGE_MODE_JOURNAL;
if (skip_mirror_enable) {
create_flags = image::CREATE_FLAG_SKIP_MIRROR_ENABLE;
} else if (opts.get(RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE,
&mirror_image_mode) == 0) {
create_flags = image::CREATE_FLAG_FORCE_MIRROR_ENABLE;
}
C_SaferCond cond;
image::CreateRequest<> *req = image::CreateRequest<>::create(
config, io_ctx, image_name, id, size, opts, create_flags,
static_cast<cls::rbd::MirrorImageMode>(mirror_image_mode),
non_primary_global_image_id, primary_mirror_uuid,
asio_engine.get_work_queue(), &cond);
req->send();
r = cond.wait();
}
int r1 = opts.set(RBD_IMAGE_OPTION_ORDER, order);
ceph_assert(r1 == 0);
return r;
}
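// Hedged usage sketch (added comment, not part of the original source): a
// typical v2 image creation through the ImageOptions path above, assuming
// `io_ctx` is an open librados::IoCtx for the target pool and "image-name" is
// an illustrative name:
//
//   librbd::ImageOptions opts;
//   opts.set(RBD_IMAGE_OPTION_FORMAT, static_cast<uint64_t>(2));
//   opts.set(RBD_IMAGE_OPTION_ORDER, static_cast<uint64_t>(22));   // 4 MiB objects
//   int r = librbd::create(io_ctx, "image-name", "", 1ULL << 30, opts, "", "", false);
//
// Format 1 creation is refused unless the RBD_FORCE_ALLOW_V1 environment
// variable is set, as checked above.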
/*
* Parent may be in different pool, hence different IoCtx
*/
int clone(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name,
uint64_t features, int *c_order,
uint64_t stripe_unit, int stripe_count)
{
uint64_t order = *c_order;
ImageOptions opts;
opts.set(RBD_IMAGE_OPTION_FEATURES, features);
opts.set(RBD_IMAGE_OPTION_ORDER, order);
opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
int r = clone(p_ioctx, nullptr, p_name, p_snap_name, c_ioctx, nullptr,
c_name, opts, "", "");
opts.get(RBD_IMAGE_OPTION_ORDER, &order);
*c_order = order;
return r;
}
int clone(IoCtx& p_ioctx, const char *p_id, const char *p_name,
const char *p_snap_name, IoCtx& c_ioctx, const char *c_id,
const char *c_name, ImageOptions& c_opts,
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid)
{
ceph_assert((p_id == nullptr) ^ (p_name == nullptr));
CephContext *cct = (CephContext *)p_ioctx.cct();
if (p_snap_name == nullptr) {
lderr(cct) << "image to be cloned must be a snapshot" << dendl;
return -EINVAL;
}
uint64_t flatten;
if (c_opts.get(RBD_IMAGE_OPTION_FLATTEN, &flatten) == 0) {
lderr(cct) << "clone does not support 'flatten' image option" << dendl;
return -EINVAL;
}
int r;
std::string parent_id;
if (p_id == nullptr) {
r = cls_client::dir_get_id(&p_ioctx, RBD_DIRECTORY, p_name,
&parent_id);
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "failed to retrieve parent image id: "
<< cpp_strerror(r) << dendl;
}
return r;
}
} else {
parent_id = p_id;
}
std::string clone_id;
if (c_id == nullptr) {
clone_id = util::generate_image_id(c_ioctx);
} else {
clone_id = c_id;
}
ldout(cct, 10) << __func__ << " "
<< "c_name=" << c_name << ", "
<< "c_id= " << clone_id << ", "
<< "c_opts=" << c_opts << dendl;
ConfigProxy config{reinterpret_cast<CephContext *>(c_ioctx.cct())->_conf};
api::Config<>::apply_pool_overrides(c_ioctx, &config);
AsioEngine asio_engine(p_ioctx);
C_SaferCond cond;
auto *req = image::CloneRequest<>::create(
config, p_ioctx, parent_id, p_snap_name,
{cls::rbd::UserSnapshotNamespace{}}, CEPH_NOSNAP, c_ioctx, c_name,
clone_id, c_opts, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
non_primary_global_image_id, primary_mirror_uuid,
asio_engine.get_work_queue(), &cond);
req->send();
r = cond.wait();
if (r < 0) {
return r;
}
return 0;
}
int rename(IoCtx& io_ctx, const char *srcname, const char *dstname)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << "rename " << &io_ctx << " " << srcname << " -> "
<< dstname << dendl;
ImageCtx *ictx = new ImageCtx(srcname, "", "", io_ctx, false);
int r = ictx->state->open(0);
if (r < 0) {
lderr(cct) << "error opening source image: " << cpp_strerror(r) << dendl;
return r;
}
BOOST_SCOPE_EXIT((ictx)) {
ictx->state->close();
} BOOST_SCOPE_EXIT_END
return ictx->operations->rename(dstname);
}
int info(ImageCtx *ictx, image_info_t& info, size_t infosize)
{
ldout(ictx->cct, 20) << "info " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
image_info(ictx, info, infosize);
return 0;
}
int get_old_format(ImageCtx *ictx, uint8_t *old)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
*old = ictx->old_format;
return 0;
}
int get_size(ImageCtx *ictx, uint64_t *size)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l2{ictx->image_lock};
*size = ictx->get_area_size(io::ImageArea::DATA);
return 0;
}
int get_features(ImageCtx *ictx, uint64_t *features)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
*features = ictx->features;
return 0;
}
int get_overlap(ImageCtx *ictx, uint64_t *overlap)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock image_locker{ictx->image_lock};
uint64_t raw_overlap;
r = ictx->get_parent_overlap(ictx->snap_id, &raw_overlap);
if (r < 0) {
return r;
}
auto _overlap = ictx->reduce_parent_overlap(raw_overlap, false);
*overlap = (_overlap.second == io::ImageArea::DATA ? _overlap.first : 0);
return 0;
}
int get_flags(ImageCtx *ictx, uint64_t *flags)
{
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock l2{ictx->image_lock};
return ictx->get_flags(ictx->snap_id, flags);
}
int set_image_notification(ImageCtx *ictx, int fd, int type)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << " " << ictx << " fd " << fd << " type " << type << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
if (ictx->event_socket.is_valid())
return -EINVAL;
return ictx->event_socket.init(fd, type);
}
int is_exclusive_lock_owner(ImageCtx *ictx, bool *is_owner)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
*is_owner = false;
std::shared_lock owner_locker{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
return 0;
}
// might have been blocklisted by peer -- ensure we still own
// the lock by pinging the OSD
int r = ictx->exclusive_lock->assert_header_locked();
if (r == -EBUSY || r == -ENOENT) {
return 0;
} else if (r < 0) {
return r;
}
*is_owner = true;
return 0;
}
int lock_acquire(ImageCtx *ictx, rbd_lock_mode_t lock_mode)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << ", "
<< "lock_mode=" << lock_mode << dendl;
if (lock_mode != RBD_LOCK_MODE_EXCLUSIVE) {
return -EOPNOTSUPP;
}
C_SaferCond lock_ctx;
{
std::unique_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return -EINVAL;
}
if (ictx->get_exclusive_lock_policy()->may_auto_request_lock()) {
ictx->set_exclusive_lock_policy(
new exclusive_lock::StandardPolicy(ictx));
}
if (ictx->exclusive_lock->is_lock_owner()) {
return 0;
}
ictx->exclusive_lock->acquire_lock(&lock_ctx);
}
int r = lock_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to request exclusive lock: " << cpp_strerror(r)
<< dendl;
return r;
}
std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
return -EINVAL;
} else if (!ictx->exclusive_lock->is_lock_owner()) {
lderr(cct) << "failed to acquire exclusive lock" << dendl;
return ictx->exclusive_lock->get_unlocked_op_error();
}
return 0;
}
int lock_release(ImageCtx *ictx)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
C_SaferCond lock_ctx;
{
std::unique_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr ||
!ictx->exclusive_lock->is_lock_owner()) {
lderr(cct) << "not exclusive lock owner" << dendl;
return -EINVAL;
}
ictx->exclusive_lock->release_lock(&lock_ctx);
}
int r = lock_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to release exclusive lock: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
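// Hedged usage sketch (added comment, not part of the original source): the
// exclusive-lock helpers above are normally used in an acquire/operate/release
// pattern on an open ImageCtx with the exclusive-lock feature enabled:
//
//   int r = lock_acquire(ictx, RBD_LOCK_MODE_EXCLUSIVE);
//   if (r == 0) {
//     // ... perform work that requires lock ownership ...
//     r = lock_release(ictx);
//   }
//
// lock_acquire() also switches the image to the StandardPolicy so the lock is
// not transparently handed off to a competing client afterwards.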
int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode,
std::list<std::string> *lock_owners)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
managed_lock::Locker locker;
C_SaferCond get_owner_ctx;
{
std::shared_lock owner_locker{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return -EINVAL;
}
ictx->exclusive_lock->get_locker(&locker, &get_owner_ctx);
}
int r = get_owner_ctx.wait();
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "failed to determine current lock owner: "
<< cpp_strerror(r) << dendl;
return r;
}
*lock_mode = RBD_LOCK_MODE_EXCLUSIVE;
lock_owners->clear();
lock_owners->emplace_back(locker.address);
return 0;
}
int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode,
const std::string &lock_owner) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << ", "
<< "lock_mode=" << lock_mode << ", "
<< "lock_owner=" << lock_owner << dendl;
if (lock_mode != RBD_LOCK_MODE_EXCLUSIVE) {
return -EOPNOTSUPP;
}
if (ictx->read_only) {
return -EROFS;
}
managed_lock::Locker locker;
C_SaferCond get_owner_ctx;
{
std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return -EINVAL;
}
ictx->exclusive_lock->get_locker(&locker, &get_owner_ctx);
}
int r = get_owner_ctx.wait();
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "failed to determine current lock owner: "
<< cpp_strerror(r) << dendl;
return r;
}
if (locker.address != lock_owner) {
return -EBUSY;
}
C_SaferCond break_ctx;
{
std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return -EINVAL;
}
ictx->exclusive_lock->break_lock(locker, true, &break_ctx);
}
r = break_ctx.wait();
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "failed to break lock: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
int copy(ImageCtx *src, IoCtx& dest_md_ctx, const char *destname,
ImageOptions& opts, ProgressContext &prog_ctx, size_t sparse_size)
{
CephContext *cct = (CephContext *)dest_md_ctx.cct();
uint64_t option;
if (opts.get(RBD_IMAGE_OPTION_FLATTEN, &option) == 0) {
lderr(cct) << "copy does not support 'flatten' image option" << dendl;
return -EINVAL;
}
if (opts.get(RBD_IMAGE_OPTION_CLONE_FORMAT, &option) == 0) {
lderr(cct) << "copy does not support 'clone_format' image option"
<< dendl;
return -EINVAL;
}
ldout(cct, 20) << "copy " << src->name
<< (src->snap_name.length() ? "@" + src->snap_name : "")
<< " -> " << destname << " opts = " << opts << dendl;
src->image_lock.lock_shared();
uint64_t features = src->features;
uint64_t src_size = src->get_image_size(src->snap_id);
src->image_lock.unlock_shared();
uint64_t format = 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, format);
}
uint64_t stripe_unit = src->stripe_unit;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &stripe_unit) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
}
uint64_t stripe_count = src->stripe_count;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &stripe_count) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
}
uint64_t order = src->order;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) {
opts.set(RBD_IMAGE_OPTION_ORDER, order);
}
if (opts.get(RBD_IMAGE_OPTION_FEATURES, &features) != 0) {
opts.set(RBD_IMAGE_OPTION_FEATURES, features);
}
if (features & ~RBD_FEATURES_ALL) {
lderr(cct) << "librbd does not support requested features" << dendl;
return -ENOSYS;
}
int r = create(dest_md_ctx, destname, "", src_size, opts, "", "", false);
if (r < 0) {
lderr(cct) << "header creation failed" << dendl;
return r;
}
opts.set(RBD_IMAGE_OPTION_ORDER, static_cast<uint64_t>(order));
ImageCtx *dest = new librbd::ImageCtx(destname, "", nullptr, dest_md_ctx,
false);
r = dest->state->open(0);
if (r < 0) {
lderr(cct) << "failed to read newly created header" << dendl;
return r;
}
r = copy(src, dest, prog_ctx, sparse_size);
int close_r = dest->state->close();
if (r == 0 && close_r < 0) {
r = close_r;
}
return r;
}
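  // Completion helpers for the image copy below: C_CopyRead splits each read
  // stripe period into sparse writes against the destination, while
  // C_CopyWrite frees the per-write buffer once its AIO write completes.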
class C_CopyWrite : public Context {
public:
C_CopyWrite(bufferlist *bl, Context* ctx)
: m_bl(bl), m_ctx(ctx) {}
void finish(int r) override {
delete m_bl;
m_ctx->complete(r);
}
private:
bufferlist *m_bl;
Context *m_ctx;
};
class C_CopyRead : public Context {
public:
C_CopyRead(SimpleThrottle *throttle, ImageCtx *dest, uint64_t offset,
bufferlist *bl, size_t sparse_size)
: m_throttle(throttle), m_dest(dest), m_offset(offset), m_bl(bl),
m_sparse_size(sparse_size) {
m_throttle->start_op();
}
void finish(int r) override {
if (r < 0) {
lderr(m_dest->cct) << "error reading from source image at offset "
<< m_offset << ": " << cpp_strerror(r) << dendl;
delete m_bl;
m_throttle->end_op(r);
return;
}
ceph_assert(m_bl->length() == (size_t)r);
if (m_bl->is_zero()) {
delete m_bl;
m_throttle->end_op(r);
return;
}
if (!m_sparse_size) {
m_sparse_size = (1 << m_dest->order);
}
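      // every sparse write issued for this read is a sub-context of a single
      // gather, so the throttle op completes only after all chunks are written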
auto *throttle = m_throttle;
auto *end_op_ctx = new LambdaContext([throttle](int r) {
throttle->end_op(r);
});
auto gather_ctx = new C_Gather(m_dest->cct, end_op_ctx);
m_bl->rebuild(buffer::ptr_node::create(m_bl->length()));
size_t write_offset = 0;
size_t write_length = 0;
size_t offset = 0;
size_t length = m_bl->length();
const auto& m_ptr = m_bl->front();
while (offset < length) {
if (util::calc_sparse_extent(m_ptr,
m_sparse_size,
length,
&write_offset,
&write_length,
&offset)) {
bufferlist *write_bl = new bufferlist();
write_bl->push_back(
buffer::ptr_node::create(m_ptr, write_offset, write_length));
Context *ctx = new C_CopyWrite(write_bl, gather_ctx->new_sub());
auto comp = io::AioCompletion::create(ctx);
// coordinate through AIO WQ to ensure lock is acquired if needed
api::Io<>::aio_write(*m_dest, comp, m_offset + write_offset,
write_length, std::move(*write_bl),
LIBRADOS_OP_FLAG_FADVISE_DONTNEED,
std::move(read_trace));
write_offset = offset;
write_length = 0;
}
}
delete m_bl;
ceph_assert(gather_ctx->get_sub_created_count() > 0);
gather_ctx->activate();
}
ZTracer::Trace read_trace;
private:
SimpleThrottle *m_throttle;
ImageCtx *m_dest;
uint64_t m_offset;
bufferlist *m_bl;
size_t m_sparse_size;
};
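  // Copy image data between two already open images, one stripe period at a
  // time, after flushing the source and copying its metadata.  Periods whose
  // backing objects are known (via the object map) to be absent are skipped.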
int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx, size_t sparse_size)
{
src->image_lock.lock_shared();
uint64_t src_size = src->get_image_size(src->snap_id);
src->image_lock.unlock_shared();
dest->image_lock.lock_shared();
uint64_t dest_size = dest->get_image_size(dest->snap_id);
dest->image_lock.unlock_shared();
CephContext *cct = src->cct;
if (dest_size < src_size) {
lderr(cct) << " src size " << src_size << " > dest size "
<< dest_size << dendl;
return -EINVAL;
}
// ensure previous writes are visible to dest
C_SaferCond flush_ctx;
{
auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, src,
io::AIO_TYPE_FLUSH);
auto req = io::ImageDispatchSpec::create_flush(
*src, io::IMAGE_DISPATCH_LAYER_INTERNAL_START,
aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
req->send();
}
int r = flush_ctx.wait();
if (r < 0) {
return r;
}
C_SaferCond ctx;
auto req = deep_copy::MetadataCopyRequest<>::create(
src, dest, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed to copy metadata: " << cpp_strerror(r) << dendl;
return r;
}
ZTracer::Trace trace;
if (src->blkin_trace_all) {
trace.init("copy", &src->trace_endpoint);
}
SimpleThrottle throttle(src->config.get_val<uint64_t>("rbd_concurrent_management_ops"), false);
uint64_t period = src->get_stripe_period();
unsigned fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
LIBRADOS_OP_FLAG_FADVISE_NOCACHE;
uint64_t object_id = 0;
for (uint64_t offset = 0; offset < src_size; offset += period) {
if (throttle.pending_error()) {
return throttle.wait_for_ret();
}
{
std::shared_lock image_locker{src->image_lock};
if (src->object_map != nullptr) {
bool skip = true;
// each period is related to src->stripe_count objects, check them all
for (uint64_t i=0; i < src->stripe_count; i++) {
if (object_id < src->object_map->size() &&
src->object_map->object_may_exist(object_id)) {
skip = false;
}
++object_id;
}
if (skip) continue;
} else {
object_id += src->stripe_count;
}
}
uint64_t len = std::min(period, src_size - offset);
bufferlist *bl = new bufferlist();
auto ctx = new C_CopyRead(&throttle, dest, offset, bl, sparse_size);
auto comp = io::AioCompletion::create_and_start<Context>(
ctx, src, io::AIO_TYPE_READ);
auto req = io::ImageDispatchSpec::create_read(
*src, io::IMAGE_DISPATCH_LAYER_NONE, comp,
{{offset, len}}, io::ImageArea::DATA, io::ReadResult{bl},
src->get_data_io_context(), fadvise_flags, 0, trace);
ctx->read_trace = trace;
req->send();
prog_ctx.update_progress(offset, src_size);
}
r = throttle.wait_for_ret();
if (r >= 0)
prog_ctx.update_progress(src_size, src_size);
return r;
}
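  // cooperative (advisory) locking helpers implemented on top of cls_lock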
int list_lockers(ImageCtx *ictx,
std::list<locker_t> *lockers,
bool *exclusive,
string *tag)
{
ldout(ictx->cct, 20) << "list_locks on image " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock locker{ictx->image_lock};
if (exclusive)
*exclusive = ictx->exclusive_locked;
if (tag)
*tag = ictx->lock_tag;
if (lockers) {
lockers->clear();
map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t>::const_iterator it;
for (it = ictx->lockers.begin(); it != ictx->lockers.end(); ++it) {
locker_t locker;
locker.client = stringify(it->first.locker);
locker.cookie = it->first.cookie;
locker.address = it->second.addr.get_legacy_str();
lockers->push_back(locker);
}
}
return 0;
}
int lock(ImageCtx *ictx, bool exclusive, const string& cookie,
const string& tag)
{
ldout(ictx->cct, 20) << "lock image " << ictx << " exclusive=" << exclusive
<< " cookie='" << cookie << "' tag='" << tag << "'"
<< dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
/**
* If we wanted we could do something more intelligent, like local
* checks that we think we will succeed. But for now, let's not
* duplicate that code.
*/
{
std::shared_lock locker{ictx->image_lock};
r = rados::cls::lock::lock(&ictx->md_ctx, ictx->header_oid, RBD_LOCK_NAME,
exclusive ? ClsLockType::EXCLUSIVE : ClsLockType::SHARED,
cookie, tag, "", utime_t(), 0);
if (r < 0) {
return r;
}
}
ictx->notify_update();
return 0;
}
int unlock(ImageCtx *ictx, const string& cookie)
{
ldout(ictx->cct, 20) << "unlock image " << ictx
<< " cookie='" << cookie << "'" << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
{
std::shared_lock locker{ictx->image_lock};
r = rados::cls::lock::unlock(&ictx->md_ctx, ictx->header_oid,
RBD_LOCK_NAME, cookie);
if (r < 0) {
return r;
}
}
ictx->notify_update();
return 0;
}
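  // Break an advisory lock held by another client, optionally blocklisting
  // that client first (controlled by rbd_blocklist_on_break_lock).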
int break_lock(ImageCtx *ictx, const string& client,
const string& cookie)
{
ldout(ictx->cct, 20) << "break_lock image " << ictx << " client='" << client
<< "' cookie='" << cookie << "'" << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
entity_name_t lock_client;
if (!lock_client.parse(client)) {
lderr(ictx->cct) << "Unable to parse client '" << client
<< "'" << dendl;
return -EINVAL;
}
if (ictx->config.get_val<bool>("rbd_blocklist_on_break_lock")) {
typedef std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> Lockers;
Lockers lockers;
ClsLockType lock_type;
std::string lock_tag;
r = rados::cls::lock::get_lock_info(&ictx->md_ctx, ictx->header_oid,
RBD_LOCK_NAME, &lockers, &lock_type,
&lock_tag);
if (r < 0) {
lderr(ictx->cct) << "unable to retrieve lock info: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string client_address;
for (Lockers::iterator it = lockers.begin();
it != lockers.end(); ++it) {
if (it->first.locker == lock_client) {
client_address = it->second.addr.get_legacy_str();
break;
}
}
if (client_address.empty()) {
return -ENOENT;
}
librados::Rados rados(ictx->md_ctx);
r = rados.blocklist_add(
client_address,
ictx->config.get_val<uint64_t>("rbd_blocklist_expire_seconds"));
if (r < 0) {
lderr(ictx->cct) << "unable to blocklist client: " << cpp_strerror(r)
<< dendl;
return r;
}
}
r = rados::cls::lock::break_lock(&ictx->md_ctx, ictx->header_oid,
RBD_LOCK_NAME, cookie, lock_client);
if (r < 0)
return r;
ictx->notify_update();
return 0;
}
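  // Bridge a librbd AIO completion back to a plain Context callback.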
void rbd_ctx_cb(completion_t cb, void *arg)
{
Context *ctx = reinterpret_cast<Context *>(arg);
auto comp = reinterpret_cast<io::AioCompletion *>(cb);
ctx->complete(comp->get_return_value());
comp->release();
}
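  // Synchronously read [off, off + len) in stripe-period sized chunks,
  // invoking 'cb' for each chunk read; returns the total number of bytes read.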
int64_t read_iterate(ImageCtx *ictx, uint64_t off, uint64_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
coarse_mono_time start_time;
ceph::timespan elapsed;
ldout(ictx->cct, 20) << "read_iterate " << ictx << " off = " << off
<< " len = " << len << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
uint64_t mylen = len;
ictx->image_lock.lock_shared();
r = clip_io(ictx, off, &mylen, io::ImageArea::DATA);
ictx->image_lock.unlock_shared();
if (r < 0)
return r;
int64_t total_read = 0;
uint64_t period = ictx->get_stripe_period();
uint64_t left = mylen;
ZTracer::Trace trace;
if (ictx->blkin_trace_all) {
trace.init("read_iterate", &ictx->trace_endpoint);
}
std::shared_lock owner_locker{ictx->owner_lock};
start_time = coarse_mono_clock::now();
while (left > 0) {
uint64_t period_off = off - (off % period);
uint64_t read_len = std::min(period_off + period - off, left);
bufferlist bl;
C_SaferCond ctx;
auto c = io::AioCompletion::create_and_start(&ctx, ictx,
io::AIO_TYPE_READ);
auto req = io::ImageDispatchSpec::create_read(
*ictx, io::IMAGE_DISPATCH_LAYER_NONE, c,
{{off, read_len}}, io::ImageArea::DATA, io::ReadResult{&bl},
ictx->get_data_io_context(), 0, 0, trace);
req->send();
int ret = ctx.wait();
if (ret < 0) {
return ret;
}
r = cb(total_read, ret, bl.c_str(), arg);
if (r < 0) {
return r;
}
total_read += ret;
left -= ret;
off += ret;
}
elapsed = coarse_mono_clock::now() - start_time;
ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed);
ictx->perfcounter->inc(l_librbd_rd);
ictx->perfcounter->inc(l_librbd_rd_bytes, mylen);
return total_read;
}
// validate extent against area size; clip to area size if necessary
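  // (e.g. for a 100 MiB area, off = 99 MiB with len = 4 MiB is clipped to
  // len = 1 MiB, while a non-empty request starting at or beyond the end
  // fails with -EINVAL)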
int clip_io(ImageCtx* ictx, uint64_t off, uint64_t* len, io::ImageArea area) {
ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
if (ictx->snap_id != CEPH_NOSNAP &&
ictx->get_snap_info(ictx->snap_id) == nullptr) {
return -ENOENT;
}
// special-case "len == 0" requests: always valid
if (*len == 0)
return 0;
uint64_t area_size = ictx->get_area_size(area);
// can't start past end
if (off >= area_size)
return -EINVAL;
// clip requests that extend past end to just end
if ((off + *len) > area_size)
*len = (size_t)(area_size - off);
return 0;
}
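  // Invalidate the image's cached data via the image dispatcher.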
int invalidate_cache(ImageCtx *ictx)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << "invalidate_cache " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond ctx;
{
ictx->io_image_dispatcher->invalidate_cache(&ctx);
}
r = ctx.wait();
if (r < 0) {
ldout(cct, 20) << "failed to invalidate image cache" << dendl;
return r;
}
ictx->perfcounter->inc(l_librbd_invalidate_cache);
    // If the image carries the DIRTY_CACHE feature but this client does not
    // own the exclusive lock (i.e. no write-back cache is initialized here),
    // ask the cache plugin to discard it
if ((!ictx->exclusive_lock ||
!ictx->exclusive_lock->is_lock_owner()) &&
ictx->test_features(RBD_FEATURE_DIRTY_CACHE)) {
C_SaferCond ctx3;
ictx->plugin_registry->discard(&ctx3);
r = ctx3.wait();
}
return r;
}
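  // Drain up to 'numcomp' completed AIO completions from the image's event
  // socket queue; returns the number of completions retrieved.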
int poll_io_events(ImageCtx *ictx, io::AioCompletion **comps, int numcomp)
{
if (numcomp <= 0)
return -EINVAL;
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << " " << ictx << " numcomp = " << numcomp
<< dendl;
int i = 0;
while (i < numcomp && ictx->event_socket_completions.pop(comps[i])) {
++i;
}
return i;
}
int metadata_get(ImageCtx *ictx, const string &key, string *value)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << "metadata_get " << ictx << " key=" << key << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
return cls_client::metadata_get(&ictx->md_ctx, ictx->header_oid, key, value);
}
int metadata_list(ImageCtx *ictx, const string &start, uint64_t max, map<string, bufferlist> *pairs)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << "metadata_list " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond ctx;
auto req = image::GetMetadataRequest<>::create(
ictx->md_ctx, ictx->header_oid, false, "", start, max, pairs, &ctx);
req->send();
return ctx.wait();
}
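  // List the clients currently watching the image header object.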
int list_watchers(ImageCtx *ictx,
std::list<librbd::image_watcher_t> &watchers)
{
int r;
std::string header_oid;
std::list<obj_watch_t> obj_watchers;
if (ictx->old_format) {
header_oid = util::old_header_name(ictx->name);
} else {
header_oid = util::header_name(ictx->id);
}
r = ictx->md_ctx.list_watchers(header_oid, &obj_watchers);
if (r < 0) {
return r;
}
watchers.clear();
for (auto i = obj_watchers.begin(); i != obj_watchers.end(); ++i) {
librbd::image_watcher_t watcher;
watcher.addr = i->addr;
watcher.id = i->watcher_id;
watcher.cookie = i->cookie;
watchers.push_back(watcher);
}
return 0;
}
}
std::ostream &operator<<(std::ostream &os, const librbd::ImageOptions &opts) {
os << "[";
const char *delimiter = "";
for (auto &i : librbd::IMAGE_OPTIONS_TYPE_MAPPING) {
if (i.second == librbd::STR) {
std::string val;
if (opts.get(i.first, &val) == 0) {
os << delimiter << librbd::image_option_name(i.first) << "=" << val;
delimiter = ", ";
}
} else if (i.second == librbd::UINT64) {
uint64_t val;
if (opts.get(i.first, &val) == 0) {
os << delimiter << librbd::image_option_name(i.first) << "=" << val;
delimiter = ", ";
}
}
}
os << "]";
return os;
}
| 49,410 | 27.380816 | 102 | cc |
null | ceph-main/src/librbd/internal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_INTERNAL_H
#define CEPH_LIBRBD_INTERNAL_H
#include "include/int_types.h"
#include <map>
#include <set>
#include <string>
#include <vector>
#include "include/buffer_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rbd_types.h"
#include "cls/rbd/cls_rbd_types.h"
#include "common/ceph_time.h"
#include "librbd/Types.h"
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
enum class ImageArea;
}
class NoOpProgressContext : public ProgressContext
{
public:
NoOpProgressContext()
{
}
int update_progress(uint64_t offset, uint64_t src_size) override
{
return 0;
}
};
int detect_format(librados::IoCtx &io_ctx, const std::string &name,
bool *old_format, uint64_t *size);
bool has_parent(int64_t parent_pool_id, uint64_t off, uint64_t overlap);
std::string image_option_name(int optname);
void image_options_create(rbd_image_options_t* opts);
void image_options_create_ref(rbd_image_options_t* opts,
rbd_image_options_t orig);
void image_options_copy(rbd_image_options_t *opts,
const ImageOptions &orig);
void image_options_destroy(rbd_image_options_t opts);
int image_options_set(rbd_image_options_t opts, int optname,
const std::string& optval);
int image_options_set(rbd_image_options_t opts, int optname, uint64_t optval);
int image_options_get(rbd_image_options_t opts, int optname,
std::string* optval);
int image_options_get(rbd_image_options_t opts, int optname,
uint64_t* optval);
int image_options_is_set(rbd_image_options_t opts, int optname,
bool* is_set);
int image_options_unset(rbd_image_options_t opts, int optname);
void image_options_clear(rbd_image_options_t opts);
bool image_options_is_empty(rbd_image_options_t opts);
int create(librados::IoCtx& io_ctx, const char *imgname, uint64_t size,
int *order);
int create(librados::IoCtx& io_ctx, const char *imgname, uint64_t size,
bool old_format, uint64_t features, int *order,
uint64_t stripe_unit, uint64_t stripe_count);
int create(IoCtx& io_ctx, const std::string &image_name,
const std::string &image_id, uint64_t size, ImageOptions& opts,
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid,
bool skip_mirror_enable);
int clone(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name,
uint64_t features, int *c_order,
uint64_t stripe_unit, int stripe_count);
int clone(IoCtx& p_ioctx, const char *p_id, const char *p_name,
const char *p_snap_name, IoCtx& c_ioctx, const char *c_id,
const char *c_name, ImageOptions& c_opts,
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid);
int rename(librados::IoCtx& io_ctx, const char *srcname, const char *dstname);
int info(ImageCtx *ictx, image_info_t& info, size_t image_size);
int get_old_format(ImageCtx *ictx, uint8_t *old);
int get_size(ImageCtx *ictx, uint64_t *size);
int get_features(ImageCtx *ictx, uint64_t *features);
int get_overlap(ImageCtx *ictx, uint64_t *overlap);
int get_flags(ImageCtx *ictx, uint64_t *flags);
int set_image_notification(ImageCtx *ictx, int fd, int type);
int is_exclusive_lock_owner(ImageCtx *ictx, bool *is_owner);
int lock_acquire(ImageCtx *ictx, rbd_lock_mode_t lock_mode);
int lock_release(ImageCtx *ictx);
int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode,
std::list<std::string> *lock_owners);
int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode,
const std::string &lock_owner);
int copy(ImageCtx *ictx, IoCtx& dest_md_ctx, const char *destname,
ImageOptions& opts, ProgressContext &prog_ctx, size_t sparse_size);
int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx, size_t sparse_size);
/* cooperative locking */
int list_lockers(ImageCtx *ictx,
std::list<locker_t> *locks,
bool *exclusive,
std::string *tag);
int lock(ImageCtx *ictx, bool exclusive, const std::string& cookie,
const std::string& tag);
int lock_shared(ImageCtx *ictx, const std::string& cookie,
const std::string& tag);
int unlock(ImageCtx *ictx, const std::string& cookie);
int break_lock(ImageCtx *ictx, const std::string& client,
const std::string& cookie);
int read_header_bl(librados::IoCtx& io_ctx, const std::string& md_oid,
ceph::bufferlist& header, uint64_t *ver);
int read_header(librados::IoCtx& io_ctx, const std::string& md_oid,
struct rbd_obj_header_ondisk *header, uint64_t *ver);
int tmap_set(librados::IoCtx& io_ctx, const std::string& imgname);
int tmap_rm(librados::IoCtx& io_ctx, const std::string& imgname);
void image_info(const ImageCtx *ictx, image_info_t& info, size_t info_size);
uint64_t oid_to_object_no(const std::string& oid,
const std::string& object_prefix);
int clip_io(ImageCtx* ictx, uint64_t off, uint64_t* len, io::ImageArea area);
void init_rbd_header(struct rbd_obj_header_ondisk& ondisk,
uint64_t size, int order, uint64_t bid);
int64_t read_iterate(ImageCtx *ictx, uint64_t off, uint64_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg);
int invalidate_cache(ImageCtx *ictx);
int poll_io_events(ImageCtx *ictx, io::AioCompletion **comps, int numcomp);
int metadata_list(ImageCtx *ictx, const std::string &last, uint64_t max,
std::map<std::string, bufferlist> *pairs);
int metadata_get(ImageCtx *ictx, const std::string &key, std::string *value);
int list_watchers(ImageCtx *ictx, std::list<librbd::image_watcher_t> &watchers);
}
std::ostream &operator<<(std::ostream &os, const librbd::ImageOptions &opts);
#endif
| 5,956 | 39.80137 | 89 | h |
null | ceph-main/src/librbd/librbd.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/int_types.h"
#include <errno.h>
#include "common/deleter.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/TracepointProvider.h"
#include "include/Context.h"
#include "cls/rbd/cls_rbd_client.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/Features.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/DiffIterate.h"
#include "librbd/api/Group.h"
#include "librbd/api/Image.h"
#include "librbd/api/Io.h"
#include "librbd/api/Migration.h"
#include "librbd/api/Mirror.h"
#include "librbd/api/Namespace.h"
#include "librbd/api/Pool.h"
#include "librbd/api/PoolMetadata.h"
#include "librbd/api/Snapshot.h"
#include "librbd/api/Trash.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include <algorithm>
#include <string>
#include <vector>
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/librbd.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd: "
using std::list;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
using ceph::bufferlist;
using librados::snap_t;
using librados::IoCtx;
namespace {
TracepointProvider::Traits tracepoint_traits("librbd_tp.so", "rbd_tracing");
struct UserBufferDeleter : public deleter::impl {
CephContext* cct;
librbd::io::AioCompletion* aio_completion;
UserBufferDeleter(CephContext* cct, librbd::io::AioCompletion* aio_completion)
: deleter::impl(deleter()), cct(cct), aio_completion(aio_completion) {
aio_completion->block(cct);
}
~UserBufferDeleter() override {
aio_completion->unblock(cct);
}
};
static auto create_write_raw(librbd::ImageCtx *ictx, const char *buf,
size_t len,
librbd::io::AioCompletion* aio_completion) {
if (ictx->disable_zero_copy || aio_completion == nullptr) {
// must copy the buffer if writeback/writearound cache is in-use (or using
// non-AIO)
return buffer::copy(buf, len);
}
// avoid copying memory for AIO operations, but possibly delay completions
// until the last reference to the user's memory has been released
return ceph::unique_leakable_ptr<ceph::buffer::raw>(
buffer::claim_buffer(
len, const_cast<char*>(buf),
deleter(new UserBufferDeleter(ictx->cct, aio_completion))));
}
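// Sum the lengths of an iovec array, rejecting empty arrays and length
// overflow.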
static int get_iovec_length(const struct iovec *iov, int iovcnt, size_t &len)
{
len = 0;
if (iovcnt <= 0) {
return -EINVAL;
}
for (int i = 0; i < iovcnt; ++i) {
const struct iovec &io = iov[i];
// check for overflow
if (len + io.iov_len < len) {
return -EINVAL;
}
len += io.iov_len;
}
return 0;
}
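// Assemble a bufferlist from an iovec array, using zero-copy buffer
// references when create_write_raw() permits it.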
static bufferlist iovec_to_bufferlist(librbd::ImageCtx *ictx,
const struct iovec *iov,
int iovcnt,
librbd::io::AioCompletion* aio_completion)
{
bufferlist bl;
for (int i = 0; i < iovcnt; ++i) {
const struct iovec &io = iov[i];
bl.push_back(create_write_raw(ictx, static_cast<char*>(io.iov_base),
io.iov_len, aio_completion));
}
return bl;
}
CephContext* get_cct(IoCtx &io_ctx) {
return reinterpret_cast<CephContext*>(io_ctx.cct());
}
librbd::io::AioCompletion* get_aio_completion(librbd::RBD::AioCompletion *comp) {
return reinterpret_cast<librbd::io::AioCompletion *>(comp->pc);
}
struct C_AioCompletion : public Context {
CephContext *cct;
librbd::io::aio_type_t aio_type;
librbd::io::AioCompletion* aio_comp;
C_AioCompletion(librbd::ImageCtx *ictx, librbd::io::aio_type_t aio_type,
librbd::io::AioCompletion* aio_comp)
: cct(ictx->cct), aio_type(aio_type), aio_comp(aio_comp) {
aio_comp->init_time(ictx, aio_type);
aio_comp->get();
}
virtual ~C_AioCompletion() {
aio_comp->put();
}
void finish(int r) override {
ldout(cct, 20) << "C_AioCompletion::finish: r=" << r << dendl;
if (r < 0) {
aio_comp->fail(r);
} else {
aio_comp->complete();
}
}
};
struct C_OpenComplete : public C_AioCompletion {
librbd::ImageCtx *ictx;
void **ictxp;
C_OpenComplete(librbd::ImageCtx *ictx, librbd::io::AioCompletion* comp,
void **ictxp)
: C_AioCompletion(ictx, librbd::io::AIO_TYPE_OPEN, comp),
ictx(ictx), ictxp(ictxp) {
}
void finish(int r) override {
ldout(cct, 20) << "C_OpenComplete::finish: r=" << r << dendl;
if (r < 0) {
*ictxp = nullptr;
} else {
*ictxp = ictx;
}
C_AioCompletion::finish(r);
}
};
struct C_OpenAfterCloseComplete : public Context {
librbd::ImageCtx *ictx;
librbd::io::AioCompletion* comp;
void **ictxp;
C_OpenAfterCloseComplete(librbd::ImageCtx *ictx,
librbd::io::AioCompletion* comp,
void **ictxp)
: ictx(ictx), comp(comp), ictxp(ictxp) {
}
void finish(int r) override {
ldout(ictx->cct, 20) << "C_OpenAfterCloseComplete::finish: r=" << r
<< dendl;
*ictxp = nullptr;
ictx->state->open(0, new C_OpenComplete(ictx, comp, ictxp));
}
};
struct C_UpdateWatchCB : public librbd::UpdateWatchCtx {
rbd_update_callback_t watch_cb;
void *arg;
uint64_t handle = 0;
C_UpdateWatchCB(rbd_update_callback_t watch_cb, void *arg) :
watch_cb(watch_cb), arg(arg) {
}
void handle_notify() override {
watch_cb(arg);
}
};
struct C_QuiesceWatchCB : public librbd::QuiesceWatchCtx {
rbd_update_callback_t quiesce_cb;
rbd_update_callback_t unquiesce_cb;
void *arg;
uint64_t handle = 0;
C_QuiesceWatchCB(rbd_update_callback_t quiesce_cb,
rbd_update_callback_t unquiesce_cb, void *arg) :
quiesce_cb(quiesce_cb), unquiesce_cb(unquiesce_cb), arg(arg) {
}
void handle_quiesce() override {
quiesce_cb(arg);
}
void handle_unquiesce() override {
unquiesce_cb(arg);
}
};
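// Converters from C++ API structures to their C counterparts; string members
// are strdup()ed for the C caller to release.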
void group_image_status_cpp_to_c(const librbd::group_image_info_t &cpp_info,
rbd_group_image_info_t *c_info) {
c_info->name = strdup(cpp_info.name.c_str());
c_info->pool = cpp_info.pool;
c_info->state = cpp_info.state;
}
void group_info_cpp_to_c(const librbd::group_info_t &cpp_info,
rbd_group_info_t *c_info) {
c_info->name = strdup(cpp_info.name.c_str());
c_info->pool = cpp_info.pool;
}
void group_snap_info_cpp_to_c(const librbd::group_snap_info_t &cpp_info,
rbd_group_snap_info_t *c_info) {
c_info->name = strdup(cpp_info.name.c_str());
c_info->state = cpp_info.state;
}
void mirror_image_info_cpp_to_c(const librbd::mirror_image_info_t &cpp_info,
rbd_mirror_image_info_t *c_info) {
c_info->global_id = strdup(cpp_info.global_id.c_str());
c_info->state = cpp_info.state;
c_info->primary = cpp_info.primary;
}
int get_local_mirror_image_site_status(
const librbd::mirror_image_global_status_t& status,
librbd::mirror_image_site_status_t* local_status) {
auto it = std::find_if(status.site_statuses.begin(),
status.site_statuses.end(),
[](const librbd::mirror_image_site_status_t& s) {
return (s.mirror_uuid ==
cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID);
});
if (it == status.site_statuses.end()) {
return -ENOENT;
}
*local_status = *it;
return 0;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int mirror_image_global_status_cpp_to_c(
const librbd::mirror_image_global_status_t &cpp_status,
rbd_mirror_image_status_t *c_status) {
c_status->name = strdup(cpp_status.name.c_str());
mirror_image_info_cpp_to_c(cpp_status.info, &c_status->info);
librbd::mirror_image_site_status_t local_status;
int r = get_local_mirror_image_site_status(cpp_status, &local_status);
if (r < 0) {
return r;
}
c_status->state = local_status.state;
c_status->description = strdup(local_status.description.c_str());
c_status->last_update = local_status.last_update;
c_status->up = local_status.up;
return 0;
}
#pragma GCC diagnostic pop
void mirror_image_global_status_cpp_to_c(
const librbd::mirror_image_global_status_t &cpp_status,
rbd_mirror_image_global_status_t *c_status) {
c_status->name = strdup(cpp_status.name.c_str());
mirror_image_info_cpp_to_c(cpp_status.info, &c_status->info);
c_status->site_statuses_count = cpp_status.site_statuses.size();
c_status->site_statuses = (rbd_mirror_image_site_status_t*)calloc(
cpp_status.site_statuses.size(), sizeof(rbd_mirror_image_site_status_t));
auto idx = 0U;
for (auto it = cpp_status.site_statuses.begin();
it != cpp_status.site_statuses.end(); ++it) {
auto& s_status = c_status->site_statuses[idx++];
s_status.mirror_uuid = strdup(it->mirror_uuid.c_str());
s_status.state = it->state;
s_status.description = strdup(it->description.c_str());
s_status.last_update = it->last_update;
s_status.up = it->up;
}
}
void trash_image_info_cpp_to_c(const librbd::trash_image_info_t &cpp_info,
rbd_trash_image_info_t *c_info) {
c_info->id = strdup(cpp_info.id.c_str());
c_info->name = strdup(cpp_info.name.c_str());
c_info->source = cpp_info.source;
c_info->deletion_time = cpp_info.deletion_time;
c_info->deferment_end_time = cpp_info.deferment_end_time;
}
void config_option_cpp_to_c(const librbd::config_option_t &cpp_option,
rbd_config_option_t *c_option) {
c_option->name = strdup(cpp_option.name.c_str());
c_option->value = strdup(cpp_option.value.c_str());
c_option->source = cpp_option.source;
}
void config_option_cleanup(rbd_config_option_t &option) {
free(option.name);
free(option.value);
}
struct C_MirrorImageGetInfo : public Context {
rbd_mirror_image_info_t *mirror_image_info;
Context *on_finish;
librbd::mirror_image_info_t cpp_mirror_image_info;
C_MirrorImageGetInfo(rbd_mirror_image_info_t *mirror_image_info,
Context *on_finish)
: mirror_image_info(mirror_image_info), on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0) {
on_finish->complete(r);
return;
}
mirror_image_info_cpp_to_c(cpp_mirror_image_info, mirror_image_info);
on_finish->complete(0);
}
};
struct C_MirrorImageGetGlobalStatus : public Context {
rbd_mirror_image_global_status_t *mirror_image_global_status;
Context *on_finish;
librbd::mirror_image_global_status_t cpp_mirror_image_global_status;
C_MirrorImageGetGlobalStatus(
rbd_mirror_image_global_status_t *mirror_image_global_status,
Context *on_finish)
: mirror_image_global_status(mirror_image_global_status),
on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0) {
on_finish->complete(r);
return;
}
mirror_image_global_status_cpp_to_c(cpp_mirror_image_global_status,
mirror_image_global_status);
on_finish->complete(0);
}
};
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
struct C_MirrorImageGetStatus : public Context {
librbd::mirror_image_status_t *mirror_image_status_cpp = nullptr;
rbd_mirror_image_status_t *mirror_image_status = nullptr;
Context *on_finish;
librbd::mirror_image_global_status_t cpp_mirror_image_global_status;
C_MirrorImageGetStatus(rbd_mirror_image_status_t *mirror_image_status,
Context *on_finish)
: mirror_image_status(mirror_image_status), on_finish(on_finish) {
}
C_MirrorImageGetStatus(librbd::mirror_image_status_t *mirror_image_status,
Context *on_finish)
: mirror_image_status_cpp(mirror_image_status), on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0) {
on_finish->complete(r);
return;
}
if (mirror_image_status != nullptr) {
r = mirror_image_global_status_cpp_to_c(cpp_mirror_image_global_status,
mirror_image_status);
} else if (mirror_image_status_cpp != nullptr) {
librbd::mirror_image_site_status_t local_status;
r = get_local_mirror_image_site_status(cpp_mirror_image_global_status,
&local_status);
if (r >= 0) {
*mirror_image_status_cpp = {
cpp_mirror_image_global_status.name,
cpp_mirror_image_global_status.info,
local_status.state, local_status.description,
local_status.last_update, local_status.up};
}
}
on_finish->complete(r);
}
};
#pragma GCC diagnostic pop
} // anonymous namespace
namespace librbd {
ProgressContext::~ProgressContext()
{
}
class CProgressContext : public ProgressContext
{
public:
CProgressContext(librbd_progress_fn_t fn, void *data)
: m_fn(fn), m_data(data)
{
}
int update_progress(uint64_t offset, uint64_t src_size) override
{
return m_fn(offset, src_size, m_data);
}
private:
librbd_progress_fn_t m_fn;
void *m_data;
};
/*
* Pool stats
*/
PoolStats::PoolStats() {
rbd_pool_stats_create(&pool_stats);
}
PoolStats::~PoolStats() {
rbd_pool_stats_destroy(pool_stats);
}
int PoolStats::add(rbd_pool_stat_option_t option, uint64_t* opt_val) {
return rbd_pool_stats_option_add_uint64(pool_stats, option, opt_val);
}
/*
* RBD
*/
RBD::RBD()
{
}
RBD::~RBD()
{
}
void RBD::version(int *major, int *minor, int *extra)
{
rbd_version(major, minor, extra);
}
int RBD::open(IoCtx& io_ctx, Image& image, const char *name)
{
return open(io_ctx, image, name, NULL);
}
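  // A minimal usage sketch (not part of the library), assuming an initialized
  // librados::IoCtx named 'io_ctx' and an existing image "img":
  //
  //   librbd::RBD rbd;
  //   librbd::Image image;
  //   int r = rbd.open(io_ctx, image, "img"); // or open_read_only()
  //   if (r < 0) { /* handle error */ }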
int RBD::open_by_id(IoCtx& io_ctx, Image& image, const char *id)
{
return open_by_id(io_ctx, image, id, nullptr);
}
int RBD::open(IoCtx& io_ctx, Image& image, const char *name,
const char *snap_name)
{
ImageCtx *ictx = new ImageCtx(name, "", snap_name, io_ctx, false);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
if (image.ctx != NULL) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close();
image.ctx = NULL;
}
int r = ictx->state->open(0);
if (r < 0) {
tracepoint(librbd, open_image_exit, r);
return r;
}
image.ctx = (image_ctx_t) ictx;
tracepoint(librbd, open_image_exit, 0);
return 0;
}
int RBD::open_by_id(IoCtx& io_ctx, Image& image, const char *id,
const char *snap_name)
{
ImageCtx *ictx = new ImageCtx("", id, snap_name, io_ctx, false);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, open_image_by_id_enter, ictx, ictx->id.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
if (image.ctx != nullptr) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close();
image.ctx = nullptr;
}
int r = ictx->state->open(0);
if (r < 0) {
tracepoint(librbd, open_image_by_id_exit, r);
return r;
}
image.ctx = (image_ctx_t) ictx;
tracepoint(librbd, open_image_by_id_exit, 0);
return 0;
}
int RBD::aio_open(IoCtx& io_ctx, Image& image, const char *name,
const char *snap_name, RBD::AioCompletion *c)
{
ImageCtx *ictx = new ImageCtx(name, "", snap_name, io_ctx, false);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, c->pc);
if (image.ctx != NULL) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
} else {
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(c),
&image.ctx));
}
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
int RBD::aio_open_by_id(IoCtx& io_ctx, Image& image, const char *id,
const char *snap_name, RBD::AioCompletion *c)
{
ImageCtx *ictx = new ImageCtx("", id, snap_name, io_ctx, false);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, aio_open_image_by_id_enter, ictx, ictx->id.c_str(),
ictx->snap_name.c_str(), ictx->read_only, c->pc);
if (image.ctx != nullptr) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
} else {
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(c),
&image.ctx));
}
tracepoint(librbd, aio_open_image_by_id_exit, 0);
return 0;
}
int RBD::open_read_only(IoCtx& io_ctx, Image& image, const char *name,
const char *snap_name)
{
ImageCtx *ictx = new ImageCtx(name, "", snap_name, io_ctx, true);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
if (image.ctx != NULL) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close();
image.ctx = NULL;
}
int r = ictx->state->open(0);
if (r < 0) {
tracepoint(librbd, open_image_exit, r);
return r;
}
image.ctx = (image_ctx_t) ictx;
tracepoint(librbd, open_image_exit, 0);
return 0;
}
int RBD::open_by_id_read_only(IoCtx& io_ctx, Image& image, const char *id,
const char *snap_name)
{
ImageCtx *ictx = new ImageCtx("", id, snap_name, io_ctx, true);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, open_image_by_id_enter, ictx, ictx->id.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
if (image.ctx != nullptr) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close();
image.ctx = nullptr;
}
int r = ictx->state->open(0);
if (r < 0) {
tracepoint(librbd, open_image_by_id_exit, r);
return r;
}
image.ctx = (image_ctx_t) ictx;
tracepoint(librbd, open_image_by_id_exit, 0);
return 0;
}
int RBD::aio_open_read_only(IoCtx& io_ctx, Image& image, const char *name,
const char *snap_name, RBD::AioCompletion *c)
{
ImageCtx *ictx = new ImageCtx(name, "", snap_name, io_ctx, true);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, c->pc);
if (image.ctx != NULL) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
} else {
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(c),
&image.ctx));
}
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
int RBD::aio_open_by_id_read_only(IoCtx& io_ctx, Image& image, const char *id,
const char *snap_name, RBD::AioCompletion *c)
{
ImageCtx *ictx = new ImageCtx("", id, snap_name, io_ctx, true);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, aio_open_image_by_id_enter, ictx, ictx->id.c_str(),
ictx->snap_name.c_str(), ictx->read_only, c->pc);
if (image.ctx != nullptr) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
} else {
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(c),
&image.ctx));
}
tracepoint(librbd, aio_open_image_by_id_exit, 0);
return 0;
}
int RBD::features_to_string(uint64_t features, std::string *str_features)
{
std::stringstream err;
*str_features = librbd::rbd_features_to_string(features, &err);
if (!err.str().empty()) {
return -EINVAL;
}
return 0;
}
int RBD::features_from_string(const std::string str_features, uint64_t *features)
{
std::stringstream err;
*features = librbd::rbd_features_from_string(str_features, &err);
if (!err.str().empty()) {
return -EINVAL;
}
return 0;
}
int RBD::create(IoCtx& io_ctx, const char *name, uint64_t size, int *order)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, *order);
int r = librbd::create(io_ctx, name, size, order);
tracepoint(librbd, create_exit, r, *order);
return r;
}
int RBD::create2(IoCtx& io_ctx, const char *name, uint64_t size,
uint64_t features, int *order)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create2_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, features, *order);
int r = librbd::create(io_ctx, name, size, false, features, order, 0, 0);
tracepoint(librbd, create2_exit, r, *order);
return r;
}
int RBD::create3(IoCtx& io_ctx, const char *name, uint64_t size,
uint64_t features, int *order, uint64_t stripe_unit,
uint64_t stripe_count)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create3_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, features, *order, stripe_unit, stripe_count);
int r = librbd::create(io_ctx, name, size, false, features, order,
stripe_unit, stripe_count);
tracepoint(librbd, create3_exit, r, *order);
return r;
}
int RBD::create4(IoCtx& io_ctx, const char *name, uint64_t size,
ImageOptions& opts)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create4_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, opts.opts);
int r = librbd::create(io_ctx, name, "", size, opts, "", "", false);
tracepoint(librbd, create4_exit, r);
return r;
}
int RBD::clone(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name, uint64_t features,
int *c_order)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioctx));
tracepoint(librbd, clone_enter, p_ioctx.get_pool_name().c_str(), p_ioctx.get_id(), p_name, p_snap_name, c_ioctx.get_pool_name().c_str(), c_ioctx.get_id(), c_name, features);
int r = librbd::clone(p_ioctx, p_name, p_snap_name, c_ioctx, c_name,
features, c_order, 0, 0);
tracepoint(librbd, clone_exit, r, *c_order);
return r;
}
int RBD::clone2(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name, uint64_t features,
int *c_order, uint64_t stripe_unit, int stripe_count)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioctx));
tracepoint(librbd, clone2_enter, p_ioctx.get_pool_name().c_str(), p_ioctx.get_id(), p_name, p_snap_name, c_ioctx.get_pool_name().c_str(), c_ioctx.get_id(), c_name, features, stripe_unit, stripe_count);
int r = librbd::clone(p_ioctx, p_name, p_snap_name, c_ioctx, c_name,
features, c_order, stripe_unit, stripe_count);
tracepoint(librbd, clone2_exit, r, *c_order);
return r;
}
int RBD::clone3(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name, ImageOptions& c_opts)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioctx));
tracepoint(librbd, clone3_enter, p_ioctx.get_pool_name().c_str(), p_ioctx.get_id(), p_name, p_snap_name, c_ioctx.get_pool_name().c_str(), c_ioctx.get_id(), c_name, c_opts.opts);
int r = librbd::clone(p_ioctx, nullptr, p_name, p_snap_name, c_ioctx,
nullptr, c_name, c_opts, "", "");
tracepoint(librbd, clone3_exit, r);
return r;
}
int RBD::remove(IoCtx& io_ctx, const char *name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, remove_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Image<>::remove(io_ctx, name, prog_ctx);
tracepoint(librbd, remove_exit, r);
return r;
}
int RBD::remove_with_progress(IoCtx& io_ctx, const char *name,
ProgressContext& pctx)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, remove_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name);
int r = librbd::api::Image<>::remove(io_ctx, name, pctx);
tracepoint(librbd, remove_exit, r);
return r;
}
int RBD::trash_move(IoCtx &io_ctx, const char *name, uint64_t delay) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_move_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
int r = librbd::api::Trash<>::move(io_ctx, RBD_TRASH_IMAGE_SOURCE_USER,
name, delay);
tracepoint(librbd, trash_move_exit, r);
return r;
}
int RBD::trash_get(IoCtx &io_ctx, const char *id, trash_image_info_t *info) {
return librbd::api::Trash<>::get(io_ctx, id, info);
}
int RBD::trash_list(IoCtx &io_ctx, vector<trash_image_info_t> &entries) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_list_enter,
io_ctx.get_pool_name().c_str(), io_ctx.get_id());
int r = librbd::api::Trash<>::list(io_ctx, entries, true);
#ifdef WITH_LTTNG
if (r >= 0) {
for (const auto& entry : entries) {
tracepoint(librbd, trash_list_entry, entry.id.c_str());
}
}
#endif
tracepoint(librbd, trash_list_exit, r, r);
return r;
}
int RBD::trash_remove(IoCtx &io_ctx, const char *image_id, bool force) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_id, force);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Trash<>::remove(io_ctx, image_id, force, prog_ctx);
tracepoint(librbd, trash_remove_exit, r);
return r;
}
int RBD::trash_remove_with_progress(IoCtx &io_ctx, const char *image_id,
bool force, ProgressContext &pctx) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_id, force);
int r = librbd::api::Trash<>::remove(io_ctx, image_id, force, pctx);
tracepoint(librbd, trash_remove_exit, r);
return r;
}
int RBD::trash_restore(IoCtx &io_ctx, const char *id, const char *name) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_undelete_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), id, name);
int r = librbd::api::Trash<>::restore(
io_ctx, librbd::api::Trash<>::ALLOWED_RESTORE_SOURCES, id, name);
tracepoint(librbd, trash_undelete_exit, r);
return r;
}
int RBD::trash_purge(IoCtx &io_ctx, time_t expire_ts, float threshold) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_purge_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), expire_ts, threshold);
NoOpProgressContext nop_pctx;
int r = librbd::api::Trash<>::purge(io_ctx, expire_ts, threshold, nop_pctx);
tracepoint(librbd, trash_purge_exit, r);
return r;
}
int RBD::trash_purge_with_progress(IoCtx &io_ctx, time_t expire_ts,
float threshold, ProgressContext &pctx) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_purge_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), expire_ts, threshold);
int r = librbd::api::Trash<>::purge(io_ctx, expire_ts, threshold, pctx);
tracepoint(librbd, trash_purge_exit, r);
return r;
}
int RBD::namespace_create(IoCtx& io_ctx, const char *namespace_name) {
return librbd::api::Namespace<>::create(io_ctx, namespace_name);
}
int RBD::namespace_remove(IoCtx& io_ctx, const char *namespace_name) {
return librbd::api::Namespace<>::remove(io_ctx, namespace_name);
}
int RBD::namespace_list(IoCtx& io_ctx,
std::vector<std::string>* namespace_names) {
return librbd::api::Namespace<>::list(io_ctx, namespace_names);
}
int RBD::namespace_exists(IoCtx& io_ctx, const char *namespace_name,
bool *exists) {
return librbd::api::Namespace<>::exists(io_ctx, namespace_name, exists);
}
int RBD::pool_init(IoCtx& io_ctx, bool force) {
return librbd::api::Pool<>::init(io_ctx, force);
}
int RBD::pool_stats_get(IoCtx& io_ctx, PoolStats* stats) {
auto pool_stat_options =
reinterpret_cast<librbd::api::Pool<>::StatOptions*>(stats->pool_stats);
return librbd::api::Pool<>::get_stats(io_ctx, pool_stat_options);
}
int RBD::list(IoCtx& io_ctx, vector<string>& names)
{
std::vector<image_spec_t> image_specs;
int r = list2(io_ctx, &image_specs);
if (r < 0) {
return r;
}
names.clear();
for (auto& it : image_specs) {
names.push_back(it.name);
}
return 0;
}
int RBD::list2(IoCtx& io_ctx, std::vector<image_spec_t> *images)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
int r = librbd::api::Image<>::list_images(io_ctx, images);
#ifdef WITH_LTTNG
if (r >= 0) {
for (auto& it : *images) {
tracepoint(librbd, list_entry, it.name.c_str());
}
}
#endif
tracepoint(librbd, list_exit, r, r);
return r;
}
int RBD::rename(IoCtx& src_io_ctx, const char *srcname, const char *destname)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(src_io_ctx));
tracepoint(librbd, rename_enter, src_io_ctx.get_pool_name().c_str(), src_io_ctx.get_id(), srcname, destname);
int r = librbd::rename(src_io_ctx, srcname, destname);
tracepoint(librbd, rename_exit, r);
return r;
}
int RBD::migration_prepare(IoCtx& io_ctx, const char *image_name,
IoCtx& dest_io_ctx, const char *dest_image_name,
ImageOptions& opts)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_prepare_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name, dest_io_ctx.get_pool_name().c_str(),
dest_io_ctx.get_id(), dest_image_name, opts.opts);
int r = librbd::api::Migration<>::prepare(io_ctx, image_name, dest_io_ctx,
dest_image_name, opts);
tracepoint(librbd, migration_prepare_exit, r);
return r;
}
int RBD::migration_prepare_import(const char *source_spec, IoCtx& dest_io_ctx,
const char *dest_image_name,
ImageOptions& opts) {
return librbd::api::Migration<>::prepare_import(source_spec, dest_io_ctx,
dest_image_name, opts);
}
int RBD::migration_execute(IoCtx& io_ctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_execute_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::execute(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_execute_exit, r);
return r;
}
int RBD::migration_execute_with_progress(IoCtx& io_ctx,
const char *image_name,
librbd::ProgressContext &prog_ctx)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_execute_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
int r = librbd::api::Migration<>::execute(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_execute_exit, r);
return r;
}
int RBD::migration_abort(IoCtx& io_ctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_abort_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::abort(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_abort_exit, r);
return r;
}
int RBD::migration_abort_with_progress(IoCtx& io_ctx, const char *image_name,
librbd::ProgressContext &prog_ctx)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_abort_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
int r = librbd::api::Migration<>::abort(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_abort_exit, r);
return r;
}
int RBD::migration_commit(IoCtx& io_ctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_commit_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::commit(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_commit_exit, r);
return r;
}
int RBD::migration_commit_with_progress(IoCtx& io_ctx, const char *image_name,
librbd::ProgressContext &prog_ctx)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_commit_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
int r = librbd::api::Migration<>::commit(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_commit_exit, r);
return r;
}
int RBD::migration_status(IoCtx& io_ctx, const char *image_name,
image_migration_status_t *status,
size_t status_size)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_status_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
if (status_size != sizeof(image_migration_status_t)) {
tracepoint(librbd, migration_status_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Migration<>::status(io_ctx, image_name, status);
tracepoint(librbd, migration_status_exit, r);
return r;
}
int RBD::mirror_mode_get(IoCtx& io_ctx, rbd_mirror_mode_t *mirror_mode) {
return librbd::api::Mirror<>::mode_get(io_ctx, mirror_mode);
}
int RBD::mirror_mode_set(IoCtx& io_ctx, rbd_mirror_mode_t mirror_mode) {
return librbd::api::Mirror<>::mode_set(io_ctx, mirror_mode);
}
int RBD::mirror_uuid_get(IoCtx& io_ctx, std::string* mirror_uuid) {
return librbd::api::Mirror<>::uuid_get(io_ctx, mirror_uuid);
}
int RBD::mirror_site_name_get(librados::Rados& rados,
std::string* site_name) {
return librbd::api::Mirror<>::site_name_get(rados, site_name);
}
int RBD::mirror_site_name_set(librados::Rados& rados,
const std::string& site_name) {
return librbd::api::Mirror<>::site_name_set(rados, site_name);
}
int RBD::mirror_peer_bootstrap_create(IoCtx& io_ctx, std::string* token) {
return librbd::api::Mirror<>::peer_bootstrap_create(io_ctx, token);
}
int RBD::mirror_peer_bootstrap_import(IoCtx& io_ctx,
rbd_mirror_peer_direction_t direction,
const std::string& token) {
return librbd::api::Mirror<>::peer_bootstrap_import(io_ctx, direction,
token);
}
int RBD::mirror_peer_site_add(IoCtx& io_ctx, std::string *uuid,
mirror_peer_direction_t direction,
const std::string &site_name,
const std::string &client_name) {
return librbd::api::Mirror<>::peer_site_add(
io_ctx, uuid, direction, site_name, client_name);
}
int RBD::mirror_peer_site_remove(IoCtx& io_ctx, const std::string &uuid) {
return librbd::api::Mirror<>::peer_site_remove(io_ctx, uuid);
}
int RBD::mirror_peer_site_list(
IoCtx& io_ctx, std::vector<mirror_peer_site_t> *peer_sites) {
return librbd::api::Mirror<>::peer_site_list(io_ctx, peer_sites);
}
int RBD::mirror_peer_site_set_client_name(
IoCtx& io_ctx, const std::string &uuid, const std::string &client_name) {
return librbd::api::Mirror<>::peer_site_set_client(io_ctx, uuid,
client_name);
}
int RBD::mirror_peer_site_set_name(IoCtx& io_ctx, const std::string &uuid,
const std::string &site_name) {
return librbd::api::Mirror<>::peer_site_set_name(io_ctx, uuid,
site_name);
}
int RBD::mirror_peer_site_set_direction(IoCtx& io_ctx,
const std::string& uuid,
mirror_peer_direction_t direction) {
return librbd::api::Mirror<>::peer_site_set_direction(io_ctx, uuid,
direction);
}
int RBD::mirror_peer_site_get_attributes(
IoCtx& io_ctx, const std::string &uuid,
std::map<std::string, std::string> *key_vals) {
return librbd::api::Mirror<>::peer_site_get_attributes(io_ctx, uuid,
key_vals);
}
int RBD::mirror_peer_site_set_attributes(
IoCtx& io_ctx, const std::string &uuid,
const std::map<std::string, std::string>& key_vals) {
return librbd::api::Mirror<>::peer_site_set_attributes(io_ctx, uuid,
key_vals);
}
int RBD::mirror_image_global_status_list(
IoCtx& io_ctx, const std::string &start_id, size_t max,
std::map<std::string, mirror_image_global_status_t> *global_statuses) {
return librbd::api::Mirror<>::image_global_status_list(
io_ctx, start_id, max, global_statuses);
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int RBD::mirror_peer_add(IoCtx& io_ctx, std::string *uuid,
const std::string &cluster_name,
const std::string &client_name) {
return librbd::api::Mirror<>::peer_site_add(
io_ctx, uuid, RBD_MIRROR_PEER_DIRECTION_RX_TX, cluster_name, client_name);
}
int RBD::mirror_peer_remove(IoCtx& io_ctx, const std::string &uuid) {
return librbd::api::Mirror<>::peer_site_remove(io_ctx, uuid);
}
int RBD::mirror_peer_list(IoCtx& io_ctx, std::vector<mirror_peer_t> *peers) {
std::vector<mirror_peer_site_t> peer_sites;
int r = librbd::api::Mirror<>::peer_site_list(io_ctx, &peer_sites);
if (r < 0) {
return r;
}
peers->clear();
peers->reserve(peer_sites.size());
for (auto& peer_site : peer_sites) {
peers->push_back({peer_site.uuid, peer_site.site_name,
peer_site.client_name});
}
return 0;
}
int RBD::mirror_peer_set_client(IoCtx& io_ctx, const std::string &uuid,
const std::string &client_name) {
return librbd::api::Mirror<>::peer_site_set_client(io_ctx, uuid,
client_name);
}
int RBD::mirror_peer_set_cluster(IoCtx& io_ctx, const std::string &uuid,
const std::string &cluster_name) {
return librbd::api::Mirror<>::peer_site_set_name(io_ctx, uuid,
cluster_name);
}
int RBD::mirror_peer_get_attributes(
IoCtx& io_ctx, const std::string &uuid,
std::map<std::string, std::string> *key_vals) {
return librbd::api::Mirror<>::peer_site_get_attributes(io_ctx, uuid,
key_vals);
}
int RBD::mirror_peer_set_attributes(
IoCtx& io_ctx, const std::string &uuid,
const std::map<std::string, std::string>& key_vals) {
return librbd::api::Mirror<>::peer_site_set_attributes(io_ctx, uuid,
key_vals);
}
int RBD::mirror_image_status_list(IoCtx& io_ctx, const std::string &start_id,
size_t max, std::map<std::string, mirror_image_status_t> *images) {
std::map<std::string, mirror_image_global_status_t> global_statuses;
int r = librbd::api::Mirror<>::image_global_status_list(
io_ctx, start_id, max, &global_statuses);
if (r < 0) {
return r;
}
images->clear();
for (auto &[id, global_status] : global_statuses) {
if (global_status.site_statuses.empty() ||
global_status.site_statuses[0].mirror_uuid !=
cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID) {
continue;
}
auto& site_status = global_status.site_statuses[0];
(*images)[id] = mirror_image_status_t{
global_status.name, global_status.info, site_status.state,
site_status.description, site_status.last_update, site_status.up};
}
return 0;
}
#pragma GCC diagnostic pop
int RBD::mirror_image_status_summary(IoCtx& io_ctx,
std::map<mirror_image_status_state_t, int> *states) {
return librbd::api::Mirror<>::image_status_summary(io_ctx, states);
}
int RBD::mirror_image_instance_id_list(IoCtx& io_ctx,
const std::string &start_id, size_t max,
std::map<std::string, std::string> *instance_ids) {
return librbd::api::Mirror<>::image_instance_id_list(io_ctx, start_id, max,
instance_ids);
}
int RBD::mirror_image_info_list(
IoCtx& io_ctx, mirror_image_mode_t *mode_filter,
const std::string &start_id, size_t max,
std::map<std::string, std::pair<mirror_image_mode_t,
mirror_image_info_t>> *entries) {
return librbd::api::Mirror<>::image_info_list(io_ctx, mode_filter, start_id,
max, entries);
}
int RBD::group_create(IoCtx& io_ctx, const char *group_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_create_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), group_name);
int r = librbd::api::Group<>::create(io_ctx, group_name);
tracepoint(librbd, group_create_exit, r);
return r;
}
int RBD::group_remove(IoCtx& io_ctx, const char *group_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), group_name);
int r = librbd::api::Group<>::remove(io_ctx, group_name);
tracepoint(librbd, group_remove_exit, r);
return r;
}
int RBD::group_list(IoCtx& io_ctx, vector<string> *names)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
int r = librbd::api::Group<>::list(io_ctx, names);
if (r >= 0) {
for (const auto& itr : *names) {
tracepoint(librbd, group_list_entry, itr.c_str());
}
}
tracepoint(librbd, group_list_exit, r);
return r;
}
int RBD::group_rename(IoCtx& io_ctx, const char *src_name,
const char *dest_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_rename_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), src_name, dest_name);
int r = librbd::api::Group<>::rename(io_ctx, src_name, dest_name);
tracepoint(librbd, group_rename_exit, r);
return r;
}
int RBD::group_image_add(IoCtx& group_ioctx, const char *group_name,
IoCtx& image_ioctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_add_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name,
image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_name);
int r = librbd::api::Group<>::image_add(group_ioctx, group_name,
image_ioctx, image_name);
tracepoint(librbd, group_image_add_exit, r);
return r;
}
int RBD::group_image_remove(IoCtx& group_ioctx, const char *group_name,
IoCtx& image_ioctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_remove_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name,
image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_name);
int r = librbd::api::Group<>::image_remove(group_ioctx, group_name,
image_ioctx, image_name);
tracepoint(librbd, group_image_remove_exit, r);
return r;
}
int RBD::group_image_remove_by_id(IoCtx& group_ioctx, const char *group_name,
IoCtx& image_ioctx, const char *image_id)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_remove_by_id_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name,
image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_id);
int r = librbd::api::Group<>::image_remove_by_id(group_ioctx, group_name,
image_ioctx, image_id);
tracepoint(librbd, group_image_remove_by_id_exit, r);
return r;
}
int RBD::group_image_list(IoCtx& group_ioctx, const char *group_name,
std::vector<group_image_info_t> *images,
size_t group_image_info_size)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_list_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name);
if (group_image_info_size != sizeof(group_image_info_t)) {
tracepoint(librbd, group_image_list_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Group<>::image_list(group_ioctx, group_name, images);
tracepoint(librbd, group_image_list_exit, r);
return r;
}
int RBD::group_snap_create(IoCtx& group_ioctx, const char *group_name,
const char *snap_name) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_create_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_create(group_ioctx, group_name,
snap_name, 0);
tracepoint(librbd, group_snap_create_exit, r);
return r;
}
int RBD::group_snap_create2(IoCtx& group_ioctx, const char *group_name,
const char *snap_name, uint32_t flags) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_create_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_create(group_ioctx, group_name,
snap_name, flags);
tracepoint(librbd, group_snap_create_exit, r);
return r;
}
int RBD::group_snap_remove(IoCtx& group_ioctx, const char *group_name,
const char *snap_name) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_remove_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_remove(group_ioctx, group_name,
snap_name);
tracepoint(librbd, group_snap_remove_exit, r);
return r;
}
int RBD::group_snap_list(IoCtx& group_ioctx, const char *group_name,
std::vector<group_snap_info_t> *snaps,
size_t group_snap_info_size)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_list_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name);
if (group_snap_info_size != sizeof(group_snap_info_t)) {
tracepoint(librbd, group_snap_list_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Group<>::snap_list(group_ioctx, group_name, snaps);
tracepoint(librbd, group_snap_list_exit, r);
return r;
}
int RBD::group_snap_rename(IoCtx& group_ioctx, const char *group_name,
const char *old_snap_name,
const char *new_snap_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rename_enter,
group_ioctx.get_pool_name().c_str(), group_ioctx.get_id(),
group_name, old_snap_name, new_snap_name);
int r = librbd::api::Group<>::snap_rename(group_ioctx, group_name,
old_snap_name, new_snap_name);
tracepoint(librbd, group_snap_rename_exit, r);
return r;
}
int RBD::group_snap_rollback(IoCtx& group_ioctx, const char *group_name,
const char *snap_name) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rollback_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Group<>::snap_rollback(group_ioctx, group_name,
snap_name, prog_ctx);
tracepoint(librbd, group_snap_rollback_exit, r);
return r;
}
int RBD::group_snap_rollback_with_progress(IoCtx& group_ioctx,
const char *group_name,
const char *snap_name,
ProgressContext& prog_ctx) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rollback_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_rollback(group_ioctx, group_name,
snap_name, prog_ctx);
tracepoint(librbd, group_snap_rollback_exit, r);
return r;
}
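/* pool metadata and pool-level config */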
int RBD::pool_metadata_get(IoCtx& ioctx, const std::string &key,
std::string *value)
{
int r = librbd::api::PoolMetadata<>::get(ioctx, key, value);
return r;
}
int RBD::pool_metadata_set(IoCtx& ioctx, const std::string &key,
const std::string &value)
{
int r = librbd::api::PoolMetadata<>::set(ioctx, key, value);
return r;
}
int RBD::pool_metadata_remove(IoCtx& ioctx, const std::string &key)
{
int r = librbd::api::PoolMetadata<>::remove(ioctx, key);
return r;
}
int RBD::pool_metadata_list(IoCtx& ioctx, const std::string &start,
uint64_t max, map<string, bufferlist> *pairs)
{
int r = librbd::api::PoolMetadata<>::list(ioctx, start, max, pairs);
return r;
}
int RBD::config_list(IoCtx& io_ctx, std::vector<config_option_t> *options) {
return librbd::api::Config<>::list(io_ctx, options);
}
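/*
  RBD::AioCompletion is a thin wrapper around librbd::io::AioCompletion; the
  underlying completion is stored in the opaque 'pc' pointer and must be
  released via release(), which also deletes this wrapper object.

  Illustrative usage sketch (not taken from this file):
    RBD::AioCompletion *c = new RBD::AioCompletion(nullptr, nullptr);
    image.aio_read(off, len, bl, c);
    c->wait_for_complete();
    c->release();
*/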
RBD::AioCompletion::AioCompletion(void *cb_arg, callback_t complete_cb)
{
auto aio_comp = librbd::io::AioCompletion::create(
cb_arg, complete_cb, this);
aio_comp->external_callback = true;
pc = reinterpret_cast<void*>(aio_comp);
}
bool RBD::AioCompletion::is_complete()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
return c->is_complete();
}
int RBD::AioCompletion::wait_for_complete()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
return c->wait_for_complete();
}
ssize_t RBD::AioCompletion::get_return_value()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
return c->get_return_value();
}
void *RBD::AioCompletion::get_arg()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
return c->get_arg();
}
void RBD::AioCompletion::release()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
c->release();
delete this;
}
/*
ImageOptions
*/
ImageOptions::ImageOptions()
{
librbd::image_options_create(&opts);
}
ImageOptions::ImageOptions(rbd_image_options_t opts_)
{
librbd::image_options_create_ref(&opts, opts_);
}
ImageOptions::ImageOptions(const ImageOptions &imgopts)
{
librbd::image_options_copy(&opts, imgopts);
}
ImageOptions::~ImageOptions()
{
librbd::image_options_destroy(opts);
}
int ImageOptions::set(int optname, const std::string& optval)
{
return librbd::image_options_set(opts, optname, optval);
}
int ImageOptions::set(int optname, uint64_t optval)
{
return librbd::image_options_set(opts, optname, optval);
}
int ImageOptions::get(int optname, std::string* optval) const
{
return librbd::image_options_get(opts, optname, optval);
}
int ImageOptions::get(int optname, uint64_t* optval) const
{
return librbd::image_options_get(opts, optname, optval);
}
int ImageOptions::is_set(int optname, bool* is_set)
{
return librbd::image_options_is_set(opts, optname, is_set);
}
int ImageOptions::unset(int optname)
{
return librbd::image_options_unset(opts, optname);
}
void ImageOptions::clear()
{
librbd::image_options_clear(opts);
}
bool ImageOptions::empty() const
{
return librbd::image_options_is_empty(opts);
}
/*
Image
*/
Image::Image() : ctx(NULL)
{
}
Image::~Image()
{
close();
}
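// close() is a no-op returning 0 if the image is already closed;
// aio_close() instead returns -EINVAL when there is no open image context.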
int Image::close()
{
int r = 0;
if (ctx) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, close_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
r = ictx->state->close();
ctx = NULL;
tracepoint(librbd, close_image_exit, r);
}
return r;
}
int Image::aio_close(RBD::AioCompletion *c)
{
if (!ctx) {
return -EINVAL;
}
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_close_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), c->pc);
ictx->state->close(new C_AioCompletion(ictx, librbd::io::AIO_TYPE_CLOSE,
get_aio_completion(c)));
ctx = NULL;
tracepoint(librbd, aio_close_image_exit, 0);
return 0;
}
int Image::resize(uint64_t size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->resize(size, true, prog_ctx);
tracepoint(librbd, resize_exit, r);
return r;
}
int Image::resize2(uint64_t size, bool allow_shrink, librbd::ProgressContext& pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
int r = ictx->operations->resize(size, allow_shrink, pctx);
tracepoint(librbd, resize_exit, r);
return r;
}
int Image::resize_with_progress(uint64_t size, librbd::ProgressContext& pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
int r = ictx->operations->resize(size, true, pctx);
tracepoint(librbd, resize_exit, r);
return r;
}
int Image::stat(image_info_t& info, size_t infosize)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, stat_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::info(ictx, info, infosize);
tracepoint(librbd, stat_exit, r, &info);
return r;
}
int Image::old_format(uint8_t *old)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_old_format_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_old_format(ictx, old);
tracepoint(librbd, get_old_format_exit, r, *old);
return r;
}
int Image::size(uint64_t *size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_size_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_size(ictx, size);
tracepoint(librbd, get_size_exit, r, *size);
return r;
}
int Image::get_group(group_info_t *group_info, size_t group_info_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, image_get_group_enter, ictx->name.c_str());
if (group_info_size != sizeof(group_info_t)) {
tracepoint(librbd, image_get_group_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Group<>::image_get_group(ictx, group_info);
tracepoint(librbd, image_get_group_exit, r);
return r;
}
int Image::features(uint64_t *features)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_features_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_features(ictx, features);
tracepoint(librbd, get_features_exit, r, *features);
return r;
}
int Image::update_features(uint64_t features, bool enabled)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
tracepoint(librbd, update_features_enter, ictx, features, enabled);
int r = ictx->operations->update_features(features, enabled);
tracepoint(librbd, update_features_exit, r);
return r;
}
int Image::get_op_features(uint64_t *op_features)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Image<>::get_op_features(ictx, op_features);
}
uint64_t Image::get_stripe_unit() const
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_stripe_unit_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
uint64_t stripe_unit = ictx->get_stripe_unit();
tracepoint(librbd, get_stripe_unit_exit, 0, stripe_unit);
return stripe_unit;
}
uint64_t Image::get_stripe_count() const
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_stripe_count_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
uint64_t stripe_count = ictx->get_stripe_count();
tracepoint(librbd, get_stripe_count_exit, 0, stripe_count);
return stripe_count;
}
int Image::get_create_timestamp(struct timespec *timestamp)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_create_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
utime_t time = ictx->get_create_timestamp();
time.to_timespec(timestamp);
tracepoint(librbd, get_create_timestamp_exit, 0, timestamp);
return 0;
}
int Image::get_access_timestamp(struct timespec *timestamp)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_access_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
{
std::shared_lock timestamp_locker{ictx->timestamp_lock};
utime_t time = ictx->get_access_timestamp();
time.to_timespec(timestamp);
}
tracepoint(librbd, get_access_timestamp_exit, 0, timestamp);
return 0;
}
int Image::get_modify_timestamp(struct timespec *timestamp)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_modify_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
{
std::shared_lock timestamp_locker{ictx->timestamp_lock};
utime_t time = ictx->get_modify_timestamp();
time.to_timespec(timestamp);
}
tracepoint(librbd, get_modify_timestamp_exit, 0, timestamp);
return 0;
}
int Image::overlap(uint64_t *overlap)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_overlap_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_overlap(ictx, overlap);
tracepoint(librbd, get_overlap_exit, r, *overlap);
return r;
}
int Image::get_name(std::string *name)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
*name = ictx->name;
return 0;
}
int Image::get_id(std::string *id)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
if (ictx->old_format) {
return -EINVAL;
}
*id = ictx->id;
return 0;
}
std::string Image::get_block_name_prefix()
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
return ictx->object_prefix;
}
int64_t Image::get_data_pool_id()
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
return librbd::api::Image<>::get_data_pool_id(ictx);
}
int Image::parent_info(string *parent_pool_name, string *parent_name,
string *parent_snap_name)
{
librbd::linked_image_spec_t parent_image;
librbd::snap_spec_t parent_snap;
int r = get_parent(&parent_image, &parent_snap);
if (r >= 0) {
if (parent_pool_name != nullptr) {
*parent_pool_name = parent_image.pool_name;
}
if (parent_name != nullptr) {
*parent_name = parent_image.image_name;
}
if (parent_snap_name != nullptr) {
*parent_snap_name = parent_snap.name;
}
}
return r;
}
int Image::parent_info2(string *parent_pool_name, string *parent_name,
string *parent_id, string *parent_snap_name)
{
librbd::linked_image_spec_t parent_image;
librbd::snap_spec_t parent_snap;
int r = get_parent(&parent_image, &parent_snap);
if (r >= 0) {
if (parent_pool_name != nullptr) {
*parent_pool_name = parent_image.pool_name;
}
if (parent_name != nullptr) {
*parent_name = parent_image.image_name;
}
if (parent_id != nullptr) {
*parent_id = parent_image.image_id;
}
if (parent_snap_name != nullptr) {
*parent_snap_name = parent_snap.name;
}
}
return r;
}
int Image::get_parent(linked_image_spec_t *parent_image,
snap_spec_t *parent_snap)
{
auto ictx = reinterpret_cast<ImageCtx*>(ctx);
tracepoint(librbd, get_parent_info_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::api::Image<>::get_parent(ictx, parent_image, parent_snap);
tracepoint(librbd, get_parent_info_exit, r,
parent_image->pool_name.c_str(),
parent_image->image_name.c_str(),
parent_image->image_id.c_str(),
parent_snap->name.c_str());
return r;
}
int Image::get_migration_source_spec(std::string* source_spec)
{
auto ictx = reinterpret_cast<ImageCtx*>(ctx);
return librbd::api::Migration<>::get_source_spec(ictx, source_spec);
}
int Image::get_flags(uint64_t *flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_flags_enter, ictx);
int r = librbd::get_flags(ictx, flags);
tracepoint(librbd, get_flags_exit, ictx, r, *flags);
return r;
}
int Image::set_image_notification(int fd, int type)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, set_image_notification_enter, ictx, fd, type);
int r = librbd::set_image_notification(ictx, fd, type);
tracepoint(librbd, set_image_notification_exit, ictx, r);
return r;
}
int Image::is_exclusive_lock_owner(bool *is_owner)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, is_exclusive_lock_owner_enter, ictx);
int r = librbd::is_exclusive_lock_owner(ictx, is_owner);
tracepoint(librbd, is_exclusive_lock_owner_exit, ictx, r, *is_owner);
return r;
}
int Image::lock_acquire(rbd_lock_mode_t lock_mode)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_acquire_enter, ictx, lock_mode);
int r = librbd::lock_acquire(ictx, lock_mode);
tracepoint(librbd, lock_acquire_exit, ictx, r);
return r;
}
int Image::lock_release()
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_release_enter, ictx);
int r = librbd::lock_release(ictx);
tracepoint(librbd, lock_release_exit, ictx, r);
return r;
}
int Image::lock_get_owners(rbd_lock_mode_t *lock_mode,
std::list<std::string> *lock_owners)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_get_owners_enter, ictx);
int r = librbd::lock_get_owners(ictx, lock_mode, lock_owners);
tracepoint(librbd, lock_get_owners_exit, ictx, r);
return r;
}
int Image::lock_break(rbd_lock_mode_t lock_mode,
const std::string &lock_owner)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_break_enter, ictx, lock_mode, lock_owner.c_str());
int r = librbd::lock_break(ictx, lock_mode, lock_owner);
tracepoint(librbd, lock_break_exit, ictx, r);
return r;
}
int Image::rebuild_object_map(ProgressContext &prog_ctx)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx*>(ctx);
return ictx->operations->rebuild_object_map(prog_ctx);
}
int Image::check_object_map(ProgressContext &prog_ctx)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx*>(ctx);
return ictx->operations->check_object_map(prog_ctx);
}
int Image::copy(IoCtx& dest_io_ctx, const char *destname)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname);
ImageOptions opts;
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, 0);
tracepoint(librbd, copy_exit, r);
return r;
}
int Image::copy2(Image& dest)
{
ImageCtx *srcctx = (ImageCtx *)ctx;
ImageCtx *destctx = (ImageCtx *)dest.ctx;
tracepoint(librbd, copy2_enter, srcctx, srcctx->name.c_str(), srcctx->snap_name.c_str(), srcctx->read_only, destctx, destctx->name.c_str(), destctx->snap_name.c_str(), destctx->read_only);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(srcctx, destctx, prog_ctx, 0);
tracepoint(librbd, copy2_exit, r);
return r;
}
int Image::copy3(IoCtx& dest_io_ctx, const char *destname, ImageOptions& opts)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy3_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, opts.opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, 0);
tracepoint(librbd, copy3_exit, r);
return r;
}
int Image::copy4(IoCtx& dest_io_ctx, const char *destname, ImageOptions& opts, size_t sparse_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy4_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, opts.opts, sparse_size);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, sparse_size);
tracepoint(librbd, copy4_exit, r);
return r;
}
int Image::copy_with_progress(IoCtx& dest_io_ctx, const char *destname,
librbd::ProgressContext &pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname);
ImageOptions opts;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, pctx, 0);
tracepoint(librbd, copy_exit, r);
return r;
}
int Image::copy_with_progress2(Image& dest, librbd::ProgressContext &pctx)
{
ImageCtx *srcctx = (ImageCtx *)ctx;
ImageCtx *destctx = (ImageCtx *)dest.ctx;
tracepoint(librbd, copy2_enter, srcctx, srcctx->name.c_str(), srcctx->snap_name.c_str(), srcctx->read_only, destctx, destctx->name.c_str(), destctx->snap_name.c_str(), destctx->read_only);
int r = librbd::copy(srcctx, destctx, pctx, 0);
tracepoint(librbd, copy2_exit, r);
return r;
}
int Image::copy_with_progress3(IoCtx& dest_io_ctx, const char *destname,
ImageOptions& opts,
librbd::ProgressContext &pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy3_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, opts.opts);
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, pctx, 0);
tracepoint(librbd, copy3_exit, r);
return r;
}
int Image::copy_with_progress4(IoCtx& dest_io_ctx, const char *destname,
ImageOptions& opts,
librbd::ProgressContext &pctx,
size_t sparse_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy4_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, opts.opts, sparse_size);
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, pctx, sparse_size);
tracepoint(librbd, copy4_exit, r);
return r;
}
int Image::deep_copy(IoCtx& dest_io_ctx, const char *destname,
ImageOptions& opts)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, deep_copy_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only,
dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(),
destname, opts.opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Image<>::deep_copy(ictx, dest_io_ctx, destname, opts,
prog_ctx);
tracepoint(librbd, deep_copy_exit, r);
return r;
}
int Image::deep_copy_with_progress(IoCtx& dest_io_ctx, const char *destname,
ImageOptions& opts,
librbd::ProgressContext &prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, deep_copy_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only,
dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(),
destname, opts.opts);
int r = librbd::api::Image<>::deep_copy(ictx, dest_io_ctx, destname, opts,
prog_ctx);
tracepoint(librbd, deep_copy_exit, r);
return r;
}
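/* image encryption: encryption_format() writes the on-disk encryption header,
   while encryption_load()/encryption_load2() unlock an already-formatted
   image so that subsequent I/O is transparently encrypted/decrypted */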
int Image::encryption_format(encryption_format_t format,
encryption_options_t opts,
size_t opts_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Image<>::encryption_format(
ictx, format, opts, opts_size, false);
}
int Image::encryption_load(encryption_format_t format,
encryption_options_t opts,
size_t opts_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
encryption_spec_t spec = {format, opts, opts_size};
return librbd::api::Image<>::encryption_load(ictx, &spec, 1, false);
}
int Image::encryption_load2(const encryption_spec_t *specs, size_t spec_count)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Image<>::encryption_load(
ictx, specs, spec_count, false);
}
int Image::flatten()
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, flatten_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->flatten(prog_ctx);
tracepoint(librbd, flatten_exit, r);
return r;
}
int Image::flatten_with_progress(librbd::ProgressContext& prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, flatten_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
int r = ictx->operations->flatten(prog_ctx);
tracepoint(librbd, flatten_exit, r);
return r;
}
int Image::sparsify(size_t sparse_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, sparsify_enter, ictx, ictx->name.c_str(), sparse_size,
ictx->id.c_str());
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->sparsify(sparse_size, prog_ctx);
tracepoint(librbd, sparsify_exit, r);
return r;
}
int Image::sparsify_with_progress(size_t sparse_size,
librbd::ProgressContext& prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, sparsify_enter, ictx, ictx->name.c_str(), sparse_size,
ictx->id.c_str());
int r = ictx->operations->sparsify(sparse_size, prog_ctx);
tracepoint(librbd, sparsify_exit, r);
return r;
}
int Image::list_children(set<pair<string, string> > *children)
{
std::vector<linked_image_spec_t> images;
int r = list_children3(&images);
if (r < 0) {
return r;
}
for (auto& image : images) {
if (!image.trash) {
children->insert({image.pool_name, image.image_name});
}
}
return 0;
}
int Image::list_children2(vector<librbd::child_info_t> *children)
{
std::vector<linked_image_spec_t> images;
int r = list_children3(&images);
if (r < 0) {
return r;
}
for (auto& image : images) {
children->push_back({
.pool_name = image.pool_name,
.image_name = image.image_name,
.image_id = image.image_id,
.trash = image.trash});
}
return 0;
}
int Image::list_children3(std::vector<linked_image_spec_t> *images)
{
auto ictx = reinterpret_cast<ImageCtx*>(ctx);
tracepoint(librbd, list_children_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::api::Image<>::list_children(ictx, images);
#ifdef WITH_LTTNG
if (r >= 0) {
for (auto& it : *images) {
tracepoint(librbd, list_children_entry, it.pool_name.c_str(),
it.image_name.c_str());
}
}
#endif
tracepoint(librbd, list_children_exit, r);
return r;
}
int Image::list_descendants(std::vector<linked_image_spec_t> *images)
{
auto ictx = reinterpret_cast<ImageCtx*>(ctx);
images->clear();
int r = librbd::api::Image<>::list_descendants(ictx, {}, images);
return r;
}
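/* legacy advisory locking (list_lockers/lock_exclusive/lock_shared/unlock/
   break_lock); distinct from the managed exclusive-lock feature handled by
   lock_acquire()/lock_release() above */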
int Image::list_lockers(std::list<librbd::locker_t> *lockers,
bool *exclusive, string *tag)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, list_lockers_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::list_lockers(ictx, lockers, exclusive, tag);
if (r >= 0) {
for (std::list<librbd::locker_t>::const_iterator it = lockers->begin();
it != lockers->end(); ++it) {
tracepoint(librbd, list_lockers_entry, it->client.c_str(), it->cookie.c_str(), it->address.c_str());
}
}
tracepoint(librbd, list_lockers_exit, r);
return r;
}
int Image::lock_exclusive(const string& cookie)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_exclusive_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie.c_str());
int r = librbd::lock(ictx, true, cookie, "");
tracepoint(librbd, lock_exclusive_exit, r);
return r;
}
int Image::lock_shared(const string& cookie, const std::string& tag)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_shared_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie.c_str(), tag.c_str());
int r = librbd::lock(ictx, false, cookie, tag);
tracepoint(librbd, lock_shared_exit, r);
return r;
}
int Image::unlock(const string& cookie)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, unlock_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie.c_str());
int r = librbd::unlock(ictx, cookie);
tracepoint(librbd, unlock_exit, r);
return r;
}
int Image::break_lock(const string& client, const string& cookie)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, break_lock_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, client.c_str(), cookie.c_str());
int r = librbd::break_lock(ictx, client, cookie);
tracepoint(librbd, break_lock_exit, r);
return r;
}
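/* snapshot operations */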
int Image::snap_create(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_create_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
auto flags = librbd::util::get_default_snap_create_flags(ictx);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Snapshot<>::create(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_create_exit, r);
return r;
}
int Image::snap_create2(const char *snap_name, uint32_t flags,
ProgressContext& prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_create_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Snapshot<>::create(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_create_exit, r);
return r;
}
int Image::snap_remove(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_remove_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Snapshot<>::remove(ictx, snap_name, 0, prog_ctx);
tracepoint(librbd, snap_remove_exit, r);
return r;
}
int Image::snap_remove2(const char *snap_name, uint32_t flags, ProgressContext& pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_remove2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name, flags);
int r = librbd::api::Snapshot<>::remove(ictx, snap_name, flags, pctx);
tracepoint(librbd, snap_remove_exit, r);
return r;
}
int Image::snap_remove_by_id(uint64_t snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Snapshot<>::remove(ictx, snap_id);
}
int Image::snap_rollback(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_rollback_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->snap_rollback(cls::rbd::UserSnapshotNamespace(), snap_name, prog_ctx);
tracepoint(librbd, snap_rollback_exit, r);
return r;
}
int Image::snap_rename(const char *srcname, const char *dstname)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_rename_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, srcname, dstname);
int r = ictx->operations->snap_rename(srcname, dstname);
tracepoint(librbd, snap_rename_exit, r);
return r;
}
int Image::snap_rollback_with_progress(const char *snap_name,
ProgressContext& prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_rollback_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_rollback(cls::rbd::UserSnapshotNamespace(), snap_name, prog_ctx);
tracepoint(librbd, snap_rollback_exit, r);
return r;
}
int Image::snap_protect(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_protect_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_protect_exit, r);
return r;
}
int Image::snap_unprotect(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_unprotect_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_unprotect(cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_unprotect_exit, r);
return r;
}
int Image::snap_is_protected(const char *snap_name, bool *is_protected)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_is_protected_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Snapshot<>::is_protected(ictx, snap_name, is_protected);
tracepoint(librbd, snap_is_protected_exit, r, *is_protected ? 1 : 0);
return r;
}
int Image::snap_list(vector<librbd::snap_info_t>& snaps)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_list_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, &snaps);
int r = librbd::api::Snapshot<>::list(ictx, snaps);
if (r >= 0) {
for (int i = 0, n = snaps.size(); i < n; i++) {
tracepoint(librbd, snap_list_entry, snaps[i].id, snaps[i].size, snaps[i].name.c_str());
}
}
tracepoint(librbd, snap_list_exit, r, snaps.size());
if (r >= 0) {
// A little ugly, but the C++ API doesn't need an Image::snap_list_end,
// and we want the tracepoints to mirror the C API
tracepoint(librbd, snap_list_end_enter, &snaps);
tracepoint(librbd, snap_list_end_exit);
}
return r;
}
bool Image::snap_exists(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_exists_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, snap_name);
bool exists;
int r = librbd::api::Snapshot<>::exists(ictx, cls::rbd::UserSnapshotNamespace(), snap_name, &exists);
tracepoint(librbd, snap_exists_exit, r, exists);
if (r < 0) {
// lie to caller since we don't know the real answer yet.
return false;
}
return exists;
}
// A safer version of snap_exists.
int Image::snap_exists2(const char *snap_name, bool *exists)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_exists_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Snapshot<>::exists(ictx, cls::rbd::UserSnapshotNamespace(), snap_name, exists);
tracepoint(librbd, snap_exists_exit, r, *exists);
return r;
}
int Image::snap_get_timestamp(uint64_t snap_id, struct timespec *timestamp)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_get_timestamp_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_timestamp(ictx, snap_id, timestamp);
tracepoint(librbd, snap_get_timestamp_exit, r);
return r;
}
int Image::snap_get_limit(uint64_t *limit)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_get_limit_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_limit(ictx, limit);
tracepoint(librbd, snap_get_limit_exit, r, *limit);
return r;
}
int Image::snap_get_namespace_type(uint64_t snap_id,
snap_namespace_type_t *namespace_type) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_get_namespace_type_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_namespace_type(ictx, snap_id, namespace_type);
tracepoint(librbd, snap_get_namespace_type_exit, r);
return r;
}
int Image::snap_get_group_namespace(uint64_t snap_id,
snap_group_namespace_t *group_snap,
size_t group_snap_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_get_group_namespace_enter, ictx,
ictx->name.c_str());
if (group_snap_size != sizeof(snap_group_namespace_t)) {
tracepoint(librbd, snap_get_group_namespace_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Snapshot<>::get_group_namespace(ictx, snap_id,
group_snap);
tracepoint(librbd, snap_get_group_namespace_exit, r);
return r;
}
int Image::snap_get_trash_namespace(uint64_t snap_id,
std::string* original_name) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Snapshot<>::get_trash_namespace(ictx, snap_id,
original_name);
}
int Image::snap_get_mirror_namespace(
uint64_t snap_id, snap_mirror_namespace_t *mirror_snap,
size_t mirror_snap_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (mirror_snap_size != sizeof(snap_mirror_namespace_t)) {
return -ERANGE;
}
int r = librbd::api::Snapshot<>::get_mirror_namespace(
ictx, snap_id, mirror_snap);
return r;
}
int Image::snap_set_limit(uint64_t limit)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_set_limit_enter, ictx, ictx->name.c_str(), limit);
int r = ictx->operations->snap_set_limit(limit);
tracepoint(librbd, snap_set_limit_exit, r);
return r;
}
int Image::snap_set(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_set_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Image<>::snap_set(
ictx, cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_set_exit, r);
return r;
}
int Image::snap_set_by_id(uint64_t snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Image<>::snap_set(ictx, snap_id);
}
int Image::snap_get_name(uint64_t snap_id, std::string *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Snapshot<>::get_name(ictx, snap_id, snap_name);
}
int Image::snap_get_id(const std::string snap_name, uint64_t *snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Snapshot<>::get_id(ictx, snap_name, snap_id);
}
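/* synchronous I/O: these calls block until the underlying api::Io<> request
   completes and return the number of bytes handled or a negative error code */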
ssize_t Image::read(uint64_t ofs, size_t len, bufferlist& bl)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, read_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int r = api::Io<>::read(*ictx, ofs, len, io::ReadResult{&bl}, 0);
tracepoint(librbd, read_exit, r);
return r;
}
ssize_t Image::read2(uint64_t ofs, size_t len, bufferlist& bl, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, read2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, ofs, len, op_flags);
int r = api::Io<>::read(*ictx, ofs, len, io::ReadResult{&bl}, op_flags);
tracepoint(librbd, read_exit, r);
return r;
}
int64_t Image::read_iterate(uint64_t ofs, size_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, read_iterate_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int64_t r = librbd::read_iterate(ictx, ofs, len, cb, arg);
tracepoint(librbd, read_iterate_exit, r);
return r;
}
int Image::read_iterate2(uint64_t ofs, uint64_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, read_iterate2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int64_t r = librbd::read_iterate(ictx, ofs, len, cb, arg);
if (r > 0)
r = 0;
tracepoint(librbd, read_iterate2_exit, r);
return (int)r;
}
int Image::diff_iterate(const char *fromsnapname,
uint64_t ofs, uint64_t len,
int (*cb)(uint64_t, size_t, int, void *),
void *arg)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, diff_iterate_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, fromsnapname, ofs, len,
true, false);
int r = librbd::api::DiffIterate<>::diff_iterate(ictx,
cls::rbd::UserSnapshotNamespace(),
fromsnapname, ofs,
len, true, false, cb, arg);
tracepoint(librbd, diff_iterate_exit, r);
return r;
}
int Image::diff_iterate2(const char *fromsnapname, uint64_t ofs, uint64_t len,
bool include_parent, bool whole_object,
int (*cb)(uint64_t, size_t, int, void *), void *arg)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, diff_iterate_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, fromsnapname, ofs, len,
include_parent, whole_object);
int r = librbd::api::DiffIterate<>::diff_iterate(ictx,
cls::rbd::UserSnapshotNamespace(),
fromsnapname, ofs,
len, include_parent,
whole_object, cb, arg);
tracepoint(librbd, diff_iterate_exit, r);
return r;
}
ssize_t Image::write(uint64_t ofs, size_t len, bufferlist& bl)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len, bl.length() < len ? NULL : bl.c_str());
if (bl.length() < len) {
tracepoint(librbd, write_exit, -EINVAL);
return -EINVAL;
}
int r = api::Io<>::write(*ictx, ofs, len, bufferlist{bl}, 0);
tracepoint(librbd, write_exit, r);
return r;
}
ssize_t Image::write2(uint64_t ofs, size_t len, bufferlist& bl, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, write2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only,
ofs, len, bl.length() < len ? NULL : bl.c_str(), op_flags);
if (bl.length() < len) {
tracepoint(librbd, write_exit, -EINVAL);
return -EINVAL;
}
int r = api::Io<>::write(*ictx, ofs, len, bufferlist{bl}, op_flags);
tracepoint(librbd, write_exit, r);
return r;
}
int Image::discard(uint64_t ofs, uint64_t len)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, discard_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
if (len > static_cast<uint64_t>(std::numeric_limits<int32_t>::max())) {
tracepoint(librbd, discard_exit, -EINVAL);
return -EINVAL;
}
int r = api::Io<>::discard(
*ictx, ofs, len, ictx->discard_granularity_bytes);
tracepoint(librbd, discard_exit, r);
return r;
}
ssize_t Image::writesame(uint64_t ofs, size_t len, bufferlist& bl, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, writesame_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, ofs, len, bl.length() == 0 ? NULL : bl.c_str(), bl.length(),
op_flags);
if (bl.length() == 0 || len % bl.length() ||
len > static_cast<size_t>(std::numeric_limits<int>::max())) {
tracepoint(librbd, writesame_exit, -EINVAL);
return -EINVAL;
}
bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && bl.is_zero()) {
int r = api::Io<>::write_zeroes(*ictx, ofs, len, 0U, op_flags);
tracepoint(librbd, writesame_exit, r);
return r;
}
int r = api::Io<>::write_same(*ictx, ofs, len, bufferlist{bl}, op_flags);
tracepoint(librbd, writesame_exit, r);
return r;
}
ssize_t Image::write_zeroes(uint64_t ofs, size_t len, int zero_flags,
int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return api::Io<>::write_zeroes(*ictx, ofs, len, zero_flags, op_flags);
}
ssize_t Image::compare_and_write(uint64_t ofs, size_t len,
ceph::bufferlist &cmp_bl, ceph::bufferlist& bl,
uint64_t *mismatch_off, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, compare_and_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(),
ictx->read_only, ofs, len, cmp_bl.length() < len ? NULL : cmp_bl.c_str(),
bl.length() < len ? NULL : bl.c_str(), op_flags);
if (bl.length() < len || cmp_bl.length() < len) {
tracepoint(librbd, compare_and_write_exit, -EINVAL);
return -EINVAL;
}
int r = api::Io<>::compare_and_write(
*ictx, ofs, len, bufferlist{cmp_bl}, bufferlist{bl}, mismatch_off,
op_flags);
tracepoint(librbd, compare_and_write_exit, r);
return r;
}
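/* asynchronous I/O: these calls queue the request and return immediately;
   completion is reported through the supplied RBD::AioCompletion */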
int Image::aio_write(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, bl.length() < len ? NULL : bl.c_str(), c->pc);
if (bl.length() < len) {
tracepoint(librbd, aio_write_exit, -EINVAL);
return -EINVAL;
}
api::Io<>::aio_write(*ictx, get_aio_completion(c), off, len, bufferlist{bl},
0, true);
tracepoint(librbd, aio_write_exit, 0);
return 0;
}
int Image::aio_write2(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_write2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, bl.length() < len ? NULL : bl.c_str(), c->pc, op_flags);
if (bl.length() < len) {
tracepoint(librbd, aio_write_exit, -EINVAL);
return -EINVAL;
}
api::Io<>::aio_write(*ictx, get_aio_completion(c), off, len, bufferlist{bl},
op_flags, true);
tracepoint(librbd, aio_write_exit, 0);
return 0;
}
int Image::aio_read(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_read_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, bl.c_str(), c->pc);
ldout(ictx->cct, 10) << "Image::aio_read() buf=" << (void *)bl.c_str() << "~"
<< (void *)(bl.c_str() + len - 1) << dendl;
api::Io<>::aio_read(*ictx, get_aio_completion(c), off, len,
io::ReadResult{&bl}, 0, true);
tracepoint(librbd, aio_read_exit, 0);
return 0;
}
int Image::aio_read2(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_read2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, bl.c_str(), c->pc, op_flags);
ldout(ictx->cct, 10) << "Image::aio_read() buf=" << (void *)bl.c_str() << "~"
<< (void *)(bl.c_str() + len - 1) << dendl;
api::Io<>::aio_read(*ictx, get_aio_completion(c), off, len,
io::ReadResult{&bl}, op_flags, true);
tracepoint(librbd, aio_read_exit, 0);
return 0;
}
int Image::flush()
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, flush_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = api::Io<>::flush(*ictx);
tracepoint(librbd, flush_exit, r);
return r;
}
int Image::aio_flush(RBD::AioCompletion *c)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_flush_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, c->pc);
api::Io<>::aio_flush(*ictx, get_aio_completion(c), true);
tracepoint(librbd, aio_flush_exit, 0);
return 0;
}
int Image::aio_discard(uint64_t off, uint64_t len, RBD::AioCompletion *c)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_discard_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, c->pc);
api::Io<>::aio_discard(
*ictx, get_aio_completion(c), off, len, ictx->discard_granularity_bytes,
true);
tracepoint(librbd, aio_discard_exit, 0);
return 0;
}
int Image::aio_writesame(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_writesame_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, bl.length() <= len ? NULL : bl.c_str(), bl.length(),
c->pc, op_flags);
if (bl.length() == 0 || len % bl.length()) {
tracepoint(librbd, aio_writesame_exit, -EINVAL);
return -EINVAL;
}
bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && bl.is_zero()) {
api::Io<>::aio_write_zeroes(*ictx, get_aio_completion(c), off, len, 0U,
op_flags, true);
tracepoint(librbd, aio_writesame_exit, 0);
return 0;
}
api::Io<>::aio_write_same(*ictx, get_aio_completion(c), off, len,
bufferlist{bl}, op_flags, true);
tracepoint(librbd, aio_writesame_exit, 0);
return 0;
}
int Image::aio_write_zeroes(uint64_t off, size_t len, RBD::AioCompletion *c,
int zero_flags, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
api::Io<>::aio_write_zeroes(*ictx, get_aio_completion(c), off, len,
zero_flags, op_flags, true);
return 0;
}
int Image::aio_compare_and_write(uint64_t off, size_t len,
ceph::bufferlist& cmp_bl, ceph::bufferlist& bl,
RBD::AioCompletion *c, uint64_t *mismatch_off,
int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_compare_and_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(),
ictx->read_only, off, len, cmp_bl.length() < len ? NULL : cmp_bl.c_str(),
bl.length() < len ? NULL : bl.c_str(), c->pc, op_flags);
if (bl.length() < len || cmp_bl.length() < len) {
tracepoint(librbd, aio_compare_and_write_exit, -EINVAL);
return -EINVAL;
}
api::Io<>::aio_compare_and_write(*ictx, get_aio_completion(c), off, len,
bufferlist{cmp_bl}, bufferlist{bl},
mismatch_off, op_flags, false);
tracepoint(librbd, aio_compare_and_write_exit, 0);
return 0;
}
int Image::invalidate_cache()
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, invalidate_cache_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::invalidate_cache(ictx);
tracepoint(librbd, invalidate_cache_exit, r);
return r;
}
int Image::poll_io_events(RBD::AioCompletion **comps, int numcomp)
{
io::AioCompletion *cs[numcomp];
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, poll_io_events_enter, ictx, numcomp);
int r = librbd::poll_io_events(ictx, cs, numcomp);
tracepoint(librbd, poll_io_events_exit, r);
if (r > 0) {
for (int i = 0; i < r; ++i)
comps[i] = (RBD::AioCompletion *)cs[i]->rbd_comp;
}
return r;
}
int Image::metadata_get(const std::string &key, std::string *value)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, metadata_get_enter, ictx, key.c_str());
int r = librbd::metadata_get(ictx, key, value);
if (r < 0) {
tracepoint(librbd, metadata_get_exit, r, key.c_str(), NULL);
} else {
tracepoint(librbd, metadata_get_exit, r, key.c_str(), value->c_str());
}
return r;
}
int Image::metadata_set(const std::string &key, const std::string &value)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, metadata_set_enter, ictx, key.c_str(), value.c_str());
int r = ictx->operations->metadata_set(key, value);
tracepoint(librbd, metadata_set_exit, r);
return r;
}
int Image::metadata_remove(const std::string &key)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, metadata_remove_enter, ictx, key.c_str());
int r = ictx->operations->metadata_remove(key);
tracepoint(librbd, metadata_remove_exit, r);
return r;
}
int Image::metadata_list(const std::string &start, uint64_t max, map<string, bufferlist> *pairs)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, metadata_list_enter, ictx);
int r = librbd::metadata_list(ictx, start, max, pairs);
if (r >= 0) {
for (map<string, bufferlist>::iterator it = pairs->begin();
it != pairs->end(); ++it) {
tracepoint(librbd, metadata_list_entry, it->first.c_str(), it->second.c_str());
}
}
tracepoint(librbd, metadata_list_exit, r);
return r;
}
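/* per-image mirroring */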
int Image::mirror_image_enable() {
return mirror_image_enable2(RBD_MIRROR_IMAGE_MODE_JOURNAL);
}
int Image::mirror_image_enable2(mirror_image_mode_t mode) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_enable(ictx, mode, false);
}
int Image::mirror_image_disable(bool force) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_disable(ictx, force);
}
int Image::mirror_image_promote(bool force) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_promote(ictx, force);
}
int Image::mirror_image_demote() {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_demote(ictx);
}
int Image::mirror_image_resync()
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_resync(ictx);
}
int Image::mirror_image_create_snapshot(uint64_t *snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
auto flags = librbd::util::get_default_snap_create_flags(ictx);
return librbd::api::Mirror<>::image_snapshot_create(ictx, flags, snap_id);
}
int Image::mirror_image_create_snapshot2(uint32_t flags, uint64_t *snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_snapshot_create(ictx, flags, snap_id);
}
int Image::mirror_image_get_info(mirror_image_info_t *mirror_image_info,
size_t info_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_info_t) != info_size) {
return -ERANGE;
}
return librbd::api::Mirror<>::image_get_info(ictx, mirror_image_info);
}
int Image::mirror_image_get_mode(mirror_image_mode_t *mode) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_get_mode(ictx, mode);
}
int Image::mirror_image_get_global_status(
mirror_image_global_status_t *mirror_image_global_status,
size_t status_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_global_status_t) != status_size) {
return -ERANGE;
}
return librbd::api::Mirror<>::image_get_global_status(
ictx, mirror_image_global_status);
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int Image::mirror_image_get_status(mirror_image_status_t *mirror_image_status,
size_t status_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_status_t) != status_size) {
return -ERANGE;
}
mirror_image_global_status_t mirror_image_global_status;
int r = librbd::api::Mirror<>::image_get_global_status(
ictx, &mirror_image_global_status);
if (r < 0) {
return r;
}
librbd::mirror_image_site_status_t local_status;
r = get_local_mirror_image_site_status(mirror_image_global_status,
&local_status);
if (r < 0) {
return r;
}
*mirror_image_status = mirror_image_status_t{
mirror_image_global_status.name, mirror_image_global_status.info,
local_status.state, local_status.description, local_status.last_update,
local_status.up};
return 0;
}
#pragma GCC diagnostic pop
int Image::mirror_image_get_instance_id(std::string *instance_id) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_get_instance_id(ictx, instance_id);
}
int Image::aio_mirror_image_promote(bool force, RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
librbd::api::Mirror<>::image_promote(
ictx, force, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::aio_mirror_image_demote(RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
librbd::api::Mirror<>::image_demote(
ictx, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::aio_mirror_image_get_info(mirror_image_info_t *mirror_image_info,
size_t info_size,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_info_t) != info_size) {
return -ERANGE;
}
librbd::api::Mirror<>::image_get_info(
ictx, mirror_image_info,
new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::aio_mirror_image_get_mode(mirror_image_mode_t *mode,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
librbd::api::Mirror<>::image_get_mode(
ictx, mode, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::aio_mirror_image_get_global_status(
mirror_image_global_status_t *status, size_t status_size,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_global_status_t) != status_size) {
return -ERANGE;
}
librbd::api::Mirror<>::image_get_global_status(
ictx, status, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int Image::aio_mirror_image_get_status(mirror_image_status_t *status,
size_t status_size,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_status_t) != status_size) {
return -ERANGE;
}
auto ctx = new C_MirrorImageGetStatus(
status, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
librbd::api::Mirror<>::image_get_global_status(
ictx, &ctx->cpp_mirror_image_global_status, ctx);
return 0;
}
#pragma GCC diagnostic pop
int Image::aio_mirror_image_create_snapshot(uint32_t flags, uint64_t *snap_id,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
librbd::api::Mirror<>::image_snapshot_create(
ictx, flags, snap_id, new C_AioCompletion(ictx,
librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
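/* update/quiesce watcher registration and watcher listing.
   Illustrative sketch (not from this file; my_update_ctx is a hypothetical,
   caller-provided UpdateWatchCtx implementation):
     uint64_t handle;
     image.update_watch(&my_update_ctx, &handle);
     // ... later ...
     image.update_unwatch(handle);
*/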
int Image::update_watch(UpdateWatchCtx *wctx, uint64_t *handle) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, update_watch_enter, ictx, wctx);
int r = ictx->state->register_update_watcher(wctx, handle);
tracepoint(librbd, update_watch_exit, r, *handle);
return r;
}
int Image::update_unwatch(uint64_t handle) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, update_unwatch_enter, ictx, handle);
int r = ictx->state->unregister_update_watcher(handle);
tracepoint(librbd, update_unwatch_exit, r);
return r;
}
int Image::list_watchers(std::list<librbd::image_watcher_t> &watchers) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, list_watchers_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::list_watchers(ictx, watchers);
#ifdef WITH_LTTNG
if (r >= 0) {
for (auto &watcher : watchers) {
tracepoint(librbd, list_watchers_entry, watcher.addr.c_str(), watcher.id, watcher.cookie);
}
}
#endif
tracepoint(librbd, list_watchers_exit, r, watchers.size());
return r;
}
int Image::config_list(std::vector<config_option_t> *options) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Config<>::list(ictx, options);
}
int Image::quiesce_watch(QuiesceWatchCtx *wctx, uint64_t *handle) {
ImageCtx *ictx = (ImageCtx *)ctx;
int r = ictx->state->register_quiesce_watcher(wctx, handle);
return r;
}
int Image::quiesce_unwatch(uint64_t handle) {
ImageCtx *ictx = (ImageCtx *)ctx;
int r = ictx->state->unregister_quiesce_watcher(handle);
return r;
}
void Image::quiesce_complete(uint64_t handle, int r) {
ImageCtx *ictx = (ImageCtx *)ctx;
ictx->state->quiesce_complete(handle, r);
}
} // namespace librbd
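/* C API */
// The extern "C" wrappers below convert the opaque C handles (rados_ioctx_t,
// rbd_image_t, rbd_completion_t, ...) into their librados/librbd C++
// counterparts and delegate to the C++ API. Unless noted otherwise they return
// 0 on success and a negative errno value on failure; most string getters
// report an undersized output buffer by writing the required length back and
// returning -ERANGE.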
extern "C" void rbd_version(int *major, int *minor, int *extra)
{
if (major)
*major = LIBRBD_VER_MAJOR;
if (minor)
*minor = LIBRBD_VER_MINOR;
if (extra)
*extra = LIBRBD_VER_EXTRA;
}
extern "C" void rbd_image_options_create(rbd_image_options_t* opts)
{
librbd::image_options_create(opts);
}
extern "C" void rbd_image_options_destroy(rbd_image_options_t opts)
{
librbd::image_options_destroy(opts);
}
extern "C" int rbd_image_options_set_string(rbd_image_options_t opts, int optname,
const char* optval)
{
return librbd::image_options_set(opts, optname, optval);
}
extern "C" int rbd_image_options_set_uint64(rbd_image_options_t opts, int optname,
uint64_t optval)
{
return librbd::image_options_set(opts, optname, optval);
}
extern "C" int rbd_image_options_get_string(rbd_image_options_t opts, int optname,
char* optval, size_t maxlen)
{
std::string optval_;
int r = librbd::image_options_get(opts, optname, &optval_);
if (r < 0) {
return r;
}
if (optval_.size() >= maxlen) {
return -E2BIG;
}
strncpy(optval, optval_.c_str(), maxlen);
return 0;
}
extern "C" int rbd_image_options_get_uint64(rbd_image_options_t opts, int optname,
uint64_t* optval)
{
return librbd::image_options_get(opts, optname, optval);
}
extern "C" int rbd_image_options_is_set(rbd_image_options_t opts, int optname,
bool* is_set)
{
return librbd::image_options_is_set(opts, optname, is_set);
}
extern "C" int rbd_image_options_unset(rbd_image_options_t opts, int optname)
{
return librbd::image_options_unset(opts, optname);
}
extern "C" void rbd_image_options_clear(rbd_image_options_t opts)
{
librbd::image_options_clear(opts);
}
extern "C" int rbd_image_options_is_empty(rbd_image_options_t opts)
{
return librbd::image_options_is_empty(opts);
}
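// Illustrative sketch (not part of this file) of the rbd_image_options_t
// lifecycle consumed by rbd_create4()/rbd_clone3() further below; the option
// constant is only an example:
//
//   rbd_image_options_t opts;
//   rbd_image_options_create(&opts);
//   rbd_image_options_set_uint64(opts, RBD_IMAGE_OPTION_ORDER, 22);
//   /* ... rbd_create4(ioctx, "image", size, opts) ... */
//   rbd_image_options_destroy(opts);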
/* pool mirroring */
extern "C" int rbd_mirror_site_name_get(rados_t cluster, char *name,
size_t *max_len) {
librados::Rados rados;
librados::Rados::from_rados_t(cluster, rados);
std::string site_name;
int r = librbd::api::Mirror<>::site_name_get(rados, &site_name);
if (r < 0) {
return r;
}
auto total_len = site_name.size() + 1;
if (*max_len < total_len) {
*max_len = total_len;
return -ERANGE;
}
*max_len = total_len;
strcpy(name, site_name.c_str());
return 0;
}
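// rbd_mirror_site_name_get(), rbd_mirror_uuid_get() and
// rbd_mirror_peer_bootstrap_create() share a sizing convention: when *max_len
// is too small the required length (including the trailing NUL) is written
// back and -ERANGE is returned. Illustrative two-call sketch, assuming
// 'cluster' is a connected rados_t handle:
//
//   size_t len = 0;
//   int r = rbd_mirror_site_name_get(cluster, NULL, &len);  // probe size
//   if (r == -ERANGE) {
//     char *buf = (char *)malloc(len);
//     r = rbd_mirror_site_name_get(cluster, buf, &len);
//     /* ... */
//     free(buf);
//   }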
extern "C" int rbd_mirror_site_name_set(rados_t cluster, const char *name) {
librados::Rados rados;
librados::Rados::from_rados_t(cluster, rados);
return librbd::api::Mirror<>::site_name_set(rados, name);
}
extern "C" int rbd_mirror_mode_get(rados_ioctx_t p,
rbd_mirror_mode_t *mirror_mode) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::mode_get(io_ctx, mirror_mode);
}
extern "C" int rbd_mirror_mode_set(rados_ioctx_t p,
rbd_mirror_mode_t mirror_mode) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::mode_set(io_ctx, mirror_mode);
}
extern "C" int rbd_mirror_uuid_get(rados_ioctx_t p,
char *mirror_uuid, size_t *max_len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::string mirror_uuid_str;
int r = librbd::api::Mirror<>::uuid_get(io_ctx, &mirror_uuid_str);
if (r < 0) {
return r;
}
auto total_len = mirror_uuid_str.size() + 1;
if (*max_len < total_len) {
*max_len = total_len;
return -ERANGE;
}
*max_len = total_len;
strcpy(mirror_uuid, mirror_uuid_str.c_str());
return 0;
}
extern "C" int rbd_mirror_peer_bootstrap_create(rados_ioctx_t p, char *token,
size_t *max_len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::string token_str;
int r = librbd::api::Mirror<>::peer_bootstrap_create(io_ctx, &token_str);
if (r < 0) {
return r;
}
auto total_len = token_str.size() + 1;
if (*max_len < total_len) {
*max_len = total_len;
return -ERANGE;
}
*max_len = total_len;
strcpy(token, token_str.c_str());
return 0;
}
extern "C" int rbd_mirror_peer_bootstrap_import(
rados_ioctx_t p, rbd_mirror_peer_direction_t direction,
const char *token) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::peer_bootstrap_import(io_ctx, direction, token);
}
extern "C" int rbd_mirror_peer_site_add(rados_ioctx_t p, char *uuid,
size_t uuid_max_length,
rbd_mirror_peer_direction_t direction,
const char *site_name,
const char *client_name) {
static const std::size_t UUID_LENGTH = 36;
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
if (uuid_max_length < UUID_LENGTH + 1) {
return -E2BIG;
}
std::string uuid_str;
int r = librbd::api::Mirror<>::peer_site_add(io_ctx, &uuid_str, direction,
site_name, client_name);
if (r >= 0) {
strncpy(uuid, uuid_str.c_str(), uuid_max_length);
uuid[uuid_max_length - 1] = '\0';
}
return r;
}
extern "C" int rbd_mirror_peer_site_remove(rados_ioctx_t p, const char *uuid) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
int r = librbd::api::Mirror<>::peer_site_remove(io_ctx, uuid);
return r;
}
extern "C" int rbd_mirror_peer_site_list(
rados_ioctx_t p, rbd_mirror_peer_site_t *peers, int *max_peers) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::vector<librbd::mirror_peer_site_t> peer_vector;
int r = librbd::api::Mirror<>::peer_site_list(io_ctx, &peer_vector);
if (r < 0) {
return r;
}
if (*max_peers < static_cast<int>(peer_vector.size())) {
*max_peers = static_cast<int>(peer_vector.size());
return -ERANGE;
}
for (int i = 0; i < static_cast<int>(peer_vector.size()); ++i) {
peers[i].uuid = strdup(peer_vector[i].uuid.c_str());
peers[i].direction = peer_vector[i].direction;
peers[i].site_name = strdup(peer_vector[i].site_name.c_str());
peers[i].mirror_uuid = strdup(peer_vector[i].mirror_uuid.c_str());
peers[i].client_name = strdup(peer_vector[i].client_name.c_str());
}
*max_peers = static_cast<int>(peer_vector.size());
return 0;
}
extern "C" void rbd_mirror_peer_site_list_cleanup(rbd_mirror_peer_site_t *peers,
int max_peers) {
for (int i = 0; i < max_peers; ++i) {
free(peers[i].uuid);
free(peers[i].site_name);
free(peers[i].mirror_uuid);
free(peers[i].client_name);
}
}
extern "C" int rbd_mirror_peer_site_set_client_name(
rados_ioctx_t p, const char *uuid, const char *client_name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::peer_site_set_client(io_ctx, uuid, client_name);
}
extern "C" int rbd_mirror_peer_site_set_name(
rados_ioctx_t p, const char *uuid, const char *site_name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::peer_site_set_name(io_ctx, uuid, site_name);
}
extern "C" int rbd_mirror_peer_site_set_direction(
rados_ioctx_t p, const char *uuid, rbd_mirror_peer_direction_t direction) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::peer_site_set_direction(io_ctx, uuid,
direction);
}
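// Peer site attributes travel as flat buffers: 'keys' and 'values' each hold
// *key_value_count (or 'count') consecutive NUL-terminated strings. The getter
// reports the required sizes via *max_key_len/*max_val_len and returns -ERANGE
// when the supplied buffers are too small.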
extern "C" int rbd_mirror_peer_site_get_attributes(
rados_ioctx_t p, const char *uuid, char *keys, size_t *max_key_len,
char *values, size_t *max_val_len, size_t *key_value_count) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, std::string> attributes;
int r = librbd::api::Mirror<>::peer_site_get_attributes(
io_ctx, uuid, &attributes);
if (r < 0) {
return r;
}
size_t key_total_len = 0, val_total_len = 0;
for (auto& it : attributes) {
key_total_len += it.first.size() + 1;
val_total_len += it.second.length() + 1;
}
bool too_short = ((*max_key_len < key_total_len) ||
(*max_val_len < val_total_len));
*max_key_len = key_total_len;
*max_val_len = val_total_len;
*key_value_count = attributes.size();
if (too_short) {
return -ERANGE;
}
char *keys_p = keys;
char *values_p = values;
for (auto& it : attributes) {
strncpy(keys_p, it.first.c_str(), it.first.size() + 1);
keys_p += it.first.size() + 1;
strncpy(values_p, it.second.c_str(), it.second.length() + 1);
values_p += it.second.length() + 1;
}
return 0;
}
extern "C" int rbd_mirror_peer_site_set_attributes(
rados_ioctx_t p, const char *uuid, const char *keys, const char *values,
size_t count) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, std::string> attributes;
for (size_t i = 0; i < count; ++i) {
const char* key = keys;
keys += strlen(key) + 1;
const char* value = values;
values += strlen(value) + 1;
attributes[key] = value;
}
return librbd::api::Mirror<>::peer_site_set_attributes(
io_ctx, uuid, attributes);
}
extern "C" int rbd_mirror_image_global_status_list(rados_ioctx_t p,
const char *start_id, size_t max, char **image_ids,
rbd_mirror_image_global_status_t *images, size_t *len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, librbd::mirror_image_global_status_t> cpp_images;
int r = librbd::api::Mirror<>::image_global_status_list(
io_ctx, start_id, max, &cpp_images);
if (r < 0) {
return r;
}
size_t i = 0;
for (auto &it : cpp_images) {
ceph_assert(i < max);
const std::string &image_id = it.first;
image_ids[i] = strdup(image_id.c_str());
mirror_image_global_status_cpp_to_c(it.second, &images[i]);
i++;
}
*len = i;
return 0;
}
extern "C" void rbd_mirror_image_global_status_cleanup(
rbd_mirror_image_global_status_t *global_status) {
free(global_status->name);
rbd_mirror_image_get_info_cleanup(&global_status->info);
for (auto idx = 0U; idx < global_status->site_statuses_count; ++idx) {
free(global_status->site_statuses[idx].mirror_uuid);
free(global_status->site_statuses[idx].description);
}
free(global_status->site_statuses);
}
extern "C" void rbd_mirror_image_global_status_list_cleanup(
char **image_ids, rbd_mirror_image_global_status_t *images, size_t len) {
for (size_t i = 0; i < len; i++) {
free(image_ids[i]);
rbd_mirror_image_global_status_cleanup(&images[i]);
}
}
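// Deprecated single-peer and per-image status entry points. They are retained
// for backwards compatibility and are implemented in terms of the peer-site
// and global-status APIs above.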
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
extern "C" int rbd_mirror_peer_add(rados_ioctx_t p, char *uuid,
size_t uuid_max_length,
const char *cluster_name,
const char *client_name) {
return rbd_mirror_peer_site_add(
p, uuid, uuid_max_length, RBD_MIRROR_PEER_DIRECTION_RX_TX, cluster_name,
client_name);
}
extern "C" int rbd_mirror_peer_remove(rados_ioctx_t p, const char *uuid) {
return rbd_mirror_peer_site_remove(p, uuid);
}
extern "C" int rbd_mirror_peer_list(rados_ioctx_t p,
rbd_mirror_peer_t *peers, int *max_peers) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::vector<librbd::mirror_peer_site_t> peer_vector;
int r = librbd::api::Mirror<>::peer_site_list(io_ctx, &peer_vector);
if (r < 0) {
return r;
}
if (*max_peers < static_cast<int>(peer_vector.size())) {
*max_peers = static_cast<int>(peer_vector.size());
return -ERANGE;
}
for (int i = 0; i < static_cast<int>(peer_vector.size()); ++i) {
peers[i].uuid = strdup(peer_vector[i].uuid.c_str());
peers[i].cluster_name = strdup(peer_vector[i].site_name.c_str());
peers[i].client_name = strdup(peer_vector[i].client_name.c_str());
}
*max_peers = static_cast<int>(peer_vector.size());
return 0;
}
extern "C" void rbd_mirror_peer_list_cleanup(rbd_mirror_peer_t *peers,
int max_peers) {
for (int i = 0; i < max_peers; ++i) {
free(peers[i].uuid);
free(peers[i].cluster_name);
free(peers[i].client_name);
}
}
extern "C" int rbd_mirror_peer_set_client(rados_ioctx_t p, const char *uuid,
const char *client_name) {
return rbd_mirror_peer_site_set_client_name(p, uuid, client_name);
}
extern "C" int rbd_mirror_peer_set_cluster(rados_ioctx_t p, const char *uuid,
const char *cluster_name) {
return rbd_mirror_peer_site_set_name(p, uuid, cluster_name);
}
extern "C" int rbd_mirror_peer_get_attributes(
rados_ioctx_t p, const char *uuid, char *keys, size_t *max_key_len,
char *values, size_t *max_val_len, size_t *key_value_count) {
return rbd_mirror_peer_site_get_attributes(
p, uuid, keys, max_key_len, values, max_val_len, key_value_count);
}
extern "C" int rbd_mirror_peer_set_attributes(
rados_ioctx_t p, const char *uuid, const char *keys, const char *values,
size_t count) {
return rbd_mirror_peer_site_set_attributes(
p, uuid, keys, values, count);
}
extern "C" int rbd_mirror_image_status_list(rados_ioctx_t p,
const char *start_id, size_t max, char **image_ids,
rbd_mirror_image_status_t *images, size_t *len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, librbd::mirror_image_global_status_t> cpp_images;
int r = librbd::api::Mirror<>::image_global_status_list(
io_ctx, start_id, max, &cpp_images);
if (r < 0) {
return r;
}
size_t i = 0;
for (auto &it : cpp_images) {
ceph_assert(i < max);
const std::string &image_id = it.first;
image_ids[i] = strdup(image_id.c_str());
mirror_image_global_status_cpp_to_c(it.second, &images[i]);
i++;
}
*len = i;
return 0;
}
extern "C" void rbd_mirror_image_status_list_cleanup(char **image_ids,
rbd_mirror_image_status_t *images, size_t len) {
for (size_t i = 0; i < len; i++) {
free(image_ids[i]);
free(images[i].name);
rbd_mirror_image_get_info_cleanup(&images[i].info);
free(images[i].description);
}
}
#pragma GCC diagnostic pop
extern "C" int rbd_mirror_image_status_summary(rados_ioctx_t p,
rbd_mirror_image_status_state_t *states, int *counts, size_t *maxlen) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<librbd::mirror_image_status_state_t, int> states_;
int r = librbd::api::Mirror<>::image_status_summary(io_ctx, &states_);
if (r < 0) {
return r;
}
size_t i = 0;
for (auto &it : states_) {
if (i == *maxlen) {
return -ERANGE;
}
states[i] = it.first;
counts[i] = it.second;
i++;
}
*maxlen = i;
return 0;
}
extern "C" int rbd_mirror_image_instance_id_list(
rados_ioctx_t p, const char *start_id, size_t max, char **image_ids,
char **instance_ids, size_t *len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, std::string> cpp_instance_ids;
int r = librbd::api::Mirror<>::image_instance_id_list(io_ctx, start_id, max,
&cpp_instance_ids);
if (r < 0) {
return r;
}
size_t i = 0;
for (auto &it : cpp_instance_ids) {
ceph_assert(i < max);
image_ids[i] = strdup(it.first.c_str());
instance_ids[i] = strdup(it.second.c_str());
i++;
}
*len = i;
return 0;
}
extern "C" void rbd_mirror_image_instance_id_list_cleanup(
char **image_ids, char **instance_ids, size_t len) {
for (size_t i = 0; i < len; i++) {
free(image_ids[i]);
free(instance_ids[i]);
}
}
extern "C" int rbd_mirror_image_info_list(
rados_ioctx_t p, rbd_mirror_image_mode_t *mode_filter,
const char *start_id, size_t max, char **image_ids,
rbd_mirror_image_mode_t *mode_entries,
rbd_mirror_image_info_t *info_entries, size_t *num_entries) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, std::pair<librbd::mirror_image_mode_t,
librbd::mirror_image_info_t>> cpp_entries;
int r = librbd::api::Mirror<>::image_info_list(io_ctx, mode_filter, start_id,
max, &cpp_entries);
if (r < 0) {
return r;
}
ceph_assert(cpp_entries.size() <= max);
for (auto &it : cpp_entries) {
*(image_ids++) = strdup(it.first.c_str());
*(mode_entries++) = it.second.first;
mirror_image_info_cpp_to_c(it.second.second, info_entries++);
}
*num_entries = cpp_entries.size();
return 0;
}
extern "C" void rbd_mirror_image_info_list_cleanup(
char **image_ids, rbd_mirror_image_info_t *info_entries,
size_t num_entries) {
for (size_t i = 0; i < num_entries; i++) {
free(*(image_ids++));
rbd_mirror_image_get_info_cleanup(info_entries++);
}
}
/* helpers */
extern "C" void rbd_image_spec_cleanup(rbd_image_spec_t *image)
{
free(image->id);
free(image->name);
}
extern "C" void rbd_image_spec_list_cleanup(rbd_image_spec_t *images,
size_t num_images)
{
for (size_t idx = 0; idx < num_images; ++idx) {
rbd_image_spec_cleanup(&images[idx]);
}
}
extern "C" void rbd_linked_image_spec_cleanup(rbd_linked_image_spec_t *image)
{
free(image->pool_name);
free(image->pool_namespace);
free(image->image_id);
free(image->image_name);
}
extern "C" void rbd_linked_image_spec_list_cleanup(
rbd_linked_image_spec_t *images, size_t num_images)
{
for (size_t idx = 0; idx < num_images; ++idx) {
rbd_linked_image_spec_cleanup(&images[idx]);
}
}
extern "C" void rbd_snap_spec_cleanup(rbd_snap_spec_t *snap)
{
free(snap->name);
}
/* images */
extern "C" int rbd_list(rados_ioctx_t p, char *names, size_t *size)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
std::vector<librbd::image_spec_t> cpp_image_specs;
int r = librbd::api::Image<>::list_images(io_ctx, &cpp_image_specs);
if (r < 0) {
tracepoint(librbd, list_exit, r, *size);
return r;
}
size_t expected_size = 0;
for (auto& it : cpp_image_specs) {
expected_size += it.name.size() + 1;
}
if (*size < expected_size) {
*size = expected_size;
tracepoint(librbd, list_exit, -ERANGE, *size);
return -ERANGE;
}
if (names == NULL) {
tracepoint(librbd, list_exit, -EINVAL, *size);
return -EINVAL;
}
for (auto& it : cpp_image_specs) {
const char* name = it.name.c_str();
tracepoint(librbd, list_entry, name);
strcpy(names, name);
names += strlen(names) + 1;
}
tracepoint(librbd, list_exit, (int)expected_size, *size);
return (int)expected_size;
}
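// rbd_list() above packs the image names into 'names' as consecutive
// NUL-terminated strings and returns the number of bytes written; rbd_list2()
// below is the structured replacement, filling rbd_image_spec_t entries that
// must be released with rbd_image_spec_list_cleanup(). Illustrative sketch,
// assuming 'ioctx' is an open pool handle and at most 16 images:
//
//   rbd_image_spec_t specs[16];
//   size_t count = 16;
//   int r = rbd_list2(ioctx, specs, &count);  // -ERANGE => count holds the
//                                             // required number of entries
//   if (r == 0) {
//     /* ... use specs[0..count) ... */
//     rbd_image_spec_list_cleanup(specs, count);
//   }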
extern "C" int rbd_list2(rados_ioctx_t p, rbd_image_spec_t *images,
size_t *size)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
// FIPS zeroization audit 20191117: this memset is not security related.
memset(images, 0, sizeof(*images) * *size);
std::vector<librbd::image_spec_t> cpp_image_specs;
int r = librbd::api::Image<>::list_images(io_ctx, &cpp_image_specs);
if (r < 0) {
tracepoint(librbd, list_exit, r, *size);
return r;
}
size_t expected_size = cpp_image_specs.size();
if (*size < expected_size) {
*size = expected_size;
tracepoint(librbd, list_exit, -ERANGE, *size);
return -ERANGE;
}
*size = expected_size;
for (size_t idx = 0; idx < expected_size; ++idx) {
images[idx].id = strdup(cpp_image_specs[idx].id.c_str());
images[idx].name = strdup(cpp_image_specs[idx].name.c_str());
}
tracepoint(librbd, list_exit, 0, *size);
return 0;
}
extern "C" int rbd_create(rados_ioctx_t p, const char *name, uint64_t size, int *order)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, *order);
int r = librbd::create(io_ctx, name, size, order);
tracepoint(librbd, create_exit, r, *order);
return r;
}
extern "C" int rbd_create2(rados_ioctx_t p, const char *name,
uint64_t size, uint64_t features,
int *order)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create2_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, features, *order);
int r = librbd::create(io_ctx, name, size, false, features, order, 0, 0);
tracepoint(librbd, create2_exit, r, *order);
return r;
}
extern "C" int rbd_create3(rados_ioctx_t p, const char *name,
uint64_t size, uint64_t features,
int *order,
uint64_t stripe_unit, uint64_t stripe_count)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create3_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, features, *order, stripe_unit, stripe_count);
int r = librbd::create(io_ctx, name, size, false, features, order,
stripe_unit, stripe_count);
tracepoint(librbd, create3_exit, r, *order);
return r;
}
extern "C" int rbd_create4(rados_ioctx_t p, const char *name,
uint64_t size, rbd_image_options_t opts)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create4_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, opts);
librbd::ImageOptions opts_(opts);
int r = librbd::create(io_ctx, name, "", size, opts_, "", "", false);
tracepoint(librbd, create4_exit, r);
return r;
}
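// The numbered create variants differ only in how much configuration they
// expose: rbd_create() takes a size and order, rbd_create2() adds a feature
// mask, rbd_create3() adds striping parameters, and rbd_create4() accepts a
// full rbd_image_options_t handle.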
extern "C" int rbd_clone(rados_ioctx_t p_ioctx, const char *p_name,
const char *p_snap_name, rados_ioctx_t c_ioctx,
const char *c_name, uint64_t features, int *c_order)
{
librados::IoCtx p_ioc, c_ioc;
librados::IoCtx::from_rados_ioctx_t(p_ioctx, p_ioc);
librados::IoCtx::from_rados_ioctx_t(c_ioctx, c_ioc);
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioc));
tracepoint(librbd, clone_enter, p_ioc.get_pool_name().c_str(), p_ioc.get_id(), p_name, p_snap_name, c_ioc.get_pool_name().c_str(), c_ioc.get_id(), c_name, features);
int r = librbd::clone(p_ioc, p_name, p_snap_name, c_ioc, c_name,
features, c_order, 0, 0);
tracepoint(librbd, clone_exit, r, *c_order);
return r;
}
extern "C" int rbd_clone2(rados_ioctx_t p_ioctx, const char *p_name,
const char *p_snap_name, rados_ioctx_t c_ioctx,
const char *c_name, uint64_t features, int *c_order,
uint64_t stripe_unit, int stripe_count)
{
librados::IoCtx p_ioc, c_ioc;
librados::IoCtx::from_rados_ioctx_t(p_ioctx, p_ioc);
librados::IoCtx::from_rados_ioctx_t(c_ioctx, c_ioc);
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioc));
tracepoint(librbd, clone2_enter, p_ioc.get_pool_name().c_str(), p_ioc.get_id(), p_name, p_snap_name, c_ioc.get_pool_name().c_str(), c_ioc.get_id(), c_name, features, stripe_unit, stripe_count);
int r = librbd::clone(p_ioc, p_name, p_snap_name, c_ioc, c_name,
features, c_order, stripe_unit, stripe_count);
tracepoint(librbd, clone2_exit, r, *c_order);
return r;
}
extern "C" int rbd_clone3(rados_ioctx_t p_ioctx, const char *p_name,
const char *p_snap_name, rados_ioctx_t c_ioctx,
const char *c_name, rbd_image_options_t c_opts)
{
librados::IoCtx p_ioc, c_ioc;
librados::IoCtx::from_rados_ioctx_t(p_ioctx, p_ioc);
librados::IoCtx::from_rados_ioctx_t(c_ioctx, c_ioc);
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioc));
tracepoint(librbd, clone3_enter, p_ioc.get_pool_name().c_str(), p_ioc.get_id(), p_name, p_snap_name, c_ioc.get_pool_name().c_str(), c_ioc.get_id(), c_name, c_opts);
librbd::ImageOptions c_opts_(c_opts);
int r = librbd::clone(p_ioc, nullptr, p_name, p_snap_name, c_ioc, nullptr,
c_name, c_opts_, "", "");
tracepoint(librbd, clone3_exit, r);
return r;
}
extern "C" int rbd_remove(rados_ioctx_t p, const char *name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, remove_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Image<>::remove(io_ctx, name, prog_ctx);
tracepoint(librbd, remove_exit, r);
return r;
}
extern "C" int rbd_remove_with_progress(rados_ioctx_t p, const char *name,
librbd_progress_fn_t cb, void *cbdata)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, remove_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Image<>::remove(io_ctx, name, prog_ctx);
tracepoint(librbd, remove_exit, r);
return r;
}
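// Trash workflow: rbd_trash_move() moves an image into the trash with an
// optional deferment period ('delay', in seconds), rbd_trash_list() and
// rbd_trash_get() inspect the deferred entries, rbd_trash_restore() brings an
// image back, and rbd_trash_remove()/rbd_trash_purge() delete entries
// permanently.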
extern "C" int rbd_trash_move(rados_ioctx_t p, const char *name,
uint64_t delay) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_move_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
int r = librbd::api::Trash<>::move(io_ctx, RBD_TRASH_IMAGE_SOURCE_USER, name,
delay);
tracepoint(librbd, trash_move_exit, r);
return r;
}
extern "C" int rbd_trash_get(rados_ioctx_t io, const char *id,
rbd_trash_image_info_t *info) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
librbd::trash_image_info_t cpp_info;
int r = librbd::api::Trash<>::get(io_ctx, id, &cpp_info);
if (r < 0) {
return r;
}
trash_image_info_cpp_to_c(cpp_info, info);
return 0;
}
extern "C" void rbd_trash_get_cleanup(rbd_trash_image_info_t *info) {
free(info->id);
free(info->name);
}
extern "C" int rbd_trash_list(rados_ioctx_t p, rbd_trash_image_info_t *entries,
size_t *num_entries) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_list_enter,
io_ctx.get_pool_name().c_str(), io_ctx.get_id());
// FIPS zeroization audit 20191117: this memset is not security related.
memset(entries, 0, sizeof(*entries) * *num_entries);
vector<librbd::trash_image_info_t> cpp_entries;
int r = librbd::api::Trash<>::list(io_ctx, cpp_entries, true);
if (r < 0) {
tracepoint(librbd, trash_list_exit, r, *num_entries);
return r;
}
if (*num_entries < cpp_entries.size()) {
*num_entries = cpp_entries.size();
tracepoint(librbd, trash_list_exit, -ERANGE, *num_entries);
return -ERANGE;
}
  int i = 0;
for (const auto &entry : cpp_entries) {
trash_image_info_cpp_to_c(entry, &entries[i++]);
}
*num_entries = cpp_entries.size();
return *num_entries;
}
extern "C" void rbd_trash_list_cleanup(rbd_trash_image_info_t *entries,
size_t num_entries) {
for (size_t i=0; i < num_entries; i++) {
rbd_trash_get_cleanup(&entries[i]);
}
}
extern "C" int rbd_trash_purge(rados_ioctx_t io, time_t expire_ts,
float threshold) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_purge_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), expire_ts, threshold);
librbd::NoOpProgressContext nop_pctx;
int r = librbd::api::Trash<>::purge(io_ctx, expire_ts, threshold, nop_pctx);
tracepoint(librbd, trash_purge_exit, r);
return r;
}
extern "C" int rbd_trash_purge_with_progress(rados_ioctx_t io, time_t expire_ts,
float threshold, librbd_progress_fn_t cb, void* cbdata) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_purge_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), expire_ts, threshold);
librbd::CProgressContext pctx(cb, cbdata);
int r = librbd::api::Trash<>::purge(io_ctx, expire_ts, threshold, pctx);
tracepoint(librbd, trash_purge_exit, r);
return r;
}
extern "C" int rbd_trash_remove(rados_ioctx_t p, const char *image_id,
bool force) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_id, force);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Trash<>::remove(io_ctx, image_id, force, prog_ctx);
tracepoint(librbd, trash_remove_exit, r);
return r;
}
extern "C" int rbd_trash_remove_with_progress(rados_ioctx_t p,
const char *image_id,
bool force,
librbd_progress_fn_t cb,
void *cbdata) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_id, force);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Trash<>::remove(io_ctx, image_id, force, prog_ctx);
tracepoint(librbd, trash_remove_exit, r);
return r;
}
extern "C" int rbd_trash_restore(rados_ioctx_t p, const char *id,
const char *name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_undelete_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), id, name);
int r = librbd::api::Trash<>::restore(
io_ctx, librbd::api::Trash<>::ALLOWED_RESTORE_SOURCES, id, name);
tracepoint(librbd, trash_undelete_exit, r);
return r;
}
extern "C" int rbd_namespace_create(rados_ioctx_t io,
const char *namespace_name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
return librbd::api::Namespace<>::create(io_ctx, namespace_name);
}
extern "C" int rbd_namespace_remove(rados_ioctx_t io,
const char *namespace_name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
return librbd::api::Namespace<>::remove(io_ctx, namespace_name);
}
extern "C" int rbd_namespace_list(rados_ioctx_t io, char *names, size_t *size) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
if (names == nullptr || size == nullptr) {
return -EINVAL;
}
std::vector<std::string> cpp_names;
int r = librbd::api::Namespace<>::list(io_ctx, &cpp_names);
if (r < 0) {
return r;
}
size_t expected_size = 0;
for (size_t i = 0; i < cpp_names.size(); i++) {
expected_size += cpp_names[i].size() + 1;
}
if (*size < expected_size) {
*size = expected_size;
return -ERANGE;
}
*size = expected_size;
for (int i = 0; i < (int)cpp_names.size(); i++) {
const char* name = cpp_names[i].c_str();
strcpy(names, name);
names += strlen(names) + 1;
}
return (int)expected_size;
}
extern "C" int rbd_namespace_exists(rados_ioctx_t io,
const char *namespace_name,
bool *exists) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
return librbd::api::Namespace<>::exists(io_ctx, namespace_name, exists);
}
extern "C" int rbd_pool_init(rados_ioctx_t io, bool force) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
return librbd::api::Pool<>::init(io_ctx, force);
}
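// Pool statistics are gathered through an opaque options handle: create it,
// register the desired counters together with the output location for each,
// then issue a single rbd_pool_stats_get() call. Illustrative sketch, assuming
// 'ioctx' is an open pool handle; the stat option constant is only an example:
//
//   uint64_t image_count = 0;
//   rbd_pool_stats_t stats;
//   rbd_pool_stats_create(&stats);
//   rbd_pool_stats_option_add_uint64(stats, RBD_POOL_STAT_OPTION_IMAGES,
//                                    &image_count);
//   int r = rbd_pool_stats_get(ioctx, stats);
//   rbd_pool_stats_destroy(stats);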
extern "C" void rbd_pool_stats_create(rbd_pool_stats_t *stats) {
*stats = reinterpret_cast<rbd_pool_stats_t>(
new librbd::api::Pool<>::StatOptions{});
}
extern "C" void rbd_pool_stats_destroy(rbd_pool_stats_t stats) {
auto pool_stat_options =
reinterpret_cast<librbd::api::Pool<>::StatOptions*>(stats);
delete pool_stat_options;
}
extern "C" int rbd_pool_stats_option_add_uint64(rbd_pool_stats_t stats,
int stat_option,
uint64_t* stat_val) {
auto pool_stat_options =
reinterpret_cast<librbd::api::Pool<>::StatOptions*>(stats);
return librbd::api::Pool<>::add_stat_option(
pool_stat_options, static_cast<rbd_pool_stat_option_t>(stat_option),
stat_val);
}
extern "C" int rbd_pool_stats_get(
rados_ioctx_t io, rbd_pool_stats_t pool_stats) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
auto pool_stat_options =
reinterpret_cast<librbd::api::Pool<>::StatOptions*>(pool_stats);
return librbd::api::Pool<>::get_stats(io_ctx, pool_stat_options);
}
extern "C" int rbd_copy(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname);
librbd::ImageOptions opts;
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, 0);
tracepoint(librbd, copy_exit, r);
return r;
}
extern "C" int rbd_copy2(rbd_image_t srcp, rbd_image_t destp)
{
librbd::ImageCtx *src = (librbd::ImageCtx *)srcp;
librbd::ImageCtx *dest = (librbd::ImageCtx *)destp;
tracepoint(librbd, copy2_enter, src, src->name.c_str(), src->snap_name.c_str(), src->read_only, dest, dest->name.c_str(), dest->snap_name.c_str(), dest->read_only);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(src, dest, prog_ctx, 0);
tracepoint(librbd, copy2_exit, r);
return r;
}
extern "C" int rbd_copy3(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname, rbd_image_options_t c_opts)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy3_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, c_opts);
librbd::ImageOptions c_opts_(c_opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, c_opts_, prog_ctx, 0);
tracepoint(librbd, copy3_exit, r);
return r;
}
extern "C" int rbd_copy4(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname, rbd_image_options_t c_opts, size_t sparse_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy4_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, c_opts, sparse_size);
librbd::ImageOptions c_opts_(c_opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, c_opts_, prog_ctx, sparse_size);
tracepoint(librbd, copy4_exit, r);
return r;
}
extern "C" int rbd_copy_with_progress(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname,
librbd_progress_fn_t fn, void *data)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname);
librbd::ImageOptions opts;
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, 0);
tracepoint(librbd, copy_exit, ret);
return ret;
}
extern "C" int rbd_copy_with_progress2(rbd_image_t srcp, rbd_image_t destp,
librbd_progress_fn_t fn, void *data)
{
librbd::ImageCtx *src = (librbd::ImageCtx *)srcp;
librbd::ImageCtx *dest = (librbd::ImageCtx *)destp;
tracepoint(librbd, copy2_enter, src, src->name.c_str(), src->snap_name.c_str(), src->read_only, dest, dest->name.c_str(), dest->snap_name.c_str(), dest->read_only);
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::copy(src, dest, prog_ctx, 0);
tracepoint(librbd, copy2_exit, ret);
return ret;
}
extern "C" int rbd_copy_with_progress3(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname,
rbd_image_options_t dest_opts,
librbd_progress_fn_t fn, void *data)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy3_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, dest_opts);
librbd::ImageOptions dest_opts_(dest_opts);
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::copy(ictx, dest_io_ctx, destname, dest_opts_, prog_ctx, 0);
tracepoint(librbd, copy3_exit, ret);
return ret;
}
extern "C" int rbd_copy_with_progress4(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname,
rbd_image_options_t dest_opts,
librbd_progress_fn_t fn, void *data, size_t sparse_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy4_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, dest_opts, sparse_size);
librbd::ImageOptions dest_opts_(dest_opts);
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::copy(ictx, dest_io_ctx, destname, dest_opts_, prog_ctx, sparse_size);
tracepoint(librbd, copy4_exit, ret);
return ret;
}
extern "C" int rbd_deep_copy(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname, rbd_image_options_t c_opts)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, deep_copy_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only,
dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(),
destname, c_opts);
librbd::ImageOptions opts(c_opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Image<>::deep_copy(ictx, dest_io_ctx, destname, opts,
prog_ctx);
tracepoint(librbd, deep_copy_exit, r);
return r;
}
extern "C" int rbd_deep_copy_with_progress(rbd_image_t image,
rados_ioctx_t dest_p,
const char *destname,
rbd_image_options_t dest_opts,
librbd_progress_fn_t fn, void *data)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, deep_copy_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only,
dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(),
destname, dest_opts);
librbd::ImageOptions opts(dest_opts);
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::api::Image<>::deep_copy(ictx, dest_io_ctx, destname, opts,
prog_ctx);
tracepoint(librbd, deep_copy_exit, ret);
return ret;
}
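// rbd_encryption_format() formats the image in place with the requested
// encryption format, rbd_encryption_load() wraps a single (format, opts) pair
// into an encryption spec and loads it, and rbd_encryption_load2() accepts an
// array of specs (e.g. for cloned images whose ancestors use different
// encryption parameters).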
extern "C" int rbd_encryption_format(rbd_image_t image,
rbd_encryption_format_t format,
rbd_encryption_options_t opts,
size_t opts_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Image<>::encryption_format(
ictx, format, opts, opts_size, true);
}
extern "C" int rbd_encryption_load(rbd_image_t image,
rbd_encryption_format_t format,
rbd_encryption_options_t opts,
size_t opts_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::encryption_spec_t spec = {format, opts, opts_size};
return librbd::api::Image<>::encryption_load(ictx, &spec, 1, true);
}
extern "C" int rbd_encryption_load2(rbd_image_t image,
const rbd_encryption_spec_t *specs,
size_t spec_count)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Image<>::encryption_load(ictx, specs, spec_count, true);
}
extern "C" int rbd_flatten(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, flatten_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->flatten(prog_ctx);
tracepoint(librbd, flatten_exit, r);
return r;
}
extern "C" int rbd_flatten_with_progress(rbd_image_t image,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, flatten_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->flatten(prog_ctx);
tracepoint(librbd, flatten_exit, r);
return r;
}
extern "C" int rbd_sparsify(rbd_image_t image, size_t sparse_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, sparsify_enter, ictx, ictx->name.c_str(), sparse_size,
ictx->id.c_str());
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->sparsify(sparse_size, prog_ctx);
tracepoint(librbd, sparsify_exit, r);
return r;
}
extern "C" int rbd_sparsify_with_progress(rbd_image_t image, size_t sparse_size,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, sparsify_enter, ictx, ictx->name.c_str(), sparse_size,
ictx->id.c_str());
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->sparsify(sparse_size, prog_ctx);
tracepoint(librbd, sparsify_exit, r);
return r;
}
extern "C" int rbd_rename(rados_ioctx_t src_p, const char *srcname,
const char *destname)
{
librados::IoCtx src_io_ctx;
librados::IoCtx::from_rados_ioctx_t(src_p, src_io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(src_io_ctx));
tracepoint(librbd, rename_enter, src_io_ctx.get_pool_name().c_str(), src_io_ctx.get_id(), srcname, destname);
int r = librbd::rename(src_io_ctx, srcname, destname);
tracepoint(librbd, rename_exit, r);
return r;
}
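// Live migration lifecycle: rbd_migration_prepare() (or
// rbd_migration_prepare_import() for an external source spec) links the source
// and destination images, rbd_migration_execute() copies the data, and the
// migration is finalized with rbd_migration_commit() or rolled back with
// rbd_migration_abort(); rbd_migration_status() reports the current state.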
extern "C" int rbd_migration_prepare(rados_ioctx_t p, const char *image_name,
rados_ioctx_t dest_p,
const char *dest_image_name,
rbd_image_options_t opts_)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, migration_prepare_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name, dest_io_ctx.get_pool_name().c_str(),
dest_io_ctx.get_id(), dest_image_name, opts_);
librbd::ImageOptions opts(opts_);
int r = librbd::api::Migration<>::prepare(io_ctx, image_name, dest_io_ctx,
dest_image_name, opts);
tracepoint(librbd, migration_prepare_exit, r);
return r;
}
extern "C" int rbd_migration_prepare_import(
const char *source_spec, rados_ioctx_t dest_p,
const char *dest_image_name, rbd_image_options_t opts_) {
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
librbd::ImageOptions opts(opts_);
return librbd::api::Migration<>::prepare_import(source_spec, dest_io_ctx,
dest_image_name, opts);
}
extern "C" int rbd_migration_execute(rados_ioctx_t p, const char *image_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_execute_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::execute(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_execute_exit, r);
return r;
}
extern "C" int rbd_migration_execute_with_progress(rados_ioctx_t p,
const char *name,
librbd_progress_fn_t fn,
void *data)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_execute_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
librbd::CProgressContext prog_ctx(fn, data);
int r = librbd::api::Migration<>::execute(io_ctx, name, prog_ctx);
tracepoint(librbd, migration_execute_exit, r);
return r;
}
extern "C" int rbd_migration_abort(rados_ioctx_t p, const char *image_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_abort_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::abort(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_abort_exit, r);
return r;
}
extern "C" int rbd_migration_abort_with_progress(rados_ioctx_t p,
const char *name,
librbd_progress_fn_t fn,
void *data)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_abort_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
librbd::CProgressContext prog_ctx(fn, data);
int r = librbd::api::Migration<>::abort(io_ctx, name, prog_ctx);
tracepoint(librbd, migration_abort_exit, r);
return r;
}
extern "C" int rbd_migration_commit(rados_ioctx_t p, const char *image_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_commit_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::commit(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_commit_exit, r);
return r;
}
extern "C" int rbd_migration_commit_with_progress(rados_ioctx_t p,
const char *name,
librbd_progress_fn_t fn,
void *data)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_commit_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
librbd::CProgressContext prog_ctx(fn, data);
int r = librbd::api::Migration<>::commit(io_ctx, name, prog_ctx);
tracepoint(librbd, migration_commit_exit, r);
return r;
}
extern "C" int rbd_migration_status(rados_ioctx_t p, const char *image_name,
rbd_image_migration_status_t *status,
size_t status_size)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_status_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
if (status_size != sizeof(rbd_image_migration_status_t)) {
tracepoint(librbd, migration_status_exit, -ERANGE);
return -ERANGE;
}
librbd::image_migration_status_t cpp_status;
int r = librbd::api::Migration<>::status(io_ctx, image_name, &cpp_status);
if (r >= 0) {
status->source_pool_id = cpp_status.source_pool_id;
status->source_pool_namespace =
strdup(cpp_status.source_pool_namespace.c_str());
status->source_image_name = strdup(cpp_status.source_image_name.c_str());
status->source_image_id = strdup(cpp_status.source_image_id.c_str());
status->dest_pool_id = cpp_status.dest_pool_id;
status->dest_pool_namespace =
strdup(cpp_status.dest_pool_namespace.c_str());
status->dest_image_name = strdup(cpp_status.dest_image_name.c_str());
status->dest_image_id = strdup(cpp_status.dest_image_id.c_str());
status->state = cpp_status.state;
status->state_description = strdup(cpp_status.state_description.c_str());
}
tracepoint(librbd, migration_status_exit, r);
return r;
}
extern "C" void rbd_migration_status_cleanup(rbd_image_migration_status_t *s)
{
free(s->source_pool_namespace);
free(s->source_image_name);
free(s->source_image_id);
free(s->dest_pool_namespace);
free(s->dest_image_name);
free(s->dest_image_id);
free(s->state_description);
}
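// Pool metadata getters follow the usual sizing convention: an undersized
// buffer causes the required length to be written back and -ERANGE to be
// returned. rbd_pool_metadata_list() packs keys and values as consecutive
// NUL-terminated strings, mirroring rbd_mirror_peer_site_get_attributes().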
extern "C" int rbd_pool_metadata_get(rados_ioctx_t p, const char *key,
char *value, size_t *vallen)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
string val_s;
int r = librbd::api::PoolMetadata<>::get(io_ctx, key, &val_s);
if (*vallen < val_s.size() + 1) {
r = -ERANGE;
*vallen = val_s.size() + 1;
} else {
strncpy(value, val_s.c_str(), val_s.size() + 1);
}
return r;
}
extern "C" int rbd_pool_metadata_set(rados_ioctx_t p, const char *key,
const char *value)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
int r = librbd::api::PoolMetadata<>::set(io_ctx, key, value);
return r;
}
extern "C" int rbd_pool_metadata_remove(rados_ioctx_t p, const char *key)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
int r = librbd::api::PoolMetadata<>::remove(io_ctx, key);
return r;
}
extern "C" int rbd_pool_metadata_list(rados_ioctx_t p, const char *start,
uint64_t max, char *key, size_t *key_len,
char *value, size_t *val_len)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
map<string, bufferlist> pairs;
int r = librbd::api::PoolMetadata<>::list(io_ctx, start, max, &pairs);
if (r < 0) {
return r;
}
size_t key_total_len = 0, val_total_len = 0;
for (auto &it : pairs) {
key_total_len += it.first.size() + 1;
val_total_len += it.second.length() + 1;
}
if (*key_len < key_total_len || *val_len < val_total_len) {
*key_len = key_total_len;
*val_len = val_total_len;
return -ERANGE;
}
*key_len = key_total_len;
*val_len = val_total_len;
char *key_p = key, *value_p = value;
for (auto &it : pairs) {
strncpy(key_p, it.first.c_str(), it.first.size() + 1);
key_p += it.first.size() + 1;
strncpy(value_p, it.second.c_str(), it.second.length());
value_p += it.second.length();
*value_p = '\0';
value_p++;
}
return 0;
}
extern "C" int rbd_config_pool_list(rados_ioctx_t p,
rbd_config_option_t *options,
int *max_options) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::vector<librbd::config_option_t> option_vector;
int r = librbd::api::Config<>::list(io_ctx, &option_vector);
if (r < 0) {
return r;
}
if (*max_options < static_cast<int>(option_vector.size())) {
*max_options = static_cast<int>(option_vector.size());
return -ERANGE;
}
for (int i = 0; i < static_cast<int>(option_vector.size()); ++i) {
config_option_cpp_to_c(option_vector[i], &options[i]);
}
*max_options = static_cast<int>(option_vector.size());
return 0;
}
extern "C" void rbd_config_pool_list_cleanup(rbd_config_option_t *options,
int max_options) {
for (int i = 0; i < max_options; ++i) {
config_option_cleanup(options[i]);
}
}
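// Image open entry points. The *_by_id variants look the image up by id rather
// than name, the *_read_only variants open it without write access, and the
// rbd_aio_* variants return immediately, delivering both the result and the
// opened rbd_image_t through the supplied rbd_completion_t.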
extern "C" int rbd_open(rados_ioctx_t p, const char *name, rbd_image_t *image,
const char *snap_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx(name, "", snap_name, io_ctx,
false);
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = ictx->state->open(0);
if (r >= 0) {
*image = (rbd_image_t)ictx;
}
tracepoint(librbd, open_image_exit, r);
return r;
}
extern "C" int rbd_open_by_id(rados_ioctx_t p, const char *id,
rbd_image_t *image, const char *snap_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx("", id, snap_name, io_ctx,
false);
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(),
ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = ictx->state->open(0);
if (r >= 0) {
*image = (rbd_image_t)ictx;
}
tracepoint(librbd, open_image_exit, r);
return r;
}
extern "C" int rbd_aio_open(rados_ioctx_t p, const char *name,
rbd_image_t *image, const char *snap_name,
rbd_completion_t c)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx(name, "", snap_name, io_ctx,
false);
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(comp),
image));
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
extern "C" int rbd_aio_open_by_id(rados_ioctx_t p, const char *id,
rbd_image_t *image, const char *snap_name,
rbd_completion_t c)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx("", id, snap_name, io_ctx,
false);
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(),
ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only,
comp->pc);
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(comp),
image));
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
extern "C" int rbd_open_read_only(rados_ioctx_t p, const char *name,
rbd_image_t *image, const char *snap_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx(name, "", snap_name, io_ctx,
true);
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = ictx->state->open(0);
if (r >= 0) {
*image = (rbd_image_t)ictx;
}
tracepoint(librbd, open_image_exit, r);
return r;
}
extern "C" int rbd_open_by_id_read_only(rados_ioctx_t p, const char *id,
rbd_image_t *image, const char *snap_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx("", id, snap_name, io_ctx,
true);
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(),
ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = ictx->state->open(0);
if (r >= 0) {
*image = (rbd_image_t)ictx;
}
tracepoint(librbd, open_image_exit, r);
return r;
}
extern "C" int rbd_aio_open_read_only(rados_ioctx_t p, const char *name,
rbd_image_t *image, const char *snap_name,
rbd_completion_t c)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx(name, "", snap_name, io_ctx,
true);
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(comp),
image));
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
extern "C" int rbd_aio_open_by_id_read_only(rados_ioctx_t p, const char *id,
rbd_image_t *image,
const char *snap_name,
rbd_completion_t c)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx("", id, snap_name, io_ctx,
true);
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(),
ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(comp),
image));
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
extern "C" int rbd_features_to_string(uint64_t features, char *str_features, size_t *size)
{
std::stringstream err;
std::string get_str_features = librbd::rbd_features_to_string(features, &err);
if (!err.str().empty()) {
return -EINVAL;
}
uint64_t expected_size = get_str_features.size();
if (*size <= expected_size) {
*size = expected_size + 1;
return -ERANGE;
}
strncpy(str_features, get_str_features.c_str(), expected_size);
str_features[expected_size] = '\0';
*size = expected_size + 1;
return 0;
}
extern "C" int rbd_features_from_string(const char *str_features, uint64_t *features)
{
std::stringstream err;
*features = librbd::rbd_features_from_string(str_features, &err);
if (!err.str().empty()) {
return -EINVAL;
}
return 0;
}
extern "C" int rbd_close(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, close_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
int r = ictx->state->close();
tracepoint(librbd, close_image_exit, r);
return r;
}
extern "C" int rbd_aio_close(rbd_image_t image, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_close_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), comp->pc);
ictx->state->close(new C_AioCompletion(ictx, librbd::io::AIO_TYPE_CLOSE,
get_aio_completion(comp)));
tracepoint(librbd, aio_close_image_exit, 0);
return 0;
}
extern "C" int rbd_resize(rbd_image_t image, uint64_t size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->resize(size, true, prog_ctx);
tracepoint(librbd, resize_exit, r);
return r;
}
extern "C" int rbd_resize2(rbd_image_t image, uint64_t size, bool allow_shrink,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->resize(size, allow_shrink, prog_ctx);
tracepoint(librbd, resize_exit, r);
return r;
}
extern "C" int rbd_resize_with_progress(rbd_image_t image, uint64_t size,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->resize(size, true, prog_ctx);
tracepoint(librbd, resize_exit, r);
return r;
}
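// Note that rbd_resize() and rbd_resize_with_progress() permit shrinking
// (allow_shrink is hard-coded to true); rbd_resize2() exposes the flag so that
// callers can reject accidental shrinks.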
extern "C" int rbd_stat(rbd_image_t image, rbd_image_info_t *info,
size_t infosize)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, stat_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::info(ictx, *info, infosize);
tracepoint(librbd, stat_exit, r, info);
return r;
}
extern "C" int rbd_get_old_format(rbd_image_t image, uint8_t *old)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_old_format_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_old_format(ictx, old);
tracepoint(librbd, get_old_format_exit, r, *old);
return r;
}
extern "C" int rbd_get_size(rbd_image_t image, uint64_t *size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_size_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_size(ictx, size);
tracepoint(librbd, get_size_exit, r, *size);
return r;
}
extern "C" int rbd_get_features(rbd_image_t image, uint64_t *features)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_features_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_features(ictx, features);
tracepoint(librbd, get_features_exit, r, *features);
return r;
}
extern "C" int rbd_update_features(rbd_image_t image, uint64_t features,
uint8_t enabled)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
bool features_enabled = enabled != 0;
tracepoint(librbd, update_features_enter, ictx, features, features_enabled);
int r = ictx->operations->update_features(features, features_enabled);
tracepoint(librbd, update_features_exit, r);
return r;
}
extern "C" int rbd_get_op_features(rbd_image_t image, uint64_t *op_features)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Image<>::get_op_features(ictx, op_features);
}
extern "C" int rbd_get_stripe_unit(rbd_image_t image, uint64_t *stripe_unit)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_stripe_unit_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
*stripe_unit = ictx->get_stripe_unit();
tracepoint(librbd, get_stripe_unit_exit, 0, *stripe_unit);
return 0;
}
extern "C" int rbd_get_stripe_count(rbd_image_t image, uint64_t *stripe_count)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_stripe_count_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
*stripe_count = ictx->get_stripe_count();
tracepoint(librbd, get_stripe_count_exit, 0, *stripe_count);
return 0;
}
extern "C" int rbd_get_create_timestamp(rbd_image_t image,
struct timespec *timestamp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_create_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
utime_t time = ictx->get_create_timestamp();
time.to_timespec(timestamp);
tracepoint(librbd, get_create_timestamp_exit, 0, timestamp);
return 0;
}
extern "C" int rbd_get_access_timestamp(rbd_image_t image,
struct timespec *timestamp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_access_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
utime_t time = ictx->get_access_timestamp();
time.to_timespec(timestamp);
tracepoint(librbd, get_access_timestamp_exit, 0, timestamp);
return 0;
}
extern "C" int rbd_get_modify_timestamp(rbd_image_t image,
struct timespec *timestamp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_modify_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
utime_t time = ictx->get_modify_timestamp();
time.to_timespec(timestamp);
tracepoint(librbd, get_modify_timestamp_exit, 0, timestamp);
return 0;
}
extern "C" int rbd_get_overlap(rbd_image_t image, uint64_t *overlap)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_overlap_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_overlap(ictx, overlap);
tracepoint(librbd, get_overlap_exit, r, *overlap);
return r;
}
extern "C" int rbd_get_name(rbd_image_t image, char *name, size_t *name_len)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
if (*name_len <= ictx->name.size()) {
*name_len = ictx->name.size() + 1;
return -ERANGE;
}
strncpy(name, ictx->name.c_str(), ictx->name.size());
name[ictx->name.size()] = '\0';
*name_len = ictx->name.size() + 1;
return 0;
}
extern "C" int rbd_get_id(rbd_image_t image, char *id, size_t id_len)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
if (ictx->old_format) {
return -EINVAL;
}
if (ictx->id.size() >= id_len) {
return -ERANGE;
}
strncpy(id, ictx->id.c_str(), id_len - 1);
id[id_len - 1] = '\0';
return 0;
}
extern "C" int rbd_get_block_name_prefix(rbd_image_t image, char *prefix,
size_t prefix_len)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
if (ictx->object_prefix.size() >= prefix_len) {
return -ERANGE;
}
strncpy(prefix, ictx->object_prefix.c_str(), prefix_len - 1);
prefix[prefix_len - 1] = '\0';
return 0;
}
extern "C" int64_t rbd_get_data_pool_id(rbd_image_t image)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
return librbd::api::Image<>::get_data_pool_id(ictx);
}
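// Parent lookup wrappers. rbd_get_parent_info()/rbd_get_parent_info2() copy the
// parent spec into caller-provided buffers and return -ERANGE if any buffer is
// too small, while rbd_get_parent() fills rbd_linked_image_spec_t /
// rbd_snap_spec_t with strdup()ed strings that the caller must release.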
extern "C" int rbd_get_parent_info(rbd_image_t image,
char *parent_pool_name, size_t ppool_namelen,
char *parent_name, size_t pnamelen,
char *parent_snap_name, size_t psnap_namelen)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, get_parent_info_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
librbd::linked_image_spec_t parent_image;
librbd::snap_spec_t parent_snap;
int r = librbd::api::Image<>::get_parent(ictx, &parent_image, &parent_snap);
if (r >= 0) {
if (parent_pool_name) {
if (parent_image.pool_name.length() + 1 > ppool_namelen) {
r = -ERANGE;
} else {
strcpy(parent_pool_name, parent_image.pool_name.c_str());
}
}
if (parent_name) {
if (parent_image.image_name.length() + 1 > pnamelen) {
r = -ERANGE;
} else {
strcpy(parent_name, parent_image.image_name.c_str());
}
}
if (parent_snap_name) {
if (parent_snap.name.length() + 1 > psnap_namelen) {
r = -ERANGE;
} else {
strcpy(parent_snap_name, parent_snap.name.c_str());
}
}
}
if (r < 0) {
tracepoint(librbd, get_parent_info_exit, r, NULL, NULL, NULL, NULL);
return r;
}
tracepoint(librbd, get_parent_info_exit, r,
parent_image.pool_name.c_str(),
parent_image.image_name.c_str(),
parent_image.image_id.c_str(),
parent_snap.name.c_str());
return 0;
}
extern "C" int rbd_get_parent_info2(rbd_image_t image,
char *parent_pool_name,
size_t ppool_namelen,
char *parent_name, size_t pnamelen,
char *parent_id, size_t pidlen,
char *parent_snap_name,
size_t psnap_namelen)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, get_parent_info_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
librbd::linked_image_spec_t parent_image;
librbd::snap_spec_t parent_snap;
int r = librbd::api::Image<>::get_parent(ictx, &parent_image, &parent_snap);
if (r >= 0) {
if (parent_pool_name) {
if (parent_image.pool_name.length() + 1 > ppool_namelen) {
r = -ERANGE;
} else {
strcpy(parent_pool_name, parent_image.pool_name.c_str());
}
}
if (parent_name) {
if (parent_image.image_name.length() + 1 > pnamelen) {
r = -ERANGE;
} else {
strcpy(parent_name, parent_image.image_name.c_str());
}
}
if (parent_id) {
if (parent_image.image_id.length() + 1 > pidlen) {
r = -ERANGE;
} else {
strcpy(parent_id, parent_image.image_id.c_str());
}
}
if (parent_snap_name) {
if (parent_snap.name.length() + 1 > psnap_namelen) {
r = -ERANGE;
} else {
strcpy(parent_snap_name, parent_snap.name.c_str());
}
}
}
if (r < 0) {
tracepoint(librbd, get_parent_info_exit, r, NULL, NULL, NULL, NULL);
return r;
}
tracepoint(librbd, get_parent_info_exit, r,
parent_image.pool_name.c_str(),
parent_image.image_name.c_str(),
parent_image.image_id.c_str(),
parent_snap.name.c_str());
return 0;
}
extern "C" int rbd_get_parent(rbd_image_t image,
rbd_linked_image_spec_t *parent_image,
rbd_snap_spec_t *parent_snap)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, get_parent_info_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
librbd::linked_image_spec_t cpp_parent_image;
librbd::snap_spec_t cpp_parent_snap;
int r = librbd::api::Image<>::get_parent(ictx, &cpp_parent_image,
&cpp_parent_snap);
if (r < 0) {
// FIPS zeroization audit 20191117: these memsets are not security related.
memset(parent_image, 0, sizeof(rbd_linked_image_spec_t));
memset(parent_snap, 0, sizeof(rbd_snap_spec_t));
} else {
*parent_image = {
.pool_id = cpp_parent_image.pool_id,
.pool_name = strdup(cpp_parent_image.pool_name.c_str()),
.pool_namespace = strdup(cpp_parent_image.pool_namespace.c_str()),
.image_id = strdup(cpp_parent_image.image_id.c_str()),
.image_name = strdup(cpp_parent_image.image_name.c_str()),
.trash = cpp_parent_image.trash};
*parent_snap = {
.id = cpp_parent_snap.id,
.namespace_type = cpp_parent_snap.namespace_type,
.name = strdup(cpp_parent_snap.name.c_str())};
}
tracepoint(librbd, get_parent_info_exit, r,
parent_image->pool_name,
parent_image->image_name,
parent_image->image_id,
parent_snap->name);
return r;
}
extern "C" int rbd_get_migration_source_spec(rbd_image_t image,
char* source_spec,
size_t* max_len)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
std::string cpp_source_spec;
int r = librbd::api::Migration<>::get_source_spec(ictx, &cpp_source_spec);
if (r < 0) {
return r;
}
size_t expected_size = cpp_source_spec.size();
if (expected_size >= *max_len) {
*max_len = expected_size + 1;
return -ERANGE;
}
strncpy(source_spec, cpp_source_spec.c_str(), expected_size);
source_spec[expected_size] = '\0';
*max_len = expected_size + 1;
return 0;
}
extern "C" int rbd_get_flags(rbd_image_t image, uint64_t *flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_flags_enter, ictx);
int r = librbd::get_flags(ictx, flags);
tracepoint(librbd, get_flags_exit, ictx, r, *flags);
return r;
}
extern "C" int rbd_get_group(rbd_image_t image, rbd_group_info_t *group_info,
size_t group_info_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, image_get_group_enter, ictx->name.c_str());
if (group_info_size != sizeof(rbd_group_info_t)) {
tracepoint(librbd, image_get_group_exit, -ERANGE);
return -ERANGE;
}
librbd::group_info_t cpp_group_info;
int r = librbd::api::Group<>::image_get_group(ictx, &cpp_group_info);
if (r >= 0) {
group_info_cpp_to_c(cpp_group_info, group_info);
} else {
group_info->name = NULL;
}
tracepoint(librbd, image_get_group_exit, r);
return r;
}
extern "C" int rbd_set_image_notification(rbd_image_t image, int fd, int type)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, set_image_notification_enter, ictx, fd, type);
int r = librbd::set_image_notification(ictx, fd, type);
tracepoint(librbd, set_image_notification_exit, ictx, r);
return r;
}
extern "C" int rbd_is_exclusive_lock_owner(rbd_image_t image, int *is_owner)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, is_exclusive_lock_owner_enter, ictx);
bool owner;
int r = librbd::is_exclusive_lock_owner(ictx, &owner);
*is_owner = owner ? 1 : 0;
tracepoint(librbd, is_exclusive_lock_owner_exit, ictx, r, *is_owner);
return r;
}
extern "C" int rbd_lock_acquire(rbd_image_t image, rbd_lock_mode_t lock_mode)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, lock_acquire_enter, ictx, lock_mode);
int r = librbd::lock_acquire(ictx, lock_mode);
tracepoint(librbd, lock_acquire_exit, ictx, r);
return r;
}
extern "C" int rbd_lock_release(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, lock_release_enter, ictx);
int r = librbd::lock_release(ictx);
tracepoint(librbd, lock_release_exit, ictx, r);
return r;
}
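// Managed (exclusive-lock) owner listing: the owner strings are strdup()ed into
// the caller's array and must be released with rbd_lock_get_owners_cleanup().
// If the array is too small, *max_lock_owners is updated and -ERANGE returned.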
extern "C" int rbd_lock_get_owners(rbd_image_t image,
rbd_lock_mode_t *lock_mode,
char **lock_owners,
size_t *max_lock_owners)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, lock_get_owners_enter, ictx);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(lock_owners, 0, sizeof(*lock_owners) * *max_lock_owners);
std::list<std::string> lock_owner_list;
int r = librbd::lock_get_owners(ictx, lock_mode, &lock_owner_list);
if (r >= 0) {
if (*max_lock_owners >= lock_owner_list.size()) {
*max_lock_owners = 0;
for (auto &lock_owner : lock_owner_list) {
lock_owners[(*max_lock_owners)++] = strdup(lock_owner.c_str());
}
} else {
*max_lock_owners = lock_owner_list.size();
r = -ERANGE;
}
}
tracepoint(librbd, lock_get_owners_exit, ictx, r);
return r;
}
extern "C" void rbd_lock_get_owners_cleanup(char **lock_owners,
size_t lock_owner_count)
{
for (size_t i = 0; i < lock_owner_count; ++i) {
free(lock_owners[i]);
}
}
extern "C" int rbd_lock_break(rbd_image_t image, rbd_lock_mode_t lock_mode,
const char *lock_owner)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, lock_break_enter, ictx, lock_mode, lock_owner);
int r = librbd::lock_break(ictx, lock_mode, lock_owner);
tracepoint(librbd, lock_break_exit, ictx, r);
return r;
}
extern "C" int rbd_rebuild_object_map(rbd_image_t image,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx*>(image);
librbd::CProgressContext prog_ctx(cb, cbdata);
return ictx->operations->rebuild_object_map(prog_ctx);
}
/* snapshots */
extern "C" int rbd_snap_create(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_create_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
auto flags = librbd::util::get_default_snap_create_flags(ictx);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Snapshot<>::create(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_create_exit, r);
return r;
}
extern "C" int rbd_snap_create2(rbd_image_t image, const char *snap_name,
uint32_t flags, librbd_progress_fn_t cb,
void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_create_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Snapshot<>::create(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_create_exit, r);
return r;
}
extern "C" int rbd_snap_rename(rbd_image_t image, const char *srcname, const char *dstname)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_rename_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, srcname, dstname);
int r = ictx->operations->snap_rename(srcname, dstname);
tracepoint(librbd, snap_rename_exit, r);
return r;
}
extern "C" int rbd_snap_remove(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_remove_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Snapshot<>::remove(ictx, snap_name, 0, prog_ctx);
tracepoint(librbd, snap_remove_exit, r);
return r;
}
extern "C" int rbd_snap_remove2(rbd_image_t image, const char *snap_name, uint32_t flags,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_remove2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name, flags);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Snapshot<>::remove(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_remove_exit, r);
return r;
}
extern "C" int rbd_snap_remove_by_id(rbd_image_t image, uint64_t snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Snapshot<>::remove(ictx, snap_id);
}
extern "C" int rbd_snap_rollback(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_rollback_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->snap_rollback(cls::rbd::UserSnapshotNamespace(), snap_name, prog_ctx);
tracepoint(librbd, snap_rollback_exit, r);
return r;
}
extern "C" int rbd_snap_rollback_with_progress(rbd_image_t image,
const char *snap_name,
librbd_progress_fn_t cb,
void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_rollback_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->snap_rollback(cls::rbd::UserSnapshotNamespace(), snap_name, prog_ctx);
tracepoint(librbd, snap_rollback_exit, r);
return r;
}
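// rbd_snap_list() fills at most *max_snaps entries and terminates the list with
// an entry whose name is NULL; the strdup()ed names must be released with
// rbd_snap_list_end(). On -ERANGE, *max_snaps is updated to the required count.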
extern "C" int rbd_snap_list(rbd_image_t image, rbd_snap_info_t *snaps,
int *max_snaps)
{
vector<librbd::snap_info_t> cpp_snaps;
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_list_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snaps);
if (!max_snaps) {
tracepoint(librbd, snap_list_exit, -EINVAL, 0);
return -EINVAL;
}
// FIPS zeroization audit 20191117: this memset is not security related.
memset(snaps, 0, sizeof(*snaps) * *max_snaps);
int r = librbd::api::Snapshot<>::list(ictx, cpp_snaps);
if (r == -ENOENT) {
tracepoint(librbd, snap_list_exit, 0, *max_snaps);
return 0;
}
if (r < 0) {
tracepoint(librbd, snap_list_exit, r, *max_snaps);
return r;
}
if (*max_snaps < (int)cpp_snaps.size() + 1) {
*max_snaps = (int)cpp_snaps.size() + 1;
tracepoint(librbd, snap_list_exit, -ERANGE, *max_snaps);
return -ERANGE;
}
int i;
for (i = 0; i < (int)cpp_snaps.size(); i++) {
snaps[i].id = cpp_snaps[i].id;
snaps[i].size = cpp_snaps[i].size;
snaps[i].name = strdup(cpp_snaps[i].name.c_str());
if (!snaps[i].name) {
for (int j = 0; j < i; j++)
free((void *)snaps[j].name);
tracepoint(librbd, snap_list_exit, -ENOMEM, *max_snaps);
return -ENOMEM;
}
tracepoint(librbd, snap_list_entry, snaps[i].id, snaps[i].size, snaps[i].name);
}
snaps[i].id = 0;
snaps[i].size = 0;
snaps[i].name = NULL;
r = (int)cpp_snaps.size();
tracepoint(librbd, snap_list_exit, r, *max_snaps);
return r;
}
extern "C" void rbd_snap_list_end(rbd_snap_info_t *snaps)
{
tracepoint(librbd, snap_list_end_enter, snaps);
while (snaps->name) {
free((void *)snaps->name);
snaps++;
}
tracepoint(librbd, snap_list_end_exit);
}
extern "C" int rbd_snap_protect(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_protect_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_protect_exit, r);
return r;
}
extern "C" int rbd_snap_unprotect(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_unprotect_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_unprotect(cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_unprotect_exit, r);
return r;
}
extern "C" int rbd_snap_is_protected(rbd_image_t image, const char *snap_name,
int *is_protected)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_is_protected_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
bool protected_snap;
int r = librbd::api::Snapshot<>::is_protected(ictx, snap_name, &protected_snap);
if (r < 0) {
tracepoint(librbd, snap_is_protected_exit, r, 0); // output not set on error
return r;
}
*is_protected = protected_snap ? 1 : 0;
tracepoint(librbd, snap_is_protected_exit, 0, *is_protected ? 1 : 0);
return 0;
}
extern "C" int rbd_snap_get_limit(rbd_image_t image, uint64_t *limit)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_get_limit_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_limit(ictx, limit);
tracepoint(librbd, snap_get_limit_exit, r, *limit);
return r;
}
extern "C" int rbd_snap_exists(rbd_image_t image, const char *snapname, bool *exists)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_exists_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, snapname);
int r = librbd::api::Snapshot<>::exists(ictx, cls::rbd::UserSnapshotNamespace(), snapname, exists);
tracepoint(librbd, snap_exists_exit, r, *exists);
return r;
}
extern "C" int rbd_snap_get_timestamp(rbd_image_t image, uint64_t snap_id, struct timespec *timestamp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_get_timestamp_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_timestamp(ictx, snap_id, timestamp);
tracepoint(librbd, snap_get_timestamp_exit, r);
return r;
}
extern "C" int rbd_snap_set_limit(rbd_image_t image, uint64_t limit)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_set_limit_enter, ictx, ictx->name.c_str(), limit);
int r = librbd::api::Snapshot<>::set_limit(ictx, limit);
tracepoint(librbd, snap_set_limit_exit, r);
return r;
}
extern "C" int rbd_snap_set(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_set_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Image<>::snap_set(
ictx, cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_set_exit, r);
return r;
}
extern "C" int rbd_snap_set_by_id(rbd_image_t image, uint64_t snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Image<>::snap_set(ictx, snap_id);
}
extern "C" int rbd_snap_get_name(rbd_image_t image, uint64_t snap_id, char *snapname, size_t *name_len)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
std::string snap_name;
int r = librbd::api::Snapshot<>::get_name(ictx, snap_id, &snap_name);
size_t expected_size = snap_name.size();
if (*name_len <= expected_size) {
*name_len = expected_size + 1;
return -ERANGE;
}
strncpy(snapname, snap_name.c_str(), expected_size);
snapname[expected_size] = '\0';
*name_len = expected_size + 1;
return r;
}
extern "C" int rbd_snap_get_id(rbd_image_t image, const char *snapname, uint64_t *snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Snapshot<>::get_id(ictx, snapname, snap_id);
}
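// Child listing (legacy flat-buffer variant): pool and image names are packed
// back to back as NUL-terminated strings into the caller's buffers. Trashed
// children are skipped, and -ERANGE is returned with the required lengths when
// either buffer is too small.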
extern "C" ssize_t rbd_list_children(rbd_image_t image, char *pools,
size_t *pools_len, char *images,
size_t *images_len)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, list_children_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
std::vector<librbd::linked_image_spec_t> cpp_images;
int r = librbd::api::Image<>::list_children(ictx, &cpp_images);
if (r < 0) {
tracepoint(librbd, list_children_exit, r);
return r;
}
std::set<std::pair<std::string, std::string>> image_set;
for (auto& image : cpp_images) {
if (!image.trash) {
image_set.insert({image.pool_name, image.image_name});
}
}
size_t pools_total = 0;
size_t images_total = 0;
for (auto it : image_set) {
pools_total += it.first.length() + 1;
images_total += it.second.length() + 1;
}
bool too_short = false;
if (pools_total > *pools_len)
too_short = true;
if (images_total > *images_len)
too_short = true;
*pools_len = pools_total;
*images_len = images_total;
if (too_short) {
tracepoint(librbd, list_children_exit, -ERANGE);
return -ERANGE;
}
char *pools_p = pools;
char *images_p = images;
for (auto it : image_set) {
const char* pool = it.first.c_str();
strcpy(pools_p, pool);
pools_p += it.first.length() + 1;
const char* image = it.second.c_str();
strcpy(images_p, image);
images_p += it.second.length() + 1;
tracepoint(librbd, list_children_entry, pool, image);
}
ssize_t ret = image_set.size();
tracepoint(librbd, list_children_exit, ret);
return ret;
}
extern "C" int rbd_list_children2(rbd_image_t image,
rbd_child_info_t *children,
int *max_children)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, list_children_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
if (!max_children) {
tracepoint(librbd, list_children_exit, -EINVAL);
return -EINVAL;
}
// FIPS zeroization audit 20191117: this memset is not security related.
memset(children, 0, sizeof(*children) * *max_children);
std::vector<librbd::linked_image_spec_t> cpp_children;
int r = librbd::api::Image<>::list_children(ictx, &cpp_children);
if (r < 0) {
tracepoint(librbd, list_children_exit, r);
return r;
}
if (*max_children < (int)cpp_children.size() + 1) {
*max_children = (int)cpp_children.size() + 1;
tracepoint(librbd, list_children_exit, *max_children);
return -ERANGE;
}
int i;
for (i = 0; i < (int)cpp_children.size(); i++) {
children[i].pool_name = strdup(cpp_children[i].pool_name.c_str());
children[i].image_name = strdup(cpp_children[i].image_name.c_str());
children[i].image_id = strdup(cpp_children[i].image_id.c_str());
children[i].trash = cpp_children[i].trash;
tracepoint(librbd, list_children_entry, children[i].pool_name,
children[i].image_name);
}
children[i].pool_name = NULL;
children[i].image_name = NULL;
children[i].image_id = NULL;
r = (int)cpp_children.size();
tracepoint(librbd, list_children_exit, *max_children);
return r;
}
extern "C" void rbd_list_child_cleanup(rbd_child_info_t *child)
{
free((void *)child->pool_name);
free((void *)child->image_name);
free((void *)child->image_id);
}
extern "C" void rbd_list_children_cleanup(rbd_child_info_t *children,
size_t num_children)
{
for (size_t i=0; i < num_children; i++) {
free((void *)children[i].pool_name);
free((void *)children[i].image_name);
free((void *)children[i].image_id);
}
}
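// rbd_list_children3()/rbd_list_descendants() return rbd_linked_image_spec_t
// entries with strdup()ed string fields; *max_images is an in/out element count
// and -ERANGE is returned with the required count when the array is too small.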
extern "C" int rbd_list_children3(rbd_image_t image,
rbd_linked_image_spec_t *images,
size_t *max_images)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, list_children_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(images, 0, sizeof(*images) * *max_images);
std::vector<librbd::linked_image_spec_t> cpp_children;
int r = librbd::api::Image<>::list_children(ictx, &cpp_children);
if (r < 0) {
tracepoint(librbd, list_children_exit, r);
return r;
}
if (*max_images < cpp_children.size()) {
*max_images = cpp_children.size();
return -ERANGE;
}
*max_images = cpp_children.size();
for (size_t idx = 0; idx < cpp_children.size(); ++idx) {
images[idx] = {
.pool_id = cpp_children[idx].pool_id,
.pool_name = strdup(cpp_children[idx].pool_name.c_str()),
.pool_namespace = strdup(cpp_children[idx].pool_namespace.c_str()),
.image_id = strdup(cpp_children[idx].image_id.c_str()),
.image_name = strdup(cpp_children[idx].image_name.c_str()),
.trash = cpp_children[idx].trash};
tracepoint(librbd, list_children_entry, images[idx].pool_name,
images[idx].image_name);
}
return 0;
}
extern "C" int rbd_list_descendants(rbd_image_t image,
rbd_linked_image_spec_t *images,
size_t *max_images)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(images, 0, sizeof(*images) * *max_images);
std::vector<librbd::linked_image_spec_t> cpp_children;
int r = librbd::api::Image<>::list_descendants(ictx, {}, &cpp_children);
if (r < 0) {
return r;
}
if (*max_images < cpp_children.size()) {
*max_images = cpp_children.size();
return -ERANGE;
}
*max_images = cpp_children.size();
for (size_t idx = 0; idx < cpp_children.size(); ++idx) {
images[idx] = {
.pool_id = cpp_children[idx].pool_id,
.pool_name = strdup(cpp_children[idx].pool_name.c_str()),
.pool_namespace = strdup(cpp_children[idx].pool_namespace.c_str()),
.image_id = strdup(cpp_children[idx].image_id.c_str()),
.image_name = strdup(cpp_children[idx].image_name.c_str()),
.trash = cpp_children[idx].trash};
}
return 0;
}
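// Advisory (cls_lock) locker listing: client, cookie and address strings are
// packed as NUL-terminated runs into the caller's buffers, with the required
// lengths reported via the in/out size parameters (-ERANGE when too small).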
extern "C" ssize_t rbd_list_lockers(rbd_image_t image, int *exclusive,
char *tag, size_t *tag_len,
char *clients, size_t *clients_len,
char *cookies, size_t *cookies_len,
char *addrs, size_t *addrs_len)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, list_lockers_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
std::list<librbd::locker_t> lockers;
bool exclusive_bool;
string tag_str;
int r = list_lockers(ictx, &lockers, &exclusive_bool, &tag_str);
if (r < 0) {
tracepoint(librbd, list_lockers_exit, r);
return r;
}
ldout(ictx->cct, 20) << "list_lockers r = " << r << " lockers.size() = " << lockers.size() << dendl;
*exclusive = (int)exclusive_bool;
size_t clients_total = 0;
size_t cookies_total = 0;
size_t addrs_total = 0;
for (list<librbd::locker_t>::const_iterator it = lockers.begin();
it != lockers.end(); ++it) {
clients_total += it->client.length() + 1;
cookies_total += it->cookie.length() + 1;
addrs_total += it->address.length() + 1;
}
bool too_short = ((clients_total > *clients_len) ||
(cookies_total > *cookies_len) ||
(addrs_total > *addrs_len) ||
(tag_str.length() + 1 > *tag_len));
*clients_len = clients_total;
*cookies_len = cookies_total;
*addrs_len = addrs_total;
*tag_len = tag_str.length() + 1;
if (too_short) {
tracepoint(librbd, list_lockers_exit, -ERANGE);
return -ERANGE;
}
strcpy(tag, tag_str.c_str());
char *clients_p = clients;
char *cookies_p = cookies;
char *addrs_p = addrs;
for (list<librbd::locker_t>::const_iterator it = lockers.begin();
it != lockers.end(); ++it) {
const char* client = it->client.c_str();
strcpy(clients_p, client);
clients_p += it->client.length() + 1;
const char* cookie = it->cookie.c_str();
strcpy(cookies_p, cookie);
cookies_p += it->cookie.length() + 1;
const char* address = it->address.c_str();
strcpy(addrs_p, address);
addrs_p += it->address.length() + 1;
tracepoint(librbd, list_lockers_entry, client, cookie, address);
}
ssize_t ret = lockers.size();
tracepoint(librbd, list_lockers_exit, ret);
return ret;
}
extern "C" int rbd_lock_exclusive(rbd_image_t image, const char *cookie)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, lock_exclusive_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie);
int r = librbd::lock(ictx, true, cookie ? cookie : "", "");
tracepoint(librbd, lock_exclusive_exit, r);
return r;
}
extern "C" int rbd_lock_shared(rbd_image_t image, const char *cookie,
const char *tag)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, lock_shared_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie, tag);
int r = librbd::lock(ictx, false, cookie ? cookie : "", tag ? tag : "");
tracepoint(librbd, lock_shared_exit, r);
return r;
}
extern "C" int rbd_unlock(rbd_image_t image, const char *cookie)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, unlock_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie);
int r = librbd::unlock(ictx, cookie ? cookie : "");
tracepoint(librbd, unlock_exit, r);
return r;
}
extern "C" int rbd_break_lock(rbd_image_t image, const char *client,
const char *cookie)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, break_lock_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, client, cookie);
int r = librbd::break_lock(ictx, client, cookie ? cookie : "");
tracepoint(librbd, break_lock_exit, r);
return r;
}
/* I/O */
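// Synchronous data-path wrappers: each call delegates to librbd::api::Io<> and
// returns the byte count (for reads and writes) or a negative errno on error.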
extern "C" ssize_t rbd_read(rbd_image_t image, uint64_t ofs, size_t len,
char *buf)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, read_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int r = librbd::api::Io<>::read(
*ictx, ofs, len, librbd::io::ReadResult{buf, len}, 0);
tracepoint(librbd, read_exit, r);
return r;
}
extern "C" ssize_t rbd_read2(rbd_image_t image, uint64_t ofs, size_t len,
char *buf, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, read2_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, ofs, len, op_flags);
int r = librbd::api::Io<>::read(
*ictx, ofs, len, librbd::io::ReadResult{buf, len}, op_flags);
tracepoint(librbd, read_exit, r);
return r;
}
extern "C" int64_t rbd_read_iterate(rbd_image_t image, uint64_t ofs, size_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, read_iterate_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int64_t r = librbd::read_iterate(ictx, ofs, len, cb, arg);
tracepoint(librbd, read_iterate_exit, r);
return r;
}
extern "C" int rbd_read_iterate2(rbd_image_t image, uint64_t ofs, uint64_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, read_iterate2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int64_t r = librbd::read_iterate(ictx, ofs, len, cb, arg);
if (r > 0)
r = 0;
tracepoint(librbd, read_iterate2_exit, r);
return (int)r;
}
extern "C" int rbd_diff_iterate(rbd_image_t image,
const char *fromsnapname,
uint64_t ofs, uint64_t len,
int (*cb)(uint64_t, size_t, int, void *),
void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, diff_iterate_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, fromsnapname, ofs, len,
true, false);
int r = librbd::api::DiffIterate<>::diff_iterate(ictx,
cls::rbd::UserSnapshotNamespace(),
fromsnapname, ofs, len,
true, false, cb, arg);
tracepoint(librbd, diff_iterate_exit, r);
return r;
}
extern "C" int rbd_diff_iterate2(rbd_image_t image, const char *fromsnapname,
uint64_t ofs, uint64_t len,
uint8_t include_parent, uint8_t whole_object,
int (*cb)(uint64_t, size_t, int, void *),
void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, diff_iterate_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, fromsnapname, ofs, len,
include_parent != 0, whole_object != 0);
int r = librbd::api::DiffIterate<>::diff_iterate(ictx,
cls::rbd::UserSnapshotNamespace(),
fromsnapname, ofs, len,
include_parent, whole_object,
cb, arg);
tracepoint(librbd, diff_iterate_exit, r);
return r;
}
extern "C" ssize_t rbd_write(rbd_image_t image, uint64_t ofs, size_t len,
const char *buf)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len, buf);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, nullptr));
int r = librbd::api::Io<>::write(*ictx, ofs, len, std::move(bl), 0);
tracepoint(librbd, write_exit, r);
return r;
}
extern "C" ssize_t rbd_write2(rbd_image_t image, uint64_t ofs, size_t len,
const char *buf, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, write2_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, ofs, len, buf, op_flags);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, nullptr));
int r = librbd::api::Io<>::write(*ictx, ofs, len, std::move(bl), op_flags);
tracepoint(librbd, write_exit, r);
return r;
}
extern "C" int rbd_discard(rbd_image_t image, uint64_t ofs, uint64_t len)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, discard_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, ofs, len);
if (len > static_cast<uint64_t>(std::numeric_limits<int>::max())) {
tracepoint(librbd, discard_exit, -EINVAL);
return -EINVAL;
}
int r = librbd::api::Io<>::discard(
*ictx, ofs, len, ictx->discard_granularity_bytes);
tracepoint(librbd, discard_exit, r);
return r;
}
extern "C" ssize_t rbd_writesame(rbd_image_t image, uint64_t ofs, size_t len,
const char *buf, size_t data_len, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, writesame_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, ofs, len, data_len == 0 ? NULL : buf, data_len, op_flags);
if (data_len == 0 || len % data_len ||
len > static_cast<uint64_t>(std::numeric_limits<int>::max())) {
tracepoint(librbd, writesame_exit, -EINVAL);
return -EINVAL;
}
bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(buf, data_len)) {
int r = librbd::api::Io<>::write_zeroes(*ictx, ofs, len, 0, op_flags);
tracepoint(librbd, writesame_exit, r);
return r;
}
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, data_len, nullptr));
int r = librbd::api::Io<>::write_same(
*ictx, ofs, len, std::move(bl), op_flags);
tracepoint(librbd, writesame_exit, r);
return r;
}
extern "C" ssize_t rbd_write_zeroes(rbd_image_t image, uint64_t ofs, size_t len,
int zero_flags, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Io<>::write_zeroes(*ictx, ofs, len, zero_flags, op_flags);
}
extern "C" ssize_t rbd_compare_and_write(rbd_image_t image,
uint64_t ofs, size_t len,
const char *cmp_buf,
const char *buf,
uint64_t *mismatch_off,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, compare_and_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, ofs,
len, cmp_buf, buf, op_flags);
bufferlist cmp_bl;
cmp_bl.push_back(create_write_raw(ictx, cmp_buf, len, nullptr));
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, nullptr));
int r = librbd::api::Io<>::compare_and_write(
*ictx, ofs, len, std::move(cmp_bl), std::move(bl), mismatch_off, op_flags);
tracepoint(librbd, compare_and_write_exit, r);
return r;
}
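// Asynchronous variants of the data path. A minimal usage sketch (assumes an
// open image handle; error handling omitted):
//
// rbd_completion_t c;
// rbd_aio_create_completion(NULL, NULL, &c);
// rbd_aio_write(image, off, len, buf, c);
// rbd_aio_wait_for_complete(c);
// ssize_t r = rbd_aio_get_return_value(c);
// rbd_aio_release(c);
//
// The caller's buffers must stay valid until the completion fires.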
extern "C" int rbd_aio_create_completion(void *cb_arg,
rbd_callback_t complete_cb,
rbd_completion_t *c)
{
librbd::RBD::AioCompletion *rbd_comp =
new librbd::RBD::AioCompletion(cb_arg, complete_cb);
*c = (rbd_completion_t) rbd_comp;
return 0;
}
extern "C" int rbd_aio_write(rbd_image_t image, uint64_t off, size_t len,
const char *buf, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, buf, comp->pc);
auto aio_completion = get_aio_completion(comp);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, aio_completion));
librbd::api::Io<>::aio_write(
*ictx, aio_completion, off, len, std::move(bl), 0, true);
tracepoint(librbd, aio_write_exit, 0);
return 0;
}
extern "C" int rbd_aio_write2(rbd_image_t image, uint64_t off, size_t len,
const char *buf, rbd_completion_t c, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_write2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, buf, comp->pc, op_flags);
auto aio_completion = get_aio_completion(comp);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, aio_completion));
librbd::api::Io<>::aio_write(
*ictx, aio_completion, off, len, std::move(bl), op_flags, true);
tracepoint(librbd, aio_write_exit, 0);
return 0;
}
extern "C" int rbd_aio_writev(rbd_image_t image, const struct iovec *iov,
int iovcnt, uint64_t off, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
size_t len = 0;
int r = get_iovec_length(iov, iovcnt, len);
tracepoint(librbd, aio_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, off, len, NULL,
comp->pc);
if (r == 0) {
auto aio_completion = get_aio_completion(comp);
auto bl = iovec_to_bufferlist(ictx, iov, iovcnt, aio_completion);
librbd::api::Io<>::aio_write(
*ictx, aio_completion, off, len, std::move(bl), 0, true);
}
tracepoint(librbd, aio_write_exit, r);
return r;
}
extern "C" int rbd_aio_read(rbd_image_t image, uint64_t off, size_t len,
char *buf, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_read_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, buf, comp->pc);
librbd::api::Io<>::aio_read(
*ictx, get_aio_completion(comp), off, len, librbd::io::ReadResult{buf, len},
0, true);
tracepoint(librbd, aio_read_exit, 0);
return 0;
}
extern "C" int rbd_aio_read2(rbd_image_t image, uint64_t off, size_t len,
char *buf, rbd_completion_t c, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_read2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, buf, comp->pc, op_flags);
librbd::api::Io<>::aio_read(
*ictx, get_aio_completion(comp), off, len, librbd::io::ReadResult{buf, len},
op_flags, true);
tracepoint(librbd, aio_read_exit, 0);
return 0;
}
extern "C" int rbd_aio_readv(rbd_image_t image, const struct iovec *iov,
int iovcnt, uint64_t off, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
size_t len = 0;
int r = get_iovec_length(iov, iovcnt, len);
tracepoint(librbd, aio_read_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, off, len, NULL,
comp->pc);
if (r == 0) {
librbd::io::ReadResult read_result;
if (iovcnt == 1) {
read_result = librbd::io::ReadResult(
static_cast<char *>(iov[0].iov_base), iov[0].iov_len);
} else {
read_result = librbd::io::ReadResult(iov, iovcnt);
}
librbd::api::Io<>::aio_read(
*ictx, get_aio_completion(comp), off, len, std::move(read_result), 0,
true);
}
tracepoint(librbd, aio_read_exit, r);
return r;
}
extern "C" int rbd_flush(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, flush_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::api::Io<>::flush(*ictx);
tracepoint(librbd, flush_exit, r);
return r;
}
extern "C" int rbd_aio_flush(rbd_image_t image, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_flush_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
librbd::api::Io<>::aio_flush(*ictx, get_aio_completion(comp), true);
tracepoint(librbd, aio_flush_exit, 0);
return 0;
}
extern "C" int rbd_aio_discard(rbd_image_t image, uint64_t off, uint64_t len,
rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_discard_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, comp->pc);
librbd::api::Io<>::aio_discard(
*ictx, get_aio_completion(comp), off, len,
ictx->discard_granularity_bytes, true);
tracepoint(librbd, aio_discard_exit, 0);
return 0;
}
extern "C" int rbd_aio_writesame(rbd_image_t image, uint64_t off, size_t len,
const char *buf, size_t data_len, rbd_completion_t c,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_writesame_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, data_len == 0 ? NULL : buf, data_len, comp->pc,
op_flags);
if (data_len == 0 || len % data_len) {
tracepoint(librbd, aio_writesame_exit, -EINVAL);
return -EINVAL;
}
bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(buf, data_len)) {
librbd::api::Io<>::aio_write_zeroes(
*ictx, get_aio_completion(comp), off, len, 0, op_flags, true);
tracepoint(librbd, aio_writesame_exit, 0);
return 0;
}
auto aio_completion = get_aio_completion(comp);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, data_len, aio_completion));
librbd::api::Io<>::aio_write_same(
*ictx, aio_completion, off, len, std::move(bl), op_flags, true);
tracepoint(librbd, aio_writesame_exit, 0);
return 0;
}
extern "C" int rbd_aio_write_zeroes(rbd_image_t image, uint64_t off, size_t len,
rbd_completion_t c, int zero_flags,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Io<>::aio_write_zeroes(*ictx, get_aio_completion(comp), off, len,
zero_flags, op_flags, true);
return 0;
}
extern "C" ssize_t rbd_aio_compare_and_write(rbd_image_t image, uint64_t off,
size_t len, const char *cmp_buf,
const char *buf, rbd_completion_t c,
uint64_t *mismatch_off,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_compare_and_write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, cmp_buf, buf, comp->pc, op_flags);
auto aio_completion = get_aio_completion(comp);
bufferlist cmp_bl;
cmp_bl.push_back(create_write_raw(ictx, cmp_buf, len, aio_completion));
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, aio_completion));
librbd::api::Io<>::aio_compare_and_write(
*ictx, aio_completion, off, len, std::move(cmp_bl), std::move(bl),
mismatch_off, op_flags, false);
tracepoint(librbd, aio_compare_and_write_exit, 0);
return 0;
}
extern "C" ssize_t rbd_aio_compare_and_writev(rbd_image_t image,
uint64_t off,
const struct iovec *cmp_iov,
int cmp_iovcnt,
const struct iovec *iov,
int iovcnt,
rbd_completion_t c,
uint64_t *mismatch_off,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
size_t cmp_len = 0;
int r = get_iovec_length(cmp_iov, cmp_iovcnt, cmp_len);
tracepoint(librbd, aio_compare_and_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, off, cmp_len, NULL, NULL,
comp->pc, op_flags);
if (r != 0) {
tracepoint(librbd, aio_compare_and_write_exit, r);
return r;
}
size_t write_len;
r = get_iovec_length(iov, iovcnt, write_len);
if (r != 0) {
tracepoint(librbd, aio_compare_and_write_exit, r);
return r;
}
if (cmp_len != write_len) {
tracepoint(librbd, aio_compare_and_write_exit, -EINVAL);
return -EINVAL;
}
auto aio_completion = get_aio_completion(comp);
auto cmp_bl = iovec_to_bufferlist(ictx, cmp_iov, cmp_iovcnt, aio_completion);
auto bl = iovec_to_bufferlist(ictx, iov, iovcnt, aio_completion);
librbd::api::Io<>::aio_compare_and_write(*ictx, aio_completion, off, cmp_len,
std::move(cmp_bl), std::move(bl),
mismatch_off, op_flags, false);
tracepoint(librbd, aio_compare_and_write_exit, 0);
return 0;
}
extern "C" int rbd_invalidate_cache(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, invalidate_cache_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::invalidate_cache(ictx);
tracepoint(librbd, invalidate_cache_exit, r);
return r;
}
extern "C" int rbd_poll_io_events(rbd_image_t image, rbd_completion_t *comps, int numcomp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::io::AioCompletion *cs[numcomp];
tracepoint(librbd, poll_io_events_enter, ictx, numcomp);
int r = librbd::poll_io_events(ictx, cs, numcomp);
tracepoint(librbd, poll_io_events_exit, r);
if (r > 0) {
for (int i = 0; i < r; ++i)
comps[i] = cs[i]->rbd_comp;
}
return r;
}
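// Image metadata accessors: rbd_metadata_get() and rbd_metadata_list() use the
// same -ERANGE size-probe convention, with list results packed as consecutive
// NUL-terminated key and value runs.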
extern "C" int rbd_metadata_get(rbd_image_t image, const char *key, char *value, size_t *vallen)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
string val_s;
tracepoint(librbd, metadata_get_enter, ictx, key);
int r = librbd::metadata_get(ictx, key, &val_s);
if (r < 0) {
tracepoint(librbd, metadata_get_exit, r, key, NULL);
return r;
}
if (*vallen < val_s.size() + 1) {
r = -ERANGE;
*vallen = val_s.size() + 1;
tracepoint(librbd, metadata_get_exit, r, key, NULL);
} else {
strncpy(value, val_s.c_str(), val_s.size() + 1);
tracepoint(librbd, metadata_get_exit, r, key, value);
}
return r;
}
extern "C" int rbd_metadata_set(rbd_image_t image, const char *key, const char *value)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, metadata_set_enter, ictx, key, value);
int r = ictx->operations->metadata_set(key, value);
tracepoint(librbd, metadata_set_exit, r);
return r;
}
extern "C" int rbd_metadata_remove(rbd_image_t image, const char *key)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, metadata_remove_enter, ictx, key);
int r = ictx->operations->metadata_remove(key);
tracepoint(librbd, metadata_remove_exit, r);
return r;
}
extern "C" int rbd_metadata_list(rbd_image_t image, const char *start, uint64_t max,
char *key, size_t *key_len, char *value, size_t *val_len)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, metadata_list_enter, ictx);
map<string, bufferlist> pairs;
int r = librbd::metadata_list(ictx, start, max, &pairs);
size_t key_total_len = 0, val_total_len = 0;
bool too_short = false;
for (map<string, bufferlist>::iterator it = pairs.begin();
it != pairs.end(); ++it) {
key_total_len += it->first.size() + 1;
val_total_len += it->second.length() + 1;
}
if (*key_len < key_total_len || *val_len < val_total_len)
too_short = true;
*key_len = key_total_len;
*val_len = val_total_len;
if (too_short) {
tracepoint(librbd, metadata_list_exit, -ERANGE);
return -ERANGE;
}
char *key_p = key, *value_p = value;
for (map<string, bufferlist>::iterator it = pairs.begin();
it != pairs.end(); ++it) {
strncpy(key_p, it->first.c_str(), it->first.size() + 1);
key_p += it->first.size() + 1;
strncpy(value_p, it->second.c_str(), it->second.length());
value_p += it->second.length();
*value_p = '\0';
value_p++;
tracepoint(librbd, metadata_list_entry, it->first.c_str(), it->second.c_str());
}
tracepoint(librbd, metadata_list_exit, r);
return r;
}
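// Mirroring entry points: thin wrappers over librbd::api::Mirror<>, returning
// its result codes directly.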
extern "C" int rbd_mirror_image_enable(rbd_image_t image)
{
return rbd_mirror_image_enable2(image, RBD_MIRROR_IMAGE_MODE_JOURNAL);
}
extern "C" int rbd_mirror_image_enable2(rbd_image_t image,
rbd_mirror_image_mode_t mode)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_enable(ictx, mode, false);
}
extern "C" int rbd_mirror_image_disable(rbd_image_t image, bool force)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_disable(ictx, force);
}
extern "C" int rbd_mirror_image_promote(rbd_image_t image, bool force)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_promote(ictx, force);
}
extern "C" int rbd_mirror_image_demote(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_demote(ictx);
}
extern "C" int rbd_mirror_image_resync(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_resync(ictx);
}
extern "C" int rbd_mirror_image_create_snapshot(rbd_image_t image,
uint64_t *snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
auto flags = librbd::util::get_default_snap_create_flags(ictx);
return librbd::api::Mirror<>::image_snapshot_create(ictx, flags, snap_id);
}
extern "C" int rbd_mirror_image_create_snapshot2(rbd_image_t image,
uint32_t flags,
uint64_t *snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_snapshot_create(ictx, flags, snap_id);
}
extern "C" int rbd_mirror_image_get_info(rbd_image_t image,
rbd_mirror_image_info_t *mirror_image_info,
size_t info_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
if (sizeof(rbd_mirror_image_info_t) != info_size) {
return -ERANGE;
}
librbd::mirror_image_info_t cpp_mirror_image;
int r = librbd::api::Mirror<>::image_get_info(ictx, &cpp_mirror_image);
if (r < 0) {
return r;
}
mirror_image_info_cpp_to_c(cpp_mirror_image, mirror_image_info);
return 0;
}
extern "C" void rbd_mirror_image_get_info_cleanup(
rbd_mirror_image_info_t *mirror_image_info)
{
free(mirror_image_info->global_id);
}
extern "C" int rbd_mirror_image_get_mode(rbd_image_t image,
rbd_mirror_image_mode_t *mode)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_get_mode(ictx, mode);
}
extern "C" int rbd_mirror_image_get_global_status(
rbd_image_t image, rbd_mirror_image_global_status_t *status,
size_t status_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
if (sizeof(rbd_mirror_image_global_status_t) != status_size) {
return -ERANGE;
}
librbd::mirror_image_global_status_t cpp_status;
int r = librbd::api::Mirror<>::image_get_global_status(ictx, &cpp_status);
if (r < 0) {
return r;
}
mirror_image_global_status_cpp_to_c(cpp_status, status);
return 0;
}
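// The deprecated single-peer status calls are implemented on top of the global
// status API, hence the -Wdeprecated-declarations pragmas around them.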
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
extern "C" int rbd_mirror_image_get_status(rbd_image_t image,
rbd_mirror_image_status_t *status,
size_t status_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
if (sizeof(rbd_mirror_image_status_t) != status_size) {
return -ERANGE;
}
librbd::mirror_image_global_status_t cpp_status;
int r = librbd::api::Mirror<>::image_get_global_status(ictx, &cpp_status);
if (r < 0) {
return r;
}
mirror_image_global_status_cpp_to_c(cpp_status, status);
return 0;
}
#pragma GCC diagnostic pop
extern "C" int rbd_mirror_image_get_instance_id(rbd_image_t image,
char *instance_id,
size_t *instance_id_max_length)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
std::string cpp_instance_id;
int r = librbd::api::Mirror<>::image_get_instance_id(ictx, &cpp_instance_id);
if (r < 0) {
return r;
}
if (cpp_instance_id.size() >= *instance_id_max_length) {
*instance_id_max_length = cpp_instance_id.size() + 1;
return -ERANGE;
}
strcpy(instance_id, cpp_instance_id.c_str());
*instance_id_max_length = cpp_instance_id.size() + 1;
return 0;
}
extern "C" int rbd_aio_mirror_image_promote(rbd_image_t image, bool force,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Mirror<>::image_promote(
ictx, force, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
return 0;
}
extern "C" int rbd_aio_mirror_image_demote(rbd_image_t image,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Mirror<>::image_demote(
ictx, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
return 0;
}
extern "C" int rbd_aio_mirror_image_get_info(rbd_image_t image,
rbd_mirror_image_info_t *info,
size_t info_size,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
if (sizeof(rbd_mirror_image_info_t) != info_size) {
return -ERANGE;
}
auto ctx = new C_MirrorImageGetInfo(
info, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
librbd::api::Mirror<>::image_get_info(
ictx, &ctx->cpp_mirror_image_info, ctx);
return 0;
}
extern "C" int rbd_aio_mirror_image_get_mode(rbd_image_t image,
rbd_mirror_image_mode_t *mode,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Mirror<>::image_get_mode(
ictx, mode, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
return 0;
}
extern "C" int rbd_aio_mirror_image_get_global_status(
rbd_image_t image, rbd_mirror_image_global_status_t *status,
size_t status_size, rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
if (sizeof(rbd_mirror_image_global_status_t) != status_size) {
return -ERANGE;
}
auto ctx = new C_MirrorImageGetGlobalStatus(
status, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
librbd::api::Mirror<>::image_get_global_status(
ictx, &ctx->cpp_mirror_image_global_status, ctx);
return 0;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
extern "C" int rbd_aio_mirror_image_get_status(
rbd_image_t image, rbd_mirror_image_status_t *status, size_t status_size,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
if (sizeof(rbd_mirror_image_status_t) != status_size) {
return -ERANGE;
}
auto ctx = new C_MirrorImageGetStatus(
status, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
librbd::api::Mirror<>::image_get_global_status(
ictx, &ctx->cpp_mirror_image_global_status, ctx);
return 0;
}
#pragma GCC diagnostic pop
extern "C" int rbd_aio_mirror_image_create_snapshot(rbd_image_t image,
uint32_t flags,
uint64_t *snap_id,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Mirror<>::image_snapshot_create(
ictx, flags, snap_id, new C_AioCompletion(ictx,
librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
return 0;
}
extern "C" int rbd_update_watch(rbd_image_t image, uint64_t *handle,
rbd_update_callback_t watch_cb, void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
C_UpdateWatchCB *wctx = new C_UpdateWatchCB(watch_cb, arg);
tracepoint(librbd, update_watch_enter, ictx, wctx);
int r = ictx->state->register_update_watcher(wctx, &wctx->handle);
tracepoint(librbd, update_watch_exit, r, wctx->handle);
*handle = reinterpret_cast<uint64_t>(wctx);
return r;
}
extern "C" int rbd_update_unwatch(rbd_image_t image, uint64_t handle)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
C_UpdateWatchCB *wctx = reinterpret_cast<C_UpdateWatchCB *>(handle);
tracepoint(librbd, update_unwatch_enter, ictx, wctx->handle);
int r = ictx->state->unregister_update_watcher(wctx->handle);
delete wctx;
tracepoint(librbd, update_unwatch_exit, r);
return r;
}
extern "C" int rbd_aio_is_complete(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
return comp->is_complete();
}
extern "C" int rbd_aio_wait_for_complete(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
return comp->wait_for_complete();
}
extern "C" ssize_t rbd_aio_get_return_value(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
return comp->get_return_value();
}
extern "C" void *rbd_aio_get_arg(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
return comp->get_arg();
}
extern "C" void rbd_aio_release(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
comp->release();
}
extern "C" int rbd_group_create(rados_ioctx_t p, const char *name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_create_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
int r = librbd::api::Group<>::create(io_ctx, name);
tracepoint(librbd, group_create_exit, r);
return r;
}
extern "C" int rbd_group_remove(rados_ioctx_t p, const char *name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
int r = librbd::api::Group<>::remove(io_ctx, name);
tracepoint(librbd, group_remove_exit, r);
return r;
}
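// Returns group names packed as consecutive NUL-terminated strings. A caller
// that passes too small a buffer gets -ERANGE with *size updated to the
// required length, so the usual pattern is to probe, allocate, and retry.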
extern "C" int rbd_group_list(rados_ioctx_t p, char *names, size_t *size)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
vector<string> cpp_names;
int r = librbd::api::Group<>::list(io_ctx, &cpp_names);
if (r < 0) {
tracepoint(librbd, group_list_exit, r);
return r;
}
size_t expected_size = 0;
for (size_t i = 0; i < cpp_names.size(); i++) {
expected_size += cpp_names[i].size() + 1;
}
if (*size < expected_size) {
*size = expected_size;
tracepoint(librbd, group_list_exit, -ERANGE);
return -ERANGE;
}
if (names == NULL) {
tracepoint(librbd, group_list_exit, -EINVAL);
return -EINVAL;
}
for (int i = 0; i < (int)cpp_names.size(); i++) {
const char* name = cpp_names[i].c_str();
tracepoint(librbd, group_list_entry, name);
strcpy(names, name);
names += strlen(names) + 1;
}
tracepoint(librbd, group_list_exit, (int)expected_size);
return (int)expected_size;
}
extern "C" int rbd_group_rename(rados_ioctx_t p, const char *src_name,
const char *dest_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_rename_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), src_name, dest_name);
int r = librbd::api::Group<>::rename(io_ctx, src_name, dest_name);
tracepoint(librbd, group_rename_exit, r);
return r;
}
extern "C" int rbd_group_image_add(rados_ioctx_t group_p,
const char *group_name,
rados_ioctx_t image_p,
const char *image_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx image_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
librados::IoCtx::from_rados_ioctx_t(image_p, image_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_add_enter, group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_name);
int r = librbd::api::Group<>::image_add(group_ioctx, group_name, image_ioctx,
image_name);
tracepoint(librbd, group_image_add_exit, r);
return r;
}
extern "C" int rbd_group_image_remove(rados_ioctx_t group_p,
const char *group_name,
rados_ioctx_t image_p,
const char *image_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx image_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
librados::IoCtx::from_rados_ioctx_t(image_p, image_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_remove_enter, group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_name);
int r = librbd::api::Group<>::image_remove(group_ioctx, group_name,
image_ioctx, image_name);
tracepoint(librbd, group_image_remove_exit, r);
return r;
}
extern "C" int rbd_group_image_remove_by_id(rados_ioctx_t group_p,
const char *group_name,
rados_ioctx_t image_p,
const char *image_id)
{
librados::IoCtx group_ioctx;
librados::IoCtx image_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
librados::IoCtx::from_rados_ioctx_t(image_p, image_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_remove_by_id_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name,
image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_id);
int r = librbd::api::Group<>::image_remove_by_id(group_ioctx, group_name,
image_ioctx, image_id);
tracepoint(librbd, group_image_remove_by_id_exit, r);
return r;
}
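// *image_size is in/out: it carries the capacity of 'images' on entry and the
// number of entries on success; -ERANGE is returned (with the required count)
// when the provided array is too small. Entries must be released with
// rbd_group_image_list_cleanup().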
extern "C" int rbd_group_image_list(rados_ioctx_t group_p,
const char *group_name,
rbd_group_image_info_t *images,
size_t group_image_info_size,
size_t *image_size)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_list_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(images, 0, sizeof(*images) * *image_size);
if (group_image_info_size != sizeof(rbd_group_image_info_t)) {
*image_size = 0;
tracepoint(librbd, group_image_list_exit, -ERANGE);
return -ERANGE;
}
std::vector<librbd::group_image_info_t> cpp_images;
int r = librbd::api::Group<>::image_list(group_ioctx, group_name,
&cpp_images);
if (r == -ENOENT) {
tracepoint(librbd, group_image_list_exit, 0);
*image_size = 0;
return 0;
}
if (r < 0) {
tracepoint(librbd, group_image_list_exit, r);
return r;
}
if (*image_size < cpp_images.size()) {
*image_size = cpp_images.size();
tracepoint(librbd, group_image_list_exit, -ERANGE);
return -ERANGE;
}
for (size_t i = 0; i < cpp_images.size(); ++i) {
group_image_status_cpp_to_c(cpp_images[i], &images[i]);
}
r = *image_size = cpp_images.size();
tracepoint(librbd, group_image_list_exit, r);
return r;
}
extern "C" int rbd_group_info_cleanup(rbd_group_info_t *group_info,
size_t group_info_size) {
if (group_info_size != sizeof(rbd_group_info_t)) {
return -ERANGE;
}
free(group_info->name);
return 0;
}
extern "C" int rbd_group_image_list_cleanup(rbd_group_image_info_t *images,
size_t group_image_info_size,
size_t len) {
if (group_image_info_size != sizeof(rbd_group_image_info_t)) {
return -ERANGE;
}
for (size_t i = 0; i < len; ++i) {
free(images[i].name);
}
return 0;
}
extern "C" int rbd_group_snap_create(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_create_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_create(group_ioctx, group_name,
snap_name, 0);
tracepoint(librbd, group_snap_create_exit, r);
return r;
}
extern "C" int rbd_group_snap_create2(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name,
uint32_t flags)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_create_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_create(group_ioctx, group_name, snap_name,
flags);
tracepoint(librbd, group_snap_create_exit, r);
return r;
}
extern "C" int rbd_group_snap_remove(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_remove_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_remove(group_ioctx, group_name, snap_name);
tracepoint(librbd, group_snap_remove_exit, r);
return r;
}
extern "C" int rbd_group_snap_rename(rados_ioctx_t group_p,
const char *group_name,
const char *old_snap_name,
const char *new_snap_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rename_enter,
group_ioctx.get_pool_name().c_str(), group_ioctx.get_id(),
group_name, old_snap_name, new_snap_name);
int r = librbd::api::Group<>::snap_rename(group_ioctx, group_name,
old_snap_name, new_snap_name);
tracepoint(librbd, group_snap_rename_exit, r);
return r;
}
extern "C" int rbd_group_snap_list(rados_ioctx_t group_p,
const char *group_name,
rbd_group_snap_info_t *snaps,
size_t group_snap_info_size,
size_t *snaps_size)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_list_enter, group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(snaps, 0, sizeof(*snaps) * *snaps_size);
if (group_snap_info_size != sizeof(rbd_group_snap_info_t)) {
*snaps_size = 0;
tracepoint(librbd, group_snap_list_exit, -ERANGE);
return -ERANGE;
}
std::vector<librbd::group_snap_info_t> cpp_snaps;
int r = librbd::api::Group<>::snap_list(group_ioctx, group_name, &cpp_snaps);
if (r == -ENOENT) {
*snaps_size = 0;
tracepoint(librbd, group_snap_list_exit, 0);
return 0;
}
if (r < 0) {
tracepoint(librbd, group_snap_list_exit, r);
return r;
}
if (*snaps_size < cpp_snaps.size()) {
*snaps_size = cpp_snaps.size();
tracepoint(librbd, group_snap_list_exit, -ERANGE);
return -ERANGE;
}
for (size_t i = 0; i < cpp_snaps.size(); ++i) {
group_snap_info_cpp_to_c(cpp_snaps[i], &snaps[i]);
}
r = *snaps_size = cpp_snaps.size();
tracepoint(librbd, group_snap_list_exit, r);
return r;
}
extern "C" int rbd_group_snap_list_cleanup(rbd_group_snap_info_t *snaps,
size_t group_snap_info_size,
size_t len) {
if (group_snap_info_size != sizeof(rbd_group_snap_info_t)) {
return -ERANGE;
}
for (size_t i = 0; i < len; ++i) {
free(snaps[i].name);
}
return 0;
}
extern "C" int rbd_group_snap_rollback(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rollback_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Group<>::snap_rollback(group_ioctx, group_name,
snap_name, prog_ctx);
tracepoint(librbd, group_snap_rollback_exit, r);
return r;
}
extern "C" int rbd_group_snap_rollback_with_progress(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name,
librbd_progress_fn_t cb,
void *cbdata)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rollback_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Group<>::snap_rollback(group_ioctx, group_name,
snap_name, prog_ctx);
tracepoint(librbd, group_snap_rollback_exit, r);
return r;
}
extern "C" int rbd_snap_get_namespace_type(rbd_image_t image,
uint64_t snap_id,
rbd_snap_namespace_type_t *namespace_type) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_get_namespace_type_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_namespace_type(ictx, snap_id,
namespace_type);
tracepoint(librbd, snap_get_namespace_type_exit, r);
return r;
}
extern "C" int rbd_snap_get_group_namespace(rbd_image_t image, uint64_t snap_id,
rbd_snap_group_namespace_t *group_snap,
size_t snap_group_namespace_size) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_get_group_namespace_enter, ictx,
ictx->name.c_str());
if (snap_group_namespace_size != sizeof(rbd_snap_group_namespace_t)) {
tracepoint(librbd, snap_get_group_namespace_exit, -ERANGE);
return -ERANGE;
}
librbd::snap_group_namespace_t group_namespace;
int r = librbd::api::Snapshot<>::get_group_namespace(ictx, snap_id,
&group_namespace);
if (r >= 0) {
group_snap->group_pool = group_namespace.group_pool;
group_snap->group_name = strdup(group_namespace.group_name.c_str());
group_snap->group_snap_name =
strdup(group_namespace.group_snap_name.c_str());
}
tracepoint(librbd, snap_get_group_namespace_exit, r);
return r;
}
extern "C" int rbd_snap_group_namespace_cleanup(rbd_snap_group_namespace_t *group_snap,
size_t snap_group_namespace_size) {
if (snap_group_namespace_size != sizeof(rbd_snap_group_namespace_t)) {
return -ERANGE;
}
free(group_snap->group_name);
free(group_snap->group_snap_name);
return 0;
}
extern "C" int rbd_snap_get_trash_namespace(rbd_image_t image, uint64_t snap_id,
char *original_name,
size_t max_length) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
std::string cpp_original_name;
int r = librbd::api::Snapshot<>::get_trash_namespace(ictx, snap_id,
&cpp_original_name);
if (r < 0) {
return r;
}
if (cpp_original_name.length() >= max_length) {
return -ERANGE;
}
strcpy(original_name, cpp_original_name.c_str());
return 0;
}
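// mirror_peer_uuids is returned as a single allocation holding NUL-separated
// UUID strings (mirror_peer_uuids_count entries); release it together with
// primary_mirror_uuid via rbd_snap_mirror_namespace_cleanup().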
extern "C" int rbd_snap_get_mirror_namespace(
rbd_image_t image, uint64_t snap_id,
rbd_snap_mirror_namespace_t *mirror_snap,
size_t mirror_snap_size) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
if (mirror_snap_size != sizeof(rbd_snap_mirror_namespace_t)) {
return -ERANGE;
}
librbd::snap_mirror_namespace_t mirror_namespace;
int r = librbd::api::Snapshot<>::get_mirror_namespace(
ictx, snap_id, &mirror_namespace);
if (r < 0) {
return r;
}
mirror_snap->state = mirror_namespace.state;
mirror_snap->primary_mirror_uuid =
strdup(mirror_namespace.primary_mirror_uuid.c_str());
mirror_snap->primary_snap_id = mirror_namespace.primary_snap_id;
mirror_snap->mirror_peer_uuids_count =
mirror_namespace.mirror_peer_uuids.size();
size_t len = 0;
for (auto &peer : mirror_namespace.mirror_peer_uuids) {
len += peer.size() + 1;
}
mirror_snap->mirror_peer_uuids = (char *)malloc(len);
char *p = mirror_snap->mirror_peer_uuids;
for (auto &peer : mirror_namespace.mirror_peer_uuids) {
strncpy(p, peer.c_str(), peer.size() + 1);
p += peer.size() + 1;
}
mirror_snap->complete = mirror_namespace.complete;
mirror_snap->last_copied_object_number =
mirror_namespace.last_copied_object_number;
return 0;
}
extern "C" int rbd_snap_mirror_namespace_cleanup(
rbd_snap_mirror_namespace_t *mirror_snap,
size_t mirror_snap_size) {
if (mirror_snap_size != sizeof(rbd_snap_mirror_namespace_t)) {
return -ERANGE;
}
free(mirror_snap->primary_mirror_uuid);
free(mirror_snap->mirror_peer_uuids);
return 0;
}
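// *max_watchers is in/out: capacity on entry, number of returned entries on
// success. Watcher addresses are strdup'd and must be freed with
// rbd_watchers_list_cleanup().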
extern "C" int rbd_watchers_list(rbd_image_t image,
rbd_image_watcher_t *watchers,
size_t *max_watchers) {
std::list<librbd::image_watcher_t> watcher_list;
librbd::ImageCtx *ictx = (librbd::ImageCtx*)image;
tracepoint(librbd, list_watchers_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(watchers, 0, sizeof(*watchers) * *max_watchers);
int r = librbd::list_watchers(ictx, watcher_list);
if (r < 0) {
tracepoint(librbd, list_watchers_exit, r, 0);
return r;
}
if (watcher_list.size() > *max_watchers) {
*max_watchers = watcher_list.size();
tracepoint(librbd, list_watchers_exit, -ERANGE, watcher_list.size());
return -ERANGE;
}
*max_watchers = 0;
for (auto &watcher : watcher_list) {
tracepoint(librbd, list_watchers_entry, watcher.addr.c_str(), watcher.id, watcher.cookie);
watchers[*max_watchers].addr = strdup(watcher.addr.c_str());
watchers[*max_watchers].id = watcher.id;
watchers[*max_watchers].cookie = watcher.cookie;
*max_watchers += 1;
}
tracepoint(librbd, list_watchers_exit, r, watcher_list.size());
return 0;
}
extern "C" void rbd_watchers_list_cleanup(rbd_image_watcher_t *watchers,
size_t num_watchers) {
for (size_t i = 0; i < num_watchers; ++i) {
free(watchers[i].addr);
}
}
extern "C" int rbd_config_image_list(rbd_image_t image,
rbd_config_option_t *options,
int *max_options) {
librbd::ImageCtx *ictx = (librbd::ImageCtx*)image;
std::vector<librbd::config_option_t> option_vector;
int r = librbd::api::Config<>::list(ictx, &option_vector);
if (r < 0) {
return r;
}
if (*max_options < static_cast<int>(option_vector.size())) {
*max_options = static_cast<int>(option_vector.size());
return -ERANGE;
}
for (int i = 0; i < static_cast<int>(option_vector.size()); ++i) {
config_option_cpp_to_c(option_vector[i], &options[i]);
}
*max_options = static_cast<int>(option_vector.size());
return 0;
}
extern "C" void rbd_config_image_list_cleanup(rbd_config_option_t *options,
int max_options) {
for (int i = 0; i < max_options; ++i) {
config_option_cleanup(options[i]);
}
}
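// Registers quiesce/unquiesce callbacks used during snapshot creation. The
// returned handle wraps the heap-allocated callback context and is released by
// rbd_quiesce_unwatch().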
extern "C" int rbd_quiesce_watch(rbd_image_t image,
rbd_update_callback_t quiesce_cb,
rbd_update_callback_t unquiesce_cb,
void *arg, uint64_t *handle)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
auto wctx = new C_QuiesceWatchCB(quiesce_cb, unquiesce_cb, arg);
int r = ictx->state->register_quiesce_watcher(wctx, &wctx->handle);
if (r < 0) {
delete wctx;
return r;
}
*handle = reinterpret_cast<uint64_t>(wctx);
return 0;
}
extern "C" int rbd_quiesce_unwatch(rbd_image_t image, uint64_t handle)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
auto *wctx = reinterpret_cast<C_QuiesceWatchCB *>(handle);
int r = ictx->state->unregister_quiesce_watcher(wctx->handle);
delete wctx;
return r;
}
extern "C" void rbd_quiesce_complete(rbd_image_t image, uint64_t handle, int r)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
ictx->state->quiesce_complete(handle, r);
}
| 263,251 | 34.288472 | 205 | cc |
null | ceph-main/src/librbd/api/Config.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Config.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/api/PoolMetadata.h"
#include "librbd/image/GetMetadataRequest.h"
#include <algorithm>
#include <boost/algorithm/string/predicate.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Config: " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
const uint32_t MAX_KEYS = 64;
typedef std::map<std::string_view, std::pair<std::string, config_source_t>> Parent;
static std::set<std::string_view> EXCLUDE_OPTIONS {
"rbd_auto_exclusive_lock_until_manual_request",
"rbd_default_format",
"rbd_default_pool",
"rbd_discard_on_zeroed_write_same",
"rbd_op_thread_timeout",
"rbd_op_threads",
"rbd_tracing",
"rbd_validate_names",
"rbd_validate_pool",
"rbd_mirror_pool_replayers_refresh_interval",
"rbd_config_pool_override_update_timestamp"
};
static std::set<std::string_view> EXCLUDE_IMAGE_OPTIONS {
"rbd_default_clone_format",
"rbd_default_data_pool",
"rbd_default_features",
"rbd_default_format",
"rbd_default_order",
"rbd_default_stripe_count",
"rbd_default_stripe_unit",
"rbd_journal_order",
"rbd_journal_pool",
"rbd_journal_splay_width"
};
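// Options maps every applicable "rbd_*" config key to a (value, source) pair.
// The constructor seeds the keys from the config schema, skipping the exclude
// lists above (and mirror options when only image-applicable settings are
// wanted); init() fills in the current config values and then overlays any
// pool-level metadata overrides.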
struct Options : Parent {
librados::IoCtx m_io_ctx;
Options(librados::IoCtx& io_ctx, bool image_apply_only_options) {
m_io_ctx.dup(io_ctx);
m_io_ctx.set_namespace("");
CephContext *cct = reinterpret_cast<CephContext *>(m_io_ctx.cct());
const std::string rbd_key_prefix("rbd_");
const std::string rbd_mirror_key_prefix("rbd_mirror_");
auto& schema = cct->_conf.get_schema();
for (auto& pair : schema) {
if (!boost::starts_with(pair.first, rbd_key_prefix)) {
continue;
} else if (EXCLUDE_OPTIONS.count(pair.first) != 0) {
continue;
} else if (image_apply_only_options &&
EXCLUDE_IMAGE_OPTIONS.count(pair.first) != 0) {
continue;
} else if (image_apply_only_options &&
boost::starts_with(pair.first, rbd_mirror_key_prefix)) {
continue;
}
insert({pair.first, {}});
}
}
int init() {
CephContext *cct = (CephContext *)m_io_ctx.cct();
for (auto& [k,v] : *this) {
int r = cct->_conf.get_val(k, &v.first);
ceph_assert(r == 0);
v.second = RBD_CONFIG_SOURCE_CONFIG;
}
std::string last_key = ImageCtx::METADATA_CONF_PREFIX;
bool more_results = true;
while (more_results) {
std::map<std::string, bufferlist> pairs;
int r = librbd::api::PoolMetadata<>::list(m_io_ctx, last_key, MAX_KEYS,
&pairs);
if (r < 0) {
return r;
}
if (pairs.empty()) {
break;
}
more_results = (pairs.size() == MAX_KEYS);
last_key = pairs.rbegin()->first;
for (auto kv : pairs) {
std::string key;
if (!util::is_metadata_config_override(kv.first, &key)) {
more_results = false;
break;
}
auto it = find(key);
if (it != end()) {
it->second = {{kv.second.c_str(), kv.second.length()},
RBD_CONFIG_SOURCE_POOL};
}
}
}
return 0;
}
};
} // anonymous namespace
template <typename I>
bool Config<I>::is_option_name(librados::IoCtx& io_ctx,
const std::string &name) {
Options opts(io_ctx, false);
return (opts.find(name) != opts.end());
}
template <typename I>
int Config<I>::list(librados::IoCtx& io_ctx,
std::vector<config_option_t> *options) {
Options opts(io_ctx, false);
int r = opts.init();
if (r < 0) {
return r;
}
for (auto& [k,v] : opts) {
options->push_back({std::string{k}, v.first, v.second});
}
return 0;
}
template <typename I>
bool Config<I>::is_option_name(I *image_ctx, const std::string &name) {
Options opts(image_ctx->md_ctx, true);
return (opts.find(name) != opts.end());
}
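// Image-level listing starts from the pool-level view (config + pool
// overrides) and then applies the image's METADATA_CONF_PREFIX metadata keys,
// reported with RBD_CONFIG_SOURCE_IMAGE.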
template <typename I>
int Config<I>::list(I *image_ctx, std::vector<config_option_t> *options) {
CephContext *cct = image_ctx->cct;
Options opts(image_ctx->md_ctx, true);
int r = opts.init();
if (r < 0) {
return r;
}
std::map<std::string, bufferlist> pairs;
C_SaferCond ctx;
auto req = image::GetMetadataRequest<I>::create(
image_ctx->md_ctx, image_ctx->header_oid, true,
ImageCtx::METADATA_CONF_PREFIX, ImageCtx::METADATA_CONF_PREFIX, 0U, &pairs,
&ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed reading image metadata: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto kv : pairs) {
std::string key;
if (!util::is_metadata_config_override(kv.first, &key)) {
break;
}
auto it = opts.find(key);
if (it != opts.end()) {
it->second = {{kv.second.c_str(), kv.second.length()},
RBD_CONFIG_SOURCE_IMAGE};
}
}
for (auto& [k,v] : opts) {
options->push_back({std::string{k}, v.first, v.second});
}
return 0;
}
template <typename I>
void Config<I>::apply_pool_overrides(librados::IoCtx& io_ctx,
ConfigProxy* config) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
Options opts(io_ctx, false);
int r = opts.init();
if (r < 0) {
lderr(cct) << "failed to read pool config overrides: " << cpp_strerror(r)
<< dendl;
return;
}
for (auto& [k,v] : opts) {
if (v.second == RBD_CONFIG_SOURCE_POOL) {
r = config->set_val(k, v.first);
if (r < 0) {
lderr(cct) << "failed to override pool config " << k << "="
<< v.first << ": " << cpp_strerror(r) << dendl;
}
}
}
}
} // namespace api
} // namespace librbd
template class librbd::api::Config<librbd::ImageCtx>;
| 6,087 | 25.017094 | 83 | cc |
null | ceph-main/src/librbd/api/Config.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_CONFIG_H
#define CEPH_LIBRBD_API_CONFIG_H
#include "common/config_fwd.h"
#include "include/common_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"
namespace librbd {
class ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class Config {
public:
static bool is_option_name(librados::IoCtx& io_ctx, const std::string &name);
static int list(librados::IoCtx& io_ctx,
std::vector<config_option_t> *options);
static bool is_option_name(ImageCtxT *image_ctx, const std::string &name);
static int list(ImageCtxT *image_ctx, std::vector<config_option_t> *options);
static void apply_pool_overrides(librados::IoCtx& io_ctx,
ConfigProxy* config);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Config<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_CONFIG_H
| 1,032 | 26.184211 | 79 | h |
null | ceph-main/src/librbd/api/DiffIterate.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/DiffIterate.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/internal.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/object_map/DiffRequest.h"
#include "include/rados/librados.hpp"
#include "include/interval_set.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "common/Throttle.h"
#include "osdc/Striper.h"
#include <boost/tuple/tuple.hpp>
#include <list>
#include <map>
#include <vector>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::DiffIterate: "
namespace librbd {
namespace api {
namespace {
struct DiffContext {
DiffIterate<>::Callback callback;
void *callback_arg;
bool whole_object;
bool include_parent;
uint64_t from_snap_id;
uint64_t end_snap_id;
OrderedThrottle throttle;
template <typename I>
DiffContext(I &image_ctx, DiffIterate<>::Callback callback,
void *callback_arg, bool _whole_object, bool _include_parent,
uint64_t _from_snap_id, uint64_t _end_snap_id)
: callback(callback), callback_arg(callback_arg),
whole_object(_whole_object), include_parent(_include_parent),
from_snap_id(_from_snap_id), end_snap_id(_end_snap_id),
throttle(image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"), true) {
}
};
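// C_DiffObject issues a throttled list-snaps request covering one image extent
// and, on completion, folds the per-snapshot deltas into (offset, length,
// exists) callbacks for the caller.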
template <typename I>
class C_DiffObject : public Context {
public:
C_DiffObject(I &image_ctx, DiffContext &diff_context, uint64_t image_offset,
uint64_t image_length)
: m_image_ctx(image_ctx), m_cct(image_ctx.cct),
m_diff_context(diff_context), m_image_offset(image_offset),
m_image_length(image_length) {
}
void send() {
Context* ctx = m_diff_context.throttle.start_op(this);
auto aio_comp = io::AioCompletion::create_and_start(
ctx, util::get_image_ctx(&m_image_ctx), io::AIO_TYPE_GENERIC);
int list_snaps_flags = 0;
if (!m_diff_context.include_parent || m_diff_context.from_snap_id != 0) {
list_snaps_flags |= io::LIST_SNAPS_FLAG_DISABLE_LIST_FROM_PARENT;
}
if (m_diff_context.whole_object) {
list_snaps_flags |= io::LIST_SNAPS_FLAG_WHOLE_OBJECT;
}
auto req = io::ImageDispatchSpec::create_list_snaps(
m_image_ctx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START,
aio_comp, {{m_image_offset, m_image_length}}, io::ImageArea::DATA,
{m_diff_context.from_snap_id, m_diff_context.end_snap_id},
list_snaps_flags, &m_snapshot_delta, {});
req->send();
}
protected:
typedef boost::tuple<uint64_t, size_t, bool> Diff;
typedef std::list<Diff> Diffs;
void finish(int r) override {
CephContext *cct = m_cct;
if (r < 0) {
ldout(cct, 20) << "list_snaps failed: " << m_image_offset << "~"
<< m_image_length << ": " << cpp_strerror(r) << dendl;
}
Diffs diffs;
ldout(cct, 20) << "image extent " << m_image_offset << "~"
<< m_image_length << ": list_snaps complete" << dendl;
compute_diffs(&diffs);
for (Diffs::const_iterator d = diffs.begin(); d != diffs.end(); ++d) {
r = m_diff_context.callback(d->get<0>(), d->get<1>(), d->get<2>(),
m_diff_context.callback_arg);
if (r < 0) {
break;
}
}
m_diff_context.throttle.end_op(r);
}
private:
I& m_image_ctx;
CephContext *m_cct;
DiffContext &m_diff_context;
uint64_t m_image_offset;
uint64_t m_image_length;
io::SnapshotDelta m_snapshot_delta;
void compute_diffs(Diffs *diffs) {
CephContext *cct = m_cct;
// merge per-snapshot deltas into an aggregate
io::SparseExtents aggregate_snapshot_extents;
for (auto& [key, snapshot_extents] : m_snapshot_delta) {
for (auto& snapshot_extent : snapshot_extents) {
auto state = snapshot_extent.get_val().state;
// ignore DNE object (and parent)
if ((state == io::SPARSE_EXTENT_STATE_DNE) ||
(key == io::INITIAL_WRITE_READ_SNAP_IDS &&
state == io::SPARSE_EXTENT_STATE_ZEROED)) {
continue;
}
aggregate_snapshot_extents.insert(
snapshot_extent.get_off(), snapshot_extent.get_len(),
{state, snapshot_extent.get_len()});
}
}
// build delta callback set
for (auto& snapshot_extent : aggregate_snapshot_extents) {
ldout(cct, 20) << "off=" << snapshot_extent.get_off() << ", "
<< "len=" << snapshot_extent.get_len() << ", "
<< "state=" << snapshot_extent.get_val().state << dendl;
diffs->emplace_back(
snapshot_extent.get_off(), snapshot_extent.get_len(),
snapshot_extent.get_val().state == io::SPARSE_EXTENT_STATE_DATA);
}
}
};
int simple_diff_cb(uint64_t off, size_t len, int exists, void *arg) {
// it's possible for a discard to create a hole in the parent image -- ignore
if (exists) {
interval_set<uint64_t> *diff = static_cast<interval_set<uint64_t> *>(arg);
diff->insert(off, len);
}
return 0;
}
} // anonymous namespace
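// Entry point: flush so that list-snaps sees prior writes, refresh the image,
// clip the requested range to the data area, and delegate to execute().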
template <typename I>
int DiffIterate<I>::diff_iterate(I *ictx,
const cls::rbd::SnapshotNamespace& from_snap_namespace,
const char *fromsnapname,
uint64_t off, uint64_t len,
bool include_parent, bool whole_object,
int (*cb)(uint64_t, size_t, int, void *),
void *arg)
{
ldout(ictx->cct, 20) << "diff_iterate " << ictx << " off = " << off
<< " len = " << len << dendl;
if (!ictx->data_ctx.is_valid()) {
return -ENODEV;
}
// ensure previous writes are visible to listsnaps
C_SaferCond flush_ctx;
{
std::shared_lock owner_locker{ictx->owner_lock};
auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, ictx,
io::AIO_TYPE_FLUSH);
auto req = io::ImageDispatchSpec::create_flush(
*ictx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START,
aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
req->send();
}
int r = flush_ctx.wait();
if (r < 0) {
return r;
}
r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
ictx->image_lock.lock_shared();
r = clip_io(ictx, off, &len, io::ImageArea::DATA);
ictx->image_lock.unlock_shared();
if (r < 0) {
return r;
}
DiffIterate command(*ictx, from_snap_namespace, fromsnapname, off, len,
include_parent, whole_object, cb, arg);
r = command.execute();
return r;
}
template <typename I>
int DiffIterate<I>::execute() {
CephContext* cct = m_image_ctx.cct;
ceph_assert(m_image_ctx.data_ctx.is_valid());
librados::snap_t from_snap_id = 0;
librados::snap_t end_snap_id;
uint64_t from_size = 0;
uint64_t end_size;
{
std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_from_snap_name) {
from_snap_id = m_image_ctx.get_snap_id(m_from_snap_namespace,
m_from_snap_name);
from_size = m_image_ctx.get_image_size(from_snap_id);
}
end_snap_id = m_image_ctx.snap_id;
end_size = m_image_ctx.get_image_size(end_snap_id);
}
if (from_snap_id == CEPH_NOSNAP) {
return -ENOENT;
}
if (from_snap_id == end_snap_id) {
// no diff.
return 0;
}
if (from_snap_id >= end_snap_id) {
return -EINVAL;
}
int r;
bool fast_diff_enabled = false;
BitVector<2> object_diff_state;
interval_set<uint64_t> parent_diff;
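// whole-object diffs can use the object map's fast-diff state; if that is
// unavailable we fall back to per-extent list-snaps below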
if (m_whole_object) {
C_SaferCond ctx;
auto req = object_map::DiffRequest<I>::create(&m_image_ctx, from_snap_id,
end_snap_id,
&object_diff_state, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
ldout(cct, 5) << "fast diff disabled" << dendl;
} else {
ldout(cct, 5) << "fast diff enabled" << dendl;
fast_diff_enabled = true;
// check parent overlap only if we are comparing to the beginning of time
if (m_include_parent && from_snap_id == 0) {
std::shared_lock image_locker{m_image_ctx.image_lock};
uint64_t raw_overlap = 0;
m_image_ctx.get_parent_overlap(m_image_ctx.snap_id, &raw_overlap);
auto overlap = m_image_ctx.reduce_parent_overlap(raw_overlap, false);
if (overlap.first > 0 && overlap.second == io::ImageArea::DATA) {
ldout(cct, 10) << " first getting parent diff" << dendl;
DiffIterate diff_parent(*m_image_ctx.parent, {}, nullptr, 0,
overlap.first, true, true, &simple_diff_cb,
&parent_diff);
r = diff_parent.execute();
if (r < 0) {
return r;
}
}
}
}
}
ldout(cct, 5) << "diff_iterate from " << from_snap_id << " to "
<< end_snap_id << " size from " << from_size
<< " to " << end_size << dendl;
DiffContext diff_context(m_image_ctx, m_callback, m_callback_arg,
m_whole_object, m_include_parent, from_snap_id,
end_snap_id);
uint64_t period = m_image_ctx.get_stripe_period();
uint64_t off = m_offset;
uint64_t left = m_length;
while (left > 0) {
uint64_t period_off = off - (off % period);
uint64_t read_len = std::min(period_off + period - off, left);
if (fast_diff_enabled) {
// map to extents
std::map<object_t,std::vector<ObjectExtent> > object_extents;
Striper::file_to_extents(cct, m_image_ctx.format_string,
&m_image_ctx.layout, off, read_len, 0,
object_extents, 0);
// get diff info for each object and merge adjacent stripe units
// into an aggregate (this also sorts them)
io::SparseExtents aggregate_sparse_extents;
for (auto& [object, extents] : object_extents) {
const uint64_t object_no = extents.front().objectno;
uint8_t diff_state = object_diff_state[object_no];
ldout(cct, 20) << "object " << object << ": diff_state="
<< (int)diff_state << dendl;
if (diff_state == object_map::DIFF_STATE_HOLE &&
from_snap_id == 0 && !parent_diff.empty()) {
// no data in child object -- report parent diff instead
for (auto& oe : extents) {
for (auto& be : oe.buffer_extents) {
interval_set<uint64_t> o;
o.insert(off + be.first, be.second);
o.intersection_of(parent_diff);
ldout(cct, 20) << " reporting parent overlap " << o << dendl;
for (auto e = o.begin(); e != o.end(); ++e) {
aggregate_sparse_extents.insert(e.get_start(), e.get_len(),
{io::SPARSE_EXTENT_STATE_DATA,
e.get_len()});
}
}
}
} else if (diff_state == object_map::DIFF_STATE_HOLE_UPDATED ||
diff_state == object_map::DIFF_STATE_DATA_UPDATED) {
auto state = (diff_state == object_map::DIFF_STATE_HOLE_UPDATED ?
io::SPARSE_EXTENT_STATE_ZEROED : io::SPARSE_EXTENT_STATE_DATA);
for (auto& oe : extents) {
for (auto& be : oe.buffer_extents) {
aggregate_sparse_extents.insert(off + be.first, be.second,
{state, be.second});
}
}
}
}
for (const auto& se : aggregate_sparse_extents) {
ldout(cct, 20) << "off=" << se.get_off() << ", len=" << se.get_len()
<< ", state=" << se.get_val().state << dendl;
r = m_callback(se.get_off(), se.get_len(),
se.get_val().state == io::SPARSE_EXTENT_STATE_DATA,
m_callback_arg);
if (r < 0) {
return r;
}
}
} else {
auto diff_object = new C_DiffObject<I>(m_image_ctx, diff_context, off,
read_len);
diff_object->send();
if (diff_context.throttle.pending_error()) {
r = diff_context.throttle.wait_for_ret();
return r;
}
}
left -= read_len;
off += read_len;
}
r = diff_context.throttle.wait_for_ret();
if (r < 0) {
return r;
}
return 0;
}
} // namespace api
} // namespace librbd
template class librbd::api::DiffIterate<librbd::ImageCtx>;
| 12,738 | 32.612137 | 100 | cc |
null | ceph-main/src/librbd/api/DiffIterate.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_DIFF_ITERATE_H
#define CEPH_LIBRBD_API_DIFF_ITERATE_H
#include "include/int_types.h"
#include "common/bit_vector.hpp"
#include "cls/rbd/cls_rbd_types.h"
namespace librbd {
class ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class DiffIterate {
public:
typedef int (*Callback)(uint64_t, size_t, int, void *);
static int diff_iterate(ImageCtxT *ictx,
const cls::rbd::SnapshotNamespace& from_snap_namespace,
const char *fromsnapname,
uint64_t off, uint64_t len, bool include_parent,
bool whole_object,
int (*cb)(uint64_t, size_t, int, void *),
void *arg);
private:
ImageCtxT &m_image_ctx;
cls::rbd::SnapshotNamespace m_from_snap_namespace;
const char* m_from_snap_name;
uint64_t m_offset;
uint64_t m_length;
bool m_include_parent;
bool m_whole_object;
Callback m_callback;
void *m_callback_arg;
DiffIterate(ImageCtxT &image_ctx,
const cls::rbd::SnapshotNamespace& from_snap_namespace,
const char *from_snap_name, uint64_t off, uint64_t len,
bool include_parent, bool whole_object, Callback callback,
void *callback_arg)
: m_image_ctx(image_ctx), m_from_snap_namespace(from_snap_namespace),
m_from_snap_name(from_snap_name), m_offset(off),
m_length(len), m_include_parent(include_parent),
m_whole_object(whole_object), m_callback(callback),
m_callback_arg(callback_arg)
{
}
int execute();
int diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id,
BitVector<2>* object_diff_state);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::DiffIterate<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_DIFF_ITERATE_H
| 1,897 | 27.328358 | 74 | h |
null | ceph-main/src/librbd/api/Group.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Cond.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/api/Group.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/internal.h"
#include "librbd/io/AioCompletion.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Group: " << __func__ << ": "
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
// list binds to list() here, so std::list is explicitly used below
using ceph::bufferlist;
using librados::snap_t;
using librados::IoCtx;
using librados::Rados;
namespace librbd {
namespace api {
namespace {
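// Looks up the image snapshot that belongs to the given group snapshot
// namespace; returns CEPH_NOSNAP if the image has no such snapshot.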
template <typename I>
snap_t get_group_snap_id(I* ictx,
const cls::rbd::SnapshotNamespace& in_snap_namespace) {
ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
auto it = ictx->snap_ids.lower_bound({cls::rbd::GroupSnapshotNamespace{},
""});
for (; it != ictx->snap_ids.end(); ++it) {
if (it->first.first == in_snap_namespace) {
return it->second;
} else if (!std::holds_alternative<cls::rbd::GroupSnapshotNamespace>(
it->first.first)) {
break;
}
}
return CEPH_NOSNAP;
}
string generate_uuid(librados::IoCtx& io_ctx)
{
Rados rados(io_ctx);
uint64_t bid = rados.get_instance_id();
uint32_t extra = rand() % 0xFFFFFFFF;
std::ostringstream bid_ss;
bid_ss << std::hex << bid << std::hex << extra;
return bid_ss.str();
}
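// Pages through the group's snapshot records, max_read entries at a time.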
int group_snap_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<cls::rbd::GroupSnapshot> *cls_snaps)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
vector<string> ind_snap_names;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
string group_header_oid = util::group_header_name(group_id);
const int max_read = 1024;
cls::rbd::GroupSnapshot snap_last;
for (;;) {
vector<cls::rbd::GroupSnapshot> snaps_page;
r = cls_client::group_snap_list(&group_ioctx, group_header_oid,
snap_last, max_read, &snaps_page);
if (r < 0) {
lderr(cct) << "error reading snap list from group: "
<< cpp_strerror(-r) << dendl;
return r;
}
cls_snaps->insert(cls_snaps->end(), snaps_page.begin(), snaps_page.end());
if (snaps_page.size() < max_read) {
break;
}
snap_last = *snaps_page.rbegin();
}
return 0;
}
std::string calc_ind_image_snap_name(uint64_t pool_id,
const std::string &group_id,
const std::string &snap_id)
{
std::stringstream ind_snap_name_stream;
ind_snap_name_stream << ".group." << std::hex << pool_id << "_"
<< group_id << "_" << snap_id;
return ind_snap_name_stream.str();
}
int group_image_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<cls::rbd::GroupImageStatus> *image_ids)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
string group_header_oid = util::group_header_name(group_id);
ldout(cct, 20) << "listing images in group name "
<< group_name << " group id " << group_header_oid << dendl;
image_ids->clear();
const int max_read = 1024;
cls::rbd::GroupImageSpec start_last;
do {
std::vector<cls::rbd::GroupImageStatus> image_ids_page;
r = cls_client::group_image_list(&group_ioctx, group_header_oid,
start_last, max_read, &image_ids_page);
if (r < 0) {
lderr(cct) << "error reading image list from group: "
<< cpp_strerror(-r) << dendl;
return r;
}
image_ids->insert(image_ids->end(),
image_ids_page.begin(), image_ids_page.end());
if (image_ids_page.size() > 0)
start_last = image_ids_page.rbegin()->spec;
r = image_ids_page.size();
} while (r == max_read);
return 0;
}
int group_image_remove(librados::IoCtx& group_ioctx, string group_id,
librados::IoCtx& image_ioctx, string image_id)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_header_oid = util::group_header_name(group_id);
string image_header_oid = util::header_name(image_id);
ldout(cct, 20) << "removing image " << image_id
<< " image id " << image_header_oid << dendl;
cls::rbd::GroupSpec group_spec(group_id, group_ioctx.get_id());
cls::rbd::GroupImageStatus incomplete_st(image_id, image_ioctx.get_id(),
cls::rbd::GROUP_IMAGE_LINK_STATE_INCOMPLETE);
cls::rbd::GroupImageSpec spec(image_id, image_ioctx.get_id());
int r = cls_client::group_image_set(&group_ioctx, group_header_oid,
incomplete_st);
if (r < 0) {
lderr(cct) << "couldn't put image into removing state: "
<< cpp_strerror(-r) << dendl;
return r;
}
r = cls_client::image_group_remove(&image_ioctx, image_header_oid,
group_spec);
if ((r < 0) && (r != -ENOENT)) {
lderr(cct) << "couldn't remove group reference from image"
<< cpp_strerror(-r) << dendl;
return r;
} else if (r >= 0) {
ImageWatcher<>::notify_header_update(image_ioctx, image_header_oid);
}
r = cls_client::group_image_remove(&group_ioctx, group_header_oid, spec);
if (r < 0) {
lderr(cct) << "couldn't remove image from group"
<< cpp_strerror(-r) << dendl;
return r;
}
return 0;
}
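// Opens every image that participates in the group snapshot, removes the
// per-image snapshots in the group's snapshot namespace (missing ones are
// tolerated), and finally deletes the group snapshot record itself.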
int group_snap_remove_by_record(librados::IoCtx& group_ioctx,
const cls::rbd::GroupSnapshot& group_snap,
const std::string& group_id,
const std::string& group_header_oid) {
CephContext *cct = (CephContext *)group_ioctx.cct();
std::vector<C_SaferCond*> on_finishes;
int r, ret_code;
std::vector<librbd::ImageCtx*> ictxs;
cls::rbd::GroupSnapshotNamespace ne{group_ioctx.get_id(), group_id,
group_snap.id};
ldout(cct, 20) << "Removing snapshots" << dendl;
int snap_count = group_snap.snaps.size();
for (int i = 0; i < snap_count; ++i) {
librbd::IoCtx image_io_ctx;
r = util::create_ioctx(group_ioctx, "image", group_snap.snaps[i].pool, {},
&image_io_ctx);
if (r < 0) {
return r;
}
librbd::ImageCtx* image_ctx = new ImageCtx("", group_snap.snaps[i].image_id,
nullptr, image_io_ctx, false);
C_SaferCond* on_finish = new C_SaferCond;
image_ctx->state->open(0, on_finish);
ictxs.push_back(image_ctx);
on_finishes.push_back(on_finish);
}
ret_code = 0;
for (int i = 0; i < snap_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0) {
ictxs[i] = nullptr;
ret_code = r;
}
}
if (ret_code != 0) {
goto finish;
}
ldout(cct, 20) << "Opened participating images. " <<
"Deleting snapshots themselves." << dendl;
for (int i = 0; i < snap_count; ++i) {
ImageCtx *ictx = ictxs[i];
on_finishes[i] = new C_SaferCond;
std::string snap_name;
ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
ictx->image_lock.unlock_shared();
if (r >= 0) {
ldout(cct, 20) << "removing individual snapshot from image " << ictx->name
<< dendl;
ictx->operations->snap_remove(ne, snap_name, on_finishes[i]);
} else {
// We are ok to ignore missing image snapshots. The snapshot could have
// been inconsistent in the first place.
on_finishes[i]->complete(0);
}
}
for (int i = 0; i < snap_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0 && r != -ENOENT) {
// if previous attempts to remove this snapshot failed then the image's
// snapshot may not exist
lderr(cct) << "Failed deleting image snapshot. Ret code: " << r << dendl;
ret_code = r;
}
}
if (ret_code != 0) {
goto finish;
}
ldout(cct, 20) << "Removed images snapshots removing snapshot record."
<< dendl;
r = cls_client::group_snap_remove(&group_ioctx, group_header_oid,
group_snap.id);
if (r < 0) {
ret_code = r;
goto finish;
}
finish:
for (int i = 0; i < snap_count; ++i) {
if (ictxs[i] != nullptr) {
ictxs[i]->state->close();
}
}
return ret_code;
}
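// Rolls a group back to one of its snapshots: open the member images, block
// incoming lock requests and acquire the exclusive locks, roll each image back
// to its snapshot in the group's namespace, then close everything.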
int group_snap_rollback_by_record(librados::IoCtx& group_ioctx,
const cls::rbd::GroupSnapshot& group_snap,
const std::string& group_id,
const std::string& group_header_oid,
ProgressContext& pctx) {
CephContext *cct = (CephContext *)group_ioctx.cct();
std::vector<C_SaferCond*> on_finishes;
int r, ret_code;
std::vector<librbd::ImageCtx*> ictxs;
cls::rbd::GroupSnapshotNamespace ne{group_ioctx.get_id(), group_id,
group_snap.id};
ldout(cct, 20) << "Rolling back snapshots" << dendl;
int snap_count = group_snap.snaps.size();
for (int i = 0; i < snap_count; ++i) {
librados::IoCtx image_io_ctx;
r = util::create_ioctx(group_ioctx, "image", group_snap.snaps[i].pool, {},
&image_io_ctx);
if (r < 0) {
return r;
}
librbd::ImageCtx* image_ctx = new ImageCtx("", group_snap.snaps[i].image_id,
nullptr, image_io_ctx, false);
C_SaferCond* on_finish = new C_SaferCond;
image_ctx->state->open(0, on_finish);
ictxs.push_back(image_ctx);
on_finishes.push_back(on_finish);
}
ret_code = 0;
for (int i = 0; i < snap_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0) {
ictxs[i] = nullptr;
ret_code = r;
}
}
if (ret_code != 0) {
goto finish;
}
ldout(cct, 20) << "Requesting exclusive locks for images" << dendl;
for (auto ictx: ictxs) {
std::shared_lock owner_lock{ictx->owner_lock};
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(-EBUSY);
}
}
for (int i = 0; i < snap_count; ++i) {
ImageCtx *ictx = ictxs[i];
std::shared_lock owner_lock{ictx->owner_lock};
on_finishes[i] = new C_SaferCond;
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->acquire_lock(on_finishes[i]);
}
}
ret_code = 0;
for (int i = 0; i < snap_count; ++i) {
r = 0;
ImageCtx *ictx = ictxs[i];
if (ictx->exclusive_lock != nullptr) {
r = on_finishes[i]->wait();
}
delete on_finishes[i];
if (r < 0) {
ret_code = r;
}
}
if (ret_code != 0) {
goto finish;
}
for (int i = 0; i < snap_count; ++i) {
ImageCtx *ictx = ictxs[i];
on_finishes[i] = new C_SaferCond;
std::shared_lock owner_locker{ictx->owner_lock};
std::string snap_name;
ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
ictx->image_lock.unlock_shared();
if (r >= 0) {
ldout(cct, 20) << "rolling back to individual snapshot for image " << ictx->name
<< dendl;
ictx->operations->execute_snap_rollback(ne, snap_name, pctx, on_finishes[i]);
} else {
on_finishes[i]->complete(r);
}
}
for (int i = 0; i < snap_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0 && r != -ENOENT) {
lderr(cct) << "Failed rolling back group to snapshot. Ret code: " << r << dendl;
ret_code = r;
}
}
finish:
for (int i = 0; i < snap_count; ++i) {
if (ictxs[i] != nullptr) {
ictxs[i]->state->close();
}
}
return ret_code;
}
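// Quiesce/unquiesce notifications are broadcast to all member images so that a
// group snapshot is taken at a mutually consistent point; a failed quiesce is
// rolled back by notifying unquiesce on every image.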
template <typename I>
void notify_unquiesce(std::vector<I*> &ictxs,
const std::vector<uint64_t> &requests) {
if (requests.empty()) {
return;
}
ceph_assert(requests.size() == ictxs.size());
int image_count = ictxs.size();
std::vector<C_SaferCond> on_finishes(image_count);
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
ictx->image_watcher->notify_unquiesce(requests[i], &on_finishes[i]);
}
for (int i = 0; i < image_count; ++i) {
on_finishes[i].wait();
}
}
template <typename I>
int notify_quiesce(std::vector<I*> &ictxs, ProgressContext &prog_ctx,
std::vector<uint64_t> *requests) {
int image_count = ictxs.size();
std::vector<C_SaferCond> on_finishes(image_count);
requests->resize(image_count);
for (int i = 0; i < image_count; ++i) {
auto ictx = ictxs[i];
ictx->image_watcher->notify_quiesce(&(*requests)[i], prog_ctx,
&on_finishes[i]);
}
int ret_code = 0;
for (int i = 0; i < image_count; ++i) {
int r = on_finishes[i].wait();
if (r < 0) {
ret_code = r;
}
}
if (ret_code != 0) {
notify_unquiesce(ictxs, *requests);
}
return ret_code;
}
} // anonymous namespace
template <typename I>
int Group<I>::image_remove_by_id(librados::IoCtx& group_ioctx,
const char *group_name,
librados::IoCtx& image_ioctx,
const char *image_id)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
ldout(cct, 20) << "io_ctx=" << &group_ioctx
<< " group name " << group_name << " image "
<< &image_ioctx << " id " << image_id << dendl;
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY, group_name,
&group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 20) << "removing image from group name " << group_name
<< " group id " << group_id << dendl;
return group_image_remove(group_ioctx, group_id, image_ioctx, string(image_id));
}
template <typename I>
int Group<I>::create(librados::IoCtx& io_ctx, const char *group_name)
{
CephContext *cct = (CephContext *)io_ctx.cct();
string id = generate_uuid(io_ctx);
ldout(cct, 2) << "adding group to directory..." << dendl;
int r = cls_client::group_dir_add(&io_ctx, RBD_GROUP_DIRECTORY, group_name,
id);
if (r < 0) {
lderr(cct) << "error adding group to directory: "
<< cpp_strerror(r)
<< dendl;
return r;
}
string header_oid = util::group_header_name(id);
r = io_ctx.create(header_oid, true);
if (r < 0) {
lderr(cct) << "error creating group header: " << cpp_strerror(r) << dendl;
goto err_remove_from_dir;
}
return 0;
err_remove_from_dir:
int remove_r = cls_client::group_dir_remove(&io_ctx, RBD_GROUP_DIRECTORY,
group_name, id);
if (remove_r < 0) {
lderr(cct) << "error cleaning up group from rbd_directory "
<< "object after creation failed: " << cpp_strerror(remove_r)
<< dendl;
}
return r;
}
template <typename I>
int Group<I>::remove(librados::IoCtx& io_ctx, const char *group_name)
{
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "group_remove " << &io_ctx << " " << group_name << dendl;
std::string group_id;
int r = cls_client::dir_get_id(&io_ctx, RBD_GROUP_DIRECTORY,
std::string(group_name), &group_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error getting id of group" << dendl;
return r;
}
string group_header_oid = util::group_header_name(group_id);
std::vector<cls::rbd::GroupSnapshot> snaps;
r = group_snap_list(io_ctx, group_name, &snaps);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing group snapshots" << dendl;
return r;
}
for (auto &snap : snaps) {
r = group_snap_remove_by_record(io_ctx, snap, group_id, group_header_oid);
if (r < 0) {
return r;
}
}
std::vector<cls::rbd::GroupImageStatus> images;
r = group_image_list(io_ctx, group_name, &images);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing group images" << dendl;
return r;
}
for (auto image : images) {
IoCtx image_ioctx;
r = util::create_ioctx(io_ctx, "image", image.spec.pool_id, {},
&image_ioctx);
if (r < 0) {
return r;
}
r = group_image_remove(io_ctx, group_id, image_ioctx, image.spec.image_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error removing image from a group" << dendl;
return r;
}
}
string header_oid = util::group_header_name(group_id);
r = io_ctx.remove(header_oid);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error removing header: " << cpp_strerror(-r) << dendl;
return r;
}
r = cls_client::group_dir_remove(&io_ctx, RBD_GROUP_DIRECTORY,
group_name, group_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error removing group from directory" << dendl;
return r;
}
return 0;
}
template <typename I>
int Group<I>::list(IoCtx& io_ctx, vector<string> *names)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << "io_ctx=" << &io_ctx << dendl;
int max_read = 1024;
string last_read = "";
int r;
do {
map<string, string> groups;
r = cls_client::group_dir_list(&io_ctx, RBD_GROUP_DIRECTORY, last_read,
max_read, &groups);
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "error listing group in directory: "
<< cpp_strerror(r) << dendl;
} else {
r = 0;
}
return r;
}
for (pair<string, string> group : groups) {
names->push_back(group.first);
}
if (!groups.empty()) {
last_read = groups.rbegin()->first;
}
r = groups.size();
} while (r == max_read);
return 0;
}
template <typename I>
int Group<I>::image_add(librados::IoCtx& group_ioctx, const char *group_name,
librados::IoCtx& image_ioctx, const char *image_name)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
ldout(cct, 20) << "io_ctx=" << &group_ioctx
<< " group name " << group_name << " image "
<< &image_ioctx << " name " << image_name << dendl;
if (group_ioctx.get_namespace() != image_ioctx.get_namespace()) {
lderr(cct) << "group and image cannot be in different namespaces" << dendl;
return -EINVAL;
}
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY, group_name,
&group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
string group_header_oid = util::group_header_name(group_id);
ldout(cct, 20) << "adding image to group name " << group_name
<< " group id " << group_header_oid << dendl;
string image_id;
r = cls_client::dir_get_id(&image_ioctx, RBD_DIRECTORY, image_name,
&image_id);
if (r < 0) {
lderr(cct) << "error reading image id object: "
<< cpp_strerror(-r) << dendl;
return r;
}
string image_header_oid = util::header_name(image_id);
ldout(cct, 20) << "adding image " << image_name
<< " image id " << image_header_oid << dendl;
cls::rbd::GroupImageStatus incomplete_st(
image_id, image_ioctx.get_id(),
cls::rbd::GROUP_IMAGE_LINK_STATE_INCOMPLETE);
cls::rbd::GroupImageStatus attached_st(
image_id, image_ioctx.get_id(), cls::rbd::GROUP_IMAGE_LINK_STATE_ATTACHED);
r = cls_client::group_image_set(&group_ioctx, group_header_oid,
incomplete_st);
cls::rbd::GroupSpec group_spec(group_id, group_ioctx.get_id());
if (r < 0) {
lderr(cct) << "error adding image reference to group: "
<< cpp_strerror(-r) << dendl;
return r;
}
r = cls_client::image_group_add(&image_ioctx, image_header_oid, group_spec);
if (r < 0) {
lderr(cct) << "error adding group reference to image: "
<< cpp_strerror(-r) << dendl;
cls::rbd::GroupImageSpec spec(image_id, image_ioctx.get_id());
cls_client::group_image_remove(&group_ioctx, group_header_oid, spec);
// Ignore errors in the clean up procedure.
return r;
}
ImageWatcher<>::notify_header_update(image_ioctx, image_header_oid);
r = cls_client::group_image_set(&group_ioctx, group_header_oid,
attached_st);
return r;
}
template <typename I>
int Group<I>::image_remove(librados::IoCtx& group_ioctx, const char *group_name,
librados::IoCtx& image_ioctx, const char *image_name)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
ldout(cct, 20) << "io_ctx=" << &group_ioctx
<< " group name " << group_name << " image "
<< &image_ioctx << " name " << image_name << dendl;
if (group_ioctx.get_namespace() != image_ioctx.get_namespace()) {
lderr(cct) << "group and image cannot be in different namespaces" << dendl;
return -EINVAL;
}
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY, group_name,
&group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 20) << "removing image from group name " << group_name
<< " group id " << group_id << dendl;
string image_id;
r = cls_client::dir_get_id(&image_ioctx, RBD_DIRECTORY, image_name,
&image_id);
if (r < 0) {
lderr(cct) << "error reading image id object: "
<< cpp_strerror(-r) << dendl;
return r;
}
r = group_image_remove(group_ioctx, group_id, image_ioctx, image_id);
return r;
}
template <typename I>
int Group<I>::image_list(librados::IoCtx& group_ioctx,
const char *group_name,
std::vector<group_image_info_t>* images)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
ldout(cct, 20) << "io_ctx=" << &group_ioctx
<< " group name " << group_name << dendl;
std::vector<cls::rbd::GroupImageStatus> image_ids;
group_image_list(group_ioctx, group_name, &image_ids);
for (auto image_id : image_ids) {
IoCtx ioctx;
int r = util::create_ioctx(group_ioctx, "image", image_id.spec.pool_id, {},
&ioctx);
if (r < 0) {
return r;
}
std::string image_name;
r = cls_client::dir_get_name(&ioctx, RBD_DIRECTORY,
image_id.spec.image_id, &image_name);
if (r < 0) {
return r;
}
images->push_back(
group_image_info_t {
image_name,
ioctx.get_id(),
static_cast<group_image_state_t>(image_id.state)});
}
return 0;
}
template <typename I>
int Group<I>::rename(librados::IoCtx& io_ctx, const char *src_name,
const char *dest_name)
{
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "group_rename " << &io_ctx << " " << src_name
<< " -> " << dest_name << dendl;
std::string group_id;
int r = cls_client::dir_get_id(&io_ctx, RBD_GROUP_DIRECTORY,
std::string(src_name), &group_id);
if (r < 0) {
if (r != -ENOENT)
lderr(cct) << "error getting id of group" << dendl;
return r;
}
r = cls_client::group_dir_rename(&io_ctx, RBD_GROUP_DIRECTORY,
src_name, dest_name, group_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error renaming group from directory" << dendl;
return r;
}
return 0;
}
template <typename I>
int Group<I>::image_get_group(I *ictx, group_info_t *group_info)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
if (RBD_GROUP_INVALID_POOL != ictx->group_spec.pool_id) {
IoCtx ioctx;
r = util::create_ioctx(ictx->md_ctx, "group", ictx->group_spec.pool_id, {},
&ioctx);
if (r < 0) {
return r;
}
std::string group_name;
r = cls_client::dir_get_name(&ioctx, RBD_GROUP_DIRECTORY,
ictx->group_spec.group_id, &group_name);
if (r < 0)
return r;
group_info->pool = ioctx.get_id();
group_info->name = group_name;
} else {
group_info->pool = RBD_GROUP_INVALID_POOL;
group_info->name = "";
}
return 0;
}
template <typename I>
int Group<I>::snap_create(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name,
uint32_t flags) {
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
cls::rbd::GroupSnapshot group_snap;
vector<cls::rbd::ImageSnapshotSpec> image_snaps;
std::string ind_snap_name;
std::vector<librbd::ImageCtx*> ictxs;
std::vector<C_SaferCond*> on_finishes;
std::vector<uint64_t> quiesce_requests;
NoOpProgressContext prog_ctx;
uint64_t internal_flags = 0;
int r = util::snap_create_flags_api_to_internal(cct, flags, &internal_flags);
if (r < 0) {
return r;
}
internal_flags &= ~(SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE |
SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR);
r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY, group_name,
&group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
std::vector<cls::rbd::GroupImageStatus> images;
r = group_image_list(group_ioctx, group_name, &images);
if (r < 0) {
return r;
}
int image_count = images.size();
ldout(cct, 20) << "Found " << image_count << " images in group" << dendl;
image_snaps = vector<cls::rbd::ImageSnapshotSpec>(image_count,
cls::rbd::ImageSnapshotSpec());
for (int i = 0; i < image_count; ++i) {
image_snaps[i].pool = images[i].spec.pool_id;
image_snaps[i].image_id = images[i].spec.image_id;
}
string group_header_oid = util::group_header_name(group_id);
group_snap.id = generate_uuid(group_ioctx);
group_snap.name = string(snap_name);
group_snap.state = cls::rbd::GROUP_SNAPSHOT_STATE_INCOMPLETE;
group_snap.snaps = image_snaps;
cls::rbd::GroupSnapshotNamespace ne{group_ioctx.get_id(), group_id,
group_snap.id};
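  // Persist the group snapshot record in the INCOMPLETE state first; it is
  // flipped to COMPLETE only after every member image snapshot has been
  // created successfully.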
r = cls_client::group_snap_set(&group_ioctx, group_header_oid, group_snap);
if (r == -EEXIST) {
lderr(cct) << "snapshot with this name already exists: "
<< cpp_strerror(r)
<< dendl;
}
int ret_code = 0;
if (r < 0) {
ret_code = r;
goto finish;
}
for (auto image: images) {
librbd::IoCtx image_io_ctx;
r = util::create_ioctx(group_ioctx, "image", image.spec.pool_id, {},
&image_io_ctx);
if (r < 0) {
ret_code = r;
goto finish;
}
ldout(cct, 20) << "Opening image with id " << image.spec.image_id << dendl;
librbd::ImageCtx* image_ctx = new ImageCtx("", image.spec.image_id.c_str(),
nullptr, image_io_ctx, false);
C_SaferCond* on_finish = new C_SaferCond;
image_ctx->state->open(0, on_finish);
ictxs.push_back(image_ctx);
on_finishes.push_back(on_finish);
}
ldout(cct, 20) << "Issued open request waiting for the completion" << dendl;
ret_code = 0;
for (int i = 0; i < image_count; ++i) {
ldout(cct, 20) << "Waiting for completion on on_finish: " <<
on_finishes[i] << dendl;
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0) {
ictxs[i] = nullptr;
ret_code = r;
}
}
if (ret_code != 0) {
goto remove_record;
}
if ((flags & RBD_SNAP_CREATE_SKIP_QUIESCE) == 0) {
ldout(cct, 20) << "Sending quiesce notification" << dendl;
ret_code = notify_quiesce(ictxs, prog_ctx, &quiesce_requests);
if (ret_code != 0 && (flags & RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR) == 0) {
goto remove_record;
}
}
ldout(cct, 20) << "Requesting exclusive locks for images" << dendl;
for (auto ictx: ictxs) {
std::shared_lock owner_lock{ictx->owner_lock};
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(-EBUSY);
}
}
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
std::shared_lock owner_lock{ictx->owner_lock};
on_finishes[i] = new C_SaferCond;
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->acquire_lock(on_finishes[i]);
}
}
ret_code = 0;
for (int i = 0; i < image_count; ++i) {
r = 0;
ImageCtx *ictx = ictxs[i];
if (ictx->exclusive_lock != nullptr) {
r = on_finishes[i]->wait();
}
delete on_finishes[i];
if (r < 0) {
ret_code = r;
}
}
if (ret_code != 0) {
notify_unquiesce(ictxs, quiesce_requests);
goto remove_record;
}
ind_snap_name = calc_ind_image_snap_name(group_ioctx.get_id(), group_id,
group_snap.id);
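  // Create the individual image snapshots; quiesce has already been handled
  // at the group level, so the per-image quiesce notification is skipped.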
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
C_SaferCond* on_finish = new C_SaferCond;
std::shared_lock owner_locker{ictx->owner_lock};
ictx->operations->execute_snap_create(
ne, ind_snap_name.c_str(), on_finish, 0,
SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE, prog_ctx);
on_finishes[i] = on_finish;
}
ret_code = 0;
for (int i = 0; i < image_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0) {
ret_code = r;
} else {
ImageCtx *ictx = ictxs[i];
ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
ictx->image_lock.unlock_shared();
if (snap_id == CEPH_NOSNAP) {
ldout(cct, 20) << "Couldn't find created snapshot with namespace: "
<< ne << dendl;
ret_code = -ENOENT;
} else {
image_snaps[i].snap_id = snapid_t(snap_id);
image_snaps[i].pool = ictx->md_ctx.get_id();
image_snaps[i].image_id = ictx->id;
}
}
}
if (ret_code != 0) {
goto remove_image_snaps;
}
group_snap.snaps = image_snaps;
group_snap.state = cls::rbd::GROUP_SNAPSHOT_STATE_COMPLETE;
r = cls_client::group_snap_set(&group_ioctx, group_header_oid, group_snap);
if (r < 0) {
ret_code = r;
goto remove_image_snaps;
}
ldout(cct, 20) << "Sending unquiesce notification" << dendl;
notify_unquiesce(ictxs, quiesce_requests);
goto finish;
remove_image_snaps:
notify_unquiesce(ictxs, quiesce_requests);
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
ldout(cct, 20) << "Removing individual snapshot with name: " <<
ind_snap_name << dendl;
on_finishes[i] = new C_SaferCond;
std::string snap_name;
ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
ictx->image_lock.unlock_shared();
if (r >= 0) {
ictx->operations->snap_remove(ne, snap_name.c_str(), on_finishes[i]);
} else {
// Ignore missing image snapshots. The whole snapshot could have been
// inconsistent.
on_finishes[i]->complete(0);
}
}
for (int i = 0, n = on_finishes.size(); i < n; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
    // If previous attempts to remove this snapshot failed, the image's
    // snapshot may no longer exist.
    if (r < 0 && r != -ENOENT) {
lderr(cct) << "Failed cleaning up image snapshot. Ret code: " << r << dendl;
// just report error, but don't abort the process
}
}
remove_record:
r = cls_client::group_snap_remove(&group_ioctx, group_header_oid,
group_snap.id);
if (r < 0) {
lderr(cct) << "error while cleaning up group snapshot" << dendl;
// we ignore return value in clean up
}
finish:
for (int i = 0, n = ictxs.size(); i < n; ++i) {
if (ictxs[i] != nullptr) {
ictxs[i]->state->close();
}
}
return ret_code;
}
template <typename I>
int Group<I>::snap_remove(librados::IoCtx& group_ioctx, const char *group_name,
const char *snap_name)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
std::vector<cls::rbd::GroupSnapshot> snaps;
r = group_snap_list(group_ioctx, group_name, &snaps);
if (r < 0) {
return r;
}
cls::rbd::GroupSnapshot *group_snap = nullptr;
for (auto &snap : snaps) {
if (snap.name == string(snap_name)) {
group_snap = &snap;
break;
}
}
if (group_snap == nullptr) {
return -ENOENT;
}
string group_header_oid = util::group_header_name(group_id);
r = group_snap_remove_by_record(group_ioctx, *group_snap, group_id,
group_header_oid);
return r;
}
template <typename I>
int Group<I>::snap_rename(librados::IoCtx& group_ioctx, const char *group_name,
const char *old_snap_name,
const char *new_snap_name) {
CephContext *cct = (CephContext *)group_ioctx.cct();
if (0 == strcmp(old_snap_name, new_snap_name))
return -EEXIST;
std::string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "error reading group id object: " << cpp_strerror(r) << dendl;
return r;
}
std::vector<cls::rbd::GroupSnapshot> group_snaps;
r = group_snap_list(group_ioctx, group_name, &group_snaps);
if (r < 0) {
return r;
}
cls::rbd::GroupSnapshot group_snap;
for (auto &snap : group_snaps) {
if (snap.name == old_snap_name) {
group_snap = snap;
break;
}
}
if (group_snap.id.empty()) {
return -ENOENT;
}
std::string group_header_oid = util::group_header_name(group_id);
group_snap.name = new_snap_name;
r = cls_client::group_snap_set(&group_ioctx, group_header_oid, group_snap);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Group<I>::snap_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<group_snap_info_t> *snaps)
{
std::vector<cls::rbd::GroupSnapshot> cls_snaps;
int r = group_snap_list(group_ioctx, group_name, &cls_snaps);
if (r < 0) {
return r;
}
for (auto snap : cls_snaps) {
snaps->push_back(
group_snap_info_t {
snap.name,
static_cast<group_snap_state_t>(snap.state)});
}
return 0;
}
template <typename I>
int Group<I>::snap_rollback(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name,
ProgressContext& pctx)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r) << dendl;
return r;
}
std::vector<cls::rbd::GroupSnapshot> snaps;
r = group_snap_list(group_ioctx, group_name, &snaps);
if (r < 0) {
return r;
}
cls::rbd::GroupSnapshot *group_snap = nullptr;
for (auto &snap : snaps) {
if (snap.name == string(snap_name)) {
group_snap = &snap;
break;
}
}
if (group_snap == nullptr) {
return -ENOENT;
}
string group_header_oid = util::group_header_name(group_id);
r = group_snap_rollback_by_record(group_ioctx, *group_snap, group_id,
group_header_oid, pctx);
return r;
}
} // namespace api
} // namespace librbd
template class librbd::api::Group<librbd::ImageCtx>;
| 35,799 | 26.795031 | 127 | cc |
null | ceph-main/src/librbd/api/Group.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_GROUP_H
#define CEPH_LIBRBD_API_GROUP_H
#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"
#include <string>
#include <vector>
namespace librbd {
struct ImageCtx;
namespace api {
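// Static-only facade over the cls_rbd group directory/header objects.
// A minimal usage sketch (assuming an open IoCtx for the pool):
// std::vector<std::string> names;
// librbd::api::Group<>::list(io_ctx, &names);
// librbd::api::Group<>::image_add(io_ctx, "grp", image_io_ctx, "img");
// librbd::api::Group<>::snap_create(io_ctx, "grp", "snap", 0);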
template <typename ImageCtxT = librbd::ImageCtx>
struct Group {
static int create(librados::IoCtx& io_ctx, const char *group_name);
static int remove(librados::IoCtx& io_ctx, const char *group_name);
static int list(librados::IoCtx& io_ctx, std::vector<std::string> *names);
static int rename(librados::IoCtx& io_ctx, const char *src_group_name,
const char *dest_group_name);
static int image_add(librados::IoCtx& group_ioctx, const char *group_name,
librados::IoCtx& image_ioctx, const char *image_name);
static int image_remove(librados::IoCtx& group_ioctx, const char *group_name,
librados::IoCtx& image_ioctx, const char *image_name);
static int image_remove_by_id(librados::IoCtx& group_ioctx,
const char *group_name,
librados::IoCtx& image_ioctx,
const char *image_id);
static int image_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<group_image_info_t> *images);
static int image_get_group(ImageCtxT *ictx, group_info_t *group_info);
static int snap_create(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name,
uint32_t flags);
static int snap_remove(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name);
static int snap_rename(librados::IoCtx& group_ioctx, const char *group_name,
const char *old_snap_name, const char *new_snap_name);
static int snap_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<group_snap_info_t> *snaps);
static int snap_rollback(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name,
ProgressContext& pctx);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Group<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_GROUP_H
| 2,386 | 38.131148 | 79 | h |
null | ceph-main/src/librbd/api/Image.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Image.h"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/DeepCopyRequest.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/Trash.h"
#include "librbd/api/Utils.h"
#include "librbd/crypto/FormatRequest.h"
#include "librbd/crypto/LoadRequest.h"
#include "librbd/deep_copy/Handler.h"
#include "librbd/image/CloneRequest.h"
#include "librbd/image/RemoveRequest.h"
#include "librbd/image/PreRemoveRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include <boost/scope_exit.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Image: " << __func__ << ": "
using std::map;
using std::string;
using librados::snap_t;
namespace librbd {
namespace api {
namespace {
bool compare_by_pool(const librbd::linked_image_spec_t& lhs,
const librbd::linked_image_spec_t& rhs)
{
if (lhs.pool_id != rhs.pool_id) {
return lhs.pool_id < rhs.pool_id;
} else if (lhs.pool_namespace != rhs.pool_namespace) {
return lhs.pool_namespace < rhs.pool_namespace;
}
return false;
}
bool compare(const librbd::linked_image_spec_t& lhs,
const librbd::linked_image_spec_t& rhs)
{
if (lhs.pool_name != rhs.pool_name) {
return lhs.pool_name < rhs.pool_name;
} else if (lhs.pool_id != rhs.pool_id) {
return lhs.pool_id < rhs.pool_id;
} else if (lhs.pool_namespace != rhs.pool_namespace) {
return lhs.pool_namespace < rhs.pool_namespace;
} else if (lhs.image_name != rhs.image_name) {
return lhs.image_name < rhs.image_name;
} else if (lhs.image_id != rhs.image_id) {
return lhs.image_id < rhs.image_id;
}
return false;
}
template <typename I>
int pre_remove_image(librados::IoCtx& io_ctx, const std::string& image_id) {
I *image_ctx = I::create("", image_id, nullptr, io_ctx, false);
int r = image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r < 0) {
return r;
}
C_SaferCond ctx;
auto req = image::PreRemoveRequest<I>::create(image_ctx, false, &ctx);
req->send();
r = ctx.wait();
image_ctx->state->close();
return r;
}
} // anonymous namespace
template <typename I>
int64_t Image<I>::get_data_pool_id(I *ictx) {
if (ictx->data_ctx.is_valid()) {
return ictx->data_ctx.get_id();
}
int64_t pool_id;
int r = cls_client::get_data_pool(&ictx->md_ctx, ictx->header_oid, &pool_id);
if (r < 0) {
CephContext *cct = ictx->cct;
lderr(cct) << "error getting data pool ID: " << cpp_strerror(r) << dendl;
return r;
}
return pool_id;
}
template <typename I>
int Image<I>::get_op_features(I *ictx, uint64_t *op_features) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "image_ctx=" << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
*op_features = ictx->op_features;
return 0;
}
template <typename I>
int Image<I>::list_images(librados::IoCtx& io_ctx,
std::vector<image_spec_t> *images) {
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << "list " << &io_ctx << dendl;
int r;
images->clear();
if (io_ctx.get_namespace().empty()) {
bufferlist bl;
r = io_ctx.read(RBD_DIRECTORY, bl, 0, 0);
if (r == -ENOENT) {
return 0;
} else if (r < 0) {
lderr(cct) << "error listing v1 images: " << cpp_strerror(r) << dendl;
return r;
}
// V1 format images are in a tmap
if (bl.length()) {
auto p = bl.cbegin();
bufferlist header;
std::map<std::string, bufferlist> m;
decode(header, p);
decode(m, p);
for (auto& it : m) {
images->push_back({.id ="", .name = it.first});
}
}
}
// V2 format images
std::map<std::string, std::string> image_names_to_ids;
r = list_images_v2(io_ctx, &image_names_to_ids);
if (r < 0) {
lderr(cct) << "error listing v2 images: " << cpp_strerror(r) << dendl;
return r;
}
for (const auto& img_pair : image_names_to_ids) {
images->push_back({.id = img_pair.second,
.name = img_pair.first});
}
// include V2 images in a partially removed state
std::vector<librbd::trash_image_info_t> trash_images;
r = Trash<I>::list(io_ctx, trash_images, false);
if (r < 0 && r != -EOPNOTSUPP) {
lderr(cct) << "error listing trash images: " << cpp_strerror(r) << dendl;
return r;
}
for (const auto& trash_image : trash_images) {
if (trash_image.source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
images->push_back({.id = trash_image.id,
.name = trash_image.name});
}
}
return 0;
}
template <typename I>
int Image<I>::list_images_v2(librados::IoCtx& io_ctx, ImageNameToIds *images) {
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << "io_ctx=" << &io_ctx << dendl;
// new format images are accessed by class methods
int r;
int max_read = 1024;
string last_read = "";
do {
map<string, string> images_page;
r = cls_client::dir_list(&io_ctx, RBD_DIRECTORY, last_read, max_read,
&images_page);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing image in directory: "
<< cpp_strerror(r) << dendl;
return r;
} else if (r == -ENOENT) {
break;
}
for (map<string, string>::const_iterator it = images_page.begin();
it != images_page.end(); ++it) {
images->insert(*it);
}
if (!images_page.empty()) {
last_read = images_page.rbegin()->first;
}
r = images_page.size();
} while (r == max_read);
return 0;
}
template <typename I>
int Image<I>::get_parent(I *ictx,
librbd::linked_image_spec_t *parent_image,
librbd::snap_spec_t *parent_snap) {
auto cct = ictx->cct;
ldout(cct, 20) << "image_ctx=" << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
bool release_image_lock = false;
BOOST_SCOPE_EXIT_ALL(ictx, &release_image_lock) {
if (release_image_lock) {
ictx->parent->image_lock.unlock_shared();
}
};
// if a migration is in-progress, the true parent is the parent
// of the migration source image
auto parent = ictx->parent;
if (!ictx->migration_info.empty() && ictx->parent != nullptr) {
release_image_lock = true;
ictx->parent->image_lock.lock_shared();
parent = ictx->parent->parent;
}
if (parent == nullptr) {
return -ENOENT;
}
parent_image->pool_id = parent->md_ctx.get_id();
parent_image->pool_name = parent->md_ctx.get_pool_name();
parent_image->pool_namespace = parent->md_ctx.get_namespace();
std::shared_lock parent_image_locker{parent->image_lock};
parent_snap->id = parent->snap_id;
parent_snap->namespace_type = RBD_SNAP_NAMESPACE_TYPE_USER;
if (parent->snap_id != CEPH_NOSNAP) {
auto snap_info = parent->get_snap_info(parent->snap_id);
if (snap_info == nullptr) {
lderr(cct) << "error finding parent snap name: " << cpp_strerror(r)
<< dendl;
return -ENOENT;
}
parent_snap->namespace_type = static_cast<snap_namespace_type_t>(
cls::rbd::get_snap_namespace_type(snap_info->snap_namespace));
parent_snap->name = snap_info->name;
}
parent_image->image_id = parent->id;
parent_image->image_name = parent->name;
parent_image->trash = true;
librbd::trash_image_info_t trash_info;
r = Trash<I>::get(parent->md_ctx, parent->id, &trash_info);
if (r == -ENOENT || r == -EOPNOTSUPP) {
parent_image->trash = false;
} else if (r < 0) {
lderr(cct) << "error looking up trash status: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Image<I>::list_children(I *ictx,
std::vector<librbd::linked_image_spec_t> *images) {
images->clear();
return list_descendants(ictx, 1, images);
}
template <typename I>
int Image<I>::list_children(I *ictx,
const cls::rbd::ParentImageSpec &parent_spec,
std::vector<librbd::linked_image_spec_t> *images) {
images->clear();
return list_descendants(ictx, parent_spec, 1, images);
}
template <typename I>
int Image<I>::list_descendants(
librados::IoCtx& io_ctx, const std::string &image_id,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images) {
ImageCtx *ictx = new librbd::ImageCtx("", image_id, nullptr,
io_ctx, true);
CephContext *cct = ictx->cct;
int r = ictx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r < 0) {
if (r == -ENOENT) {
return 0;
}
lderr(cct) << "failed to open descendant " << image_id
<< " from pool " << io_ctx.get_pool_name() << ":"
<< cpp_strerror(r) << dendl;
return r;
}
r = list_descendants(ictx, max_level, images);
int r1 = ictx->state->close();
if (r1 < 0) {
lderr(cct) << "error when closing descendant " << image_id
<< " from pool " << io_ctx.get_pool_name() << ":"
<< cpp_strerror(r1) << dendl;
}
return r;
}
template <typename I>
int Image<I>::list_descendants(
I *ictx, const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images) {
std::shared_lock l{ictx->image_lock};
std::vector<librados::snap_t> snap_ids;
if (ictx->snap_id != CEPH_NOSNAP) {
snap_ids.push_back(ictx->snap_id);
} else {
snap_ids = ictx->snaps;
}
for (auto snap_id : snap_ids) {
cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
ictx->md_ctx.get_namespace(),
ictx->id, snap_id};
int r = list_descendants(ictx, parent_spec, max_level, images);
if (r < 0) {
return r;
}
}
return 0;
}
template <typename I>
int Image<I>::list_descendants(
I *ictx, const cls::rbd::ParentImageSpec &parent_spec,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images) {
auto child_max_level = max_level;
if (child_max_level) {
if (child_max_level == 0) {
return 0;
}
(*child_max_level)--;
}
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
// no children for non-layered or old format image
if (!ictx->test_features(RBD_FEATURE_LAYERING, ictx->image_lock)) {
return 0;
}
librados::Rados rados(ictx->md_ctx);
// search all pools for clone v1 children dependent on this snapshot
std::list<std::pair<int64_t, std::string> > pools;
int r = rados.pool_list2(pools);
if (r < 0) {
lderr(cct) << "error listing pools: " << cpp_strerror(r) << dendl;
return r;
}
for (auto& it : pools) {
int64_t base_tier;
r = rados.pool_get_base_tier(it.first, &base_tier);
if (r == -ENOENT) {
ldout(cct, 1) << "pool " << it.second << " no longer exists" << dendl;
continue;
} else if (r < 0) {
lderr(cct) << "error retrieving base tier for pool " << it.second
<< dendl;
return r;
}
if (it.first != base_tier) {
// pool is a cache; skip it
continue;
}
IoCtx ioctx;
r = librbd::util::create_ioctx(
ictx->md_ctx, "child image", it.first, {}, &ioctx);
if (r == -ENOENT) {
continue;
} else if (r < 0) {
return r;
}
std::set<std::string> image_ids;
r = cls_client::get_children(&ioctx, RBD_CHILDREN, parent_spec,
image_ids);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error reading list of children from pool " << it.second
<< dendl;
return r;
}
for (auto& image_id : image_ids) {
images->push_back({
it.first, "", ictx->md_ctx.get_namespace(), image_id, "", false});
r = list_descendants(ioctx, image_id, child_max_level, images);
if (r < 0) {
return r;
}
}
}
// retrieve clone v2 children attached to this snapshot
IoCtx parent_io_ctx;
r = librbd::util::create_ioctx(
ictx->md_ctx, "parent image",parent_spec.pool_id,
parent_spec.pool_namespace, &parent_io_ctx);
if (r < 0) {
return r;
}
cls::rbd::ChildImageSpecs child_images;
r = cls_client::children_list(
&parent_io_ctx, librbd::util::header_name(parent_spec.image_id),
parent_spec.snap_id, &child_images);
if (r < 0 && r != -ENOENT && r != -EOPNOTSUPP) {
lderr(cct) << "error retrieving children: " << cpp_strerror(r) << dendl;
return r;
}
for (auto& child_image : child_images) {
images->push_back({
child_image.pool_id, "", child_image.pool_namespace,
child_image.image_id, "", false});
if (!child_max_level || *child_max_level > 0) {
IoCtx ioctx;
r = librbd::util::create_ioctx(
ictx->md_ctx, "child image", child_image.pool_id,
child_image.pool_namespace, &ioctx);
if (r == -ENOENT) {
continue;
} else if (r < 0) {
return r;
}
r = list_descendants(ioctx, child_image.image_id, child_max_level,
images);
if (r < 0) {
return r;
}
}
}
// batch lookups by pool + namespace
std::sort(images->begin(), images->end(), compare_by_pool);
int64_t child_pool_id = -1;
librados::IoCtx child_io_ctx;
std::map<std::string, std::pair<std::string, bool>> child_image_id_to_info;
for (auto& image : *images) {
if (child_pool_id == -1 || child_pool_id != image.pool_id ||
child_io_ctx.get_namespace() != image.pool_namespace) {
r = librbd::util::create_ioctx(
ictx->md_ctx, "child image", image.pool_id, image.pool_namespace,
&child_io_ctx);
if (r == -ENOENT) {
image.pool_name = "";
image.image_name = "";
continue;
} else if (r < 0) {
return r;
}
child_pool_id = image.pool_id;
child_image_id_to_info.clear();
std::map<std::string, std::string> image_names_to_ids;
r = list_images_v2(child_io_ctx, &image_names_to_ids);
if (r < 0) {
lderr(cct) << "error listing v2 images: " << cpp_strerror(r) << dendl;
return r;
}
for (auto& [name, id] : image_names_to_ids) {
child_image_id_to_info.insert({id, {name, false}});
}
std::vector<librbd::trash_image_info_t> trash_images;
r = Trash<I>::list(child_io_ctx, trash_images, false);
if (r < 0 && r != -EOPNOTSUPP) {
lderr(cct) << "error listing trash images: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto& it : trash_images) {
child_image_id_to_info.insert({
it.id,
{it.name,
it.source == RBD_TRASH_IMAGE_SOURCE_REMOVING ? false : true}});
}
}
auto it = child_image_id_to_info.find(image.image_id);
if (it == child_image_id_to_info.end()) {
lderr(cct) << "error looking up name for image id "
<< image.image_id << " in pool "
<< child_io_ctx.get_pool_name()
<< (image.pool_namespace.empty() ?
"" : "/" + image.pool_namespace) << dendl;
return -ENOENT;
}
image.pool_name = child_io_ctx.get_pool_name();
image.image_name = it->second.first;
image.trash = it->second.second;
}
// final sort by pool + image names
std::sort(images->begin(), images->end(), compare);
return 0;
}
template <typename I>
int Image<I>::deep_copy(I *src, librados::IoCtx& dest_md_ctx,
const char *destname, ImageOptions& opts,
ProgressContext &prog_ctx) {
CephContext *cct = (CephContext *)dest_md_ctx.cct();
ldout(cct, 20) << src->name
<< (src->snap_name.length() ? "@" + src->snap_name : "")
<< " -> " << destname << " opts = " << opts << dendl;
uint64_t features;
uint64_t src_size;
{
std::shared_lock image_locker{src->image_lock};
if (!src->migration_info.empty()) {
lderr(cct) << "cannot deep copy migrating image" << dendl;
return -EBUSY;
}
features = src->features;
src_size = src->get_image_size(src->snap_id);
}
uint64_t format = 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, format);
}
if (format == 1) {
lderr(cct) << "old format not supported for destination image" << dendl;
return -EINVAL;
}
uint64_t stripe_unit = src->stripe_unit;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &stripe_unit) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
}
uint64_t stripe_count = src->stripe_count;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &stripe_count) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
}
uint64_t order = src->order;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) {
opts.set(RBD_IMAGE_OPTION_ORDER, order);
}
if (opts.get(RBD_IMAGE_OPTION_FEATURES, &features) != 0) {
opts.set(RBD_IMAGE_OPTION_FEATURES, features);
}
if (features & ~RBD_FEATURES_ALL) {
lderr(cct) << "librbd does not support requested features" << dendl;
return -ENOSYS;
}
uint64_t flatten = 0;
if (opts.get(RBD_IMAGE_OPTION_FLATTEN, &flatten) == 0) {
opts.unset(RBD_IMAGE_OPTION_FLATTEN);
}
cls::rbd::ParentImageSpec parent_spec;
if (flatten > 0) {
parent_spec.pool_id = -1;
} else {
std::shared_lock image_locker{src->image_lock};
// use oldest snapshot or HEAD for parent spec
if (!src->snap_info.empty()) {
parent_spec = src->snap_info.begin()->second.parent.spec;
} else {
parent_spec = src->parent_md.spec;
}
}
int r;
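  // With no parent (or when flattening was requested) the destination is
  // created as a standalone image; otherwise it is cloned from the source's
  // parent so the copy keeps the parent link.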
if (parent_spec.pool_id == -1) {
r = create(dest_md_ctx, destname, "", src_size, opts, "", "", false);
} else {
librados::IoCtx parent_io_ctx;
r = librbd::util::create_ioctx(
src->md_ctx, "parent image", parent_spec.pool_id,
parent_spec.pool_namespace, &parent_io_ctx);
if (r < 0) {
return r;
}
ConfigProxy config{cct->_conf};
api::Config<I>::apply_pool_overrides(dest_md_ctx, &config);
C_SaferCond ctx;
std::string dest_id = librbd::util::generate_image_id(dest_md_ctx);
auto *req = image::CloneRequest<I>::create(
config, parent_io_ctx, parent_spec.image_id, "", {}, parent_spec.snap_id,
dest_md_ctx, destname, dest_id, opts, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"", "", src->op_work_queue, &ctx);
req->send();
r = ctx.wait();
}
if (r < 0) {
lderr(cct) << "header creation failed" << dendl;
return r;
}
opts.set(RBD_IMAGE_OPTION_ORDER, static_cast<uint64_t>(order));
auto dest = new I(destname, "", nullptr, dest_md_ctx, false);
r = dest->state->open(0);
if (r < 0) {
lderr(cct) << "failed to read newly created header" << dendl;
return r;
}
C_SaferCond lock_ctx;
{
std::unique_lock locker{dest->owner_lock};
if (dest->exclusive_lock == nullptr ||
dest->exclusive_lock->is_lock_owner()) {
lock_ctx.complete(0);
} else {
dest->exclusive_lock->acquire_lock(&lock_ctx);
}
}
r = lock_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to request exclusive lock: " << cpp_strerror(r)
<< dendl;
dest->state->close();
return r;
}
r = deep_copy(src, dest, flatten > 0, prog_ctx);
int close_r = dest->state->close();
if (r == 0 && close_r < 0) {
r = close_r;
}
return r;
}
template <typename I>
int Image<I>::deep_copy(I *src, I *dest, bool flatten,
ProgressContext &prog_ctx) {
// ensure previous writes are visible to dest
C_SaferCond flush_ctx;
{
std::shared_lock owner_locker{src->owner_lock};
auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, src,
io::AIO_TYPE_FLUSH);
auto req = io::ImageDispatchSpec::create_flush(
*src, io::IMAGE_DISPATCH_LAYER_INTERNAL_START,
aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
req->send();
}
int r = flush_ctx.wait();
if (r < 0) {
return r;
}
librados::snap_t snap_id_start = 0;
librados::snap_t snap_id_end;
{
std::shared_lock image_locker{src->image_lock};
snap_id_end = src->snap_id;
}
AsioEngine asio_engine(src->md_ctx);
C_SaferCond cond;
SnapSeqs snap_seqs;
deep_copy::ProgressHandler progress_handler{&prog_ctx};
auto req = DeepCopyRequest<I>::create(
src, dest, snap_id_start, snap_id_end, 0U, flatten, boost::none,
asio_engine.get_work_queue(), &snap_seqs, &progress_handler, &cond);
req->send();
r = cond.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Image<I>::snap_set(I *ictx,
const cls::rbd::SnapshotNamespace &snap_namespace,
const char *snap_name) {
ldout(ictx->cct, 20) << "snap_set " << ictx << " snap = "
<< (snap_name ? snap_name : "NULL") << dendl;
// ignore return value, since we may be set to a non-existent
// snapshot and the user is trying to fix that
ictx->state->refresh_if_required();
uint64_t snap_id = CEPH_NOSNAP;
std::string name(snap_name == nullptr ? "" : snap_name);
if (!name.empty()) {
std::shared_lock image_locker{ictx->image_lock};
snap_id = ictx->get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
return -ENOENT;
}
}
return snap_set(ictx, snap_id);
}
template <typename I>
int Image<I>::snap_set(I *ictx, uint64_t snap_id) {
ldout(ictx->cct, 20) << "snap_set " << ictx << " "
<< "snap_id=" << snap_id << dendl;
// ignore return value, since we may be set to a non-existent
// snapshot and the user is trying to fix that
ictx->state->refresh_if_required();
C_SaferCond ctx;
ictx->state->snap_set(snap_id, &ctx);
int r = ctx.wait();
if (r < 0) {
if (r != -ENOENT) {
lderr(ictx->cct) << "failed to " << (snap_id == CEPH_NOSNAP ? "un" : "")
<< "set snapshot: " << cpp_strerror(r) << dendl;
}
return r;
}
return 0;
}
template <typename I>
int Image<I>::remove(IoCtx& io_ctx, const std::string &image_name,
ProgressContext& prog_ctx)
{
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "name=" << image_name << dendl;
// look up the V2 image id based on the image name
std::string image_id;
int r = cls_client::dir_get_id(&io_ctx, RBD_DIRECTORY, image_name,
&image_id);
if (r == -ENOENT) {
// check if it already exists in trash from an aborted trash remove attempt
std::vector<trash_image_info_t> trash_entries;
r = Trash<I>::list(io_ctx, trash_entries, false);
if (r < 0) {
return r;
}
for (auto& entry : trash_entries) {
if (entry.name == image_name &&
entry.source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
cls::rbd::TrashImageSpec spec;
r = cls_client::trash_get(&io_ctx, entry.id, &spec);
if (r < 0) {
lderr(cct) << "error getting image id " << entry.id
<< " info from trash: " << cpp_strerror(r) << dendl;
return r;
}
if (spec.state == cls::rbd::TRASH_IMAGE_STATE_MOVING) {
r = Trash<I>::move(io_ctx, entry.source, entry.name, entry.id, 0);
if (r < 0) {
return r;
}
}
return Trash<I>::remove(io_ctx, entry.id, true, prog_ctx);
}
}
// fall-through if we failed to locate the image in the V2 directory and
// trash
} else if (r < 0) {
lderr(cct) << "failed to retrieve image id: " << cpp_strerror(r) << dendl;
return r;
} else {
// attempt to move the image to the trash (and optionally immediately
// delete the image)
ConfigProxy config(cct->_conf);
Config<I>::apply_pool_overrides(io_ctx, &config);
rbd_trash_image_source_t trash_image_source =
RBD_TRASH_IMAGE_SOURCE_REMOVING;
uint64_t expire_seconds = 0;
if (config.get_val<bool>("rbd_move_to_trash_on_remove")) {
// keep the image in the trash upon remove requests
trash_image_source = RBD_TRASH_IMAGE_SOURCE_USER;
expire_seconds = config.get_val<uint64_t>(
"rbd_move_to_trash_on_remove_expire_seconds");
} else {
// attempt to pre-validate the removal before moving to trash and
// removing
r = pre_remove_image<I>(io_ctx, image_id);
if (r == -ECHILD) {
if (config.get_val<bool>("rbd_move_parent_to_trash_on_remove")) {
// keep the image in the trash until the last child is removed
trash_image_source = RBD_TRASH_IMAGE_SOURCE_USER_PARENT;
} else {
lderr(cct) << "image has snapshots - not removing" << dendl;
return -ENOTEMPTY;
}
} else if (r < 0 && r != -ENOENT) {
return r;
}
}
r = Trash<I>::move(io_ctx, trash_image_source, image_name, image_id,
expire_seconds);
if (r >= 0) {
if (trash_image_source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
// proceed with attempting to immediately remove the image
r = Trash<I>::remove(io_ctx, image_id, true, prog_ctx);
if (r == -ENOTEMPTY || r == -EBUSY || r == -EMLINK) {
// best-effort try to restore the image if the removal
// failed for possible expected reasons
Trash<I>::restore(io_ctx, {cls::rbd::TRASH_IMAGE_SOURCE_REMOVING},
image_id, image_name);
}
}
return r;
} else if (r < 0 && r != -EOPNOTSUPP) {
return r;
}
// fall-through if trash isn't supported
}
AsioEngine asio_engine(io_ctx);
// might be a V1 image format that cannot be moved to the trash
// and would not have been listed in the V2 directory -- or the OSDs
// are too old and don't support the trash feature
C_SaferCond cond;
auto req = librbd::image::RemoveRequest<I>::create(
io_ctx, image_name, "", false, false, prog_ctx,
asio_engine.get_work_queue(), &cond);
req->send();
return cond.wait();
}
template <typename I>
int Image<I>::flatten_children(I *ictx, const char* snap_name,
ProgressContext& pctx) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "children flatten " << ictx->name << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
snap_name);
cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
ictx->md_ctx.get_namespace(),
ictx->id, snap_id};
std::vector<librbd::linked_image_spec_t> child_images;
r = list_children(ictx, parent_spec, &child_images);
if (r < 0) {
return r;
}
size_t size = child_images.size();
if (size == 0) {
return 0;
}
librados::IoCtx child_io_ctx;
int64_t child_pool_id = -1;
size_t i = 0;
for (auto &child_image : child_images){
std::string pool = child_image.pool_name;
if (child_pool_id == -1 ||
child_pool_id != child_image.pool_id ||
child_io_ctx.get_namespace() != child_image.pool_namespace) {
r = librbd::util::create_ioctx(
ictx->md_ctx, "child image", child_image.pool_id,
child_image.pool_namespace, &child_io_ctx);
if (r < 0) {
return r;
}
child_pool_id = child_image.pool_id;
}
ImageCtx *imctx = new ImageCtx("", child_image.image_id, nullptr,
child_io_ctx, false);
r = imctx->state->open(0);
if (r < 0) {
lderr(cct) << "error opening image: " << cpp_strerror(r) << dendl;
return r;
}
if ((imctx->features & RBD_FEATURE_DEEP_FLATTEN) == 0 &&
!imctx->snaps.empty()) {
lderr(cct) << "snapshot in-use by " << pool << "/" << imctx->name
<< dendl;
imctx->state->close();
return -EBUSY;
}
librbd::NoOpProgressContext prog_ctx;
r = imctx->operations->flatten(prog_ctx);
if (r < 0) {
lderr(cct) << "error flattening image: " << pool << "/"
<< (child_image.pool_namespace.empty() ?
"" : "/" + child_image.pool_namespace)
<< child_image.image_name << cpp_strerror(r) << dendl;
imctx->state->close();
return r;
}
r = imctx->state->close();
if (r < 0) {
lderr(cct) << "failed to close image: " << cpp_strerror(r) << dendl;
return r;
}
pctx.update_progress(++i, size);
ceph_assert(i <= size);
}
return 0;
}
template <typename I>
int Image<I>::encryption_format(I* ictx, encryption_format_t format,
encryption_options_t opts, size_t opts_size,
bool c_api) {
crypto::EncryptionFormat<I>* result_format;
auto r = util::create_encryption_format(
ictx->cct, format, opts, opts_size, c_api, &result_format);
if (r != 0) {
return r;
}
C_SaferCond cond;
auto req = librbd::crypto::FormatRequest<I>::create(
ictx, std::unique_ptr<crypto::EncryptionFormat<I>>(result_format),
&cond);
req->send();
return cond.wait();
}
template <typename I>
int Image<I>::encryption_load(I* ictx, const encryption_spec_t *specs,
size_t spec_count, bool c_api) {
std::vector<std::unique_ptr<crypto::EncryptionFormat<I>>> formats;
for (size_t i = 0; i < spec_count; ++i) {
crypto::EncryptionFormat<I>* result_format;
auto r = util::create_encryption_format(
ictx->cct, specs[i].format, specs[i].opts, specs[i].opts_size,
c_api, &result_format);
if (r != 0) {
return r;
}
formats.emplace_back(result_format);
}
C_SaferCond cond;
auto req = librbd::crypto::LoadRequest<I>::create(
ictx, std::move(formats), &cond);
req->send();
return cond.wait();
}
} // namespace api
} // namespace librbd
template class librbd::api::Image<librbd::ImageCtx>;
| 30,982 | 29.495079 | 80 | cc |
null | ceph-main/src/librbd/api/Image.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_API_IMAGE_H
#define LIBRBD_API_IMAGE_H
#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"
#include "librbd/Types.h"
#include <map>
#include <set>
#include <string>
namespace librbd {
class ImageOptions;
class ProgressContext;
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Image {
typedef std::map<std::string, std::string> ImageNameToIds;
static int64_t get_data_pool_id(ImageCtxT *ictx);
static int get_op_features(ImageCtxT *ictx, uint64_t *op_features);
static int list_images(librados::IoCtx& io_ctx,
std::vector<image_spec_t> *images);
static int list_images_v2(librados::IoCtx& io_ctx,
ImageNameToIds *images);
static int get_parent(ImageCtxT *ictx,
librbd::linked_image_spec_t *parent_image,
librbd::snap_spec_t *parent_snap);
static int list_children(ImageCtxT *ictx,
std::vector<librbd::linked_image_spec_t> *images);
static int list_children(ImageCtxT *ictx,
const cls::rbd::ParentImageSpec &parent_spec,
std::vector<librbd::linked_image_spec_t> *images);
static int list_descendants(IoCtx& io_ctx, const std::string &image_id,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images);
static int list_descendants(ImageCtxT *ictx,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images);
static int list_descendants(ImageCtxT *ictx,
const cls::rbd::ParentImageSpec &parent_spec,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images);
static int deep_copy(ImageCtxT *ictx, librados::IoCtx& dest_md_ctx,
const char *destname, ImageOptions& opts,
ProgressContext &prog_ctx);
static int deep_copy(ImageCtxT *src, ImageCtxT *dest, bool flatten,
ProgressContext &prog_ctx);
static int snap_set(ImageCtxT *ictx,
const cls::rbd::SnapshotNamespace &snap_namespace,
const char *snap_name);
static int snap_set(ImageCtxT *ictx, uint64_t snap_id);
static int remove(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext& prog_ctx);
static int flatten_children(ImageCtxT *ictx, const char* snap_name, ProgressContext& pctx);
static int encryption_format(ImageCtxT *ictx, encryption_format_t format,
encryption_options_t opts, size_t opts_size,
bool c_api);
static int encryption_load(ImageCtxT *ictx, const encryption_spec_t *specs,
size_t spec_count, bool c_api);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Image<librbd::ImageCtx>;
#endif // LIBRBD_API_IMAGE_H
| 3,265 | 36.976744 | 93 | h |
null | ceph-main/src/librbd/api/Io.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Io.h"
#include "include/intarith.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "common/EventTrace.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/Types.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Io " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
template <typename I>
bool is_valid_io(I& image_ctx, io::AioCompletion* aio_comp) {
auto cct = image_ctx.cct;
if (!image_ctx.data_ctx.is_valid()) {
lderr(cct) << "missing data pool" << dendl;
aio_comp->fail(-ENODEV);
return false;
}
return true;
}
} // anonymous namespace
template <typename I>
ssize_t Io<I>::read(
I &image_ctx, uint64_t off, uint64_t len, io::ReadResult &&read_result,
int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_read(image_ctx, aio_comp, off, len, std::move(read_result), op_flags,
false);
return ctx.wait();
}
template <typename I>
ssize_t Io<I>::write(
I &image_ctx, uint64_t off, uint64_t len, bufferlist &&bl, int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_write(image_ctx, aio_comp, off, len, std::move(bl), op_flags, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
ssize_t Io<I>::discard(
I &image_ctx, uint64_t off, uint64_t len,
uint32_t discard_granularity_bytes) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_discard(image_ctx, aio_comp, off, len, discard_granularity_bytes, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
ssize_t Io<I>::write_same(
I &image_ctx, uint64_t off, uint64_t len, bufferlist &&bl, int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << ", data_len " << bl.length() << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_write_same(image_ctx, aio_comp, off, len, std::move(bl), op_flags, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
ssize_t Io<I>::write_zeroes(I& image_ctx, uint64_t off, uint64_t len,
int zero_flags, int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_write_zeroes(image_ctx, aio_comp, off, len, zero_flags, op_flags, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
ssize_t Io<I>::compare_and_write(
I &image_ctx, uint64_t off, uint64_t len, bufferlist &&cmp_bl,
bufferlist &&bl, uint64_t *mismatch_off, int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "compare_and_write ictx=" << &image_ctx << ", off="
<< off << ", " << "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_compare_and_write(image_ctx, aio_comp, off, len, std::move(cmp_bl),
std::move(bl), mismatch_off, op_flags, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
int Io<I>::flush(I &image_ctx) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << dendl;
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_flush(image_ctx, aio_comp, false);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Io<I>::aio_read(I &image_ctx, io::AioCompletion *aio_comp, uint64_t off,
uint64_t len, io::ReadResult &&read_result, int op_flags,
bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: read", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_READ);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << ", " << "flags=" << op_flags << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_read(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(read_result),
image_ctx.get_data_io_context(), op_flags, 0, trace);
req->send();
}
template <typename I>
void Io<I>::aio_write(I &image_ctx, io::AioCompletion *aio_comp, uint64_t off,
uint64_t len, bufferlist &&bl, int op_flags,
bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: write", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_WRITE);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << ", flags=" << op_flags << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(bl), op_flags, trace);
req->send();
}
template <typename I>
void Io<I>::aio_discard(I &image_ctx, io::AioCompletion *aio_comp, uint64_t off,
uint64_t len, uint32_t discard_granularity_bytes,
bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: discard", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_DISCARD);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_discard(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, discard_granularity_bytes, trace);
req->send();
}
template <typename I>
void Io<I>::aio_write_same(I &image_ctx, io::AioCompletion *aio_comp,
uint64_t off, uint64_t len, bufferlist &&bl,
int op_flags, bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: writesame", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_WRITESAME);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << ", data_len = " << bl.length() << ", "
<< "flags=" << op_flags << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_write_same(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(bl), op_flags, trace);
req->send();
}
template <typename I>
void Io<I>::aio_write_zeroes(I& image_ctx, io::AioCompletion *aio_comp,
uint64_t off, uint64_t len, int zero_flags,
int op_flags, bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: write_zeroes", &image_ctx.trace_endpoint);
trace.event("init");
}
auto io_type = io::AIO_TYPE_DISCARD;
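  // Thick-provisioned zeroing is materialized with write-same so the extents
  // get allocated; plain zeroing falls through to a discard with a zero
  // granularity further below.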
if ((zero_flags & RBD_WRITE_ZEROES_FLAG_THICK_PROVISION) != 0) {
zero_flags &= ~RBD_WRITE_ZEROES_FLAG_THICK_PROVISION;
io_type = io::AIO_TYPE_WRITESAME;
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io_type);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
// validate the supported flags
if (zero_flags != 0U) {
aio_comp->fail(-EINVAL);
return;
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
if (io_type == io::AIO_TYPE_WRITESAME) {
    // write-same needs to be aligned to its buffer but librbd has never forced
    // block alignment. Hide that requirement from the user by adding optional
    // prepend/append writes around the aligned write-same region.
const uint64_t data_length = 512;
uint64_t write_same_offset = p2roundup(off, data_length);
uint64_t write_same_offset_end = p2align(off + len, data_length);
uint64_t write_same_length = 0;
if (write_same_offset_end > write_same_offset) {
write_same_length = write_same_offset_end - write_same_offset;
}
uint64_t prepend_offset = off;
uint64_t prepend_length = write_same_offset - off;
uint64_t append_offset = write_same_offset + write_same_length;
uint64_t append_length = len - prepend_length - write_same_length;
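    // e.g. off=100, len=2000: prepend covers [100, 512), the aligned
    // write-same covers [512, 2048) and the append covers [2048, 2100).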
ldout(cct, 20) << "prepend_offset=" << prepend_offset << ", "
<< "prepend_length=" << prepend_length << ", "
<< "write_same_offset=" << write_same_offset << ", "
<< "write_same_length=" << write_same_length << ", "
<< "append_offset=" << append_offset << ", "
<< "append_length=" << append_length << dendl;
ceph_assert(prepend_length + write_same_length + append_length == len);
if (write_same_length <= data_length) {
// unaligned or small write-zeroes request -- use single write
bufferlist bl;
bl.append_zero(len);
aio_comp->aio_type = io::AIO_TYPE_WRITE;
auto req = io::ImageDispatchSpec::create_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(bl), op_flags, trace);
req->send();
return;
} else if (prepend_length == 0 && append_length == 0) {
// fully aligned -- use a single write-same image request
bufferlist bl;
bl.append_zero(data_length);
auto req = io::ImageDispatchSpec::create_write_same(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(bl), op_flags, trace);
req->send();
return;
}
// to reach this point, we need at least one prepend/append write along with
// a write-same -- therefore we will need to wrap the provided AioCompletion
auto request_count = 1;
if (prepend_length > 0) {
++request_count;
}
if (append_length > 0) {
++request_count;
}
ceph_assert(request_count > 1);
aio_comp->start_op();
aio_comp->set_request_count(request_count);
if (prepend_length > 0) {
bufferlist bl;
bl.append_zero(prepend_length);
Context* prepend_ctx = new io::C_AioRequest(aio_comp);
auto prepend_aio_comp = io::AioCompletion::create_and_start(
prepend_ctx, &image_ctx, io::AIO_TYPE_WRITE);
auto prepend_req = io::ImageDispatchSpec::create_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, prepend_aio_comp,
{{prepend_offset, prepend_length}}, io::ImageArea::DATA,
std::move(bl), op_flags, trace);
prepend_req->send();
}
if (append_length > 0) {
bufferlist bl;
bl.append_zero(append_length);
Context* append_ctx = new io::C_AioRequest(aio_comp);
auto append_aio_comp = io::AioCompletion::create_and_start(
append_ctx, &image_ctx, io::AIO_TYPE_WRITE);
auto append_req = io::ImageDispatchSpec::create_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, append_aio_comp,
{{append_offset, append_length}}, io::ImageArea::DATA,
std::move(bl), op_flags, trace);
append_req->send();
}
bufferlist bl;
bl.append_zero(data_length);
Context* write_same_ctx = new io::C_AioRequest(aio_comp);
auto write_same_aio_comp = io::AioCompletion::create_and_start(
write_same_ctx, &image_ctx, io::AIO_TYPE_WRITESAME);
auto req = io::ImageDispatchSpec::create_write_same(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, write_same_aio_comp,
{{write_same_offset, write_same_length}}, io::ImageArea::DATA,
std::move(bl), op_flags, trace);
req->send();
return;
}
// enable partial discard (zeroing) of objects
uint32_t discard_granularity_bytes = 0;
auto req = io::ImageDispatchSpec::create_discard(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, discard_granularity_bytes, trace);
req->send();
}
template <typename I>
void Io<I>::aio_compare_and_write(I &image_ctx, io::AioCompletion *aio_comp,
uint64_t off, uint64_t len,
bufferlist &&cmp_bl,
bufferlist &&bl, uint64_t *mismatch_off,
int op_flags, bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: compare_and_write", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx),
io::AIO_TYPE_COMPARE_AND_WRITE);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_compare_and_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(cmp_bl), std::move(bl),
mismatch_off, op_flags, trace);
req->send();
}
template <typename I>
void Io<I>::aio_flush(I &image_ctx, io::AioCompletion *aio_comp,
bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: flush", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_FLUSH);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_flush(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
io::FLUSH_SOURCE_USER, trace);
req->send();
}
} // namespace api
} // namespace librbd
template class librbd::api::Io<librbd::ImageCtx>;
| 17,870 | 31.142086 | 80 | cc |
null | ceph-main/src/librbd/api/Io.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_API_IO_H
#define LIBRBD_API_IO_H
#include "include/int_types.h"
#include "librbd/io/ReadResult.h"
namespace librbd {
struct ImageCtx;
namespace io { struct AioCompletion; }
namespace api {
template<typename ImageCtxT = ImageCtx>
struct Io {
static ssize_t read(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
io::ReadResult &&read_result, int op_flags);
static ssize_t write(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
bufferlist &&bl, int op_flags);
static ssize_t discard(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
uint32_t discard_granularity_bytes);
static ssize_t write_same(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
bufferlist &&bl, int op_flags);
static ssize_t write_zeroes(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
int zero_flags, int op_flags);
static ssize_t compare_and_write(ImageCtxT &image_ctx, uint64_t off,
uint64_t len, bufferlist &&cmp_bl,
bufferlist &&bl, uint64_t *mismatch_off,
int op_flags);
static int flush(ImageCtxT &image_ctx);
static void aio_read(ImageCtxT &image_ctx, io::AioCompletion *c, uint64_t off,
uint64_t len, io::ReadResult &&read_result, int op_flags,
bool native_async);
static void aio_write(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len, bufferlist &&bl,
int op_flags, bool native_async);
static void aio_discard(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len,
uint32_t discard_granularity_bytes,
bool native_async);
static void aio_write_same(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len, bufferlist &&bl,
int op_flags, bool native_async);
static void aio_write_zeroes(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len, int zero_flags,
int op_flags, bool native_async);
static void aio_compare_and_write(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len,
bufferlist &&cmp_bl, bufferlist &&bl,
uint64_t *mismatch_off, int op_flags,
bool native_async);
static void aio_flush(ImageCtxT &image_ctx, io::AioCompletion *c,
bool native_async);
};
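// Illustrative usage sketch (hypothetical caller code, not part of the
// original header):
//
//   librbd::ImageCtx* ictx = ...;           // an already opened image
//   librbd::io::AioCompletion* comp = ...;  // caller-owned completion
//   librbd::api::Io<>::aio_write_zeroes(*ictx, comp, off, len,
//                                       0 /* zero_flags */, 0 /* op_flags */,
//                                       false /* native_async */);
//
// The synchronous wrappers (read/write/discard/flush/...) block on an internal
// completion and return a negative errno on failure.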
} // namespace api
} // namespace librbd
extern template class librbd::api::Io<librbd::ImageCtx>;
#endif // LIBRBD_API_IO_H
| 3,000 | 44.469697 | 80 | h |
null | ceph-main/src/librbd/api/Migration.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Migration.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/Group.h"
#include "librbd/api/Image.h"
#include "librbd/api/Snapshot.h"
#include "librbd/api/Trash.h"
#include "librbd/deep_copy/Handler.h"
#include "librbd/deep_copy/ImageCopyRequest.h"
#include "librbd/deep_copy/MetadataCopyRequest.h"
#include "librbd/deep_copy/SnapshotCopyRequest.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/image/AttachChildRequest.h"
#include "librbd/image/AttachParentRequest.h"
#include "librbd/image/CloneRequest.h"
#include "librbd/image/CreateRequest.h"
#include "librbd/image/DetachChildRequest.h"
#include "librbd/image/DetachParentRequest.h"
#include "librbd/image/ListWatchersRequest.h"
#include "librbd/image/RemoveRequest.h"
#include "librbd/image/Types.h"
#include "librbd/internal.h"
#include "librbd/migration/FormatInterface.h"
#include "librbd/migration/OpenSourceImageRequest.h"
#include "librbd/migration/NativeFormat.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/mirror/EnableRequest.h"
#include <boost/scope_exit.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Migration: " << __func__ << ": "
namespace librbd {
inline bool operator==(const linked_image_spec_t& rhs,
const linked_image_spec_t& lhs) {
bool result = (rhs.pool_id == lhs.pool_id &&
rhs.pool_namespace == lhs.pool_namespace &&
rhs.image_id == lhs.image_id);
return result;
}
namespace api {
using util::create_rados_callback;
namespace {
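// MigrationProgressContext forwards progress callbacks to the user-supplied
// ProgressContext and also persists a "NN% complete" description into the
// image's migration header.  Header updates are coalesced: while one
// migration_set_state call is in flight, only the most recent description is
// kept pending.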
class MigrationProgressContext : public ProgressContext {
public:
MigrationProgressContext(librados::IoCtx& io_ctx,
const std::string &header_oid,
cls::rbd::MigrationState state,
ProgressContext *prog_ctx)
: m_io_ctx(io_ctx), m_header_oid(header_oid), m_state(state),
m_prog_ctx(prog_ctx), m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
m_lock(ceph::make_mutex(
util::unique_lock_name("librbd::api::MigrationProgressContext",
this))) {
ceph_assert(m_prog_ctx != nullptr);
}
~MigrationProgressContext() {
wait_for_in_flight_updates();
}
int update_progress(uint64_t offset, uint64_t total) override {
ldout(m_cct, 20) << "offset=" << offset << ", total=" << total << dendl;
m_prog_ctx->update_progress(offset, total);
std::string description = stringify(offset * 100 / total) + "% complete";
send_state_description_update(description);
return 0;
}
private:
librados::IoCtx& m_io_ctx;
std::string m_header_oid;
cls::rbd::MigrationState m_state;
ProgressContext *m_prog_ctx;
CephContext* m_cct;
mutable ceph::mutex m_lock;
ceph::condition_variable m_cond;
std::string m_state_description;
bool m_pending_update = false;
int m_in_flight_state_updates = 0;
void send_state_description_update(const std::string &description) {
std::lock_guard locker{m_lock};
if (description == m_state_description) {
return;
}
m_state_description = description;
if (m_in_flight_state_updates > 0) {
m_pending_update = true;
return;
}
set_state_description();
}
void set_state_description() {
ldout(m_cct, 20) << "state_description=" << m_state_description << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
librados::ObjectWriteOperation op;
cls_client::migration_set_state(&op, m_state, m_state_description);
using klass = MigrationProgressContext;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_set_state_description>(this);
int r = m_io_ctx.aio_operate(m_header_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
m_in_flight_state_updates++;
}
void handle_set_state_description(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
m_in_flight_state_updates--;
if (r < 0) {
lderr(m_cct) << "failed to update migration state: " << cpp_strerror(r)
<< dendl;
} else if (m_pending_update) {
set_state_description();
m_pending_update = false;
} else {
m_cond.notify_all();
}
}
void wait_for_in_flight_updates() {
std::unique_lock locker{m_lock};
ldout(m_cct, 20) << "m_in_flight_state_updates="
<< m_in_flight_state_updates << dendl;
m_pending_update = false;
m_cond.wait(locker, [this] { return m_in_flight_state_updates <= 0; });
}
};
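// Locate an image that was moved to the trash by the given source (for
// migration this is RBD_TRASH_IMAGE_SOURCE_MIGRATION) and return its id.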
int trash_search(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, std::string *image_id) {
std::vector<trash_image_info_t> entries;
int r = Trash<>::list(io_ctx, entries, false);
if (r < 0) {
return r;
}
for (auto &entry : entries) {
if (entry.source == source && entry.name == image_name) {
*image_id = entry.id;
return 0;
}
}
return -ENOENT;
}
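// Open the image referenced by image_name (falling back to the migration
// trash), inspect its migration header to decide whether it is the source or
// the destination, and then open the paired image as well.  *src_image_ctx may
// legitimately remain null when the migration uses an external source spec
// (import case).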
template <typename I>
int open_images(librados::IoCtx& io_ctx, const std::string &image_name,
I **src_image_ctx, I **dst_image_ctx,
cls::rbd::MigrationSpec* src_migration_spec,
cls::rbd::MigrationSpec* dst_migration_spec,
bool skip_open_dst_image) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
*src_image_ctx = nullptr;
*dst_image_ctx = nullptr;
ldout(cct, 10) << "trying to open image by name " << io_ctx.get_pool_name()
<< "/" << image_name << dendl;
auto image_ctx = I::create(image_name, "", nullptr, io_ctx, false);
int r = image_ctx->state->open(OPEN_FLAG_IGNORE_MIGRATING);
if (r == -ENOENT) {
// presume user passed the source image so we need to search the trash
ldout(cct, 10) << "Source image is not found. Trying trash" << dendl;
std::string src_image_id;
r = trash_search(io_ctx, RBD_TRASH_IMAGE_SOURCE_MIGRATION, image_name,
&src_image_id);
if (r < 0) {
lderr(cct) << "failed to determine image id: " << cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 10) << "source image id from trash: " << src_image_id << dendl;
image_ctx = I::create(image_name, src_image_id, nullptr, io_ctx, false);
r = image_ctx->state->open(OPEN_FLAG_IGNORE_MIGRATING);
}
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
return r;
}
image_ctx = nullptr;
}
BOOST_SCOPE_EXIT_TPL(&r, &image_ctx, src_image_ctx, dst_image_ctx) {
if (r != 0) {
if (*src_image_ctx != nullptr) {
(*src_image_ctx)->state->close();
}
if (*dst_image_ctx != nullptr) {
(*dst_image_ctx)->state->close();
}
if (image_ctx != nullptr) {
image_ctx->state->close();
}
}
} BOOST_SCOPE_EXIT_END;
// The opened image is either a source or destination
cls::rbd::MigrationSpec migration_spec;
r = cls_client::migration_get(&image_ctx->md_ctx, image_ctx->header_oid,
&migration_spec);
if (r < 0) {
lderr(cct) << "failed retrieving migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 10) << "migration spec: " << migration_spec << dendl;
if (migration_spec.header_type == cls::rbd::MIGRATION_HEADER_TYPE_SRC) {
ldout(cct, 10) << "the source image is opened" << dendl;
*src_image_ctx = image_ctx;
*src_migration_spec = migration_spec;
image_ctx = nullptr;
} else if (migration_spec.header_type ==
cls::rbd::MIGRATION_HEADER_TYPE_DST) {
ldout(cct, 10) << "the destination image is opened" << dendl;
std::string image_id = image_ctx->id;
image_ctx->state->close();
image_ctx = I::create(image_name, image_id, nullptr, io_ctx, false);
if (!skip_open_dst_image) {
ldout(cct, 10) << "re-opening the destination image" << dendl;
r = image_ctx->state->open(0);
if (r < 0) {
image_ctx = nullptr;
lderr(cct) << "failed to re-open destination image: " << cpp_strerror(r)
<< dendl;
return r;
}
}
*dst_image_ctx = image_ctx;
*dst_migration_spec = migration_spec;
image_ctx = nullptr;
} else {
lderr(cct) << "unexpected migration header type: "
<< migration_spec.header_type << dendl;
r = -EINVAL;
return r;
}
// attempt to open the other (paired) image
I** other_image_ctx = nullptr;
std::string other_image_type;
std::string other_image_name;
std::string other_image_id;
cls::rbd::MigrationSpec* other_migration_spec = nullptr;
librados::IoCtx other_io_ctx;
int flags = OPEN_FLAG_IGNORE_MIGRATING;
if (*src_image_ctx == nullptr &&
dst_migration_spec->source_spec.empty()) {
r = util::create_ioctx(io_ctx, "source image", migration_spec.pool_id,
migration_spec.pool_namespace, &other_io_ctx);
if (r < 0) {
return r;
}
other_image_type = "source";
other_image_ctx = src_image_ctx;
other_migration_spec = src_migration_spec;
other_image_name = migration_spec.image_name;
other_image_id = migration_spec.image_id;
if (other_image_id.empty()) {
ldout(cct, 20) << "trying to open v1 image by name "
<< other_io_ctx.get_pool_name() << "/"
<< other_image_name << dendl;
flags |= OPEN_FLAG_OLD_FORMAT;
} else {
ldout(cct, 20) << "trying to open v2 image by id "
<< other_io_ctx.get_pool_name() << "/"
<< other_image_id << dendl;
}
*src_image_ctx = I::create(other_image_name, other_image_id, nullptr,
other_io_ctx, false);
} else if (*dst_image_ctx == nullptr) {
r = util::create_ioctx(io_ctx, "destination image", migration_spec.pool_id,
migration_spec.pool_namespace, &other_io_ctx);
if (r < 0) {
return r;
}
other_image_name = migration_spec.image_name;
if (skip_open_dst_image) {
other_image_id = migration_spec.image_id;
} else {
other_image_type = "destination";
other_image_ctx = dst_image_ctx;
other_migration_spec = dst_migration_spec;
other_image_id = migration_spec.image_id;
}
*dst_image_ctx = I::create(other_image_name, other_image_id, nullptr,
other_io_ctx, false);
}
if (other_image_ctx != nullptr) {
r = (*other_image_ctx)->state->open(flags);
if (r < 0) {
lderr(cct) << "failed to open " << other_image_type << " image "
<< other_io_ctx.get_pool_name()
<< "/" << (other_image_id.empty() ?
other_image_name : other_image_id)
<< ": " << cpp_strerror(r) << dendl;
*other_image_ctx = nullptr;
return r;
}
r = cls_client::migration_get(&(*other_image_ctx)->md_ctx,
(*other_image_ctx)->header_oid,
other_migration_spec);
if (r < 0) {
lderr(cct) << "failed retrieving migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 20) << other_image_type << " migration spec: "
<< *other_migration_spec << dendl;
}
if (!skip_open_dst_image) {
// legacy clients will only store status in the source images
if (dst_migration_spec->source_spec.empty()) {
dst_migration_spec->state = migration_spec.state;
dst_migration_spec->state_description =
migration_spec.state_description;
}
}
return 0;
}
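// Presents a multi-step operation (e.g. revert + removal during abort) as a
// single progress range by treating every step as an equal slice of the total.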
class SteppedProgressContext : public ProgressContext {
public:
SteppedProgressContext(ProgressContext* progress_ctx, size_t total_steps)
: m_progress_ctx(progress_ctx), m_total_steps(total_steps) {
}
void next_step() {
ceph_assert(m_current_step < m_total_steps);
++m_current_step;
}
int update_progress(uint64_t object_number,
uint64_t object_count) override {
return m_progress_ctx->update_progress(
object_number + (object_count * (m_current_step - 1)),
object_count * m_total_steps);
}
private:
ProgressContext* m_progress_ctx;
size_t m_total_steps;
size_t m_current_step = 1;
};
} // anonymous namespace
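// The public entry points below back the "rbd migration" workflow:
// prepare()/prepare_import(), then execute(), then commit() -- or abort() to
// roll back an unfinished migration.  Illustrative caller sketch (hypothetical
// code, not part of librbd):
//
//   librbd::ImageOptions opts;
//   librbd::NoOpProgressContext prog_ctx;
//   librbd::api::Migration<>::prepare(src_io_ctx, "img", dst_io_ctx, "img", opts);
//   librbd::api::Migration<>::execute(dst_io_ctx, "img", prog_ctx);
//   librbd::api::Migration<>::commit(dst_io_ctx, "img", prog_ctx);
//
// execute()/commit()/abort() accept either image's pool/name (the source may
// already live in the trash); open_images() resolves the pair from the
// migration header.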
template <typename I>
int Migration<I>::prepare(librados::IoCtx& io_ctx,
const std::string &image_name,
librados::IoCtx& dest_io_ctx,
const std::string &dest_image_name_,
ImageOptions& opts) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
std::string dest_image_name = dest_image_name_.empty() ? image_name :
dest_image_name_;
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << " -> "
<< dest_io_ctx.get_pool_name() << "/" << dest_image_name
<< ", opts=" << opts << dendl;
auto src_image_ctx = I::create(image_name, "", nullptr, io_ctx, false);
int r = src_image_ctx->state->open(0);
if (r < 0) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
return r;
}
BOOST_SCOPE_EXIT_TPL(src_image_ctx) {
src_image_ctx->state->close();
} BOOST_SCOPE_EXIT_END;
std::list<obj_watch_t> watchers;
int flags = librbd::image::LIST_WATCHERS_FILTER_OUT_MY_INSTANCE |
librbd::image::LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES;
C_SaferCond on_list_watchers;
auto list_watchers_request = librbd::image::ListWatchersRequest<I>::create(
*src_image_ctx, flags, &watchers, &on_list_watchers);
list_watchers_request->send();
r = on_list_watchers.wait();
if (r < 0) {
lderr(cct) << "failed listing watchers:" << cpp_strerror(r) << dendl;
return r;
}
if (!watchers.empty()) {
lderr(cct) << "image has watchers - not migrating" << dendl;
return -EBUSY;
}
uint64_t format = 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, format);
}
if (format != 2) {
lderr(cct) << "unsupported destination image format: " << format << dendl;
return -EINVAL;
}
uint64_t features;
{
std::shared_lock image_locker{src_image_ctx->image_lock};
features = src_image_ctx->features;
}
opts.get(RBD_IMAGE_OPTION_FEATURES, &features);
if ((features & ~RBD_FEATURES_ALL) != 0) {
lderr(cct) << "librbd does not support requested features" << dendl;
return -ENOSYS;
}
opts.set(RBD_IMAGE_OPTION_FEATURES, features);
uint64_t order = src_image_ctx->order;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) {
opts.set(RBD_IMAGE_OPTION_ORDER, order);
}
r = image::CreateRequest<I>::validate_order(cct, order);
if (r < 0) {
return r;
}
uint64_t stripe_unit = src_image_ctx->stripe_unit;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &stripe_unit) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
}
uint64_t stripe_count = src_image_ctx->stripe_count;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &stripe_count) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
}
uint64_t flatten = 0;
if (opts.get(RBD_IMAGE_OPTION_FLATTEN, &flatten) == 0) {
opts.unset(RBD_IMAGE_OPTION_FLATTEN);
}
ldout(cct, 20) << "updated opts=" << opts << dendl;
auto dst_image_ctx = I::create(
dest_image_name, util::generate_image_id(dest_io_ctx), nullptr,
dest_io_ctx, false);
src_image_ctx->image_lock.lock_shared();
cls::rbd::MigrationSpec dst_migration_spec{
cls::rbd::MIGRATION_HEADER_TYPE_DST,
src_image_ctx->md_ctx.get_id(), src_image_ctx->md_ctx.get_namespace(),
src_image_ctx->name, src_image_ctx->id, "", {}, 0, false,
cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, flatten > 0,
cls::rbd::MIGRATION_STATE_PREPARING, ""};
src_image_ctx->image_lock.unlock_shared();
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, nullptr);
r = migration.prepare();
return r;
}
template <typename I>
int Migration<I>::prepare_import(
const std::string& source_spec, librados::IoCtx& dest_io_ctx,
const std::string &dest_image_name, ImageOptions& opts) {
if (source_spec.empty() || !dest_io_ctx.is_valid() ||
dest_image_name.empty()) {
return -EINVAL;
}
auto cct = reinterpret_cast<CephContext *>(dest_io_ctx.cct());
ldout(cct, 10) << source_spec << " -> "
<< dest_io_ctx.get_pool_name() << "/"
<< dest_image_name << ", opts=" << opts << dendl;
I* src_image_ctx = nullptr;
C_SaferCond open_ctx;
auto req = migration::OpenSourceImageRequest<I>::create(
dest_io_ctx, nullptr, CEPH_NOSNAP,
{-1, "", "", "", source_spec, {}, 0, false}, &src_image_ctx, &open_ctx);
req->send();
int r = open_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to open source image: " << cpp_strerror(r) << dendl;
return r;
}
auto asio_engine = src_image_ctx->asio_engine;
BOOST_SCOPE_EXIT_TPL(src_image_ctx) {
src_image_ctx->state->close();
} BOOST_SCOPE_EXIT_END;
uint64_t image_format = 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &image_format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, image_format);
}
if (image_format != 2) {
lderr(cct) << "unsupported destination image format: " << image_format
<< dendl;
return -EINVAL;
}
ldout(cct, 20) << "updated opts=" << opts << dendl;
  // use json-spirit to clean up the json formatting
  json_spirit::mObject source_spec_object;
  json_spirit::mValue json_root;
  if (json_spirit::read(source_spec, json_root)) {
try {
source_spec_object = json_root.get_obj();
} catch (std::runtime_error&) {
lderr(cct) << "failed to clean source spec" << dendl;
return -EINVAL;
}
}
auto dst_image_ctx = I::create(
dest_image_name, util::generate_image_id(dest_io_ctx), nullptr,
dest_io_ctx, false);
cls::rbd::MigrationSpec dst_migration_spec{
cls::rbd::MIGRATION_HEADER_TYPE_DST, -1, "", "", "",
json_spirit::write(source_spec_object), {},
0, false, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, true,
cls::rbd::MIGRATION_STATE_PREPARING, ""};
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, nullptr);
  r = migration.prepare_import();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::execute(librados::IoCtx& io_ctx,
const std::string &image_name,
ProgressContext &prog_ctx) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << dendl;
I *src_image_ctx;
I *dst_image_ctx;
cls::rbd::MigrationSpec src_migration_spec;
cls::rbd::MigrationSpec dst_migration_spec;
int r = open_images(io_ctx, image_name, &src_image_ctx, &dst_image_ctx,
&src_migration_spec, &dst_migration_spec, false);
if (r < 0) {
return r;
}
// ensure the destination loads the migration info
dst_image_ctx->ignore_migrating = false;
r = dst_image_ctx->state->refresh();
if (r < 0) {
lderr(cct) << "failed to refresh destination image: " << cpp_strerror(r)
<< dendl;
return r;
}
BOOST_SCOPE_EXIT_TPL(src_image_ctx, dst_image_ctx) {
dst_image_ctx->state->close();
if (src_image_ctx != nullptr) {
src_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
if (dst_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
dst_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING) {
lderr(cct) << "current migration state is '" << dst_migration_spec.state
<< "' (should be 'prepared')" << dendl;
return -EINVAL;
}
ldout(cct, 5) << "migrating ";
if (!dst_migration_spec.source_spec.empty()) {
*_dout << dst_migration_spec.source_spec;
} else {
*_dout << src_image_ctx->md_ctx.get_pool_name() << "/"
<< src_image_ctx->name;
}
*_dout << " -> " << dst_image_ctx->md_ctx.get_pool_name() << "/"
<< dst_image_ctx->name << dendl;
ImageOptions opts;
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, &prog_ctx);
r = migration.execute();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::abort(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext &prog_ctx) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << dendl;
I *src_image_ctx;
I *dst_image_ctx;
cls::rbd::MigrationSpec src_migration_spec;
cls::rbd::MigrationSpec dst_migration_spec;
int r = open_images(io_ctx, image_name, &src_image_ctx, &dst_image_ctx,
&src_migration_spec, &dst_migration_spec, true);
if (r < 0) {
return r;
}
ldout(cct, 5) << "canceling incomplete migration ";
if (!dst_migration_spec.source_spec.empty()) {
*_dout << dst_migration_spec.source_spec;
} else {
*_dout << src_image_ctx->md_ctx.get_pool_name() << "/"
<< src_image_ctx->name;
}
*_dout << " -> " << dst_image_ctx->md_ctx.get_pool_name() << "/"
<< dst_image_ctx->name << dendl;
ImageOptions opts;
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, &prog_ctx);
r = migration.abort();
if (src_image_ctx != nullptr) {
src_image_ctx->state->close();
}
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::commit(librados::IoCtx& io_ctx,
const std::string &image_name,
ProgressContext &prog_ctx) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << dendl;
I *src_image_ctx;
I *dst_image_ctx;
cls::rbd::MigrationSpec src_migration_spec;
cls::rbd::MigrationSpec dst_migration_spec;
int r = open_images(io_ctx, image_name, &src_image_ctx, &dst_image_ctx,
&src_migration_spec, &dst_migration_spec, false);
if (r < 0) {
return r;
}
if (dst_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTED) {
lderr(cct) << "current migration state is '" << dst_migration_spec.state
<< "' (should be 'executed')" << dendl;
dst_image_ctx->state->close();
if (src_image_ctx != nullptr) {
src_image_ctx->state->close();
}
return -EINVAL;
}
// ensure the destination loads the migration info
dst_image_ctx->ignore_migrating = false;
r = dst_image_ctx->state->refresh();
if (r < 0) {
lderr(cct) << "failed to refresh destination image: " << cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 5) << "migrating ";
if (!dst_migration_spec.source_spec.empty()) {
*_dout << dst_migration_spec.source_spec;
} else {
*_dout << src_image_ctx->md_ctx.get_pool_name() << "/"
<< src_image_ctx->name;
}
*_dout << " -> " << dst_image_ctx->md_ctx.get_pool_name() << "/"
<< dst_image_ctx->name << dendl;
ImageOptions opts;
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, &prog_ctx);
r = migration.commit();
// image_ctx is closed in commit when removing src image
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::status(librados::IoCtx& io_ctx,
const std::string &image_name,
image_migration_status_t *status) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << dendl;
I *src_image_ctx;
I *dst_image_ctx;
cls::rbd::MigrationSpec src_migration_spec;
cls::rbd::MigrationSpec dst_migration_spec;
int r = open_images(io_ctx, image_name, &src_image_ctx, &dst_image_ctx,
&src_migration_spec, &dst_migration_spec, false);
if (r < 0) {
return r;
}
ldout(cct, 5) << "migrating ";
if (!dst_migration_spec.source_spec.empty()) {
*_dout << dst_migration_spec.source_spec;
} else {
*_dout << src_image_ctx->md_ctx.get_pool_name() << "/"
<< src_image_ctx->name;
}
*_dout << " -> " << dst_image_ctx->md_ctx.get_pool_name() << "/"
<< dst_image_ctx->name << dendl;
ImageOptions opts;
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, nullptr);
r = migration.status(status);
dst_image_ctx->state->close();
if (src_image_ctx != nullptr) {
src_image_ctx->state->close();
}
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::get_source_spec(I* image_ctx, std::string* source_spec) {
auto cct = image_ctx->cct;
ldout(cct, 10) << dendl;
image_ctx->image_lock.lock_shared();
auto migration_info = image_ctx->migration_info;
image_ctx->image_lock.unlock_shared();
if (migration_info.empty()) {
// attempt to directly read the spec in case the state is EXECUTED
cls::rbd::MigrationSpec migration_spec;
int r = cls_client::migration_get(&image_ctx->md_ctx, image_ctx->header_oid,
&migration_spec);
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "failed retrieving migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
migration_info = {
migration_spec.pool_id, migration_spec.pool_namespace,
migration_spec.image_name, migration_spec.image_id,
migration_spec.source_spec, {}, 0, false};
}
if (!migration_info.source_spec.empty()) {
*source_spec = migration_info.source_spec;
} else {
// legacy migration source
*source_spec = migration::NativeFormat<I>::build_source_spec(
migration_info.pool_id,
migration_info.pool_namespace,
migration_info.image_name,
migration_info.image_id);
}
return 0;
}
template <typename I>
Migration<I>::Migration(ImageCtx* src_image_ctx,
ImageCtx* dst_image_ctx,
const cls::rbd::MigrationSpec& dst_migration_spec,
ImageOptions& opts, ProgressContext *prog_ctx)
: m_cct(dst_image_ctx->cct),
m_src_image_ctx(src_image_ctx), m_dst_image_ctx(dst_image_ctx),
m_dst_io_ctx(dst_image_ctx->md_ctx), m_dst_image_name(dst_image_ctx->name),
m_dst_image_id(dst_image_ctx->id),
m_dst_header_oid(util::header_name(m_dst_image_id)),
m_image_options(opts), m_flatten(dst_migration_spec.flatten),
m_mirroring(dst_migration_spec.mirroring),
m_mirror_image_mode(dst_migration_spec.mirror_image_mode),
m_prog_ctx(prog_ctx),
m_src_migration_spec(cls::rbd::MIGRATION_HEADER_TYPE_SRC,
m_dst_io_ctx.get_id(), m_dst_io_ctx.get_namespace(),
m_dst_image_name, m_dst_image_id, "", {}, 0,
m_mirroring, m_mirror_image_mode, m_flatten,
dst_migration_spec.state,
dst_migration_spec.state_description),
m_dst_migration_spec(dst_migration_spec) {
m_dst_io_ctx.dup(dst_image_ctx->md_ctx);
}
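// Native (RBD -> RBD) preparation: validate the source snapshots, disable
// mirroring, unlink the source image (tmap entry for v1, trash for v2), write
// its migration header and finally create the linked destination image.
// Failures roll the earlier steps back before returning.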
template <typename I>
int Migration<I>::prepare() {
ldout(m_cct, 10) << dendl;
BOOST_SCOPE_EXIT_TPL(&m_dst_image_ctx) {
if (m_dst_image_ctx != nullptr) {
m_dst_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
int r = validate_src_snaps(m_src_image_ctx);
if (r < 0) {
return r;
}
r = disable_mirroring(m_src_image_ctx, &m_mirroring, &m_mirror_image_mode);
if (r < 0) {
return r;
}
r = unlink_src_image(m_src_image_ctx);
if (r < 0) {
enable_mirroring(m_src_image_ctx, m_mirroring, m_mirror_image_mode);
return r;
}
r = set_src_migration(m_src_image_ctx);
if (r < 0) {
relink_src_image(m_src_image_ctx);
enable_mirroring(m_src_image_ctx, m_mirroring, m_mirror_image_mode);
return r;
}
r = create_dst_image(&m_dst_image_ctx);
if (r < 0) {
abort();
return r;
}
ldout(m_cct, 10) << "succeeded" << dendl;
return 0;
}
template <typename I>
int Migration<I>::prepare_import() {
ldout(m_cct, 10) << dendl;
BOOST_SCOPE_EXIT_TPL(&m_dst_image_ctx) {
if (m_dst_image_ctx != nullptr) {
m_dst_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
int r = create_dst_image(&m_dst_image_ctx);
if (r < 0) {
abort();
return r;
}
return 0;
}
template <typename I>
int Migration<I>::execute() {
ldout(m_cct, 10) << dendl;
int r = set_state(cls::rbd::MIGRATION_STATE_EXECUTING, "");
if (r < 0) {
return r;
}
{
MigrationProgressContext dst_prog_ctx(
m_dst_image_ctx->md_ctx, m_dst_image_ctx->header_oid,
cls::rbd::MIGRATION_STATE_EXECUTING, m_prog_ctx);
std::optional<MigrationProgressContext> src_prog_ctx;
if (m_src_image_ctx != nullptr) {
src_prog_ctx.emplace(m_src_image_ctx->md_ctx, m_src_image_ctx->header_oid,
cls::rbd::MIGRATION_STATE_EXECUTING, &dst_prog_ctx);
}
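    // retry if the exclusive lock is lost while migrate() is in progress
    // (surfaced as -EROFS once the lock no longer accepts ops)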
while (true) {
r = m_dst_image_ctx->operations->migrate(
*(src_prog_ctx ? &src_prog_ctx.value() : &dst_prog_ctx));
if (r == -EROFS) {
std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
if (m_dst_image_ctx->exclusive_lock != nullptr &&
!m_dst_image_ctx->exclusive_lock->accept_ops()) {
ldout(m_cct, 5) << "lost exclusive lock, retrying remote" << dendl;
continue;
}
}
break;
}
}
if (r < 0) {
lderr(m_cct) << "migration failed: " << cpp_strerror(r) << dendl;
return r;
}
r = set_state(cls::rbd::MIGRATION_STATE_EXECUTED, "");
if (r < 0) {
return r;
}
m_dst_image_ctx->notify_update();
ldout(m_cct, 10) << "succeeded" << dendl;
return 0;
}
template <typename I>
int Migration<I>::abort() {
ldout(m_cct, 10) << dendl;
int r;
if (m_src_image_ctx != nullptr) {
m_src_image_ctx->owner_lock.lock_shared();
if (m_src_image_ctx->exclusive_lock != nullptr &&
!m_src_image_ctx->exclusive_lock->is_lock_owner()) {
C_SaferCond ctx;
m_src_image_ctx->exclusive_lock->acquire_lock(&ctx);
m_src_image_ctx->owner_lock.unlock_shared();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "error acquiring exclusive lock: " << cpp_strerror(r)
<< dendl;
return r;
}
} else {
m_src_image_ctx->owner_lock.unlock_shared();
}
}
group_info_t group_info;
group_info.pool = -1;
r = m_dst_image_ctx->state->open(OPEN_FLAG_IGNORE_MIGRATING);
if (r < 0) {
ldout(m_cct, 1) << "failed to open destination image: " << cpp_strerror(r)
<< dendl;
m_dst_image_ctx = nullptr;
} else {
BOOST_SCOPE_EXIT_TPL(&m_dst_image_ctx) {
if (m_dst_image_ctx != nullptr) {
m_dst_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
std::list<obj_watch_t> watchers;
int flags = librbd::image::LIST_WATCHERS_FILTER_OUT_MY_INSTANCE |
librbd::image::LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES;
C_SaferCond on_list_watchers;
auto list_watchers_request = librbd::image::ListWatchersRequest<I>::create(
*m_dst_image_ctx, flags, &watchers, &on_list_watchers);
list_watchers_request->send();
r = on_list_watchers.wait();
if (r < 0) {
lderr(m_cct) << "failed listing watchers:" << cpp_strerror(r) << dendl;
return r;
}
if (!watchers.empty()) {
lderr(m_cct) << "image has watchers - cannot abort migration" << dendl;
return -EBUSY;
}
// ensure destination image is now read-only
r = set_state(cls::rbd::MIGRATION_STATE_ABORTING, "");
if (r < 0) {
return r;
}
SteppedProgressContext progress_ctx(
m_prog_ctx, (m_src_image_ctx != nullptr ? 2 : 1));
if (m_src_image_ctx != nullptr) {
// copy dst HEAD -> src HEAD
revert_data(m_dst_image_ctx, m_src_image_ctx, &progress_ctx);
progress_ctx.next_step();
ldout(m_cct, 10) << "relinking children" << dendl;
r = relink_children(m_dst_image_ctx, m_src_image_ctx);
if (r < 0) {
return r;
}
}
ldout(m_cct, 10) << "removing dst image snapshots" << dendl;
std::vector<librbd::snap_info_t> snaps;
r = Snapshot<I>::list(m_dst_image_ctx, snaps);
if (r < 0) {
lderr(m_cct) << "failed listing snapshots: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto &snap : snaps) {
librbd::NoOpProgressContext prog_ctx;
int r = Snapshot<I>::remove(m_dst_image_ctx, snap.name.c_str(),
RBD_SNAP_REMOVE_UNPROTECT, prog_ctx);
if (r < 0) {
lderr(m_cct) << "failed removing snapshot: " << cpp_strerror(r)
<< dendl;
return r;
}
}
ldout(m_cct, 10) << "removing group" << dendl;
r = remove_group(m_dst_image_ctx, &group_info);
if (r < 0 && r != -ENOENT) {
return r;
}
ldout(m_cct, 10) << "removing dst image" << dendl;
ceph_assert(m_dst_image_ctx->ignore_migrating);
auto asio_engine = m_dst_image_ctx->asio_engine;
librados::IoCtx dst_io_ctx(m_dst_image_ctx->md_ctx);
C_SaferCond on_remove;
auto req = librbd::image::RemoveRequest<>::create(
dst_io_ctx, m_dst_image_ctx, false, false, progress_ctx,
asio_engine->get_work_queue(), &on_remove);
req->send();
r = on_remove.wait();
m_dst_image_ctx = nullptr;
if (r < 0) {
lderr(m_cct) << "failed removing destination image '"
<< dst_io_ctx.get_pool_name() << "/" << m_dst_image_name
<< " (" << m_dst_image_id << ")': " << cpp_strerror(r)
<< dendl;
return r;
}
}
if (m_src_image_ctx != nullptr) {
r = relink_src_image(m_src_image_ctx);
if (r < 0) {
return r;
}
r = add_group(m_src_image_ctx, group_info);
if (r < 0) {
return r;
}
r = remove_migration(m_src_image_ctx);
if (r < 0) {
return r;
}
r = enable_mirroring(m_src_image_ctx, m_mirroring, m_mirror_image_mode);
if (r < 0) {
return r;
}
}
ldout(m_cct, 10) << "succeeded" << dendl;
return 0;
}
template <typename I>
int Migration<I>::commit() {
ldout(m_cct, 10) << dendl;
BOOST_SCOPE_EXIT_TPL(&m_dst_image_ctx, &m_src_image_ctx) {
m_dst_image_ctx->state->close();
if (m_src_image_ctx != nullptr) {
m_src_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
int r = remove_migration(m_dst_image_ctx);
if (r < 0) {
return r;
}
if (m_src_image_ctx != nullptr) {
r = remove_src_image(&m_src_image_ctx);
if (r < 0) {
return r;
}
}
r = enable_mirroring(m_dst_image_ctx, m_mirroring, m_mirror_image_mode);
if (r < 0) {
return r;
}
ldout(m_cct, 10) << "succeeded" << dendl;
return 0;
}
template <typename I>
int Migration<I>::status(image_migration_status_t *status) {
ldout(m_cct, 10) << dendl;
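  // note: the destination image's migration spec records the *source* image
  // coordinates and vice versa, hence the apparent cross-over below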
status->source_pool_id = m_dst_migration_spec.pool_id;
status->source_pool_namespace = m_dst_migration_spec.pool_namespace;
status->source_image_name = m_dst_migration_spec.image_name;
status->source_image_id = m_dst_migration_spec.image_id;
status->dest_pool_id = m_src_migration_spec.pool_id;
status->dest_pool_namespace = m_src_migration_spec.pool_namespace;
status->dest_image_name = m_src_migration_spec.image_name;
status->dest_image_id = m_src_migration_spec.image_id;
switch (m_src_migration_spec.state) {
case cls::rbd::MIGRATION_STATE_ERROR:
status->state = RBD_IMAGE_MIGRATION_STATE_ERROR;
break;
case cls::rbd::MIGRATION_STATE_PREPARING:
status->state = RBD_IMAGE_MIGRATION_STATE_PREPARING;
break;
case cls::rbd::MIGRATION_STATE_PREPARED:
status->state = RBD_IMAGE_MIGRATION_STATE_PREPARED;
break;
case cls::rbd::MIGRATION_STATE_EXECUTING:
status->state = RBD_IMAGE_MIGRATION_STATE_EXECUTING;
break;
case cls::rbd::MIGRATION_STATE_EXECUTED:
status->state = RBD_IMAGE_MIGRATION_STATE_EXECUTED;
break;
default:
status->state = RBD_IMAGE_MIGRATION_STATE_UNKNOWN;
break;
}
status->state_description = m_src_migration_spec.state_description;
return 0;
}
template <typename I>
int Migration<I>::set_state(I* image_ctx, const std::string& image_description,
cls::rbd::MigrationState state,
const std::string &description) {
int r = cls_client::migration_set_state(&image_ctx->md_ctx,
image_ctx->header_oid,
state, description);
if (r < 0) {
lderr(m_cct) << "failed to set " << image_description << " "
<< "migration header: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::set_state(cls::rbd::MigrationState state,
const std::string &description) {
int r;
if (m_src_image_ctx != nullptr) {
r = set_state(m_src_image_ctx, "source", state, description);
if (r < 0) {
return r;
}
}
r = set_state(m_dst_image_ctx, "destination", state, description);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::list_src_snaps(I* image_ctx,
std::vector<librbd::snap_info_t> *snaps) {
ldout(m_cct, 10) << dendl;
int r = Snapshot<I>::list(image_ctx, *snaps);
if (r < 0) {
lderr(m_cct) << "failed listing snapshots: " << cpp_strerror(r) << dendl;
return r;
}
for (auto &snap : *snaps) {
librbd::snap_namespace_type_t namespace_type;
r = Snapshot<I>::get_namespace_type(image_ctx, snap.id,
&namespace_type);
if (r < 0) {
lderr(m_cct) << "error getting snap namespace type: " << cpp_strerror(r)
<< dendl;
return r;
}
if (namespace_type != RBD_SNAP_NAMESPACE_TYPE_USER) {
if (namespace_type == RBD_SNAP_NAMESPACE_TYPE_TRASH) {
lderr(m_cct) << "image has snapshots with linked clones that must be "
<< "deleted or flattened before the image can be migrated"
<< dendl;
} else {
lderr(m_cct) << "image has non-user type snapshots "
<< "that are not supported by migration" << dendl;
}
return -EBUSY;
}
}
return 0;
}
template <typename I>
int Migration<I>::validate_src_snaps(I* image_ctx) {
ldout(m_cct, 10) << dendl;
std::vector<librbd::snap_info_t> snaps;
int r = list_src_snaps(image_ctx, &snaps);
if (r < 0) {
return r;
}
uint64_t dst_features = 0;
r = m_image_options.get(RBD_IMAGE_OPTION_FEATURES, &dst_features);
ceph_assert(r == 0);
if (!image_ctx->test_features(RBD_FEATURE_LAYERING)) {
return 0;
}
for (auto &snap : snaps) {
std::shared_lock image_locker{image_ctx->image_lock};
cls::rbd::ParentImageSpec parent_spec{image_ctx->md_ctx.get_id(),
image_ctx->md_ctx.get_namespace(),
image_ctx->id, snap.id};
std::vector<librbd::linked_image_spec_t> child_images;
r = api::Image<I>::list_children(image_ctx, parent_spec,
&child_images);
if (r < 0) {
lderr(m_cct) << "failed listing children: " << cpp_strerror(r)
<< dendl;
return r;
}
if (!child_images.empty()) {
ldout(m_cct, 1) << image_ctx->name << "@" << snap.name
<< " has children" << dendl;
if ((dst_features & RBD_FEATURE_LAYERING) == 0) {
lderr(m_cct) << "can't migrate to destination without layering feature: "
<< "image has children" << dendl;
return -EINVAL;
}
}
}
return 0;
}
template <typename I>
int Migration<I>::set_src_migration(I* image_ctx) {
ldout(m_cct, 10) << dendl;
image_ctx->ignore_migrating = true;
int r = cls_client::migration_set(&image_ctx->md_ctx, image_ctx->header_oid,
m_src_migration_spec);
if (r < 0) {
lderr(m_cct) << "failed to set source migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
image_ctx->notify_update();
return 0;
}
template <typename I>
int Migration<I>::remove_migration(I *image_ctx) {
ldout(m_cct, 10) << dendl;
int r;
r = cls_client::migration_remove(&image_ctx->md_ctx, image_ctx->header_oid);
if (r == -ENOENT) {
r = 0;
}
if (r < 0) {
lderr(m_cct) << "failed removing migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
image_ctx->notify_update();
return 0;
}
template <typename I>
int Migration<I>::unlink_src_image(I* image_ctx) {
if (image_ctx->old_format) {
return v1_unlink_src_image(image_ctx);
} else {
return v2_unlink_src_image(image_ctx);
}
}
template <typename I>
int Migration<I>::v1_unlink_src_image(I* image_ctx) {
ldout(m_cct, 10) << dendl;
std::shared_lock image_locker{image_ctx->image_lock};
int r = tmap_rm(image_ctx->md_ctx, image_ctx->name);
if (r < 0) {
lderr(m_cct) << "failed removing " << image_ctx->name << " from tmap: "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::v2_unlink_src_image(I* image_ctx) {
ldout(m_cct, 10) << dendl;
image_ctx->owner_lock.lock_shared();
if (image_ctx->exclusive_lock != nullptr &&
image_ctx->exclusive_lock->is_lock_owner()) {
C_SaferCond ctx;
image_ctx->exclusive_lock->release_lock(&ctx);
image_ctx->owner_lock.unlock_shared();
int r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "error releasing exclusive lock: " << cpp_strerror(r)
<< dendl;
return r;
}
} else {
image_ctx->owner_lock.unlock_shared();
}
int r = Trash<I>::move(image_ctx->md_ctx, RBD_TRASH_IMAGE_SOURCE_MIGRATION,
image_ctx->name, 0);
if (r < 0) {
lderr(m_cct) << "failed moving image to trash: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::relink_src_image(I* image_ctx) {
if (image_ctx->old_format) {
return v1_relink_src_image(image_ctx);
} else {
return v2_relink_src_image(image_ctx);
}
}
template <typename I>
int Migration<I>::v1_relink_src_image(I* image_ctx) {
ldout(m_cct, 10) << dendl;
std::shared_lock image_locker{image_ctx->image_lock};
int r = tmap_set(image_ctx->md_ctx, image_ctx->name);
if (r < 0) {
lderr(m_cct) << "failed adding " << image_ctx->name << " to tmap: "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::v2_relink_src_image(I* image_ctx) {
ldout(m_cct, 10) << dendl;
std::shared_lock image_locker{image_ctx->image_lock};
int r = Trash<I>::restore(image_ctx->md_ctx,
{cls::rbd::TRASH_IMAGE_SOURCE_MIGRATION},
image_ctx->id, image_ctx->name);
if (r < 0) {
lderr(m_cct) << "failed restoring image from trash: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::create_dst_image(I** image_ctx) {
ldout(m_cct, 10) << dendl;
uint64_t size;
cls::rbd::ParentImageSpec parent_spec;
{
std::shared_lock image_locker{m_src_image_ctx->image_lock};
size = m_src_image_ctx->size;
// use oldest snapshot or HEAD for parent spec
if (!m_src_image_ctx->snap_info.empty()) {
parent_spec = m_src_image_ctx->snap_info.begin()->second.parent.spec;
} else {
parent_spec = m_src_image_ctx->parent_md.spec;
}
}
ConfigProxy config{m_cct->_conf};
api::Config<I>::apply_pool_overrides(m_dst_io_ctx, &config);
uint64_t mirror_image_mode;
if (m_image_options.get(RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE,
&mirror_image_mode) == 0) {
m_mirroring = true;
m_mirror_image_mode = static_cast<cls::rbd::MirrorImageMode>(
mirror_image_mode);
m_image_options.unset(RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE);
}
int r;
C_SaferCond on_create;
librados::IoCtx parent_io_ctx;
if (parent_spec.pool_id == -1) {
auto *req = image::CreateRequest<I>::create(
config, m_dst_io_ctx, m_dst_image_name, m_dst_image_id, size,
m_image_options, image::CREATE_FLAG_SKIP_MIRROR_ENABLE,
cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "", "",
m_src_image_ctx->op_work_queue, &on_create);
req->send();
} else {
r = util::create_ioctx(m_src_image_ctx->md_ctx, "parent image",
parent_spec.pool_id, parent_spec.pool_namespace,
&parent_io_ctx);
if (r < 0) {
return r;
}
auto *req = image::CloneRequest<I>::create(
config, parent_io_ctx, parent_spec.image_id, "", {}, parent_spec.snap_id,
m_dst_io_ctx, m_dst_image_name, m_dst_image_id, m_image_options,
cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "", "",
m_src_image_ctx->op_work_queue, &on_create);
req->send();
}
r = on_create.wait();
if (r < 0) {
lderr(m_cct) << "header creation failed: " << cpp_strerror(r) << dendl;
return r;
}
auto dst_image_ctx = *image_ctx;
dst_image_ctx->id = m_dst_image_id;
*image_ctx = nullptr; // prevent prepare from cleaning up the ImageCtx
r = dst_image_ctx->state->open(OPEN_FLAG_IGNORE_MIGRATING);
if (r < 0) {
lderr(m_cct) << "failed to open newly created header: " << cpp_strerror(r)
<< dendl;
return r;
}
BOOST_SCOPE_EXIT_TPL(dst_image_ctx) {
dst_image_ctx->state->close();
} BOOST_SCOPE_EXIT_END;
{
std::shared_lock owner_locker{dst_image_ctx->owner_lock};
r = dst_image_ctx->operations->prepare_image_update(
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true);
if (r < 0) {
lderr(m_cct) << "cannot obtain exclusive lock" << dendl;
return r;
}
if (dst_image_ctx->exclusive_lock != nullptr) {
dst_image_ctx->exclusive_lock->block_requests(0);
}
}
SnapSeqs snap_seqs;
C_SaferCond on_snapshot_copy;
auto snapshot_copy_req = librbd::deep_copy::SnapshotCopyRequest<I>::create(
m_src_image_ctx, dst_image_ctx, 0, CEPH_NOSNAP, 0, m_flatten,
m_src_image_ctx->op_work_queue, &snap_seqs, &on_snapshot_copy);
snapshot_copy_req->send();
r = on_snapshot_copy.wait();
if (r < 0) {
lderr(m_cct) << "failed to copy snapshots: " << cpp_strerror(r) << dendl;
return r;
}
if (!m_src_image_ctx->header_oid.empty()) {
C_SaferCond on_metadata_copy;
auto metadata_copy_req = librbd::deep_copy::MetadataCopyRequest<I>::create(
m_src_image_ctx, dst_image_ctx, &on_metadata_copy);
metadata_copy_req->send();
r = on_metadata_copy.wait();
if (r < 0) {
lderr(m_cct) << "failed to copy metadata: " << cpp_strerror(r) << dendl;
return r;
}
}
m_dst_migration_spec.snap_seqs = snap_seqs;
m_dst_migration_spec.overlap = size;
m_dst_migration_spec.mirroring = m_mirroring;
m_dst_migration_spec.mirror_image_mode = m_mirror_image_mode;
m_dst_migration_spec.flatten = m_flatten;
r = cls_client::migration_set(&m_dst_io_ctx, m_dst_header_oid,
m_dst_migration_spec);
if (r < 0) {
lderr(m_cct) << "failed to set migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
if (m_dst_migration_spec.source_spec.empty()) {
r = update_group(m_src_image_ctx, dst_image_ctx);
if (r < 0) {
return r;
}
r = set_state(m_src_image_ctx, "source",
cls::rbd::MIGRATION_STATE_PREPARED, "");
if (r < 0) {
return r;
}
}
r = set_state(dst_image_ctx, "destination",
cls::rbd::MIGRATION_STATE_PREPARED, "");
if (r < 0) {
return r;
}
if (m_dst_migration_spec.source_spec.empty()) {
r = dst_image_ctx->state->refresh();
if (r < 0) {
lderr(m_cct) << "failed to refresh destination image: " << cpp_strerror(r)
<< dendl;
return r;
}
r = relink_children(m_src_image_ctx, dst_image_ctx);
if (r < 0) {
return r;
}
}
return 0;
}
template <typename I>
int Migration<I>::remove_group(I *image_ctx, group_info_t *group_info) {
int r = librbd::api::Group<I>::image_get_group(image_ctx, group_info);
if (r < 0) {
lderr(m_cct) << "failed to get image group: " << cpp_strerror(r) << dendl;
return r;
}
if (group_info->pool == -1) {
return -ENOENT;
}
ceph_assert(!image_ctx->id.empty());
ldout(m_cct, 10) << dendl;
IoCtx group_ioctx;
r = util::create_ioctx(image_ctx->md_ctx, "group", group_info->pool, {},
&group_ioctx);
if (r < 0) {
return r;
}
r = librbd::api::Group<I>::image_remove_by_id(group_ioctx,
group_info->name.c_str(),
image_ctx->md_ctx,
image_ctx->id.c_str());
if (r < 0) {
lderr(m_cct) << "failed to remove image from group: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::add_group(I *image_ctx, group_info_t &group_info) {
if (group_info.pool == -1) {
return 0;
}
ldout(m_cct, 10) << dendl;
IoCtx group_ioctx;
int r = util::create_ioctx(image_ctx->md_ctx, "group", group_info.pool, {},
&group_ioctx);
if (r < 0) {
return r;
}
r = librbd::api::Group<I>::image_add(group_ioctx, group_info.name.c_str(),
image_ctx->md_ctx,
image_ctx->name.c_str());
if (r < 0) {
lderr(m_cct) << "failed to add image to group: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::update_group(I *from_image_ctx, I *to_image_ctx) {
ldout(m_cct, 10) << dendl;
group_info_t group_info;
int r = remove_group(from_image_ctx, &group_info);
if (r < 0) {
return r == -ENOENT ? 0 : r;
}
r = add_group(to_image_ctx, group_info);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::disable_mirroring(
I *image_ctx, bool *was_enabled,
cls::rbd::MirrorImageMode *mirror_image_mode) {
*was_enabled = false;
cls::rbd::MirrorImage mirror_image;
int r = cls_client::mirror_image_get(&image_ctx->md_ctx, image_ctx->id,
&mirror_image);
if (r == -ENOENT) {
ldout(m_cct, 10) << "mirroring is not enabled for this image" << dendl;
return 0;
}
if (r < 0) {
lderr(m_cct) << "failed to retrieve mirror image: " << cpp_strerror(r)
<< dendl;
return r;
}
if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
*was_enabled = true;
*mirror_image_mode = mirror_image.mode;
}
ldout(m_cct, 10) << dendl;
C_SaferCond ctx;
auto req = mirror::DisableRequest<I>::create(image_ctx, false, true, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "failed to disable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
m_src_migration_spec.mirroring = true;
return 0;
}
template <typename I>
int Migration<I>::enable_mirroring(
I *image_ctx, bool was_enabled,
cls::rbd::MirrorImageMode mirror_image_mode) {
cls::rbd::MirrorMode mirror_mode;
int r = cls_client::mirror_mode_get(&image_ctx->md_ctx, &mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(m_cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
<< dendl;
return r;
}
if (mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
ldout(m_cct, 10) << "mirroring is not enabled for destination pool"
<< dendl;
return 0;
}
if (mirror_mode == cls::rbd::MIRROR_MODE_IMAGE && !was_enabled) {
ldout(m_cct, 10) << "mirroring is not enabled for image" << dendl;
return 0;
}
ldout(m_cct, 10) << dendl;
C_SaferCond ctx;
auto req = mirror::EnableRequest<I>::create(
image_ctx, mirror_image_mode, "", false, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "failed to enable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
// When relinking children we should be careful as it may be interrupted
// at any moment for some reason and we may end up in an inconsistent
// state, which we have to be able to fix with "migration abort". Below
// are all possible states during migration (P1 - source parent, P2 -
// destination parent, C - child):
//
// P1 P2 P1 P2 P1 P2 P1 P2
// ^\ \ ^ \ /^ /^
// \v v/ v/ v/
// C C C C
//
// 1 2 3 4
//
// (1) and (4) are the initial and the final consistent states. (2)
// and (3) are intermediate inconsistent states that have to be fixed
// by relink_children running in "migration abort" mode. For this, it
// scans P2 for all children attached and relinks (fixes) states (3)
// and (4) to state (1). Then it scans P1 for remaining children and
// fixes the states (2).
template <typename I>
int Migration<I>::relink_children(I *from_image_ctx, I *to_image_ctx) {
ldout(m_cct, 10) << dendl;
bool migration_abort = (to_image_ctx == m_src_image_ctx);
std::vector<librbd::snap_info_t> snaps;
int r = list_src_snaps(
migration_abort ? to_image_ctx : from_image_ctx, &snaps);
if (r < 0) {
return r;
}
for (auto it = snaps.begin(); it != snaps.end(); it++) {
auto &snap = *it;
std::vector<librbd::linked_image_spec_t> src_child_images;
if (from_image_ctx != m_src_image_ctx) {
ceph_assert(migration_abort);
// We run list snaps against the src image to get only those snapshots
// that are migrated. If the "from" image is not the src image
// (abort migration case), we need to remap snap ids.
// Also collect the list of the children currently attached to the
// source, so we could make a proper decision later about relinking.
std::shared_lock src_image_locker{to_image_ctx->image_lock};
cls::rbd::ParentImageSpec src_parent_spec{to_image_ctx->md_ctx.get_id(),
to_image_ctx->md_ctx.get_namespace(),
to_image_ctx->id, snap.id};
r = api::Image<I>::list_children(to_image_ctx, src_parent_spec,
&src_child_images);
if (r < 0) {
lderr(m_cct) << "failed listing children: " << cpp_strerror(r)
<< dendl;
return r;
}
std::shared_lock image_locker{from_image_ctx->image_lock};
snap.id = from_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
snap.name);
if (snap.id == CEPH_NOSNAP) {
ldout(m_cct, 5) << "skipping snapshot " << snap.name << dendl;
continue;
}
}
std::vector<librbd::linked_image_spec_t> child_images;
{
std::shared_lock image_locker{from_image_ctx->image_lock};
cls::rbd::ParentImageSpec parent_spec{from_image_ctx->md_ctx.get_id(),
from_image_ctx->md_ctx.get_namespace(),
from_image_ctx->id, snap.id};
r = api::Image<I>::list_children(from_image_ctx, parent_spec,
&child_images);
if (r < 0) {
lderr(m_cct) << "failed listing children: " << cpp_strerror(r)
<< dendl;
return r;
}
}
for (auto &child_image : child_images) {
r = relink_child(from_image_ctx, to_image_ctx, snap, child_image,
migration_abort, true);
if (r < 0) {
return r;
}
src_child_images.erase(std::remove(src_child_images.begin(),
src_child_images.end(), child_image),
src_child_images.end());
}
for (auto &child_image : src_child_images) {
r = relink_child(from_image_ctx, to_image_ctx, snap, child_image,
migration_abort, false);
if (r < 0) {
return r;
}
}
}
return 0;
}
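// Re-point a single child image from (from_image_ctx, from_snap) onto the
// matching snapshot of to_image_ctx: the parent spec is re-attached and, when
// reattach_child is set, the child is also re-registered against the new
// parent using the clone format detected from its op features.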
template <typename I>
int Migration<I>::relink_child(I *from_image_ctx, I *to_image_ctx,
const librbd::snap_info_t &from_snap,
const librbd::linked_image_spec_t &child_image,
bool migration_abort, bool reattach_child) {
ldout(m_cct, 10) << from_snap.name << " " << child_image.pool_name << "/"
<< child_image.pool_namespace << "/"
<< child_image.image_name << " (migration_abort="
<< migration_abort << ", reattach_child=" << reattach_child
<< ")" << dendl;
librados::snap_t to_snap_id;
{
std::shared_lock image_locker{to_image_ctx->image_lock};
to_snap_id = to_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
from_snap.name);
if (to_snap_id == CEPH_NOSNAP) {
lderr(m_cct) << "no snapshot " << from_snap.name << " on destination image"
<< dendl;
return -ENOENT;
}
}
librados::IoCtx child_io_ctx;
int r = util::create_ioctx(to_image_ctx->md_ctx,
"child image " + child_image.image_name,
child_image.pool_id, child_image.pool_namespace,
&child_io_ctx);
if (r < 0) {
return r;
}
I *child_image_ctx = I::create("", child_image.image_id, nullptr,
child_io_ctx, false);
r = child_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r < 0) {
lderr(m_cct) << "failed to open child image: " << cpp_strerror(r) << dendl;
return r;
}
BOOST_SCOPE_EXIT_TPL(child_image_ctx) {
child_image_ctx->state->close();
} BOOST_SCOPE_EXIT_END;
uint32_t clone_format = 1;
if (child_image_ctx->test_op_features(RBD_OPERATION_FEATURE_CLONE_CHILD)) {
clone_format = 2;
}
cls::rbd::ParentImageSpec parent_spec;
uint64_t parent_overlap;
{
std::shared_lock image_locker{child_image_ctx->image_lock};
// use oldest snapshot or HEAD for parent spec
if (!child_image_ctx->snap_info.empty()) {
parent_spec = child_image_ctx->snap_info.begin()->second.parent.spec;
parent_overlap = child_image_ctx->snap_info.begin()->second.parent.overlap;
} else {
parent_spec = child_image_ctx->parent_md.spec;
parent_overlap = child_image_ctx->parent_md.overlap;
}
}
if (migration_abort &&
parent_spec.pool_id == to_image_ctx->md_ctx.get_id() &&
parent_spec.pool_namespace == to_image_ctx->md_ctx.get_namespace() &&
parent_spec.image_id == to_image_ctx->id &&
parent_spec.snap_id == to_snap_id) {
ldout(m_cct, 10) << "no need for parent re-attach" << dendl;
} else {
if (parent_spec.pool_id != from_image_ctx->md_ctx.get_id() ||
parent_spec.pool_namespace != from_image_ctx->md_ctx.get_namespace() ||
parent_spec.image_id != from_image_ctx->id ||
parent_spec.snap_id != from_snap.id) {
lderr(m_cct) << "parent is not source image: " << parent_spec.pool_id
<< "/" << parent_spec.pool_namespace << "/"
<< parent_spec.image_id << "@" << parent_spec.snap_id
<< dendl;
return -ESTALE;
}
parent_spec.pool_id = to_image_ctx->md_ctx.get_id();
parent_spec.pool_namespace = to_image_ctx->md_ctx.get_namespace();
parent_spec.image_id = to_image_ctx->id;
parent_spec.snap_id = to_snap_id;
C_SaferCond on_reattach_parent;
auto reattach_parent_req = image::AttachParentRequest<I>::create(
*child_image_ctx, parent_spec, parent_overlap, true, &on_reattach_parent);
reattach_parent_req->send();
r = on_reattach_parent.wait();
if (r < 0) {
lderr(m_cct) << "failed to re-attach parent: " << cpp_strerror(r) << dendl;
return r;
}
}
if (reattach_child) {
C_SaferCond on_reattach_child;
auto reattach_child_req = image::AttachChildRequest<I>::create(
child_image_ctx, to_image_ctx, to_snap_id, from_image_ctx, from_snap.id,
clone_format, &on_reattach_child);
reattach_child_req->send();
r = on_reattach_child.wait();
if (r < 0) {
lderr(m_cct) << "failed to re-attach child: " << cpp_strerror(r) << dendl;
return r;
}
}
child_image_ctx->notify_update();
return 0;
}
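// Delete the migration source image: remove its snapshots (in reverse listing
// order), remove the image itself, and clean up any leftover rbd_trash entry.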
template <typename I>
int Migration<I>::remove_src_image(I** image_ctx) {
ldout(m_cct, 10) << dendl;
auto src_image_ctx = *image_ctx;
std::vector<librbd::snap_info_t> snaps;
int r = list_src_snaps(src_image_ctx, &snaps);
if (r < 0) {
return r;
}
for (auto it = snaps.rbegin(); it != snaps.rend(); it++) {
auto &snap = *it;
librbd::NoOpProgressContext prog_ctx;
int r = Snapshot<I>::remove(src_image_ctx, snap.name.c_str(),
RBD_SNAP_REMOVE_UNPROTECT, prog_ctx);
if (r < 0) {
lderr(m_cct) << "failed removing source image snapshot '" << snap.name
<< "': " << cpp_strerror(r) << dendl;
return r;
}
}
ceph_assert(src_image_ctx->ignore_migrating);
auto asio_engine = src_image_ctx->asio_engine;
auto src_image_id = src_image_ctx->id;
librados::IoCtx src_io_ctx(src_image_ctx->md_ctx);
C_SaferCond on_remove;
auto req = librbd::image::RemoveRequest<I>::create(
src_io_ctx, src_image_ctx, false, true, *m_prog_ctx,
asio_engine->get_work_queue(), &on_remove);
req->send();
r = on_remove.wait();
*image_ctx = nullptr;
  // For an old-format image this will return -ENOENT due to the expected
  // tmap_rm failure at the end.
if (r < 0 && r != -ENOENT) {
lderr(m_cct) << "failed removing source image: " << cpp_strerror(r)
<< dendl;
return r;
}
if (!src_image_id.empty()) {
r = cls_client::trash_remove(&src_io_ctx, src_image_id);
if (r < 0 && r != -ENOENT) {
lderr(m_cct) << "error removing image " << src_image_id
<< " from rbd_trash object" << dendl;
}
}
return 0;
}
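// Copy the HEAD revision of the destination image's data blocks back to the
// source image when aborting a migration (the parameters are named from the
// copy's point of view: data flows from src_image_ctx to dst_image_ctx).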
template <typename I>
int Migration<I>::revert_data(I* src_image_ctx, I* dst_image_ctx,
ProgressContext* prog_ctx) {
ldout(m_cct, 10) << dendl;
cls::rbd::MigrationSpec migration_spec;
int r = cls_client::migration_get(&src_image_ctx->md_ctx,
src_image_ctx->header_oid,
&migration_spec);
if (r < 0) {
lderr(m_cct) << "failed retrieving migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
if (migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST) {
lderr(m_cct) << "unexpected migration header type: "
<< migration_spec.header_type << dendl;
return -EINVAL;
}
uint64_t src_snap_id_start = 0;
uint64_t src_snap_id_end = CEPH_NOSNAP;
uint64_t dst_snap_id_start = 0;
if (!migration_spec.snap_seqs.empty()) {
src_snap_id_start = migration_spec.snap_seqs.rbegin()->second;
}
// we only care about the HEAD revision so only add a single mapping to
// represent the most recent state
SnapSeqs snap_seqs;
snap_seqs[CEPH_NOSNAP] = CEPH_NOSNAP;
ldout(m_cct, 20) << "src_snap_id_start=" << src_snap_id_start << ", "
<< "src_snap_id_end=" << src_snap_id_end << ", "
<< "dst_snap_id_start=" << dst_snap_id_start << ", "
<< "snap_seqs=" << snap_seqs << dendl;
C_SaferCond ctx;
deep_copy::ProgressHandler progress_handler(prog_ctx);
auto request = deep_copy::ImageCopyRequest<I>::create(
src_image_ctx, dst_image_ctx, src_snap_id_start, src_snap_id_end,
dst_snap_id_start, false, {}, snap_seqs, &progress_handler, &ctx);
request->send();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "error reverting destination image data blocks back to "
<< "source image: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
} // namespace api
} // namespace librbd
template class librbd::api::Migration<librbd::ImageCtx>;
| 65,448 | 29.770569 | 85 | cc |
null | ceph-main/src/librbd/api/Migration.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_MIGRATION_H
#define CEPH_LIBRBD_API_MIGRATION_H
#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <vector>
namespace librbd {
class ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class Migration {
public:
static int prepare(librados::IoCtx& io_ctx, const std::string &image_name,
librados::IoCtx& dest_io_ctx,
const std::string &dest_image_name, ImageOptions& opts);
static int prepare_import(const std::string& source_spec,
librados::IoCtx& dest_io_ctx,
const std::string &dest_image_name,
ImageOptions& opts);
static int execute(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext &prog_ctx);
static int abort(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext &prog_ctx);
static int commit(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext &prog_ctx);
static int status(librados::IoCtx& io_ctx, const std::string &image_name,
image_migration_status_t *status);
static int get_source_spec(ImageCtxT* image_ctx, std::string* source_spec);
private:
CephContext* m_cct;
ImageCtx* m_src_image_ctx;
ImageCtx* m_dst_image_ctx;
librados::IoCtx m_dst_io_ctx;
std::string m_dst_image_name;
std::string m_dst_image_id;
std::string m_dst_header_oid;
ImageOptions &m_image_options;
bool m_flatten;
bool m_mirroring;
cls::rbd::MirrorImageMode m_mirror_image_mode;
ProgressContext *m_prog_ctx;
cls::rbd::MigrationSpec m_src_migration_spec;
cls::rbd::MigrationSpec m_dst_migration_spec;
Migration(ImageCtx* src_image_ctx, ImageCtx* dst_image_ctx,
const cls::rbd::MigrationSpec& dst_migration_spec,
ImageOptions& opts, ProgressContext *prog_ctx);
int prepare();
int prepare_import();
int execute();
int abort();
int commit();
int status(image_migration_status_t *status);
int set_state(ImageCtxT* image_ctx, const std::string& image_description,
cls::rbd::MigrationState state,
const std::string &description);
int set_state(cls::rbd::MigrationState state, const std::string &description);
int list_src_snaps(ImageCtxT* image_ctx,
std::vector<librbd::snap_info_t> *snaps);
int validate_src_snaps(ImageCtxT* image_ctx);
int disable_mirroring(ImageCtxT* image_ctx, bool *was_enabled,
cls::rbd::MirrorImageMode *mirror_image_mode);
int enable_mirroring(ImageCtxT* image_ctx, bool was_enabled,
cls::rbd::MirrorImageMode mirror_image_mode);
int set_src_migration(ImageCtxT* image_ctx);
int unlink_src_image(ImageCtxT* image_ctx);
int relink_src_image(ImageCtxT* image_ctx);
int create_dst_image(ImageCtxT** image_ctx);
int remove_group(ImageCtxT* image_ctx, group_info_t *group_info);
int add_group(ImageCtxT* image_ctx, group_info_t &group_info);
int update_group(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx);
int remove_migration(ImageCtxT* image_ctx);
int relink_children(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx);
int remove_src_image(ImageCtxT** image_ctx);
int v1_set_src_migration(ImageCtxT* image_ctx);
int v2_set_src_migration(ImageCtxT* image_ctx);
int v1_unlink_src_image(ImageCtxT* image_ctx);
int v2_unlink_src_image(ImageCtxT* image_ctx);
int v1_relink_src_image(ImageCtxT* image_ctx);
int v2_relink_src_image(ImageCtxT* image_ctx);
int relink_child(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx,
const librbd::snap_info_t &src_snap,
const librbd::linked_image_spec_t &child_image,
bool migration_abort, bool reattach_child);
int revert_data(ImageCtxT* src_image_ctx, ImageCtxT* dst_image_ctx,
ProgressContext *prog_ctx);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Migration<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_MIGRATION_H
| 4,328 | 36.973684 | 80 | h |
null | ceph-main/src/librbd/api/Mirror.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Mirror.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/ceph_json.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/MirroringWatcher.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Image.h"
#include "librbd/api/Namespace.h"
#include "librbd/mirror/DemoteRequest.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/mirror/EnableRequest.h"
#include "librbd/mirror/GetInfoRequest.h"
#include "librbd/mirror/GetStatusRequest.h"
#include "librbd/mirror/GetUuidRequest.h"
#include "librbd/mirror/PromoteRequest.h"
#include "librbd/mirror/Types.h"
#include "librbd/MirroringWatcher.h"
#include "librbd/mirror/snapshot/CreatePrimaryRequest.h"
#include "librbd/mirror/snapshot/ImageMeta.h"
#include "librbd/mirror/snapshot/UnlinkPeerRequest.h"
#include "librbd/mirror/snapshot/Utils.h"
#include <boost/algorithm/string/trim.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <boost/scope_exit.hpp>
#include "json_spirit/json_spirit.h"
#include <algorithm>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Mirror: " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
int get_config_key(librados::Rados& rados, const std::string& key,
std::string* value) {
std::string cmd =
"{"
"\"prefix\": \"config-key get\", "
"\"key\": \"" + key + "\""
"}";
bufferlist in_bl;
bufferlist out_bl;
int r = rados.mon_command(cmd, in_bl, &out_bl, nullptr);
if (r == -EINVAL) {
return -EOPNOTSUPP;
} else if (r < 0 && r != -ENOENT) {
return r;
}
*value = out_bl.to_str();
return 0;
}
int set_config_key(librados::Rados& rados, const std::string& key,
const std::string& value) {
std::string cmd;
if (value.empty()) {
cmd = "{"
"\"prefix\": \"config-key rm\", "
"\"key\": \"" + key + "\""
"}";
} else {
cmd = "{"
"\"prefix\": \"config-key set\", "
"\"key\": \"" + key + "\", "
"\"val\": \"" + value + "\""
"}";
}
bufferlist in_bl;
bufferlist out_bl;
int r = rados.mon_command(cmd, in_bl, &out_bl, nullptr);
if (r == -EINVAL) {
return -EOPNOTSUPP;
} else if (r < 0) {
return r;
}
return 0;
}
std::string get_peer_config_key_name(int64_t pool_id,
const std::string& peer_uuid) {
return RBD_MIRROR_PEER_CONFIG_KEY_PREFIX + stringify(pool_id) + "/" +
peer_uuid;
}
int remove_peer_config_key(librados::IoCtx& io_ctx,
const std::string& peer_uuid) {
int64_t pool_id = io_ctx.get_id();
auto key = get_peer_config_key_name(pool_id, peer_uuid);
librados::Rados rados(io_ctx);
int r = set_config_key(rados, key, "");
if (r < 0 && r != -ENOENT && r != -EPERM) {
return r;
}
return 0;
}
std::string get_mon_host(CephContext* cct) {
std::string mon_host;
if (auto mon_addrs = cct->get_mon_addrs();
mon_addrs != nullptr && !mon_addrs->empty()) {
CachedStackStringStream css;
for (auto it = mon_addrs->begin(); it != mon_addrs->end(); ++it) {
if (it != mon_addrs->begin()) {
*css << ",";
}
*css << *it;
}
mon_host = css->str();
} else {
ldout(cct, 20) << "falling back to mon_host in conf" << dendl;
mon_host = cct->_conf.get_val<std::string>("mon_host");
}
ldout(cct, 20) << "mon_host=" << mon_host << dendl;
return mon_host;
}
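// Ensure a CephX user exists for bootstrap-based peering: the client id is
// tracked via a config-key (defaulting to "rbd-mirror-peer") and the user's
// secret key is returned for embedding in the bootstrap token.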
int create_bootstrap_user(CephContext* cct, librados::Rados& rados,
std::string* peer_client_id, std::string* cephx_key) {
ldout(cct, 20) << dendl;
// retrieve peer CephX user from config-key
int r = get_config_key(rados, RBD_MIRROR_PEER_CLIENT_ID_CONFIG_KEY,
peer_client_id);
if (r == -EACCES) {
ldout(cct, 5) << "insufficient permissions to get peer-client-id "
<< "config-key" << dendl;
return r;
} else if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve peer client id key: "
<< cpp_strerror(r) << dendl;
return r;
} else if (r == -ENOENT || peer_client_id->empty()) {
ldout(cct, 20) << "creating new peer-client-id config-key" << dendl;
*peer_client_id = "rbd-mirror-peer";
r = set_config_key(rados, RBD_MIRROR_PEER_CLIENT_ID_CONFIG_KEY,
*peer_client_id);
if (r == -EACCES) {
ldout(cct, 5) << "insufficient permissions to update peer-client-id "
<< "config-key" << dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to update peer client id key: "
<< cpp_strerror(r) << dendl;
return r;
}
}
ldout(cct, 20) << "peer_client_id=" << *peer_client_id << dendl;
// create peer client user
std::string cmd =
R"({)" \
R"( "prefix": "auth get-or-create",)" \
R"( "entity": "client.)" + *peer_client_id + R"(",)" \
R"( "caps": [)" \
R"( "mon", "profile rbd-mirror-peer",)" \
R"( "osd", "profile rbd"],)" \
R"( "format": "json")" \
R"(})";
bufferlist in_bl;
bufferlist out_bl;
r = rados.mon_command(cmd, in_bl, &out_bl, nullptr);
if (r == -EINVAL) {
ldout(cct, 5) << "caps mismatch for existing user" << dendl;
return -EEXIST;
} else if (r == -EACCES) {
ldout(cct, 5) << "insufficient permissions to create user" << dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to create or update RBD mirroring bootstrap user: "
<< cpp_strerror(r) << dendl;
return r;
}
// extract key from response
bool json_valid = false;
json_spirit::mValue json_root;
  if (json_spirit::read(out_bl.to_str(), json_root)) {
try {
auto& json_obj = json_root.get_array()[0].get_obj();
*cephx_key = json_obj["key"].get_str();
json_valid = true;
} catch (std::runtime_error&) {
}
}
if (!json_valid) {
lderr(cct) << "invalid auth keyring JSON received" << dendl;
return -EBADMSG;
}
return 0;
}
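// Register the remote site as a mirror peer in the local pool (only a single
// peer is currently supported) and store its mon_host/key connection
// attributes.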
int create_bootstrap_peer(CephContext* cct, librados::IoCtx& io_ctx,
mirror_peer_direction_t direction,
const std::string& site_name, const std::string& fsid,
const std::string& client_id, const std::string& key,
const std::string& mon_host,
const std::string& cluster1,
const std::string& cluster2) {
ldout(cct, 20) << dendl;
std::string peer_uuid;
std::vector<mirror_peer_site_t> peers;
int r = Mirror<>::peer_site_list(io_ctx, &peers);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror peers: " << cpp_strerror(r) << dendl;
return r;
}
if (peers.empty()) {
r = Mirror<>::peer_site_add(io_ctx, &peer_uuid, direction, site_name,
"client." + client_id);
if (r < 0) {
lderr(cct) << "failed to add " << cluster1 << " peer to "
<< cluster2 << " " << "cluster: " << cpp_strerror(r) << dendl;
return r;
}
} else if (peers[0].site_name != site_name &&
peers[0].site_name != fsid) {
// only support a single peer
lderr(cct) << "multiple peers are not currently supported" << dendl;
return -EINVAL;
} else {
peer_uuid = peers[0].uuid;
if (peers[0].site_name != site_name) {
r = Mirror<>::peer_site_set_name(io_ctx, peer_uuid, site_name);
if (r < 0) {
// non-fatal attempt to update site name
lderr(cct) << "failed to update peer site name" << dendl;
}
}
}
Mirror<>::Attributes attributes {
{"mon_host", mon_host},
{"key", key}};
r = Mirror<>::peer_site_set_attributes(io_ctx, peer_uuid, attributes);
if (r < 0) {
lderr(cct) << "failed to update " << cluster1 << " cluster connection "
<< "attributes in " << cluster2 << " cluster: "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
int list_mirror_images(librados::IoCtx& io_ctx,
std::set<std::string>& mirror_image_ids) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
std::string last_read = "";
int max_read = 1024;
int r;
do {
std::map<std::string, std::string> mirror_images;
r = cls_client::mirror_image_list(&io_ctx, last_read, max_read,
&mirror_images);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing mirrored image directory: "
<< cpp_strerror(r) << dendl;
return r;
}
for (auto it = mirror_images.begin(); it != mirror_images.end(); ++it) {
mirror_image_ids.insert(it->first);
}
if (!mirror_images.empty()) {
last_read = mirror_images.rbegin()->first;
}
r = mirror_images.size();
} while (r == max_read);
return 0;
}
template <typename I>
const char *pool_or_namespace(I *ictx) {
if (!ictx->md_ctx.get_namespace().empty()) {
return "namespace";
} else {
return "pool";
}
}
struct C_ImageGetInfo : public Context {
mirror_image_info_t *mirror_image_info;
mirror_image_mode_t *mirror_image_mode;
Context *on_finish;
cls::rbd::MirrorImage mirror_image;
mirror::PromotionState promotion_state = mirror::PROMOTION_STATE_PRIMARY;
std::string primary_mirror_uuid;
C_ImageGetInfo(mirror_image_info_t *mirror_image_info,
mirror_image_mode_t *mirror_image_mode, Context *on_finish)
: mirror_image_info(mirror_image_info),
mirror_image_mode(mirror_image_mode), on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0 && r != -ENOENT) {
on_finish->complete(r);
return;
}
if (mirror_image_info != nullptr) {
mirror_image_info->global_id = mirror_image.global_image_id;
mirror_image_info->state = static_cast<rbd_mirror_image_state_t>(
mirror_image.state);
mirror_image_info->primary = (
promotion_state == mirror::PROMOTION_STATE_PRIMARY);
}
if (mirror_image_mode != nullptr) {
*mirror_image_mode =
static_cast<rbd_mirror_image_mode_t>(mirror_image.mode);
}
on_finish->complete(0);
}
};
struct C_ImageGetGlobalStatus : public C_ImageGetInfo {
std::string image_name;
mirror_image_global_status_t *mirror_image_global_status;
cls::rbd::MirrorImageStatus mirror_image_status_internal;
C_ImageGetGlobalStatus(
const std::string &image_name,
mirror_image_global_status_t *mirror_image_global_status,
Context *on_finish)
: C_ImageGetInfo(&mirror_image_global_status->info, nullptr, on_finish),
image_name(image_name),
mirror_image_global_status(mirror_image_global_status) {
}
void finish(int r) override {
if (r < 0 && r != -ENOENT) {
on_finish->complete(r);
return;
}
mirror_image_global_status->name = image_name;
mirror_image_global_status->site_statuses.clear();
mirror_image_global_status->site_statuses.reserve(
mirror_image_status_internal.mirror_image_site_statuses.size());
for (auto& site_status :
mirror_image_status_internal.mirror_image_site_statuses) {
mirror_image_global_status->site_statuses.push_back({
site_status.mirror_uuid,
static_cast<mirror_image_status_state_t>(site_status.state),
site_status.description, site_status.last_update.sec(),
site_status.up});
}
C_ImageGetInfo::finish(0);
}
};
template <typename I>
struct C_ImageSnapshotCreate : public Context {
I *ictx;
uint64_t snap_create_flags;
uint64_t *snap_id;
Context *on_finish;
cls::rbd::MirrorImage mirror_image;
mirror::PromotionState promotion_state;
std::string primary_mirror_uuid;
C_ImageSnapshotCreate(I *ictx, uint64_t snap_create_flags, uint64_t *snap_id,
Context *on_finish)
: ictx(ictx), snap_create_flags(snap_create_flags), snap_id(snap_id),
on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0 && r != -ENOENT) {
on_finish->complete(r);
return;
}
if (mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT ||
mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
lderr(ictx->cct) << "snapshot based mirroring is not enabled" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto req = mirror::snapshot::CreatePrimaryRequest<I>::create(
ictx, mirror_image.global_image_id, CEPH_NOSNAP, snap_create_flags, 0U,
snap_id, on_finish);
req->send();
}
};
} // anonymous namespace
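// Enable mirroring on a single image. The pool/namespace must be configured
// for image mirror mode, a clone requires mirroring to already be enabled on
// its parent (unless the same-pool check is relaxed), and journal-based
// mirroring enables the journaling/exclusive-lock features first if needed.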
template <typename I>
int Mirror<I>::image_enable(I *ictx, mirror_image_mode_t mode,
bool relax_same_pool_parent_check) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << " mode=" << mode
<< " relax_same_pool_parent_check="
<< relax_same_pool_parent_check << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
cls::rbd::MirrorMode mirror_mode;
r = cls_client::mirror_mode_get(&ictx->md_ctx, &mirror_mode);
if (r < 0) {
lderr(cct) << "cannot enable mirroring: failed to retrieve mirror mode: "
<< cpp_strerror(r) << dendl;
return r;
}
if (mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
lderr(cct) << "cannot enable mirroring: mirroring is not enabled on a "
<< pool_or_namespace(ictx) << dendl;
return -EINVAL;
}
if (mirror_mode != cls::rbd::MIRROR_MODE_IMAGE) {
lderr(cct) << "cannot enable mirroring: " << pool_or_namespace(ictx)
<< " is not in image mirror mode" << dendl;
return -EINVAL;
}
// is mirroring not enabled for the parent?
{
std::shared_lock image_locker{ictx->image_lock};
ImageCtx *parent = ictx->parent;
if (parent) {
if (parent->md_ctx.get_id() != ictx->md_ctx.get_id() ||
!relax_same_pool_parent_check) {
cls::rbd::MirrorImage mirror_image_internal;
r = cls_client::mirror_image_get(&(parent->md_ctx), parent->id,
&mirror_image_internal);
if (r == -ENOENT) {
lderr(cct) << "mirroring is not enabled for the parent" << dendl;
return -EINVAL;
}
}
}
}
if (mode == RBD_MIRROR_IMAGE_MODE_JOURNAL &&
!ictx->test_features(RBD_FEATURE_JOURNALING)) {
uint64_t features = RBD_FEATURE_JOURNALING;
if (!ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
features |= RBD_FEATURE_EXCLUSIVE_LOCK;
}
r = ictx->operations->update_features(features, true);
if (r < 0) {
lderr(cct) << "cannot enable journaling: " << cpp_strerror(r) << dendl;
return r;
}
}
C_SaferCond ctx;
auto req = mirror::EnableRequest<ImageCtx>::create(
ictx, static_cast<cls::rbd::MirrorImageMode>(mode), "", false, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "cannot enable mirroring: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
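// Disable mirroring on a single image. The image is flagged DISABLING first;
// if any of its snapshots still have mirror-enabled clone children the change
// is rolled back, otherwise the mirroring metadata (and, for journal-based
// mirroring, the journaling feature) is removed.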
template <typename I>
int Mirror<I>::image_disable(I *ictx, bool force) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
cls::rbd::MirrorMode mirror_mode;
r = cls_client::mirror_mode_get(&ictx->md_ctx, &mirror_mode);
if (r < 0) {
lderr(cct) << "cannot disable mirroring: failed to retrieve pool "
"mirroring mode: " << cpp_strerror(r) << dendl;
return r;
}
if (mirror_mode != cls::rbd::MIRROR_MODE_IMAGE) {
lderr(cct) << "cannot disable mirroring in the current pool mirroring "
"mode" << dendl;
return -EINVAL;
}
// is mirroring enabled for the image?
cls::rbd::MirrorImage mirror_image_internal;
r = cls_client::mirror_image_get(&ictx->md_ctx, ictx->id,
&mirror_image_internal);
if (r == -ENOENT) {
// mirroring is not enabled for this image
ldout(cct, 20) << "ignoring disable command: mirroring is not enabled for "
<< "this image" << dendl;
return 0;
} else if (r == -EOPNOTSUPP) {
ldout(cct, 5) << "mirroring not supported by OSD" << dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to retrieve mirror image metadata: "
<< cpp_strerror(r) << dendl;
return r;
}
mirror_image_internal.state = cls::rbd::MIRROR_IMAGE_STATE_DISABLING;
r = cls_client::mirror_image_set(&ictx->md_ctx, ictx->id,
mirror_image_internal);
if (r < 0) {
lderr(cct) << "cannot disable mirroring: " << cpp_strerror(r) << dendl;
return r;
}
bool rollback = false;
BOOST_SCOPE_EXIT_ALL(ictx, &mirror_image_internal, &rollback) {
if (rollback) {
// restore the mask bit for treating the non-primary feature as read-only
ictx->image_lock.lock();
ictx->read_only_mask |= IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
ictx->state->handle_update_notification();
// attempt to restore the image state
CephContext *cct = ictx->cct;
mirror_image_internal.state = cls::rbd::MIRROR_IMAGE_STATE_ENABLED;
int r = cls_client::mirror_image_set(&ictx->md_ctx, ictx->id,
mirror_image_internal);
if (r < 0) {
lderr(cct) << "failed to re-enable image mirroring: "
<< cpp_strerror(r) << dendl;
}
}
};
std::unique_lock image_locker{ictx->image_lock};
std::map<librados::snap_t, SnapInfo> snap_info = ictx->snap_info;
for (auto &info : snap_info) {
cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
ictx->md_ctx.get_namespace(),
ictx->id, info.first};
std::vector<librbd::linked_image_spec_t> child_images;
r = Image<I>::list_children(ictx, parent_spec, &child_images);
if (r < 0) {
rollback = true;
return r;
}
if (child_images.empty()) {
continue;
}
librados::IoCtx child_io_ctx;
int64_t child_pool_id = -1;
for (auto &child_image : child_images){
std::string pool = child_image.pool_name;
if (child_pool_id == -1 ||
child_pool_id != child_image.pool_id ||
child_io_ctx.get_namespace() != child_image.pool_namespace) {
r = util::create_ioctx(ictx->md_ctx, "child image",
child_image.pool_id,
child_image.pool_namespace,
&child_io_ctx);
if (r < 0) {
rollback = true;
return r;
}
child_pool_id = child_image.pool_id;
}
cls::rbd::MirrorImage child_mirror_image_internal;
r = cls_client::mirror_image_get(&child_io_ctx, child_image.image_id,
&child_mirror_image_internal);
if (r != -ENOENT) {
rollback = true;
lderr(cct) << "mirroring is enabled on one or more children "
<< dendl;
return -EBUSY;
}
}
}
image_locker.unlock();
if (mirror_image_internal.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
// don't let the non-primary feature bit prevent image updates
ictx->image_lock.lock();
ictx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
r = ictx->state->refresh();
if (r < 0) {
rollback = true;
return r;
}
// remove any snapshot-based mirroring image-meta from image
std::string mirror_uuid;
r = uuid_get(ictx->md_ctx, &mirror_uuid);
if (r < 0) {
rollback = true;
return r;
}
r = ictx->operations->metadata_remove(
mirror::snapshot::util::get_image_meta_key(mirror_uuid));
if (r < 0 && r != -ENOENT) {
lderr(cct) << "cannot remove snapshot image-meta key: " << cpp_strerror(r)
<< dendl;
rollback = true;
return r;
}
}
C_SaferCond ctx;
auto req = mirror::DisableRequest<ImageCtx>::create(ictx, force, true,
&ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "cannot disable mirroring: " << cpp_strerror(r) << dendl;
rollback = true;
return r;
}
if (mirror_image_internal.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
r = ictx->operations->update_features(RBD_FEATURE_JOURNALING, false);
if (r < 0) {
lderr(cct) << "cannot disable journaling: " << cpp_strerror(r) << dendl;
// not fatal
}
}
return 0;
}
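// Promote the local image to primary. The non-primary read-only mask is
// cleared for the duration of the request so the image can be updated, then
// restored once the promotion completes.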
template <typename I>
int Mirror<I>::image_promote(I *ictx, bool force) {
CephContext *cct = ictx->cct;
C_SaferCond ctx;
Mirror<I>::image_promote(ictx, force, &ctx);
int r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed to promote image" << dendl;
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_promote(I *ictx, bool force, Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << ", "
<< "force=" << force << dendl;
// don't let the non-primary feature bit prevent image updates
ictx->image_lock.lock();
ictx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
auto on_promote = new LambdaContext([ictx, on_finish](int r) {
ictx->image_lock.lock();
ictx->read_only_mask |= IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
ictx->state->handle_update_notification();
on_finish->complete(r);
});
auto on_refresh = new LambdaContext([ictx, force, on_promote](int r) {
if (r < 0) {
lderr(ictx->cct) << "refresh failed: " << cpp_strerror(r) << dendl;
on_promote->complete(r);
return;
}
auto req = mirror::PromoteRequest<>::create(*ictx, force, on_promote);
req->send();
});
ictx->state->refresh(on_refresh);
}
template <typename I>
int Mirror<I>::image_demote(I *ictx) {
CephContext *cct = ictx->cct;
C_SaferCond ctx;
Mirror<I>::image_demote(ictx, &ctx);
int r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed to demote image" << dendl;
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_demote(I *ictx, Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
auto on_cleanup = new LambdaContext([ictx, on_finish](int r) {
ictx->image_lock.lock();
ictx->read_only_mask |= IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
ictx->state->handle_update_notification();
on_finish->complete(r);
});
auto on_refresh = new LambdaContext([ictx, on_cleanup](int r) {
if (r < 0) {
lderr(ictx->cct) << "refresh failed: " << cpp_strerror(r) << dendl;
on_cleanup->complete(r);
return;
}
auto req = mirror::DemoteRequest<>::create(*ictx, on_cleanup);
req->send();
});
// ensure we can create a snapshot after setting the non-primary
// feature bit
ictx->image_lock.lock();
ictx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
ictx->state->refresh(on_refresh);
}
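// Request a resync of a non-primary image: journal-based mirroring flags the
// resync request in the journal, while snapshot-based mirroring records it in
// the mirror snapshot image-meta.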
template <typename I>
int Mirror<I>::image_resync(I *ictx) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
cls::rbd::MirrorImage mirror_image;
mirror::PromotionState promotion_state;
std::string primary_mirror_uuid;
C_SaferCond get_info_ctx;
auto req = mirror::GetInfoRequest<I>::create(*ictx, &mirror_image,
&promotion_state,
&primary_mirror_uuid,
&get_info_ctx);
req->send();
r = get_info_ctx.wait();
if (r < 0) {
return r;
}
if (promotion_state == mirror::PROMOTION_STATE_PRIMARY) {
lderr(cct) << "image is primary, cannot resync to itself" << dendl;
return -EINVAL;
}
if (mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
// flag the journal indicating that we want to rebuild the local image
r = Journal<I>::request_resync(ictx);
if (r < 0) {
lderr(cct) << "failed to request resync: " << cpp_strerror(r) << dendl;
return r;
}
} else if (mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
std::string mirror_uuid;
r = uuid_get(ictx->md_ctx, &mirror_uuid);
if (r < 0) {
return r;
}
mirror::snapshot::ImageMeta image_meta(ictx, mirror_uuid);
C_SaferCond load_meta_ctx;
image_meta.load(&load_meta_ctx);
r = load_meta_ctx.wait();
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to load mirror image-meta: " << cpp_strerror(r)
<< dendl;
return r;
}
image_meta.resync_requested = true;
C_SaferCond save_meta_ctx;
image_meta.save(&save_meta_ctx);
r = save_meta_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to request resync: " << cpp_strerror(r) << dendl;
return r;
}
} else {
lderr(cct) << "unknown mirror mode" << dendl;
return -EINVAL;
}
return 0;
}
template <typename I>
void Mirror<I>::image_get_info(I *ictx, mirror_image_info_t *mirror_image_info,
Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
auto on_refresh = new LambdaContext(
[ictx, mirror_image_info, on_finish](int r) {
if (r < 0) {
lderr(ictx->cct) << "refresh failed: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
auto ctx = new C_ImageGetInfo(mirror_image_info, nullptr, on_finish);
auto req = mirror::GetInfoRequest<I>::create(*ictx, &ctx->mirror_image,
&ctx->promotion_state,
&ctx->primary_mirror_uuid,
ctx);
req->send();
});
if (ictx->state->is_refresh_required()) {
ictx->state->refresh(on_refresh);
} else {
on_refresh->complete(0);
}
}
template <typename I>
int Mirror<I>::image_get_info(I *ictx, mirror_image_info_t *mirror_image_info) {
C_SaferCond ctx;
image_get_info(ictx, mirror_image_info, &ctx);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_get_info(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
mirror_image_info_t *mirror_image_info,
Context *on_finish) {
auto cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "pool_id=" << io_ctx.get_id() << ", image_id=" << image_id
<< dendl;
auto ctx = new C_ImageGetInfo(mirror_image_info, nullptr, on_finish);
auto req = mirror::GetInfoRequest<I>::create(io_ctx, op_work_queue, image_id,
&ctx->mirror_image,
&ctx->promotion_state,
&ctx->primary_mirror_uuid, ctx);
req->send();
}
template <typename I>
int Mirror<I>::image_get_info(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
mirror_image_info_t *mirror_image_info) {
C_SaferCond ctx;
image_get_info(io_ctx, op_work_queue, image_id, mirror_image_info, &ctx);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_get_mode(I *ictx, mirror_image_mode_t *mode,
Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
auto ctx = new C_ImageGetInfo(nullptr, mode, on_finish);
auto req = mirror::GetInfoRequest<I>::create(*ictx, &ctx->mirror_image,
&ctx->promotion_state,
&ctx->primary_mirror_uuid, ctx);
req->send();
}
template <typename I>
int Mirror<I>::image_get_mode(I *ictx, mirror_image_mode_t *mode) {
C_SaferCond ctx;
image_get_mode(ictx, mode, &ctx);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_get_global_status(I *ictx,
mirror_image_global_status_t *status,
Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
auto ctx = new C_ImageGetGlobalStatus(ictx->name, status, on_finish);
auto req = mirror::GetStatusRequest<I>::create(
*ictx, &ctx->mirror_image_status_internal, &ctx->mirror_image,
&ctx->promotion_state, ctx);
req->send();
}
template <typename I>
int Mirror<I>::image_get_global_status(I *ictx,
mirror_image_global_status_t *status) {
C_SaferCond ctx;
image_get_global_status(ictx, status, &ctx);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::image_get_instance_id(I *ictx, std::string *instance_id) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
cls::rbd::MirrorImage mirror_image;
int r = cls_client::mirror_image_get(&ictx->md_ctx, ictx->id, &mirror_image);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring state: " << cpp_strerror(r)
<< dendl;
return r;
} else if (mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
lderr(cct) << "mirroring is not currently enabled" << dendl;
return -EINVAL;
}
entity_inst_t instance;
r = cls_client::mirror_image_instance_get(&ictx->md_ctx,
mirror_image.global_image_id,
&instance);
if (r < 0) {
if (r != -ENOENT && r != -ESTALE) {
lderr(cct) << "failed to get mirror image instance: " << cpp_strerror(r)
<< dendl;
}
return r;
}
*instance_id = stringify(instance.name.num());
return 0;
}
template <typename I>
int Mirror<I>::site_name_get(librados::Rados& rados, std::string* name) {
CephContext *cct = reinterpret_cast<CephContext *>(rados.cct());
ldout(cct, 20) << dendl;
int r = get_config_key(rados, RBD_MIRROR_SITE_NAME_CONFIG_KEY, name);
if (r == -EOPNOTSUPP) {
return r;
} else if (r == -ENOENT || name->empty()) {
// default to the cluster fsid
r = rados.cluster_fsid(name);
if (r < 0) {
lderr(cct) << "failed to retrieve cluster fsid: " << cpp_strerror(r)
<< dendl;
}
return r;
} else if (r < 0) {
lderr(cct) << "failed to retrieve site name: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::site_name_set(librados::Rados& rados, const std::string& name) {
CephContext *cct = reinterpret_cast<CephContext *>(rados.cct());
std::string site_name{name};
boost::algorithm::trim(site_name);
ldout(cct, 20) << "site_name=" << site_name << dendl;
  int r = set_config_key(rados, RBD_MIRROR_SITE_NAME_CONFIG_KEY, site_name);
if (r == -EOPNOTSUPP) {
return r;
} else if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to update site name: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::mode_get(librados::IoCtx& io_ctx,
rbd_mirror_mode_t *mirror_mode) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
cls::rbd::MirrorMode mirror_mode_internal;
int r = cls_client::mirror_mode_get(&io_ctx, &mirror_mode_internal);
if (r < 0) {
lderr(cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
<< dendl;
return r;
}
switch (mirror_mode_internal) {
case cls::rbd::MIRROR_MODE_DISABLED:
case cls::rbd::MIRROR_MODE_IMAGE:
case cls::rbd::MIRROR_MODE_POOL:
*mirror_mode = static_cast<rbd_mirror_mode_t>(mirror_mode_internal);
break;
default:
lderr(cct) << "unknown mirror mode ("
<< static_cast<uint32_t>(mirror_mode_internal) << ")"
<< dendl;
return -EINVAL;
}
return 0;
}
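// Change the pool-level mirroring mode. Disabling is rejected while peers are
// still registered; transitions pass through image mode, enabling pool mode
// turns on journal-based mirroring for images with the journaling feature, and
// disabling tears down (or validates the absence of) per-image mirroring.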
template <typename I>
int Mirror<I>::mode_set(librados::IoCtx& io_ctx,
rbd_mirror_mode_t mirror_mode) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
cls::rbd::MirrorMode next_mirror_mode;
switch (mirror_mode) {
case RBD_MIRROR_MODE_DISABLED:
case RBD_MIRROR_MODE_IMAGE:
case RBD_MIRROR_MODE_POOL:
next_mirror_mode = static_cast<cls::rbd::MirrorMode>(mirror_mode);
break;
default:
lderr(cct) << "unknown mirror mode ("
<< static_cast<uint32_t>(mirror_mode) << ")" << dendl;
return -EINVAL;
}
int r;
if (next_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
// fail early if pool still has peers registered and attempting to disable
std::vector<cls::rbd::MirrorPeer> mirror_peers;
r = cls_client::mirror_peer_list(&io_ctx, &mirror_peers);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list peers: " << cpp_strerror(r) << dendl;
return r;
} else if (!mirror_peers.empty()) {
lderr(cct) << "mirror peers still registered" << dendl;
return -EBUSY;
}
}
cls::rbd::MirrorMode current_mirror_mode;
r = cls_client::mirror_mode_get(&io_ctx, ¤t_mirror_mode);
if (r < 0) {
lderr(cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
<< dendl;
return r;
}
if (current_mirror_mode == next_mirror_mode) {
return 0;
} else if (current_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
uuid_d uuid_gen;
uuid_gen.generate_random();
r = cls_client::mirror_uuid_set(&io_ctx, uuid_gen.to_string());
if (r < 0) {
lderr(cct) << "failed to allocate mirroring uuid: " << cpp_strerror(r)
<< dendl;
return r;
}
}
if (current_mirror_mode != cls::rbd::MIRROR_MODE_IMAGE) {
r = cls_client::mirror_mode_set(&io_ctx, cls::rbd::MIRROR_MODE_IMAGE);
if (r < 0) {
lderr(cct) << "failed to set mirror mode to image: "
<< cpp_strerror(r) << dendl;
return r;
}
r = MirroringWatcher<>::notify_mode_updated(io_ctx,
cls::rbd::MIRROR_MODE_IMAGE);
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
}
if (next_mirror_mode == cls::rbd::MIRROR_MODE_IMAGE) {
return 0;
}
if (next_mirror_mode == cls::rbd::MIRROR_MODE_POOL) {
std::map<std::string, std::string> images;
r = Image<I>::list_images_v2(io_ctx, &images);
if (r < 0) {
lderr(cct) << "failed listing images: " << cpp_strerror(r) << dendl;
return r;
}
for (const auto& img_pair : images) {
uint64_t features;
uint64_t incompatible_features;
r = cls_client::get_features(&io_ctx, util::header_name(img_pair.second),
true, &features, &incompatible_features);
if (r < 0) {
lderr(cct) << "error getting features for image " << img_pair.first
<< ": " << cpp_strerror(r) << dendl;
return r;
}
// Enable only journal based mirroring
if ((features & RBD_FEATURE_JOURNALING) != 0) {
I *img_ctx = I::create("", img_pair.second, nullptr, io_ctx, false);
r = img_ctx->state->open(0);
if (r < 0) {
lderr(cct) << "error opening image "<< img_pair.first << ": "
<< cpp_strerror(r) << dendl;
return r;
}
r = image_enable(img_ctx, RBD_MIRROR_IMAGE_MODE_JOURNAL, true);
int close_r = img_ctx->state->close();
if (r < 0) {
lderr(cct) << "error enabling mirroring for image "
<< img_pair.first << ": " << cpp_strerror(r) << dendl;
return r;
} else if (close_r < 0) {
lderr(cct) << "failed to close image " << img_pair.first << ": "
<< cpp_strerror(close_r) << dendl;
return close_r;
}
}
}
} else if (next_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
while (true) {
bool retry_busy = false;
bool pending_busy = false;
std::set<std::string> image_ids;
r = list_mirror_images(io_ctx, image_ids);
if (r < 0) {
lderr(cct) << "failed listing images: " << cpp_strerror(r) << dendl;
return r;
}
for (const auto& img_id : image_ids) {
if (current_mirror_mode == cls::rbd::MIRROR_MODE_IMAGE) {
cls::rbd::MirrorImage mirror_image;
r = cls_client::mirror_image_get(&io_ctx, img_id, &mirror_image);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring state for image id "
<< img_id << ": " << cpp_strerror(r) << dendl;
return r;
}
if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
lderr(cct) << "failed to disable mirror mode: there are still "
<< "images with mirroring enabled" << dendl;
return -EINVAL;
}
} else {
I *img_ctx = I::create("", img_id, nullptr, io_ctx, false);
r = img_ctx->state->open(0);
if (r < 0) {
lderr(cct) << "error opening image id "<< img_id << ": "
<< cpp_strerror(r) << dendl;
return r;
}
r = image_disable(img_ctx, false);
int close_r = img_ctx->state->close();
if (r == -EBUSY) {
pending_busy = true;
} else if (r < 0) {
lderr(cct) << "error disabling mirroring for image id " << img_id
<< cpp_strerror(r) << dendl;
return r;
} else if (close_r < 0) {
lderr(cct) << "failed to close image id " << img_id << ": "
<< cpp_strerror(close_r) << dendl;
return close_r;
} else if (pending_busy) {
// at least one mirrored image was successfully disabled, so we can
// retry any failures caused by busy parent/child relationships
retry_busy = true;
}
}
}
if (!retry_busy && pending_busy) {
lderr(cct) << "error disabling mirroring for one or more images"
<< dendl;
return -EBUSY;
} else if (!retry_busy) {
break;
}
}
}
r = cls_client::mirror_mode_set(&io_ctx, next_mirror_mode);
if (r < 0) {
lderr(cct) << "failed to set mirror mode: " << cpp_strerror(r) << dendl;
return r;
}
r = MirroringWatcher<>::notify_mode_updated(io_ctx, next_mirror_mode);
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
return 0;
}
template <typename I>
int Mirror<I>::uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
C_SaferCond ctx;
uuid_get(io_ctx, mirror_uuid, &ctx);
int r = ctx.wait();
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring uuid: " << cpp_strerror(r)
<< dendl;
}
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid,
Context* on_finish) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
auto req = mirror::GetUuidRequest<I>::create(io_ctx, mirror_uuid, on_finish);
req->send();
}
template <typename I>
int Mirror<I>::peer_bootstrap_create(librados::IoCtx& io_ctx,
std::string* token) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
auto mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
int r = cls_client::mirror_mode_get(&io_ctx, &mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring mode: " << cpp_strerror(r)
<< dendl;
return r;
} else if (mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
return -EINVAL;
}
// retrieve the cluster fsid
std::string fsid;
librados::Rados rados(io_ctx);
r = rados.cluster_fsid(&fsid);
if (r < 0) {
lderr(cct) << "failed to retrieve cluster fsid: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string peer_client_id;
std::string cephx_key;
r = create_bootstrap_user(cct, rados, &peer_client_id, &cephx_key);
if (r < 0) {
return r;
}
std::string mon_host = get_mon_host(cct);
// format the token response
bufferlist token_bl;
token_bl.append(
R"({)" \
R"("fsid":")" + fsid + R"(",)" + \
R"("client_id":")" + peer_client_id + R"(",)" + \
R"("key":")" + cephx_key + R"(",)" + \
R"("mon_host":")" + \
boost::replace_all_copy(mon_host, "\"", "\\\"") + R"(")" + \
R"(})");
ldout(cct, 20) << "token=" << token_bl.to_str() << dendl;
bufferlist base64_bl;
token_bl.encode_base64(base64_bl);
*token = base64_bl.to_str();
return 0;
}
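// Import a bootstrap token generated by the remote cluster: validate the
// token and the local/remote fsids and site names, connect to the remote
// cluster with the embedded credentials, and register mirror peers in one
// (rx-only) or both (rx-tx) directions.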
template <typename I>
int Mirror<I>::peer_bootstrap_import(librados::IoCtx& io_ctx,
rbd_mirror_peer_direction_t direction,
const std::string& token) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
if (direction != RBD_MIRROR_PEER_DIRECTION_RX &&
direction != RBD_MIRROR_PEER_DIRECTION_RX_TX) {
lderr(cct) << "invalid mirror peer direction" << dendl;
return -EINVAL;
}
bufferlist token_bl;
try {
bufferlist base64_bl;
base64_bl.append(token);
token_bl.decode_base64(base64_bl);
} catch (buffer::error& err) {
lderr(cct) << "failed to decode base64" << dendl;
return -EINVAL;
}
ldout(cct, 20) << "token=" << token_bl.to_str() << dendl;
bool json_valid = false;
std::string expected_remote_fsid;
std::string remote_client_id;
std::string remote_key;
std::string remote_mon_host;
json_spirit::mValue json_root;
  if (json_spirit::read(token_bl.to_str(), json_root)) {
try {
auto& json_obj = json_root.get_obj();
expected_remote_fsid = json_obj["fsid"].get_str();
remote_client_id = json_obj["client_id"].get_str();
remote_key = json_obj["key"].get_str();
remote_mon_host = json_obj["mon_host"].get_str();
json_valid = true;
} catch (std::runtime_error&) {
}
}
if (!json_valid) {
lderr(cct) << "invalid bootstrap token JSON received" << dendl;
return -EINVAL;
}
// sanity check import process
std::string local_fsid;
librados::Rados rados(io_ctx);
int r = rados.cluster_fsid(&local_fsid);
if (r < 0) {
lderr(cct) << "failed to retrieve cluster fsid: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string local_site_name;
r = site_name_get(rados, &local_site_name);
if (r < 0) {
lderr(cct) << "failed to retrieve cluster site name: " << cpp_strerror(r)
<< dendl;
return r;
}
// attempt to connect to remote cluster
librados::Rados remote_rados;
remote_rados.init(remote_client_id.c_str());
auto remote_cct = reinterpret_cast<CephContext*>(remote_rados.cct());
remote_cct->_conf.set_val("mon_host", remote_mon_host);
remote_cct->_conf.set_val("key", remote_key);
r = remote_rados.connect();
if (r < 0) {
lderr(cct) << "failed to connect to peer cluster: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string remote_fsid;
r = remote_rados.cluster_fsid(&remote_fsid);
if (r < 0) {
lderr(cct) << "failed to retrieve remote cluster fsid: "
<< cpp_strerror(r) << dendl;
return r;
} else if (local_fsid == remote_fsid) {
lderr(cct) << "cannot import token for local cluster" << dendl;
return -EINVAL;
} else if (expected_remote_fsid != remote_fsid) {
lderr(cct) << "unexpected remote cluster fsid" << dendl;
return -EINVAL;
}
std::string remote_site_name;
r = site_name_get(remote_rados, &remote_site_name);
if (r < 0) {
lderr(cct) << "failed to retrieve remote cluster site name: "
<< cpp_strerror(r) << dendl;
return r;
} else if (local_site_name == remote_site_name) {
lderr(cct) << "cannot import token for duplicate site name" << dendl;
return -EINVAL;
}
librados::IoCtx remote_io_ctx;
r = remote_rados.ioctx_create(io_ctx.get_pool_name().c_str(), remote_io_ctx);
if (r == -ENOENT) {
ldout(cct, 10) << "remote pool does not exist" << dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to open remote pool '" << io_ctx.get_pool_name()
<< "': " << cpp_strerror(r) << dendl;
return r;
}
auto remote_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
r = cls_client::mirror_mode_get(&remote_io_ctx, &remote_mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve remote mirroring mode: "
<< cpp_strerror(r) << dendl;
return r;
} else if (remote_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
return -ENOSYS;
}
auto local_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
r = cls_client::mirror_mode_get(&io_ctx, &local_mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve local mirroring mode: " << cpp_strerror(r)
<< dendl;
return r;
} else if (local_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
// copy mirror mode from remote peer
r = mode_set(io_ctx, static_cast<rbd_mirror_mode_t>(remote_mirror_mode));
if (r < 0) {
return r;
}
}
if (direction == RBD_MIRROR_PEER_DIRECTION_RX_TX) {
// create a local mirror peer user and export it to the remote cluster
std::string local_client_id;
std::string local_key;
r = create_bootstrap_user(cct, rados, &local_client_id, &local_key);
if (r < 0) {
return r;
}
std::string local_mon_host = get_mon_host(cct);
// create local cluster peer in remote cluster
r = create_bootstrap_peer(cct, remote_io_ctx,
RBD_MIRROR_PEER_DIRECTION_RX_TX, local_site_name,
local_fsid, local_client_id, local_key,
local_mon_host, "local", "remote");
if (r < 0) {
return r;
}
}
// create remote cluster peer in local cluster
r = create_bootstrap_peer(cct, io_ctx, direction, remote_site_name,
remote_fsid, remote_client_id, remote_key,
remote_mon_host, "remote", "local");
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_add(librados::IoCtx& io_ctx, std::string *uuid,
mirror_peer_direction_t direction,
const std::string &site_name,
const std::string &client_name) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "name=" << site_name << ", "
<< "client=" << client_name << dendl;
if (cct->_conf->cluster == site_name) {
lderr(cct) << "cannot add self as remote peer" << dendl;
return -EINVAL;
}
if (direction == RBD_MIRROR_PEER_DIRECTION_TX) {
return -EINVAL;
}
int r;
do {
uuid_d uuid_gen;
uuid_gen.generate_random();
*uuid = uuid_gen.to_string();
r = cls_client::mirror_peer_add(
&io_ctx, {*uuid, static_cast<cls::rbd::MirrorPeerDirection>(direction),
site_name, client_name, ""});
if (r == -ESTALE) {
ldout(cct, 5) << "duplicate UUID detected, retrying" << dendl;
} else if (r < 0) {
lderr(cct) << "failed to add mirror peer '" << site_name << "': "
<< cpp_strerror(r) << dendl;
return r;
}
} while (r == -ESTALE);
return 0;
}
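// Remove a mirror peer: delete its stored connection attributes and its
// registration, then walk every namespace and unlink the peer from any
// snapshot-based mirroring snapshots that still reference it.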
template <typename I>
int Mirror<I>::peer_site_remove(librados::IoCtx& io_ctx,
const std::string &uuid) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << dendl;
int r = remove_peer_config_key(io_ctx, uuid);
if (r < 0) {
lderr(cct) << "failed to remove peer attributes '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
r = cls_client::mirror_peer_remove(&io_ctx, uuid);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to remove peer '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
std::vector<std::string> names;
r = Namespace<I>::list(io_ctx, &names);
if (r < 0) {
return r;
}
names.push_back("");
librados::IoCtx ns_io_ctx;
ns_io_ctx.dup(io_ctx);
for (auto &name : names) {
ns_io_ctx.set_namespace(name);
std::set<std::string> image_ids;
r = list_mirror_images(ns_io_ctx, image_ids);
if (r < 0) {
lderr(cct) << "failed listing images in "
                 << (name.empty() ? "default" : name) << " namespace: "
<< cpp_strerror(r) << dendl;
return r;
}
for (const auto& image_id : image_ids) {
cls::rbd::MirrorImage mirror_image;
r = cls_client::mirror_image_get(&ns_io_ctx, image_id, &mirror_image);
if (r == -ENOENT) {
continue;
}
if (r < 0) {
lderr(cct) << "error getting mirror info for image " << image_id
<< ": " << cpp_strerror(r) << dendl;
return r;
}
if (mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
continue;
}
// Snapshot based mirroring. Unlink the peer from mirroring snapshots.
// TODO: optimize.
I *img_ctx = I::create("", image_id, nullptr, ns_io_ctx, false);
img_ctx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
r = img_ctx->state->open(0);
if (r == -ENOENT) {
continue;
}
if (r < 0) {
lderr(cct) << "error opening image " << image_id << ": "
<< cpp_strerror(r) << dendl;
return r;
}
std::list<uint64_t> snap_ids;
{
std::shared_lock image_locker{img_ctx->image_lock};
for (auto &it : img_ctx->snap_info) {
auto info = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&it.second.snap_namespace);
if (info && info->mirror_peer_uuids.count(uuid)) {
snap_ids.push_back(it.first);
}
}
}
for (auto snap_id : snap_ids) {
C_SaferCond cond;
auto req = mirror::snapshot::UnlinkPeerRequest<I>::create(
img_ctx, snap_id, uuid, true, &cond);
req->send();
r = cond.wait();
if (r == -ENOENT) {
r = 0;
}
if (r < 0) {
break;
}
}
int close_r = img_ctx->state->close();
if (r < 0) {
lderr(cct) << "error unlinking peer for image " << image_id << ": "
<< cpp_strerror(r) << dendl;
return r;
} else if (close_r < 0) {
lderr(cct) << "failed to close image " << image_id << ": "
<< cpp_strerror(close_r) << dendl;
return close_r;
}
}
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_list(librados::IoCtx& io_ctx,
std::vector<mirror_peer_site_t> *peers) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
std::vector<cls::rbd::MirrorPeer> mirror_peers;
int r = cls_client::mirror_peer_list(&io_ctx, &mirror_peers);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list peers: " << cpp_strerror(r) << dendl;
return r;
}
peers->clear();
peers->reserve(mirror_peers.size());
for (auto &mirror_peer : mirror_peers) {
mirror_peer_site_t peer;
peer.uuid = mirror_peer.uuid;
peer.direction = static_cast<mirror_peer_direction_t>(
mirror_peer.mirror_peer_direction);
peer.site_name = mirror_peer.site_name;
peer.mirror_uuid = mirror_peer.mirror_uuid;
peer.client_name = mirror_peer.client_name;
peer.last_seen = mirror_peer.last_seen.sec();
peers->push_back(peer);
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_set_client(librados::IoCtx& io_ctx,
const std::string &uuid,
const std::string &client_name) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << ", "
<< "client=" << client_name << dendl;
int r = cls_client::mirror_peer_set_client(&io_ctx, uuid, client_name);
if (r < 0) {
lderr(cct) << "failed to update client '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_set_name(librados::IoCtx& io_ctx,
const std::string &uuid,
const std::string &site_name) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << ", "
<< "name=" << site_name << dendl;
if (cct->_conf->cluster == site_name) {
lderr(cct) << "cannot set self as remote peer" << dendl;
return -EINVAL;
}
int r = cls_client::mirror_peer_set_cluster(&io_ctx, uuid, site_name);
if (r < 0) {
lderr(cct) << "failed to update site '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_set_direction(librados::IoCtx& io_ctx,
const std::string &uuid,
mirror_peer_direction_t direction) {
cls::rbd::MirrorPeerDirection mirror_peer_direction = static_cast<
cls::rbd::MirrorPeerDirection>(direction);
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << ", "
<< "direction=" << mirror_peer_direction << dendl;
int r = cls_client::mirror_peer_set_direction(&io_ctx, uuid,
mirror_peer_direction);
if (r < 0) {
lderr(cct) << "failed to update direction '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_get_attributes(librados::IoCtx& io_ctx,
const std::string &uuid,
Attributes* attributes) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << dendl;
attributes->clear();
librados::Rados rados(io_ctx);
std::string value;
int r = get_config_key(rados, get_peer_config_key_name(io_ctx.get_id(), uuid),
&value);
if (r == -ENOENT || value.empty()) {
return -ENOENT;
} else if (r < 0) {
lderr(cct) << "failed to retrieve peer attributes: " << cpp_strerror(r)
<< dendl;
return r;
}
bool json_valid = false;
json_spirit::mValue json_root;
  if (json_spirit::read(value, json_root)) {
try {
auto& json_obj = json_root.get_obj();
for (auto& pairs : json_obj) {
(*attributes)[pairs.first] = pairs.second.get_str();
}
json_valid = true;
} catch (std::runtime_error&) {
}
}
if (!json_valid) {
lderr(cct) << "invalid peer attributes JSON received" << dendl;
return -EINVAL;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_set_attributes(librados::IoCtx& io_ctx,
const std::string &uuid,
const Attributes& attributes) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << ", "
<< "attributes=" << attributes << dendl;
std::vector<mirror_peer_site_t> mirror_peers;
int r = peer_site_list(io_ctx, &mirror_peers);
if (r < 0) {
return r;
}
if (std::find_if(mirror_peers.begin(), mirror_peers.end(),
[&uuid](const librbd::mirror_peer_site_t& peer) {
return uuid == peer.uuid;
}) == mirror_peers.end()) {
ldout(cct, 5) << "mirror peer uuid " << uuid << " does not exist" << dendl;
return -ENOENT;
}
std::stringstream ss;
ss << "{";
for (auto& pair : attributes) {
ss << "\\\"" << pair.first << "\\\": "
<< "\\\"" << pair.second << "\\\"";
if (&pair != &(*attributes.rbegin())) {
ss << ", ";
}
}
ss << "}";
librados::Rados rados(io_ctx);
r = set_config_key(rados, get_peer_config_key_name(io_ctx.get_id(), uuid),
ss.str());
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to update peer attributes: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::image_global_status_list(
librados::IoCtx& io_ctx, const std::string &start_id, size_t max,
IdToMirrorImageGlobalStatus *images) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
int r;
std::map<std::string, std::string> id_to_name;
{
std::map<std::string, std::string> name_to_id;
r = Image<I>::list_images_v2(io_ctx, &name_to_id);
if (r < 0) {
return r;
}
for (auto it : name_to_id) {
id_to_name[it.second] = it.first;
}
}
std::map<std::string, cls::rbd::MirrorImage> images_;
std::map<std::string, cls::rbd::MirrorImageStatus> statuses_;
r = librbd::cls_client::mirror_image_status_list(&io_ctx, start_id, max,
&images_, &statuses_);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror image statuses: "
<< cpp_strerror(r) << dendl;
return r;
}
const std::string STATUS_NOT_FOUND("status not found");
for (auto it = images_.begin(); it != images_.end(); ++it) {
auto &image_id = it->first;
auto &info = it->second;
if (info.state == cls::rbd::MIRROR_IMAGE_STATE_DISABLED) {
continue;
}
auto &image_name = id_to_name[image_id];
if (image_name.empty()) {
lderr(cct) << "failed to find image name for image " << image_id << ", "
<< "using image id as name" << dendl;
image_name = image_id;
}
mirror_image_global_status_t& global_status = (*images)[image_id];
global_status.name = image_name;
global_status.info = mirror_image_info_t{
info.global_image_id,
static_cast<mirror_image_state_t>(info.state),
      false}; // XXX: determining "primary" correctly would require an additional call.
bool found_local_site_status = false;
auto s_it = statuses_.find(image_id);
if (s_it != statuses_.end()) {
auto& status = s_it->second;
global_status.site_statuses.reserve(
status.mirror_image_site_statuses.size());
for (auto& site_status : status.mirror_image_site_statuses) {
if (site_status.mirror_uuid ==
cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID) {
found_local_site_status = true;
}
global_status.site_statuses.push_back(mirror_image_site_status_t{
site_status.mirror_uuid,
static_cast<mirror_image_status_state_t>(site_status.state),
site_status.state == cls::rbd::MIRROR_IMAGE_STATUS_STATE_UNKNOWN ?
STATUS_NOT_FOUND : site_status.description,
site_status.last_update.sec(), site_status.up});
}
}
if (!found_local_site_status) {
global_status.site_statuses.push_back(mirror_image_site_status_t{
cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID,
MIRROR_IMAGE_STATUS_STATE_UNKNOWN, STATUS_NOT_FOUND, 0, false});
}
}
return 0;
}
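// Illustrative sketch, not part of the original file: a caller is assumed to
// page through image_global_status_list() by feeding the last image id of one
// page back in as the start_id of the next, mirroring the pattern that
// image_info_list() below uses internally. The helper name and page size are
// assumptions.
namespace {

[[maybe_unused]] int example_list_all_global_statuses(
    librados::IoCtx& io_ctx,
    std::map<std::string, mirror_image_global_status_t>* statuses) {
  std::string start_id;
  const size_t max_page = 1024;
  while (true) {
    std::map<std::string, mirror_image_global_status_t> page;
    int r = Mirror<librbd::ImageCtx>::image_global_status_list(io_ctx, start_id,
                                                               max_page, &page);
    if (r < 0) {
      return r;
    }
    statuses->insert(page.begin(), page.end());
    if (page.size() < max_page) {
      break;  // short page: no more entries
    }
    start_id = page.rbegin()->first;
  }
  return 0;
}

} // anonymous namespace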
template <typename I>
int Mirror<I>::image_status_summary(librados::IoCtx& io_ctx,
MirrorImageStatusStates *states) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
std::vector<cls::rbd::MirrorPeer> mirror_peers;
int r = cls_client::mirror_peer_list(&io_ctx, &mirror_peers);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror peers: " << cpp_strerror(r) << dendl;
return r;
}
std::map<cls::rbd::MirrorImageStatusState, int32_t> states_;
r = cls_client::mirror_image_status_get_summary(&io_ctx, mirror_peers,
&states_);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to get mirror status summary: "
<< cpp_strerror(r) << dendl;
return r;
}
for (auto &s : states_) {
(*states)[static_cast<mirror_image_status_state_t>(s.first)] = s.second;
}
return 0;
}
template <typename I>
int Mirror<I>::image_instance_id_list(
librados::IoCtx& io_ctx, const std::string &start_image_id, size_t max,
std::map<std::string, std::string> *instance_ids) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
std::map<std::string, entity_inst_t> instances;
int r = librbd::cls_client::mirror_image_instance_list(
&io_ctx, start_image_id, max, &instances);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror image instances: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto it : instances) {
(*instance_ids)[it.first] = stringify(it.second.name.num());
}
return 0;
}
template <typename I>
int Mirror<I>::image_info_list(
librados::IoCtx& io_ctx, mirror_image_mode_t *mode_filter,
const std::string &start_id, size_t max,
std::map<std::string, std::pair<mirror_image_mode_t,
mirror_image_info_t>> *entries) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "pool=" << io_ctx.get_pool_name() << ", mode_filter="
<< (mode_filter ? stringify(*mode_filter) : "null")
<< ", start_id=" << start_id << ", max=" << max << dendl;
std::string last_read = start_id;
entries->clear();
while (entries->size() < max) {
std::map<std::string, cls::rbd::MirrorImage> images;
std::map<std::string, cls::rbd::MirrorImageStatus> statuses;
int r = librbd::cls_client::mirror_image_status_list(&io_ctx, last_read,
max, &images,
&statuses);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror image statuses: "
<< cpp_strerror(r) << dendl;
return r;
}
if (images.empty()) {
break;
}
AsioEngine asio_engine(io_ctx);
for (auto &it : images) {
auto &image_id = it.first;
auto &image = it.second;
auto mode = static_cast<mirror_image_mode_t>(image.mode);
if ((mode_filter && mode != *mode_filter) ||
image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
continue;
}
// need to call get_info for every image to retrieve promotion state
mirror_image_info_t info;
r = image_get_info(io_ctx, asio_engine.get_work_queue(), image_id, &info);
if (r < 0) {
continue;
}
(*entries)[image_id] = std::make_pair(mode, info);
if (entries->size() == max) {
break;
}
}
last_read = images.rbegin()->first;
}
return 0;
}
template <typename I>
int Mirror<I>::image_snapshot_create(I *ictx, uint32_t flags,
uint64_t *snap_id) {
C_SaferCond ctx;
Mirror<I>::image_snapshot_create(ictx, flags, snap_id, &ctx);
return ctx.wait();
}
template <typename I>
void Mirror<I>::image_snapshot_create(I *ictx, uint32_t flags,
uint64_t *snap_id, Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
uint64_t snap_create_flags = 0;
int r = util::snap_create_flags_api_to_internal(cct, flags,
&snap_create_flags);
if (r < 0) {
on_finish->complete(r);
return;
}
auto on_refresh = new LambdaContext(
[ictx, snap_create_flags, snap_id, on_finish](int r) {
if (r < 0) {
lderr(ictx->cct) << "refresh failed: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
auto ctx = new C_ImageSnapshotCreate<I>(ictx, snap_create_flags, snap_id,
on_finish);
auto req = mirror::GetInfoRequest<I>::create(*ictx, &ctx->mirror_image,
&ctx->promotion_state,
&ctx->primary_mirror_uuid,
ctx);
req->send();
});
if (ictx->state->is_refresh_required()) {
ictx->state->refresh(on_refresh);
} else {
on_refresh->complete(0);
}
}
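// Illustrative sketch, not part of the original file: a caller that cannot
// block on the synchronous wrapper can drive the asynchronous overload above
// with a LambdaContext, as below. The helper name and the callback body are
// assumptions; flags of 0 request the default snapshot-create behaviour.
namespace {

[[maybe_unused]] void example_async_mirror_snapshot_create(
    librbd::ImageCtx* ictx, uint64_t* snap_id) {
  auto on_finish = new LambdaContext([snap_id](int r) {
    // on success *snap_id holds the id of the new mirror snapshot;
    // on failure it is left untouched
    (void)r;
  });
  Mirror<librbd::ImageCtx>::image_snapshot_create(ictx, 0U, snap_id, on_finish);
}

} // anonymous namespace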
} // namespace api
} // namespace librbd
template class librbd::api::Mirror<librbd::ImageCtx>;
| 66,186 | 30.442755 | 80 | cc |
null | ceph-main/src/librbd/api/Mirror.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_API_MIRROR_H
#define LIBRBD_API_MIRROR_H
#include "include/rbd/librbd.hpp"
#include <map>
#include <string>
#include <vector>
struct Context;
namespace librbd {
struct ImageCtx;
namespace asio { struct ContextWQ; }
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Mirror {
typedef std::map<std::string, std::string> Attributes;
typedef std::map<std::string, mirror_image_global_status_t>
IdToMirrorImageGlobalStatus;
typedef std::map<mirror_image_status_state_t, int> MirrorImageStatusStates;
static int site_name_get(librados::Rados& rados, std::string* name);
static int site_name_set(librados::Rados& rados, const std::string& name);
static int mode_get(librados::IoCtx& io_ctx, rbd_mirror_mode_t *mirror_mode);
static int mode_set(librados::IoCtx& io_ctx, rbd_mirror_mode_t mirror_mode);
static int uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid);
static void uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid,
Context* on_finish);
static int peer_bootstrap_create(librados::IoCtx& io_ctx, std::string* token);
static int peer_bootstrap_import(librados::IoCtx& io_ctx,
rbd_mirror_peer_direction_t direction,
const std::string& token);
static int peer_site_add(librados::IoCtx& io_ctx, std::string *uuid,
mirror_peer_direction_t direction,
const std::string &site_name,
const std::string &client_name);
static int peer_site_remove(librados::IoCtx& io_ctx, const std::string &uuid);
static int peer_site_list(librados::IoCtx& io_ctx,
std::vector<mirror_peer_site_t> *peers);
static int peer_site_set_client(librados::IoCtx& io_ctx,
const std::string &uuid,
const std::string &client_name);
static int peer_site_set_name(librados::IoCtx& io_ctx,
const std::string &uuid,
const std::string &site_name);
static int peer_site_set_direction(librados::IoCtx& io_ctx,
const std::string &uuid,
mirror_peer_direction_t direction);
static int peer_site_get_attributes(librados::IoCtx& io_ctx,
const std::string &uuid,
Attributes* attributes);
static int peer_site_set_attributes(librados::IoCtx& io_ctx,
const std::string &uuid,
const Attributes& attributes);
static int image_global_status_list(librados::IoCtx& io_ctx,
const std::string &start_id, size_t max,
IdToMirrorImageGlobalStatus *images);
static int image_status_summary(librados::IoCtx& io_ctx,
MirrorImageStatusStates *states);
static int image_instance_id_list(librados::IoCtx& io_ctx,
const std::string &start_image_id,
size_t max,
std::map<std::string, std::string> *ids);
static int image_info_list(
librados::IoCtx& io_ctx, mirror_image_mode_t *mode_filter,
const std::string &start_id, size_t max,
std::map<std::string, std::pair<mirror_image_mode_t,
mirror_image_info_t>> *entries);
static int image_enable(ImageCtxT *ictx, mirror_image_mode_t mode,
bool relax_same_pool_parent_check);
static int image_disable(ImageCtxT *ictx, bool force);
static int image_promote(ImageCtxT *ictx, bool force);
static void image_promote(ImageCtxT *ictx, bool force, Context *on_finish);
static int image_demote(ImageCtxT *ictx);
static void image_demote(ImageCtxT *ictx, Context *on_finish);
static int image_resync(ImageCtxT *ictx);
static int image_get_info(ImageCtxT *ictx,
mirror_image_info_t *mirror_image_info);
static void image_get_info(ImageCtxT *ictx,
mirror_image_info_t *mirror_image_info,
Context *on_finish);
static int image_get_info(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
mirror_image_info_t *mirror_image_info);
static void image_get_info(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
mirror_image_info_t *mirror_image_info,
Context *on_finish);
static int image_get_mode(ImageCtxT *ictx, mirror_image_mode_t *mode);
static void image_get_mode(ImageCtxT *ictx, mirror_image_mode_t *mode,
Context *on_finish);
static int image_get_global_status(ImageCtxT *ictx,
mirror_image_global_status_t *status);
static void image_get_global_status(ImageCtxT *ictx,
mirror_image_global_status_t *status,
Context *on_finish);
static int image_get_instance_id(ImageCtxT *ictx, std::string *instance_id);
static int image_snapshot_create(ImageCtxT *ictx, uint32_t flags,
uint64_t *snap_id);
static void image_snapshot_create(ImageCtxT *ictx, uint32_t flags,
uint64_t *snap_id, Context *on_finish);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Mirror<librbd::ImageCtx>;
#endif // LIBRBD_API_MIRROR_H
| 5,990 | 46.173228 | 80 | h |
null | ceph-main/src/librbd/api/Namespace.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/api/Mirror.h"
#include "librbd/api/Namespace.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Namespace: " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
const std::list<std::string> POOL_OBJECTS {
RBD_CHILDREN,
RBD_GROUP_DIRECTORY,
RBD_INFO,
RBD_MIRRORING,
RBD_TASK,
RBD_TRASH,
RBD_DIRECTORY
};
} // anonymous namespace
template <typename I>
int Namespace<I>::create(librados::IoCtx& io_ctx, const std::string& name)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << "name=" << name << dendl;
if (name.empty()) {
return -EINVAL;
}
librados::Rados rados(io_ctx);
int8_t require_osd_release;
int r = rados.get_min_compatible_osd(&require_osd_release);
if (r < 0) {
lderr(cct) << "failed to retrieve min OSD release: " << cpp_strerror(r)
<< dendl;
return r;
}
if (require_osd_release < CEPH_RELEASE_NAUTILUS) {
ldout(cct, 1) << "namespace support requires nautilus or later OSD"
<< dendl;
return -ENOSYS;
}
librados::IoCtx default_ns_ctx;
default_ns_ctx.dup(io_ctx);
default_ns_ctx.set_namespace("");
r = cls_client::namespace_add(&default_ns_ctx, name);
if (r < 0) {
lderr(cct) << "failed to add namespace: " << cpp_strerror(r) << dendl;
return r;
}
librados::IoCtx ns_ctx;
ns_ctx.dup(io_ctx);
ns_ctx.set_namespace(name);
r = cls_client::dir_state_set(&ns_ctx, RBD_DIRECTORY,
cls::rbd::DIRECTORY_STATE_READY);
if (r < 0) {
lderr(cct) << "failed to initialize image directory: " << cpp_strerror(r)
<< dendl;
goto rollback;
}
return 0;
rollback:
int ret_val = cls_client::namespace_remove(&default_ns_ctx, name);
if (ret_val < 0) {
lderr(cct) << "failed to remove namespace: " << cpp_strerror(ret_val) << dendl;
}
return r;
}
template <typename I>
int Namespace<I>::remove(librados::IoCtx& io_ctx, const std::string& name)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << "name=" << name << dendl;
if (name.empty()) {
return -EINVAL;
}
librados::IoCtx default_ns_ctx;
default_ns_ctx.dup(io_ctx);
default_ns_ctx.set_namespace("");
librados::IoCtx ns_ctx;
ns_ctx.dup(io_ctx);
ns_ctx.set_namespace(name);
std::map<std::string, cls::rbd::TrashImageSpec> trash_entries;
librados::ObjectWriteOperation dir_op;
librbd::cls_client::dir_state_set(
&dir_op, cls::rbd::DIRECTORY_STATE_ADD_DISABLED);
dir_op.remove();
int r = ns_ctx.operate(RBD_DIRECTORY, &dir_op);
if (r == -EBUSY) {
ldout(cct, 5) << "image directory not empty" << dendl;
goto rollback;
} else if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to disable the namespace: " << cpp_strerror(r)
<< dendl;
return r;
}
r = cls_client::trash_list(&ns_ctx, "", 1, &trash_entries);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list trash directory: " << cpp_strerror(r)
<< dendl;
return r;
} else if (!trash_entries.empty()) {
ldout(cct, 5) << "image trash not empty" << dendl;
goto rollback;
}
r = Mirror<I>::mode_set(ns_ctx, RBD_MIRROR_MODE_DISABLED);
if (r < 0) {
lderr(cct) << "failed to disable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto& oid : POOL_OBJECTS) {
r = ns_ctx.remove(oid);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to remove object '" << oid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
}
r = cls_client::namespace_remove(&default_ns_ctx, name);
if (r < 0) {
lderr(cct) << "failed to remove namespace: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
rollback:
r = librbd::cls_client::dir_state_set(
&ns_ctx, RBD_DIRECTORY, cls::rbd::DIRECTORY_STATE_READY);
if (r < 0) {
lderr(cct) << "failed to restore directory state: " << cpp_strerror(r)
<< dendl;
}
return -EBUSY;
}
template <typename I>
int Namespace<I>::list(IoCtx& io_ctx, std::vector<std::string> *names)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << dendl;
librados::IoCtx default_ns_ctx;
default_ns_ctx.dup(io_ctx);
default_ns_ctx.set_namespace("");
int r;
int max_read = 1024;
std::string last_read = "";
do {
std::list<std::string> name_list;
r = cls_client::namespace_list(&default_ns_ctx, last_read, max_read,
&name_list);
if (r == -ENOENT) {
return 0;
} else if (r < 0) {
lderr(cct) << "error listing namespaces: " << cpp_strerror(r) << dendl;
return r;
}
names->insert(names->end(), name_list.begin(), name_list.end());
if (!name_list.empty()) {
last_read = name_list.back();
}
r = name_list.size();
} while (r == max_read);
return 0;
}
template <typename I>
int Namespace<I>::exists(librados::IoCtx& io_ctx, const std::string& name, bool *exists)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << "name=" << name << dendl;
*exists = false;
if (name.empty()) {
return -EINVAL;
}
librados::IoCtx ns_ctx;
ns_ctx.dup(io_ctx);
ns_ctx.set_namespace(name);
int r = librbd::cls_client::dir_state_assert(&ns_ctx, RBD_DIRECTORY,
cls::rbd::DIRECTORY_STATE_READY);
if (r == 0) {
*exists = true;
} else if (r != -ENOENT) {
lderr(cct) << "error asserting namespace: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
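// Illustrative sketch, not part of the original file: a typical caller flow
// for the API above is to create the namespace, confirm it exists, and then
// point an IoCtx at it before issuing per-image operations. The helper name
// and the -EEXIST tolerance are assumptions.
namespace {

[[maybe_unused]] int example_prepare_namespace(librados::IoCtx& io_ctx,
                                               const std::string& name) {
  int r = Namespace<librbd::ImageCtx>::create(io_ctx, name);
  if (r < 0 && r != -EEXIST) {
    return r;
  }
  bool exists = false;
  r = Namespace<librbd::ImageCtx>::exists(io_ctx, name, &exists);
  if (r < 0) {
    return r;
  } else if (!exists) {
    return -ENOENT;
  }
  // all subsequent image operations are scoped to the namespace
  io_ctx.set_namespace(name);
  return 0;
}

} // anonymous namespace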
} // namespace api
} // namespace librbd
template class librbd::api::Namespace<librbd::ImageCtx>;
| 5,911 | 24.050847 | 88 | cc |
null | ceph-main/src/librbd/api/Namespace.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_NAMESPACE_H
#define CEPH_LIBRBD_API_NAMESPACE_H
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include <string>
#include <vector>
namespace librbd {
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Namespace {
static int create(librados::IoCtx& io_ctx, const std::string& name);
static int remove(librados::IoCtx& io_ctx, const std::string& name);
static int list(librados::IoCtx& io_ctx, std::vector<std::string>* names);
static int exists(librados::IoCtx& io_ctx, const std::string& name, bool *exists);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Namespace<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_NAMESPACE_H
| 863 | 24.411765 | 84 | h |
null | ceph-main/src/librbd/api/Pool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Pool.h"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "common/Throttle.h"
#include "cls/rbd/cls_rbd_client.h"
#include "osd/osd_types.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/Image.h"
#include "librbd/api/Trash.h"
#include "librbd/image/ValidatePoolRequest.h"
#define dout_subsys ceph_subsys_rbd
namespace librbd {
namespace api {
namespace {
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Pool::ImageStatRequest: " \
<< __func__ << " " << this << ": " \
<< "(id=" << m_image_id << "): "
template <typename I>
class ImageStatRequest {
public:
ImageStatRequest(librados::IoCtx& io_ctx, SimpleThrottle& throttle,
const std::string& image_id, bool scan_snaps,
std::atomic<uint64_t>* bytes,
std::atomic<uint64_t>* max_bytes,
std::atomic<uint64_t>* snaps)
: m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
m_io_ctx(io_ctx), m_throttle(throttle), m_image_id(image_id),
m_scan_snaps(scan_snaps), m_bytes(bytes), m_max_bytes(max_bytes),
m_snaps(snaps) {
m_throttle.start_op();
}
void send() {
get_head();
}
protected:
void finish(int r) {
(*m_max_bytes) += m_max_size;
m_throttle.end_op(r);
delete this;
}
private:
CephContext* m_cct;
librados::IoCtx& m_io_ctx;
SimpleThrottle& m_throttle;
const std::string& m_image_id;
bool m_scan_snaps;
std::atomic<uint64_t>* m_bytes;
std::atomic<uint64_t>* m_max_bytes;
std::atomic<uint64_t>* m_snaps;
bufferlist m_out_bl;
uint64_t m_max_size = 0;
::SnapContext m_snapc;
void get_head() {
ldout(m_cct, 15) << dendl;
librados::ObjectReadOperation op;
cls_client::get_size_start(&op, CEPH_NOSNAP);
if (m_scan_snaps) {
cls_client::get_snapcontext_start(&op);
}
m_out_bl.clear();
auto aio_comp = util::create_rados_callback<
ImageStatRequest<I>, &ImageStatRequest<I>::handle_get_head>(this);
int r = m_io_ctx.aio_operate(util::header_name(m_image_id), aio_comp, &op,
&m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
void handle_get_head(int r) {
ldout(m_cct, 15) << "r=" << r << dendl;
auto it = m_out_bl.cbegin();
if (r == 0) {
uint8_t order;
r = cls_client::get_size_finish(&it, &m_max_size, &order);
if (r == 0) {
(*m_bytes) += m_max_size;
}
}
if (m_scan_snaps && r == 0) {
r = cls_client::get_snapcontext_finish(&it, &m_snapc);
if (r == 0) {
(*m_snaps) += m_snapc.snaps.size();
}
}
if (r == -ENOENT) {
finish(r);
return;
} else if (r < 0) {
lderr(m_cct) << "failed to stat image: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
if (!m_snapc.is_valid()) {
lderr(m_cct) << "snap context is invalid" << dendl;
finish(-EIO);
return;
}
get_snaps();
}
void get_snaps() {
if (!m_scan_snaps || m_snapc.snaps.empty()) {
finish(0);
return;
}
ldout(m_cct, 15) << dendl;
librados::ObjectReadOperation op;
for (auto snap_seq : m_snapc.snaps) {
cls_client::get_size_start(&op, snap_seq);
}
m_out_bl.clear();
auto aio_comp = util::create_rados_callback<
ImageStatRequest<I>, &ImageStatRequest<I>::handle_get_snaps>(this);
int r = m_io_ctx.aio_operate(util::header_name(m_image_id), aio_comp, &op,
&m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
void handle_get_snaps(int r) {
ldout(m_cct, 15) << "r=" << r << dendl;
auto it = m_out_bl.cbegin();
for ([[maybe_unused]] auto snap_seq : m_snapc.snaps) {
uint64_t size;
if (r == 0) {
uint8_t order;
r = cls_client::get_size_finish(&it, &size, &order);
}
if (r == 0 && m_max_size < size) {
m_max_size = size;
}
}
if (r == -ENOENT) {
ldout(m_cct, 15) << "out-of-sync metadata" << dendl;
get_head();
} else if (r < 0) {
lderr(m_cct) << "failed to retrieve snap size: " << cpp_strerror(r)
<< dendl;
finish(r);
} else {
finish(0);
}
}
};
template <typename I>
void get_pool_stat_option_value(typename Pool<I>::StatOptions* stat_options,
rbd_pool_stat_option_t option,
uint64_t** value) {
auto it = stat_options->find(option);
if (it == stat_options->end()) {
*value = nullptr;
} else {
*value = it->second;
}
}
template <typename I>
int get_pool_stats(librados::IoCtx& io_ctx, const ConfigProxy& config,
const std::vector<std::string>& image_ids, uint64_t* image_count,
uint64_t* provisioned_bytes, uint64_t* max_provisioned_bytes,
uint64_t* snapshot_count) {
bool scan_snaps = ((max_provisioned_bytes != nullptr) ||
(snapshot_count != nullptr));
SimpleThrottle throttle(
config.template get_val<uint64_t>("rbd_concurrent_management_ops"), true);
std::atomic<uint64_t> bytes{0};
std::atomic<uint64_t> max_bytes{0};
std::atomic<uint64_t> snaps{0};
for (auto& image_id : image_ids) {
if (throttle.pending_error()) {
break;
}
auto req = new ImageStatRequest<I>(io_ctx, throttle, image_id,
scan_snaps, &bytes, &max_bytes, &snaps);
req->send();
}
int r = throttle.wait_for_ret();
if (r < 0) {
return r;
}
if (image_count != nullptr) {
*image_count = image_ids.size();
}
if (provisioned_bytes != nullptr) {
*provisioned_bytes = bytes.load();
}
if (max_provisioned_bytes != nullptr) {
*max_provisioned_bytes = max_bytes.load();
}
if (snapshot_count != nullptr) {
*snapshot_count = snaps.load();
}
return 0;
}
} // anonymous namespace
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Pool: " << __func__ << ": "
template <typename I>
int Pool<I>::init(librados::IoCtx& io_ctx, bool force) {
auto cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 10) << dendl;
int r = io_ctx.application_enable(pg_pool_t::APPLICATION_NAME_RBD, force);
if (r < 0) {
return r;
}
ConfigProxy config{cct->_conf};
api::Config<I>::apply_pool_overrides(io_ctx, &config);
if (!config.get_val<bool>("rbd_validate_pool")) {
return 0;
}
C_SaferCond ctx;
auto req = image::ValidatePoolRequest<I>::create(io_ctx, &ctx);
req->send();
return ctx.wait();
}
template <typename I>
int Pool<I>::add_stat_option(StatOptions* stat_options,
rbd_pool_stat_option_t option,
uint64_t* value) {
switch (option) {
case RBD_POOL_STAT_OPTION_IMAGES:
case RBD_POOL_STAT_OPTION_IMAGE_PROVISIONED_BYTES:
case RBD_POOL_STAT_OPTION_IMAGE_MAX_PROVISIONED_BYTES:
case RBD_POOL_STAT_OPTION_IMAGE_SNAPSHOTS:
case RBD_POOL_STAT_OPTION_TRASH_IMAGES:
case RBD_POOL_STAT_OPTION_TRASH_PROVISIONED_BYTES:
case RBD_POOL_STAT_OPTION_TRASH_MAX_PROVISIONED_BYTES:
case RBD_POOL_STAT_OPTION_TRASH_SNAPSHOTS:
stat_options->emplace(option, value);
return 0;
default:
break;
}
return -ENOENT;
}
template <typename I>
int Pool<I>::get_stats(librados::IoCtx& io_ctx, StatOptions* stat_options) {
auto cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 10) << dendl;
ConfigProxy config{cct->_conf};
api::Config<I>::apply_pool_overrides(io_ctx, &config);
uint64_t* image_count;
uint64_t* provisioned_bytes;
uint64_t* max_provisioned_bytes;
uint64_t* snapshot_count;
std::vector<trash_image_info_t> trash_entries;
int r = Trash<I>::list(io_ctx, trash_entries, false);
if (r < 0 && r != -EOPNOTSUPP) {
return r;
}
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_IMAGES, &image_count);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_IMAGE_PROVISIONED_BYTES,
&provisioned_bytes);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_IMAGE_MAX_PROVISIONED_BYTES,
&max_provisioned_bytes);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_IMAGE_SNAPSHOTS, &snapshot_count);
if (image_count != nullptr || provisioned_bytes != nullptr ||
max_provisioned_bytes != nullptr || snapshot_count != nullptr) {
typename Image<I>::ImageNameToIds images;
int r = Image<I>::list_images_v2(io_ctx, &images);
if (r < 0) {
return r;
}
std::vector<std::string> image_ids;
image_ids.reserve(images.size() + trash_entries.size());
for (auto& it : images) {
image_ids.push_back(std::move(it.second));
}
for (auto& it : trash_entries) {
if (it.source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
image_ids.push_back(std::move(it.id));
}
}
r = get_pool_stats<I>(io_ctx, config, image_ids, image_count,
provisioned_bytes, max_provisioned_bytes,
snapshot_count);
if (r < 0) {
return r;
}
}
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_TRASH_IMAGES, &image_count);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_TRASH_PROVISIONED_BYTES,
&provisioned_bytes);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_TRASH_MAX_PROVISIONED_BYTES,
&max_provisioned_bytes);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_TRASH_SNAPSHOTS, &snapshot_count);
if (image_count != nullptr || provisioned_bytes != nullptr ||
max_provisioned_bytes != nullptr || snapshot_count != nullptr) {
std::vector<std::string> image_ids;
image_ids.reserve(trash_entries.size());
for (auto& it : trash_entries) {
if (it.source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
continue;
}
image_ids.push_back(std::move(it.id));
}
r = get_pool_stats<I>(io_ctx, config, image_ids, image_count,
provisioned_bytes, max_provisioned_bytes,
snapshot_count);
if (r < 0) {
return r;
}
}
return 0;
}
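// Illustrative sketch, not part of the original file: StatOptions maps each
// requested counter to a caller-owned uint64_t that receives the result, and
// add_stat_option() rejects unknown options with -ENOENT. The helper name is
// an assumption.
namespace {

[[maybe_unused]] int example_pool_usage(librados::IoCtx& io_ctx,
                                        uint64_t* image_count,
                                        uint64_t* provisioned_bytes) {
  Pool<librbd::ImageCtx>::StatOptions stat_options;
  int r = Pool<librbd::ImageCtx>::add_stat_option(
      &stat_options, RBD_POOL_STAT_OPTION_IMAGES, image_count);
  if (r < 0) {
    return r;
  }
  r = Pool<librbd::ImageCtx>::add_stat_option(
      &stat_options, RBD_POOL_STAT_OPTION_IMAGE_PROVISIONED_BYTES,
      provisioned_bytes);
  if (r < 0) {
    return r;
  }
  return Pool<librbd::ImageCtx>::get_stats(io_ctx, &stat_options);
}

} // anonymous namespace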
} // namespace api
} // namespace librbd
template class librbd::api::Pool<librbd::ImageCtx>;
| 10,605 | 27.207447 | 79 | cc |
null | ceph-main/src/librbd/api/Pool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_POOL_H
#define CEPH_LIBRBD_API_POOL_H
#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.h"
#include <map>
namespace librbd {
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class Pool {
public:
typedef std::map<rbd_pool_stat_option_t, uint64_t*> StatOptions;
static int init(librados::IoCtx& io_ctx, bool force);
static int add_stat_option(StatOptions* stat_options,
rbd_pool_stat_option_t option,
uint64_t* value);
static int get_stats(librados::IoCtx& io_ctx, StatOptions* stat_options);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Pool<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_POOL_H
| 913 | 22.435897 | 75 | h |
null | ceph-main/src/librbd/api/PoolMetadata.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/PoolMetadata.h"
#include "cls/rbd/cls_rbd_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/image/GetMetadataRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::PoolMetadata: " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
void update_pool_timestamp(librados::IoCtx& io_ctx) {
CephContext *cct = (CephContext *)io_ctx.cct();
auto now = ceph_clock_now();
std::string cmd =
R"({)"
R"("prefix": "config set", )"
R"("who": "global", )"
R"("name": "rbd_config_pool_override_update_timestamp", )"
R"("value": ")" + stringify(now.sec()) + R"(")"
R"(})";
librados::Rados rados(io_ctx);
bufferlist in_bl;
std::string ss;
int r = rados.mon_command(cmd, in_bl, nullptr, &ss);
if (r < 0) {
lderr(cct) << "failed to notify clients of pool config update: "
<< cpp_strerror(r) << dendl;
}
}
} // anonymous namespace
template <typename I>
int PoolMetadata<I>::get(librados::IoCtx& io_ctx,
const std::string &key, std::string *value) {
CephContext *cct = (CephContext *)io_ctx.cct();
int r = cls_client::metadata_get(&io_ctx, RBD_INFO, key, value);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed reading metadata " << key << ": " << cpp_strerror(r)
<< dendl;
}
return r;
}
template <typename I>
int PoolMetadata<I>::set(librados::IoCtx& io_ctx, const std::string &key,
const std::string &value) {
CephContext *cct = (CephContext *)io_ctx.cct();
bool need_update_pool_timestamp = false;
std::string config_key;
if (util::is_metadata_config_override(key, &config_key)) {
if (!librbd::api::Config<I>::is_option_name(io_ctx, config_key)) {
lderr(cct) << "validation for " << key
<< " failed: not allowed pool level override" << dendl;
return -EINVAL;
}
int r = ConfigProxy{false}.set_val(config_key.c_str(), value);
if (r < 0) {
lderr(cct) << "validation for " << key << " failed: " << cpp_strerror(r)
<< dendl;
return -EINVAL;
}
need_update_pool_timestamp = true;
}
ceph::bufferlist bl;
bl.append(value);
int r = cls_client::metadata_set(&io_ctx, RBD_INFO, {{key, bl}});
if (r < 0) {
lderr(cct) << "failed setting metadata " << key << ": " << cpp_strerror(r)
<< dendl;
return r;
}
if (need_update_pool_timestamp) {
update_pool_timestamp(io_ctx);
}
return 0;
}
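// Illustrative sketch, not part of the original file: keys carrying the
// "conf_" prefix are validated as pool-level config overrides before being
// stored, so pushing an override boils down to a single set() call. The
// helper name and the chosen option/value are assumptions; this is comparable
// to what `rbd config pool set` performs on behalf of the user.
namespace {

[[maybe_unused]] int example_set_pool_override(librados::IoCtx& io_ctx) {
  // disable the client-side cache for all images in this pool
  return PoolMetadata<librbd::ImageCtx>::set(io_ctx, "conf_rbd_cache", "false");
}

} // anonymous namespace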
template <typename I>
int PoolMetadata<I>::remove(librados::IoCtx& io_ctx, const std::string &key) {
CephContext *cct = (CephContext *)io_ctx.cct();
std::string value;
int r = cls_client::metadata_get(&io_ctx, RBD_INFO, key, &value);
if (r < 0) {
if (r == -ENOENT) {
ldout(cct, 1) << "metadata " << key << " does not exist" << dendl;
} else {
lderr(cct) << "failed reading metadata " << key << ": " << cpp_strerror(r)
<< dendl;
}
return r;
}
r = cls_client::metadata_remove(&io_ctx, RBD_INFO, key);
if (r < 0) {
lderr(cct) << "failed removing metadata " << key << ": " << cpp_strerror(r)
<< dendl;
return r;
}
std::string config_key;
if (util::is_metadata_config_override(key, &config_key)) {
update_pool_timestamp(io_ctx);
}
return 0;
}
template <typename I>
int PoolMetadata<I>::list(librados::IoCtx& io_ctx, const std::string &start,
uint64_t max,
std::map<std::string, ceph::bufferlist> *pairs) {
CephContext *cct = (CephContext *)io_ctx.cct();
pairs->clear();
C_SaferCond ctx;
auto req = image::GetMetadataRequest<I>::create(
io_ctx, RBD_INFO, false, "", start, max, pairs, &ctx);
req->send();
int r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed listing metadata: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
} // namespace api
} // namespace librbd
template class librbd::api::PoolMetadata<librbd::ImageCtx>;
| 4,276 | 26.242038 | 80 | cc |
null | ceph-main/src/librbd/api/PoolMetadata.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_POOL_METADATA_H
#define CEPH_LIBRBD_API_POOL_METADATA_H
#include "include/buffer_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include <cstdint>
#include <map>
#include <string>
namespace librbd {
class ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class PoolMetadata {
public:
static int get(librados::IoCtx& io_ctx, const std::string &key,
std::string *value);
static int set(librados::IoCtx& io_ctx, const std::string &key,
const std::string &value);
static int remove(librados::IoCtx& io_ctx, const std::string &key);
static int list(librados::IoCtx& io_ctx, const std::string &start,
uint64_t max, std::map<std::string, ceph::bufferlist> *pairs);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::PoolMetadata<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_POOL_METADATA_H
| 1,030 | 26.131579 | 80 | h |
null | ceph-main/src/librbd/api/Snapshot.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Snapshot.h"
#include "cls/rbd/cls_rbd_types.h"
#include "common/errno.h"
#include "librbd/internal.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Image.h"
#include "include/Context.h"
#include "common/Cond.h"
#include <boost/variant.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Snapshot: " << __func__ << ": "
using librados::snap_t;
namespace librbd {
namespace api {
namespace {
class GetGroupVisitor {
public:
CephContext* cct;
librados::IoCtx *image_ioctx;
snap_group_namespace_t *group_snap;
explicit GetGroupVisitor(CephContext* cct, librados::IoCtx *_image_ioctx,
snap_group_namespace_t *group_snap)
: cct(cct), image_ioctx(_image_ioctx), group_snap(group_snap) {};
template <typename T>
inline int operator()(const T&) const {
    // ignore namespace types other than GroupSnapshotNamespace.
return -EINVAL;
}
inline int operator()(
const cls::rbd::GroupSnapshotNamespace& snap_namespace) {
IoCtx group_ioctx;
int r = util::create_ioctx(*image_ioctx, "group", snap_namespace.group_pool,
{}, &group_ioctx);
if (r < 0) {
return r;
}
cls::rbd::GroupSnapshot group_snapshot;
std::string group_name;
r = cls_client::dir_get_name(&group_ioctx, RBD_GROUP_DIRECTORY,
snap_namespace.group_id, &group_name);
if (r < 0) {
lderr(cct) << "failed to retrieve group name: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string group_header_oid = util::group_header_name(snap_namespace.group_id);
r = cls_client::group_snap_get_by_id(&group_ioctx,
group_header_oid,
snap_namespace.group_snapshot_id,
&group_snapshot);
if (r < 0) {
lderr(cct) << "failed to retrieve group snapshot: " << cpp_strerror(r)
<< dendl;
return r;
}
group_snap->group_pool = group_ioctx.get_id();
group_snap->group_name = group_name;
group_snap->group_snap_name = group_snapshot.name;
return 0;
}
};
class GetTrashVisitor {
public:
std::string* original_name;
explicit GetTrashVisitor(std::string* original_name)
: original_name(original_name) {
}
template <typename T>
inline int operator()(const T&) const {
return -EINVAL;
}
inline int operator()(
const cls::rbd::TrashSnapshotNamespace& snap_namespace) {
*original_name = snap_namespace.original_name;
return 0;
}
};
class GetMirrorVisitor {
public:
snap_mirror_namespace_t *mirror_snap;
explicit GetMirrorVisitor(snap_mirror_namespace_t *mirror_snap)
: mirror_snap(mirror_snap) {
}
template <typename T>
inline int operator()(const T&) const {
return -EINVAL;
}
inline int operator()(
const cls::rbd::MirrorSnapshotNamespace& snap_namespace) {
mirror_snap->state = static_cast<snap_mirror_state_t>(snap_namespace.state);
mirror_snap->complete = snap_namespace.complete;
mirror_snap->mirror_peer_uuids = snap_namespace.mirror_peer_uuids;
mirror_snap->primary_mirror_uuid = snap_namespace.primary_mirror_uuid;
mirror_snap->primary_snap_id = snap_namespace.primary_snap_id;
mirror_snap->last_copied_object_number =
snap_namespace.last_copied_object_number;
return 0;
}
};
} // anonymous namespace
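// Illustrative sketch, not part of the original file: the visitors above all
// follow the same shape, a templated catch-all operator() that rejects
// unrelated namespace types with -EINVAL plus one overload for the namespace
// of interest. The example visitor below is hypothetical and merely restates
// that pattern for the user namespace.
namespace {

struct ExampleIsUserSnapshotVisitor {
  template <typename T>
  inline int operator()(const T&) const {
    return -EINVAL;
  }
  inline int operator()(const cls::rbd::UserSnapshotNamespace&) const {
    return 0;
  }
};

} // anonymous namespace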
template <typename I>
int Snapshot<I>::get_group_namespace(I *ictx, uint64_t snap_id,
snap_group_namespace_t *group_snap) {
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
}
GetGroupVisitor ggv = GetGroupVisitor(ictx->cct, &ictx->md_ctx, group_snap);
r = snap_info->snap_namespace.visit(ggv);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Snapshot<I>::get_trash_namespace(I *ictx, uint64_t snap_id,
std::string* original_name) {
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
}
auto visitor = GetTrashVisitor(original_name);
r = snap_info->snap_namespace.visit(visitor);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Snapshot<I>::get_mirror_namespace(
I *ictx, uint64_t snap_id, snap_mirror_namespace_t *mirror_snap) {
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
}
auto gmv = GetMirrorVisitor(mirror_snap);
r = snap_info->snap_namespace.visit(gmv);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Snapshot<I>::get_namespace_type(I *ictx, uint64_t snap_id,
snap_namespace_type_t *namespace_type) {
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock l{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
}
*namespace_type = static_cast<snap_namespace_type_t>(
cls::rbd::get_snap_namespace_type(snap_info->snap_namespace));
return 0;
}
template <typename I>
int Snapshot<I>::remove(I *ictx, uint64_t snap_id) {
ldout(ictx->cct, 20) << "snap_remove " << ictx << " " << snap_id << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
cls::rbd::SnapshotNamespace snapshot_namespace;
std::string snapshot_name;
{
std::shared_lock image_locker{ictx->image_lock};
auto it = ictx->snap_info.find(snap_id);
if (it == ictx->snap_info.end()) {
return -ENOENT;
}
snapshot_namespace = it->second.snap_namespace;
snapshot_name = it->second.name;
}
C_SaferCond ctx;
ictx->operations->snap_remove(snapshot_namespace, snapshot_name, &ctx);
r = ctx.wait();
return r;
}
template <typename I>
int Snapshot<I>::get_name(I *ictx, uint64_t snap_id, std::string *snap_name)
{
ldout(ictx->cct, 20) << "snap_get_name " << ictx << " " << snap_id << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock image_locker{ictx->image_lock};
r = ictx->get_snap_name(snap_id, snap_name);
return r;
}
template <typename I>
int Snapshot<I>::get_id(I *ictx, const std::string& snap_name, uint64_t *snap_id)
{
ldout(ictx->cct, 20) << "snap_get_id " << ictx << " " << snap_name << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock image_locker{ictx->image_lock};
*snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap_name);
if (*snap_id == CEPH_NOSNAP)
return -ENOENT;
return 0;
}
template <typename I>
int Snapshot<I>::list(I *ictx, std::vector<snap_info_t>& snaps) {
ldout(ictx->cct, 20) << "snap_list " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
for (auto &it : ictx->snap_info) {
snap_info_t info;
info.name = it.second.name;
info.id = it.first;
info.size = it.second.size;
snaps.push_back(info);
}
return 0;
}
template <typename I>
int Snapshot<I>::exists(I *ictx, const cls::rbd::SnapshotNamespace& snap_namespace,
const char *snap_name, bool *exists) {
ldout(ictx->cct, 20) << "snap_exists " << ictx << " " << snap_name << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
*exists = ictx->get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP;
return 0;
}
template <typename I>
int Snapshot<I>::create(I *ictx, const char *snap_name, uint32_t flags,
ProgressContext& pctx) {
ldout(ictx->cct, 20) << "snap_create " << ictx << " " << snap_name
<< " flags: " << flags << dendl;
uint64_t internal_flags = 0;
int r = util::snap_create_flags_api_to_internal(ictx->cct, flags,
&internal_flags);
if (r < 0) {
return r;
}
return ictx->operations->snap_create(cls::rbd::UserSnapshotNamespace(),
snap_name, internal_flags, pctx);
}
template <typename I>
int Snapshot<I>::remove(I *ictx, const char *snap_name, uint32_t flags,
ProgressContext& pctx) {
ldout(ictx->cct, 20) << "snap_remove " << ictx << " " << snap_name << " flags: " << flags << dendl;
int r = 0;
r = ictx->state->refresh_if_required();
if (r < 0)
return r;
if (flags & RBD_SNAP_REMOVE_FLATTEN) {
r = Image<I>::flatten_children(ictx, snap_name, pctx);
if (r < 0) {
return r;
}
}
bool protect;
r = is_protected(ictx, snap_name, &protect);
if (r < 0) {
return r;
}
if (protect && flags & RBD_SNAP_REMOVE_UNPROTECT) {
r = ictx->operations->snap_unprotect(cls::rbd::UserSnapshotNamespace(), snap_name);
if (r < 0) {
lderr(ictx->cct) << "failed to unprotect snapshot: " << snap_name << dendl;
return r;
}
r = is_protected(ictx, snap_name, &protect);
if (r < 0) {
return r;
}
if (protect) {
lderr(ictx->cct) << "snapshot is still protected after unprotection" << dendl;
ceph_abort();
}
}
C_SaferCond ctx;
ictx->operations->snap_remove(cls::rbd::UserSnapshotNamespace(), snap_name, &ctx);
r = ctx.wait();
return r;
}
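// Illustrative sketch, not part of the original file: the flag bits accepted
// above allow a "force" style removal that flattens any children and drops
// the protection before the snapshot is deleted. The helper name is an
// assumption.
namespace {

[[maybe_unused]] int example_force_snap_remove(librbd::ImageCtx* ictx,
                                               const char* snap_name) {
  NoOpProgressContext prog_ctx;
  return Snapshot<librbd::ImageCtx>::remove(
      ictx, snap_name, RBD_SNAP_REMOVE_FLATTEN | RBD_SNAP_REMOVE_UNPROTECT,
      prog_ctx);
}

} // anonymous namespace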
template <typename I>
int Snapshot<I>::get_timestamp(I *ictx, uint64_t snap_id, struct timespec *timestamp) {
auto snap_it = ictx->snap_info.find(snap_id);
ceph_assert(snap_it != ictx->snap_info.end());
utime_t time = snap_it->second.timestamp;
time.to_timespec(timestamp);
return 0;
}
template <typename I>
int Snapshot<I>::get_limit(I *ictx, uint64_t *limit) {
int r = cls_client::snapshot_get_limit(&ictx->md_ctx, ictx->header_oid,
limit);
if (r == -EOPNOTSUPP) {
*limit = UINT64_MAX;
r = 0;
}
return r;
}
template <typename I>
int Snapshot<I>::set_limit(I *ictx, uint64_t limit) {
return ictx->operations->snap_set_limit(limit);
}
template <typename I>
int Snapshot<I>::is_protected(I *ictx, const char *snap_name, bool *protect) {
ldout(ictx->cct, 20) << "snap_is_protected " << ictx << " " << snap_name
<< dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;
bool is_unprotected;
r = ictx->is_snap_unprotected(snap_id, &is_unprotected);
  // consider both PROTECTED and UNPROTECTING to be 'protected',
  // since in either state the snapshot can't be deleted
*protect = !is_unprotected;
return r;
}
template <typename I>
int Snapshot<I>::get_namespace(I *ictx, const char *snap_name,
cls::rbd::SnapshotNamespace *snap_namespace) {
ldout(ictx->cct, 20) << "get_snap_namespace " << ictx << " " << snap_name
<< dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(*snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;
r = ictx->get_snap_namespace(snap_id, snap_namespace);
return r;
}
} // namespace api
} // namespace librbd
template class librbd::api::Snapshot<librbd::ImageCtx>;
| 12,009 | 25.988764 | 101 | cc |
null | ceph-main/src/librbd/api/Snapshot.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_SNAPSHOT_H
#define CEPH_LIBRBD_API_SNAPSHOT_H
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
namespace librbd {
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Snapshot {
static int get_group_namespace(ImageCtxT *ictx, uint64_t snap_id,
snap_group_namespace_t *group_snap);
static int get_trash_namespace(ImageCtxT *ictx, uint64_t snap_id,
std::string *original_name);
static int get_mirror_namespace(
ImageCtxT *ictx, uint64_t snap_id,
snap_mirror_namespace_t *mirror_snap);
static int get_namespace_type(ImageCtxT *ictx, uint64_t snap_id,
snap_namespace_type_t *namespace_type);
static int remove(ImageCtxT *ictx, uint64_t snap_id);
static int get_name(ImageCtxT *ictx, uint64_t snap_id, std::string *snap_name);
static int get_id(ImageCtxT *ictx, const std::string& snap_name, uint64_t *snap_id);
static int list(ImageCtxT *ictx, std::vector<snap_info_t>& snaps);
static int exists(ImageCtxT *ictx, const cls::rbd::SnapshotNamespace& snap_namespace,
const char *snap_name, bool *exists);
static int create(ImageCtxT *ictx, const char *snap_name, uint32_t flags,
ProgressContext& pctx);
static int remove(ImageCtxT *ictx, const char *snap_name, uint32_t flags, ProgressContext& pctx);
static int get_limit(ImageCtxT *ictx, uint64_t *limit);
static int set_limit(ImageCtxT *ictx, uint64_t limit);
static int get_timestamp(ImageCtxT *ictx, uint64_t snap_id, struct timespec *timestamp);
static int is_protected(ImageCtxT *ictx, const char *snap_name, bool *protect);
static int get_namespace(ImageCtxT *ictx, const char *snap_name,
cls::rbd::SnapshotNamespace *snap_namespace);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Snapshot<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_SNAPSHOT_H
| 2,125 | 30.264706 | 99 | h |
null | ceph-main/src/librbd/api/Trash.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Trash.h"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Operations.h"
#include "librbd/TrashWatcher.h"
#include "librbd/Utils.h"
#include "librbd/api/DiffIterate.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/image/RemoveRequest.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/mirror/EnableRequest.h"
#include "librbd/trash/MoveRequest.h"
#include "librbd/trash/RemoveRequest.h"
#include <json_spirit/json_spirit.h>
#include "librbd/journal/DisabledPolicy.h"
#include "librbd/image/ListWatchersRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Trash: " << __func__ << ": "
namespace librbd {
namespace api {
template <typename I>
const typename Trash<I>::TrashImageSources Trash<I>::ALLOWED_RESTORE_SOURCES {
cls::rbd::TRASH_IMAGE_SOURCE_USER,
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING,
cls::rbd::TRASH_IMAGE_SOURCE_USER_PARENT
};
namespace {
template <typename I>
int disable_mirroring(I *ictx) {
ldout(ictx->cct, 10) << dendl;
C_SaferCond ctx;
auto req = mirror::DisableRequest<I>::create(ictx, false, true, &ctx);
req->send();
int r = ctx.wait();
if (r < 0) {
lderr(ictx->cct) << "failed to disable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int enable_mirroring(IoCtx &io_ctx, const std::string &image_id) {
auto cct = reinterpret_cast<CephContext*>(io_ctx.cct());
uint64_t features;
uint64_t incompatible_features;
int r = cls_client::get_features(&io_ctx, util::header_name(image_id), true,
&features, &incompatible_features);
if (r < 0) {
lderr(cct) << "failed to retrieve features: " << cpp_strerror(r) << dendl;
return r;
}
if ((features & RBD_FEATURE_JOURNALING) == 0) {
return 0;
}
cls::rbd::MirrorMode mirror_mode;
r = cls_client::mirror_mode_get(&io_ctx, &mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
<< dendl;
return r;
}
if (mirror_mode != cls::rbd::MIRROR_MODE_POOL) {
ldout(cct, 10) << "not pool mirroring mode" << dendl;
return 0;
}
ldout(cct, 10) << dendl;
AsioEngine asio_engine(io_ctx);
C_SaferCond ctx;
auto req = mirror::EnableRequest<I>::create(
io_ctx, image_id, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "", false,
asio_engine.get_work_queue(), &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed to enable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
int list_trash_image_specs(
librados::IoCtx &io_ctx,
std::map<std::string, cls::rbd::TrashImageSpec>* trash_image_specs,
bool exclude_user_remove_source) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "list_trash_image_specs " << &io_ctx << dendl;
bool more_entries;
uint32_t max_read = 1024;
std::string last_read;
do {
std::map<std::string, cls::rbd::TrashImageSpec> trash_entries;
int r = cls_client::trash_list(&io_ctx, last_read, max_read,
&trash_entries);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing rbd trash entries: " << cpp_strerror(r)
<< dendl;
return r;
} else if (r == -ENOENT) {
break;
}
if (trash_entries.empty()) {
break;
}
for (const auto &entry : trash_entries) {
if (exclude_user_remove_source &&
entry.second.source == cls::rbd::TRASH_IMAGE_SOURCE_REMOVING) {
continue;
}
trash_image_specs->insert({entry.first, entry.second});
}
last_read = trash_entries.rbegin()->first;
more_entries = (trash_entries.size() >= max_read);
} while (more_entries);
return 0;
}
} // anonymous namespace
template <typename I>
int Trash<I>::move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, const std::string &image_id,
uint64_t delay) {
ceph_assert(!image_name.empty() && !image_id.empty());
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << &io_ctx << " name=" << image_name << ", id=" << image_id
<< dendl;
auto ictx = new I("", image_id, nullptr, io_ctx, false);
int r = ictx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
return r;
}
if (r == 0) {
cls::rbd::MirrorImage mirror_image;
int mirror_r = cls_client::mirror_image_get(&ictx->md_ctx, ictx->id,
&mirror_image);
if (mirror_r == -ENOENT) {
ldout(ictx->cct, 10) << "mirroring is not enabled for this image"
<< dendl;
} else if (mirror_r < 0) {
lderr(ictx->cct) << "failed to retrieve mirror image: "
<< cpp_strerror(mirror_r) << dendl;
return mirror_r;
} else if (mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
// a remote rbd-mirror might own the exclusive-lock on this image
// and therefore we need to disable mirroring so that it closes the image
r = disable_mirroring<I>(ictx);
if (r < 0) {
ictx->state->close();
return r;
}
}
if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
std::unique_lock image_locker{ictx->image_lock};
ictx->set_journal_policy(new journal::DisabledPolicy());
}
ictx->owner_lock.lock_shared();
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(0);
r = ictx->operations->prepare_image_update(
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true);
if (r < 0) {
lderr(cct) << "cannot obtain exclusive lock - not removing" << dendl;
ictx->owner_lock.unlock_shared();
ictx->state->close();
return -EBUSY;
}
}
ictx->owner_lock.unlock_shared();
ictx->image_lock.lock_shared();
if (!ictx->migration_info.empty()) {
lderr(cct) << "cannot move migrating image to trash" << dendl;
ictx->image_lock.unlock_shared();
ictx->state->close();
return -EBUSY;
}
ictx->image_lock.unlock_shared();
if (mirror_r >= 0 &&
mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
r = disable_mirroring<I>(ictx);
if (r < 0) {
ictx->state->close();
return r;
}
}
ictx->state->close();
}
utime_t delete_time{ceph_clock_now()};
utime_t deferment_end_time{delete_time};
deferment_end_time += delay;
cls::rbd::TrashImageSpec trash_image_spec{
static_cast<cls::rbd::TrashImageSource>(source), image_name,
delete_time, deferment_end_time};
trash_image_spec.state = cls::rbd::TRASH_IMAGE_STATE_MOVING;
C_SaferCond ctx;
auto req = trash::MoveRequest<I>::create(io_ctx, image_id, trash_image_spec,
&ctx);
req->send();
r = ctx.wait();
trash_image_spec.state = cls::rbd::TRASH_IMAGE_STATE_NORMAL;
int ret = cls_client::trash_state_set(&io_ctx, image_id,
trash_image_spec.state,
cls::rbd::TRASH_IMAGE_STATE_MOVING);
if (ret < 0 && ret != -EOPNOTSUPP) {
lderr(cct) << "error setting trash image state: "
<< cpp_strerror(ret) << dendl;
return ret;
}
if (r < 0) {
return r;
}
C_SaferCond notify_ctx;
TrashWatcher<I>::notify_image_added(io_ctx, image_id, trash_image_spec,
¬ify_ctx);
r = notify_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
return 0;
}
template <typename I>
int Trash<I>::move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, uint64_t delay) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << &io_ctx << " name=" << image_name << dendl;
// try to get image id from the directory
std::string image_id;
int r = cls_client::dir_get_id(&io_ctx, RBD_DIRECTORY, image_name,
&image_id);
if (r == -ENOENT) {
r = io_ctx.stat(util::old_header_name(image_name), nullptr, nullptr);
if (r == 0) {
// cannot move V1 image to trash
ldout(cct, 10) << "cannot move v1 image to trash" << dendl;
return -EOPNOTSUPP;
}
// search for an interrupted trash move request
std::map<std::string, cls::rbd::TrashImageSpec> trash_image_specs;
int r = list_trash_image_specs(io_ctx, &trash_image_specs, true);
if (r < 0) {
return r;
}
if (auto found_image =
std::find_if(
trash_image_specs.begin(), trash_image_specs.end(),
[&](const auto& pair) {
const auto& spec = pair.second;
return (spec.source == cls::rbd::TRASH_IMAGE_SOURCE_USER &&
spec.state == cls::rbd::TRASH_IMAGE_STATE_MOVING &&
spec.name == image_name);
});
found_image != trash_image_specs.end()) {
image_id = found_image->first;
} else {
return -ENOENT;
}
ldout(cct, 15) << "derived image id " << image_id << " from existing "
<< "trash entry" << dendl;
} else if (r < 0) {
lderr(cct) << "failed to retrieve image id: " << cpp_strerror(r) << dendl;
return r;
}
if (image_name.empty() || image_id.empty()) {
lderr(cct) << "invalid image name/id" << dendl;
return -EINVAL;
}
return Trash<I>::move(io_ctx, source, image_name, image_id, delay);
}
template <typename I>
int Trash<I>::get(IoCtx &io_ctx, const std::string &id,
trash_image_info_t *info) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << __func__ << " " << &io_ctx << dendl;
cls::rbd::TrashImageSpec spec;
int r = cls_client::trash_get(&io_ctx, id, &spec);
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "error retrieving trash entry: " << cpp_strerror(r)
<< dendl;
return r;
}
rbd_trash_image_source_t source = static_cast<rbd_trash_image_source_t>(
spec.source);
*info = trash_image_info_t{id, spec.name, source, spec.deletion_time.sec(),
spec.deferment_end_time.sec()};
return 0;
}
template <typename I>
int Trash<I>::list(IoCtx &io_ctx, std::vector<trash_image_info_t> &entries,
bool exclude_user_remove_source) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << __func__ << " " << &io_ctx << dendl;
std::map<std::string, cls::rbd::TrashImageSpec> trash_image_specs;
int r = list_trash_image_specs(io_ctx, &trash_image_specs,
exclude_user_remove_source);
if (r < 0) {
return r;
}
entries.reserve(trash_image_specs.size());
for (const auto& [image_id, spec] : trash_image_specs) {
rbd_trash_image_source_t source =
static_cast<rbd_trash_image_source_t>(spec.source);
entries.push_back({image_id, spec.name, source,
spec.deletion_time.sec(),
spec.deferment_end_time.sec()});
}
return 0;
}
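// Illustrative sketch, not part of the original file: deferred deletion
// usually pairs move() with a non-zero delay and a later list() (or purge())
// pass. The helper name and the 24 hour delay are assumptions.
namespace {

[[maybe_unused]] int example_defer_image_removal(
    librados::IoCtx& io_ctx, const std::string& image_name,
    std::vector<trash_image_info_t>* entries) {
  // keep the image recoverable for 24 hours before purge may reap it
  int r = Trash<librbd::ImageCtx>::move(io_ctx, RBD_TRASH_IMAGE_SOURCE_USER,
                                        image_name, 24 * 60 * 60);
  if (r < 0) {
    return r;
  }
  return Trash<librbd::ImageCtx>::list(io_ctx, *entries, true);
}

} // anonymous namespace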
template <typename I>
int Trash<I>::purge(IoCtx& io_ctx, time_t expire_ts,
float threshold, ProgressContext& pctx) {
auto *cct((CephContext *) io_ctx.cct());
ldout(cct, 20) << &io_ctx << dendl;
std::vector<librbd::trash_image_info_t> trash_entries;
int r = librbd::api::Trash<I>::list(io_ctx, trash_entries, true);
if (r < 0) {
return r;
}
trash_entries.erase(
std::remove_if(trash_entries.begin(), trash_entries.end(),
[](librbd::trash_image_info_t info) {
return info.source != RBD_TRASH_IMAGE_SOURCE_USER &&
info.source != RBD_TRASH_IMAGE_SOURCE_USER_PARENT;
}),
trash_entries.end());
std::set<std::string> to_be_removed;
if (threshold != -1) {
if (threshold < 0 || threshold > 1) {
lderr(cct) << "argument 'threshold' is out of valid range"
<< dendl;
return -EINVAL;
}
librados::bufferlist inbl;
librados::bufferlist outbl;
std::string pool_name = io_ctx.get_pool_name();
librados::Rados rados(io_ctx);
rados.mon_command(R"({"prefix": "df", "format": "json"})", inbl,
&outbl, nullptr);
json_spirit::mValue json;
if (!json_spirit::read(outbl.to_str(), json)) {
lderr(cct) << "ceph df json output could not be parsed"
<< dendl;
return -EBADMSG;
}
json_spirit::mArray arr = json.get_obj()["pools"].get_array();
double pool_percent_used = 0;
uint64_t pool_total_bytes = 0;
std::map<std::string, std::vector<std::string>> datapools;
std::sort(trash_entries.begin(), trash_entries.end(),
[](librbd::trash_image_info_t a, librbd::trash_image_info_t b) {
return a.deferment_end_time < b.deferment_end_time;
}
);
for (const auto &entry : trash_entries) {
int64_t data_pool_id = -1;
r = cls_client::get_data_pool(&io_ctx, util::header_name(entry.id),
&data_pool_id);
if (r < 0 && r != -ENOENT && r != -EOPNOTSUPP) {
lderr(cct) << "failed to query data pool: " << cpp_strerror(r) << dendl;
return r;
} else if (data_pool_id == -1) {
data_pool_id = io_ctx.get_id();
}
if (data_pool_id != io_ctx.get_id()) {
librados::IoCtx data_io_ctx;
r = util::create_ioctx(io_ctx, "image", data_pool_id,
{}, &data_io_ctx);
if (r < 0) {
lderr(cct) << "error accessing data pool" << dendl;
continue;
}
auto data_pool = data_io_ctx.get_pool_name();
datapools[data_pool].push_back(entry.id);
} else {
datapools[pool_name].push_back(entry.id);
}
}
uint64_t bytes_to_free = 0;
for (uint8_t i = 0; i < arr.size(); ++i) {
json_spirit::mObject obj = arr[i].get_obj();
std::string name = obj.find("name")->second.get_str();
auto img = datapools.find(name);
if (img != datapools.end()) {
json_spirit::mObject stats = arr[i].get_obj()["stats"].get_obj();
pool_percent_used = stats["percent_used"].get_real();
if (pool_percent_used <= threshold) continue;
bytes_to_free = 0;
pool_total_bytes = stats["max_avail"].get_uint64() +
stats["bytes_used"].get_uint64();
auto bytes_threshold = (uint64_t) (pool_total_bytes *
(pool_percent_used - threshold));
for (const auto &it : img->second) {
auto ictx = new I("", it, nullptr, io_ctx, false);
r = ictx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r == -ENOENT) {
continue;
} else if (r < 0) {
lderr(cct) << "failed to open image " << it << ": "
<< cpp_strerror(r) << dendl;
}
r = librbd::api::DiffIterate<I>::diff_iterate(
ictx, cls::rbd::UserSnapshotNamespace(), nullptr, 0, ictx->size,
false, true,
[](uint64_t offset, size_t len, int exists, void *arg) {
auto *to_free = reinterpret_cast<uint64_t *>(arg);
if (exists)
(*to_free) += len;
return 0;
}, &bytes_to_free);
ictx->state->close();
if (r < 0) {
lderr(cct) << "failed to calculate disk usage for image " << it
<< ": " << cpp_strerror(r) << dendl;
continue;
}
to_be_removed.insert(it);
if (bytes_to_free >= bytes_threshold) {
break;
}
}
}
}
if (bytes_to_free == 0) {
ldout(cct, 10) << "pool usage is lower than or equal to "
<< (threshold * 100)
<< "%" << dendl;
return 0;
}
}
if (expire_ts == 0) {
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
expire_ts = now.tv_sec;
}
for (const auto &entry : trash_entries) {
if (expire_ts >= entry.deferment_end_time) {
to_be_removed.insert(entry.id);
}
}
NoOpProgressContext remove_pctx;
uint64_t list_size = to_be_removed.size(), i = 0;
int remove_err = 1;
while (!to_be_removed.empty() && remove_err == 1) {
remove_err = 0;
for (auto it = to_be_removed.begin(); it != to_be_removed.end(); ) {
trash_image_info_t trash_info;
r = Trash<I>::get(io_ctx, *it, &trash_info);
if (r == -ENOENT) {
// likely RBD_TRASH_IMAGE_SOURCE_USER_PARENT image removed as a side
        // effect of a preceding remove (last child detach)
pctx.update_progress(++i, list_size);
it = to_be_removed.erase(it);
continue;
} else if (r < 0) {
lderr(cct) << "error getting image id " << *it
<< " info: " << cpp_strerror(r) << dendl;
return r;
}
r = Trash<I>::remove(io_ctx, *it, true, remove_pctx);
if (r == -ENOTEMPTY || r == -EBUSY || r == -EMLINK || r == -EUCLEAN) {
if (!remove_err) {
remove_err = r;
}
++it;
continue;
} else if (r < 0) {
lderr(cct) << "error removing image id " << *it
<< ": " << cpp_strerror(r) << dendl;
return r;
}
pctx.update_progress(++i, list_size);
it = to_be_removed.erase(it);
remove_err = 1;
}
ldout(cct, 20) << "remove_err=" << remove_err << dendl;
}
if (!to_be_removed.empty()) {
ceph_assert(remove_err < 0);
ldout(cct, 10) << "couldn't remove " << to_be_removed.size()
<< " expired images" << dendl;
return remove_err;
}
return 0;
}
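// Worked example (not part of the tree) of the pool-usage math in purge()
// above: the number of bytes that must be reclaimed to bring a data pool
// back under the requested threshold is
//
//   bytes_threshold = (max_avail + bytes_used) * (percent_used - threshold)
//
// e.g. a 100 GiB pool reported at 60% used with a threshold of 0.5 needs
// roughly 10 GiB freed before the deferment-ordered walk of trashed images
// stops. The helper below merely restates that formula and is not used by
// librbd.
inline uint64_t example_purge_bytes_threshold(uint64_t max_avail,
                                              uint64_t bytes_used,
                                              double percent_used,
                                              double threshold) {
  uint64_t pool_total_bytes = max_avail + bytes_used;
  return static_cast<uint64_t>(pool_total_bytes * (percent_used - threshold));
}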
template <typename I>
int Trash<I>::remove(IoCtx &io_ctx, const std::string &image_id, bool force,
ProgressContext& prog_ctx) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "trash_remove " << &io_ctx << " " << image_id
<< " " << force << dendl;
cls::rbd::TrashImageSpec trash_spec;
int r = cls_client::trash_get(&io_ctx, image_id, &trash_spec);
if (r < 0) {
lderr(cct) << "error getting image id " << image_id
<< " info from trash: " << cpp_strerror(r) << dendl;
return r;
}
utime_t now = ceph_clock_now();
if (now < trash_spec.deferment_end_time && !force) {
lderr(cct) << "error: deferment time has not expired." << dendl;
return -EPERM;
}
if (trash_spec.state == cls::rbd::TRASH_IMAGE_STATE_MOVING) {
lderr(cct) << "error: image is pending moving to the trash."
<< dendl;
return -EUCLEAN;
} else if (trash_spec.state != cls::rbd::TRASH_IMAGE_STATE_NORMAL &&
trash_spec.state != cls::rbd::TRASH_IMAGE_STATE_REMOVING) {
lderr(cct) << "error: image is pending restoration." << dendl;
return -EBUSY;
}
AsioEngine asio_engine(io_ctx);
C_SaferCond cond;
auto req = librbd::trash::RemoveRequest<I>::create(
io_ctx, image_id, asio_engine.get_work_queue(), force, prog_ctx, &cond);
req->send();
r = cond.wait();
if (r < 0) {
return r;
}
C_SaferCond notify_ctx;
TrashWatcher<I>::notify_image_removed(io_ctx, image_id, ¬ify_ctx);
r = notify_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
return 0;
}
template <typename I>
int Trash<I>::restore(librados::IoCtx &io_ctx,
const TrashImageSources& trash_image_sources,
const std::string &image_id,
const std::string &image_new_name) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "trash_restore " << &io_ctx << " " << image_id << " "
<< image_new_name << dendl;
cls::rbd::TrashImageSpec trash_spec;
int r = cls_client::trash_get(&io_ctx, image_id, &trash_spec);
if (r < 0) {
lderr(cct) << "error getting image id " << image_id
<< " info from trash: " << cpp_strerror(r) << dendl;
return r;
}
if (trash_image_sources.count(trash_spec.source) == 0) {
lderr(cct) << "Current trash source '" << trash_spec.source << "' "
<< "does not match expected: "
<< trash_image_sources << dendl;
return -EINVAL;
}
std::string image_name = image_new_name;
if (trash_spec.state != cls::rbd::TRASH_IMAGE_STATE_NORMAL &&
trash_spec.state != cls::rbd::TRASH_IMAGE_STATE_RESTORING) {
lderr(cct) << "error restoring image id " << image_id
<< ", which is pending deletion" << dendl;
return -EBUSY;
}
r = cls_client::trash_state_set(&io_ctx, image_id,
cls::rbd::TRASH_IMAGE_STATE_RESTORING,
cls::rbd::TRASH_IMAGE_STATE_NORMAL);
if (r < 0 && r != -EOPNOTSUPP) {
lderr(cct) << "error setting trash image state: "
<< cpp_strerror(r) << dendl;
return r;
}
if (image_name.empty()) {
// if user didn't specify a new name, let's try using the old name
image_name = trash_spec.name;
ldout(cct, 20) << "restoring image id " << image_id << " with name "
<< image_name << dendl;
}
// check if no image exists with the same name
bool create_id_obj = true;
std::string existing_id;
r = cls_client::get_id(&io_ctx, util::id_obj_name(image_name), &existing_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error checking if image " << image_name << " exists: "
<< cpp_strerror(r) << dendl;
int ret = cls_client::trash_state_set(&io_ctx, image_id,
cls::rbd::TRASH_IMAGE_STATE_NORMAL,
cls::rbd::TRASH_IMAGE_STATE_RESTORING);
if (ret < 0 && ret != -EOPNOTSUPP) {
lderr(cct) << "error setting trash image state: "
<< cpp_strerror(ret) << dendl;
}
return r;
  } else if (r != -ENOENT) {
// checking if we are recovering from an incomplete restore
if (existing_id != image_id) {
ldout(cct, 2) << "an image with the same name already exists" << dendl;
int r2 = cls_client::trash_state_set(&io_ctx, image_id,
cls::rbd::TRASH_IMAGE_STATE_NORMAL,
cls::rbd::TRASH_IMAGE_STATE_RESTORING);
if (r2 < 0 && r2 != -EOPNOTSUPP) {
lderr(cct) << "error setting trash image state: "
<< cpp_strerror(r2) << dendl;
}
return -EEXIST;
}
create_id_obj = false;
}
if (create_id_obj) {
ldout(cct, 2) << "adding id object" << dendl;
librados::ObjectWriteOperation op;
op.create(true);
cls_client::set_id(&op, image_id);
r = io_ctx.operate(util::id_obj_name(image_name), &op);
if (r < 0) {
lderr(cct) << "error adding id object for image " << image_name
<< ": " << cpp_strerror(r) << dendl;
return r;
}
}
ldout(cct, 2) << "adding rbd image to v2 directory..." << dendl;
r = cls_client::dir_add_image(&io_ctx, RBD_DIRECTORY, image_name,
image_id);
if (r < 0 && r != -EEXIST) {
lderr(cct) << "error adding image to v2 directory: "
<< cpp_strerror(r) << dendl;
return r;
}
r = enable_mirroring<I>(io_ctx, image_id);
if (r < 0) {
// not fatal -- ignore
}
ldout(cct, 2) << "removing image from trash..." << dendl;
r = cls_client::trash_remove(&io_ctx, image_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error removing image id " << image_id << " from trash: "
<< cpp_strerror(r) << dendl;
return r;
}
C_SaferCond notify_ctx;
TrashWatcher<I>::notify_image_removed(io_ctx, image_id, ¬ify_ctx);
r = notify_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
return 0;
}
} // namespace api
} // namespace librbd
template class librbd::api::Trash<librbd::ImageCtx>;
| 24,816 | 31.653947 | 82 | cc |
null | ceph-main/src/librbd/api/Trash.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_API_TRASH_H
#define LIBRBD_API_TRASH_H
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <set>
#include <string>
#include <vector>
namespace librbd {
class ProgressContext;
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Trash {
typedef std::set<cls::rbd::TrashImageSource> TrashImageSources;
static const TrashImageSources ALLOWED_RESTORE_SOURCES;
static int move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, uint64_t delay);
static int move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, const std::string &image_id,
uint64_t delay);
static int get(librados::IoCtx &io_ctx, const std::string &id,
trash_image_info_t *info);
static int list(librados::IoCtx &io_ctx,
std::vector<trash_image_info_t> &entries,
bool exclude_user_remove_source);
static int purge(IoCtx& io_ctx, time_t expire_ts,
float threshold, ProgressContext& pctx);
static int remove(librados::IoCtx &io_ctx, const std::string &image_id,
bool force, ProgressContext& prog_ctx);
static int restore(librados::IoCtx &io_ctx,
const TrashImageSources& trash_image_sources,
const std::string &image_id,
const std::string &image_new_name);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Trash<librbd::ImageCtx>;
#endif // LIBRBD_API_TRASH_H
| 1,783 | 32.037037 | 77 | h |
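// Illustrative usage sketch (not part of the tree): driving the Trash<> API
// declared above from an already-connected librados::IoCtx for an RBD pool.
// The function name is invented for the example, NoOpProgressContext comes
// from librbd/internal.h, and error handling is reduced to early returns.
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "librbd/internal.h"
#include "librbd/api/Trash.h"
#include <vector>
int purge_expired_trash_example(librados::IoCtx& io_ctx) {
  std::vector<librbd::trash_image_info_t> entries;
  // true filters out entries that only pass through the trash as part of a
  // plain image removal
  int r = librbd::api::Trash<>::list(io_ctx, entries, true);
  if (r < 0) {
    return r;
  }
  // expire_ts == 0 means "now"; threshold == -1 disables the pool-usage check
  librbd::NoOpProgressContext prog_ctx;
  return librbd::api::Trash<>::purge(io_ctx, 0, -1, prog_ctx);
}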
null | ceph-main/src/librbd/api/Utils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Utils.h"
#include "common/dout.h"
#if defined(HAVE_LIBCRYPTSETUP)
#include "librbd/crypto/luks/LUKSEncryptionFormat.h"
#endif
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::util: " << __func__ << ": "
namespace librbd {
namespace api {
namespace util {
template <typename I>
int create_encryption_format(
CephContext* cct, encryption_format_t format,
encryption_options_t opts, size_t opts_size, bool c_api,
crypto::EncryptionFormat<I>** result_format) {
size_t expected_opts_size;
switch (format) {
#if defined(HAVE_LIBCRYPTSETUP)
case RBD_ENCRYPTION_FORMAT_LUKS1: {
if (c_api) {
expected_opts_size = sizeof(rbd_encryption_luks1_format_options_t);
if (expected_opts_size == opts_size) {
auto c_opts = (rbd_encryption_luks1_format_options_t*)opts;
*result_format = new crypto::luks::LUKS1EncryptionFormat<I>(
c_opts->alg, {c_opts->passphrase, c_opts->passphrase_size});
}
} else {
expected_opts_size = sizeof(encryption_luks1_format_options_t);
if (expected_opts_size == opts_size) {
auto cpp_opts = (encryption_luks1_format_options_t*)opts;
*result_format = new crypto::luks::LUKS1EncryptionFormat<I>(
cpp_opts->alg, cpp_opts->passphrase);
}
}
break;
}
case RBD_ENCRYPTION_FORMAT_LUKS2: {
if (c_api) {
expected_opts_size = sizeof(rbd_encryption_luks2_format_options_t);
if (expected_opts_size == opts_size) {
auto c_opts = (rbd_encryption_luks2_format_options_t*)opts;
*result_format = new crypto::luks::LUKS2EncryptionFormat<I>(
c_opts->alg, {c_opts->passphrase, c_opts->passphrase_size});
}
} else {
expected_opts_size = sizeof(encryption_luks2_format_options_t);
if (expected_opts_size == opts_size) {
auto cpp_opts = (encryption_luks2_format_options_t*)opts;
*result_format = new crypto::luks::LUKS2EncryptionFormat<I>(
cpp_opts->alg, cpp_opts->passphrase);
}
}
break;
}
case RBD_ENCRYPTION_FORMAT_LUKS: {
if (c_api) {
expected_opts_size = sizeof(rbd_encryption_luks_format_options_t);
if (expected_opts_size == opts_size) {
auto c_opts = (rbd_encryption_luks_format_options_t*)opts;
*result_format = new crypto::luks::LUKSEncryptionFormat<I>(
{c_opts->passphrase, c_opts->passphrase_size});
}
} else {
expected_opts_size = sizeof(encryption_luks_format_options_t);
if (expected_opts_size == opts_size) {
auto cpp_opts = (encryption_luks_format_options_t*)opts;
*result_format = new crypto::luks::LUKSEncryptionFormat<I>(
cpp_opts->passphrase);
}
}
break;
}
#endif
default:
lderr(cct) << "unsupported encryption format: " << format << dendl;
return -ENOTSUP;
}
if (expected_opts_size != opts_size) {
lderr(cct) << "expected opts_size: " << expected_opts_size << dendl;
return -EINVAL;
}
return 0;
}
} // namespace util
} // namespace api
} // namespace librbd
template int librbd::api::util::create_encryption_format(
CephContext* cct, encryption_format_t format, encryption_options_t opts,
size_t opts_size, bool c_api,
crypto::EncryptionFormat<librbd::ImageCtx>** result_format);
| 3,607 | 34.029126 | 78 | cc |
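// Illustrative sketch (not part of the tree): calling the helper defined in
// Utils.cc above through the C++ (c_api=false) path with LUKS2 options. The
// wrapper name is invented for the example and the algorithm choice is
// arbitrary; on success *fmt points to a newly allocated format that the
// caller owns.
#include "librbd/api/Utils.h"
#include <string>
int make_luks2_format_example(
    CephContext* cct, const std::string& passphrase,
    librbd::crypto::EncryptionFormat<librbd::ImageCtx>** fmt) {
  librbd::encryption_luks2_format_options_t opts =
      {RBD_ENCRYPTION_ALGORITHM_AES256, passphrase};
  // c_api=false selects the C++ option structs (and their size check)
  return librbd::api::util::create_encryption_format<librbd::ImageCtx>(
      cct, RBD_ENCRYPTION_FORMAT_LUKS2, &opts, sizeof(opts), false, fmt);
}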
null | ceph-main/src/librbd/api/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_UTILS_H
#define CEPH_LIBRBD_API_UTILS_H
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/EncryptionFormat.h"
namespace librbd {
struct ImageCtx;
namespace api {
namespace util {
template <typename ImageCtxT = librbd::ImageCtx>
int create_encryption_format(
CephContext* cct, encryption_format_t format,
encryption_options_t opts, size_t opts_size, bool c_api,
crypto::EncryptionFormat<ImageCtxT>** result_format);
} // namespace util
} // namespace api
} // namespace librbd
#endif // CEPH_LIBRBD_API_UTILS_H
| 701 | 23.206897 | 70 | h |
null | ceph-main/src/librbd/asio/ContextWQ.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/asio/ContextWQ.h"
#include "include/Context.h"
#include "common/Cond.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::asio::ContextWQ: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace asio {
ContextWQ::ContextWQ(CephContext* cct, boost::asio::io_context& io_context)
: m_cct(cct), m_io_context(io_context),
m_strand(std::make_unique<boost::asio::io_context::strand>(io_context)),
m_queued_ops(0) {
ldout(m_cct, 20) << dendl;
}
ContextWQ::~ContextWQ() {
ldout(m_cct, 20) << dendl;
drain();
m_strand.reset();
}
void ContextWQ::drain() {
ldout(m_cct, 20) << dendl;
C_SaferCond ctx;
drain_handler(&ctx);
ctx.wait();
}
void ContextWQ::drain_handler(Context* ctx) {
if (m_queued_ops == 0) {
ctx->complete(0);
return;
}
// new items might be queued while we are trying to drain, so we
// might need to post the handler multiple times
boost::asio::post(*m_strand, [this, ctx]() { drain_handler(ctx); });
}
} // namespace asio
} // namespace librbd
| 1,227 | 23.56 | 76 | cc |
null | ceph-main/src/librbd/asio/ContextWQ.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASIO_CONTEXT_WQ_H
#define CEPH_LIBRBD_ASIO_CONTEXT_WQ_H
#include "include/common_fwd.h"
#include "include/Context.h"
#include <atomic>
#include <memory>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/post.hpp>
namespace librbd {
namespace asio {
class ContextWQ {
public:
explicit ContextWQ(CephContext* cct, boost::asio::io_context& io_context);
~ContextWQ();
void drain();
void queue(Context *ctx, int r = 0) {
++m_queued_ops;
// ensure all legacy ContextWQ users are dispatched sequentially for
// backwards compatibility (i.e. might not be concurrent thread-safe)
boost::asio::post(*m_strand, [this, ctx, r]() {
ctx->complete(r);
ceph_assert(m_queued_ops > 0);
--m_queued_ops;
});
}
private:
CephContext* m_cct;
boost::asio::io_context& m_io_context;
std::unique_ptr<boost::asio::io_context::strand> m_strand;
std::atomic<uint64_t> m_queued_ops;
void drain_handler(Context* ctx);
};
} // namespace asio
} // namespace librbd
#endif // CEPH_LIBRBD_ASIO_CONTEXT_WQ_H
| 1,214 | 21.924528 | 76 | h |
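// Illustrative sketch (not part of the tree): exercising the ContextWQ
// declared above with a standalone io_context. Inside librbd the io_context
// and its worker threads come from AsioEngine; the thread and work guard
// below exist only to make the example self-contained, and the function name
// is invented.
#include "librbd/asio/ContextWQ.h"
#include "common/Cond.h"
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <thread>
void context_wq_example(CephContext* cct) {
  boost::asio::io_context io_context;
  auto work_guard = boost::asio::make_work_guard(io_context);
  std::thread io_thread([&io_context]() { io_context.run(); });
  {
    librbd::asio::ContextWQ wq(cct, io_context);
    C_SaferCond on_complete;
    wq.queue(&on_complete, 0);   // completes on the io_context thread
    on_complete.wait();          // wait for the queued context to run
    wq.drain();                  // blocks until every queued op has finished
  }
  work_guard.reset();            // allow io_context.run() to return
  io_thread.join();
}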
null | ceph-main/src/librbd/asio/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASIO_UTILS_H
#define CEPH_LIBRBD_ASIO_UTILS_H
#include "include/Context.h"
#include "include/rados/librados_fwd.hpp"
#include <boost/system/error_code.hpp>
namespace librbd {
namespace asio {
namespace util {
template <typename T>
auto get_context_adapter(T&& t) {
return [t = std::move(t)](boost::system::error_code ec) {
t->complete(-ec.value());
};
}
template <typename T>
auto get_callback_adapter(T&& t) {
return [t = std::move(t)](boost::system::error_code ec, auto&& ... args) {
t(-ec.value(), std::forward<decltype(args)>(args)...);
};
}
} // namespace util
} // namespace asio
} // namespace librbd
#endif // CEPH_LIBRBD_ASIO_UTILS_H
| 792 | 22.323529 | 76 | h |
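// Illustrative sketch (not part of the tree): the adapters above bridge
// boost/neorados-style completions (which report a boost::system::error_code)
// to librbd's negative-errno convention. Invoking the returned handler with
// an error_code ec ends up calling Context::complete(-ec.value()). The
// function name below is invented for the example.
#include "librbd/asio/Utils.h"
#include "include/Context.h"
#include <boost/system/error_code.hpp>
#include <cerrno>
void context_adapter_example(Context* on_finish) {
  auto handler = librbd::asio::util::get_context_adapter(on_finish);
  // simulate a failed completion: on_finish->complete(-ENOENT) is invoked
  handler(boost::system::error_code(ENOENT, boost::system::generic_category()));
}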
null | ceph-main/src/librbd/cache/ImageWriteback.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ImageWriteback.h"
#include "include/buffer.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageRequest.h"
#include "librbd/io/ReadResult.h"
#undef dout_subsys
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ImageWriteback: " << __func__ << ": "
namespace librbd {
namespace cache {
template <typename I>
ImageWriteback<I>::ImageWriteback(I &image_ctx) : m_image_ctx(image_ctx) {
}
template <typename I>
void ImageWriteback<I>::aio_read(Extents &&image_extents, bufferlist *bl,
int fadvise_flags, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "image_extents=" << image_extents << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_READ);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_read(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
std::move(image_extents), io::ImageArea::DATA, io::ReadResult{bl},
image_ctx->get_data_io_context(), fadvise_flags, 0, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_write(Extents &&image_extents,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "image_extents=" << image_extents << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_WRITE);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_write(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
std::move(image_extents), io::ImageArea::DATA, std::move(bl),
fadvise_flags, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_discard(uint64_t offset, uint64_t length,
uint32_t discard_granularity_bytes,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "offset=" << offset << ", "
<< "length=" << length << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_DISCARD);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_discard(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
{{offset, length}}, io::ImageArea::DATA, discard_granularity_bytes, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_flush(io::FlushSource flush_source,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_FLUSH);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_flush(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
flush_source, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_writesame(uint64_t offset, uint64_t length,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "offset=" << offset << ", "
<< "length=" << length << ", "
<< "data_len=" << bl.length() << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_WRITESAME);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_write_same(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
{{offset, length}}, io::ImageArea::DATA, std::move(bl),
fadvise_flags, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_compare_and_write(Extents &&image_extents,
ceph::bufferlist&& cmp_bl,
ceph::bufferlist&& bl,
uint64_t *mismatch_offset,
int fadvise_flags,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "image_extents=" << image_extents << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_COMPARE_AND_WRITE);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_compare_and_write(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
std::move(image_extents), io::ImageArea::DATA, std::move(cmp_bl),
std::move(bl), mismatch_offset, fadvise_flags, trace);
req->send();
}
} // namespace cache
} // namespace librbd
template class librbd::cache::ImageWriteback<librbd::ImageCtx>;
| 5,731 | 37.993197 | 79 | cc |
null | ceph-main/src/librbd/cache/ImageWriteback.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK
#define CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK
#include "include/buffer_fwd.h"
#include "include/int_types.h"
#include "librbd/io/Types.h"
#include <vector>
class Context;
namespace librbd {
struct ImageCtx;
namespace cache {
class ImageWritebackInterface {
public:
typedef std::vector<std::pair<uint64_t,uint64_t> > Extents;
virtual ~ImageWritebackInterface() {
}
virtual void aio_read(Extents &&image_extents, ceph::bufferlist *bl,
int fadvise_flags, Context *on_finish) = 0;
virtual void aio_write(Extents &&image_extents, ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) = 0;
virtual void aio_discard(uint64_t offset, uint64_t length,
uint32_t discard_granularity_bytes, Context *on_finish) = 0;
  virtual void aio_flush(io::FlushSource flush_source, Context *on_finish) = 0;
virtual void aio_writesame(uint64_t offset, uint64_t length,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) = 0;
virtual void aio_compare_and_write(Extents &&image_extents,
ceph::bufferlist&& cmp_bl,
ceph::bufferlist&& bl,
uint64_t *mismatch_offset,
int fadvise_flags, Context *on_finish) = 0;
};
/**
* client-side, image extent cache writeback handler
*/
template <typename ImageCtxT = librbd::ImageCtx>
class ImageWriteback : public ImageWritebackInterface {
public:
using ImageWritebackInterface::Extents;
explicit ImageWriteback(ImageCtxT &image_ctx);
void aio_read(Extents &&image_extents, ceph::bufferlist *bl,
int fadvise_flags, Context *on_finish);
void aio_write(Extents &&image_extents, ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish);
void aio_discard(uint64_t offset, uint64_t length,
uint32_t discard_granularity_bytes, Context *on_finish);
void aio_flush(io::FlushSource flush_source, Context *on_finish);
void aio_writesame(uint64_t offset, uint64_t length,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish);
void aio_compare_and_write(Extents &&image_extents,
ceph::bufferlist&& cmp_bl,
ceph::bufferlist&& bl,
uint64_t *mismatch_offset,
int fadvise_flags, Context *on_finish);
private:
ImageCtxT &m_image_ctx;
};
} // namespace cache
} // namespace librbd
extern template class librbd::cache::ImageWriteback<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK
| 2,899 | 36.179487 | 87 | h |
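// Illustrative sketch (not part of the tree): pushing a write and a flush
// through the ImageWriteback facade declared above. `ictx` is assumed to be
// an already open image; the function name is invented and error handling is
// minimal. This is roughly the path a writeback cache takes to persist dirty
// extents down the image dispatch chain.
#include "librbd/cache/ImageWriteback.h"
#include "librbd/ImageCtx.h"
#include "include/buffer.h"
#include "common/Cond.h"
int image_writeback_example(librbd::ImageCtx* ictx, uint64_t off,
                            ceph::bufferlist&& bl) {
  librbd::cache::ImageWriteback<librbd::ImageCtx> writeback(*ictx);
  const uint64_t len = bl.length();
  C_SaferCond on_write;
  writeback.aio_write({{off, len}}, std::move(bl), 0, &on_write);
  int r = on_write.wait();
  if (r < 0) {
    return r;
  }
  C_SaferCond on_flush;
  writeback.aio_flush(librbd::io::FLUSH_SOURCE_INTERNAL, &on_flush);
  return on_flush.wait();
}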
null | ceph-main/src/librbd/cache/ObjectCacherObjectDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/ObjectCacherObjectDispatch.h"
#include "include/neorados/RADOS.hpp"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/ObjectCacherWriteback.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include "librbd/io/Utils.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <vector>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::ObjectCacherObjectDispatch: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
using librbd::util::data_object_name;
namespace {
typedef std::vector<ObjectExtent> ObjectExtents;
} // anonymous namespace
template <typename I>
struct ObjectCacherObjectDispatch<I>::C_InvalidateCache : public Context {
ObjectCacherObjectDispatch* dispatcher;
bool purge_on_error;
Context *on_finish;
C_InvalidateCache(ObjectCacherObjectDispatch* dispatcher,
bool purge_on_error, Context *on_finish)
: dispatcher(dispatcher), purge_on_error(purge_on_error),
on_finish(on_finish) {
}
void finish(int r) override {
ceph_assert(ceph_mutex_is_locked(dispatcher->m_cache_lock));
auto cct = dispatcher->m_image_ctx->cct;
if (r == -EBLOCKLISTED) {
lderr(cct) << "blocklisted during flush (purging)" << dendl;
dispatcher->m_object_cacher->purge_set(dispatcher->m_object_set);
} else if (r < 0 && purge_on_error) {
lderr(cct) << "failed to invalidate cache (purging): "
<< cpp_strerror(r) << dendl;
dispatcher->m_object_cacher->purge_set(dispatcher->m_object_set);
} else if (r != 0) {
lderr(cct) << "failed to invalidate cache: " << cpp_strerror(r) << dendl;
}
auto unclean = dispatcher->m_object_cacher->release_set(
dispatcher->m_object_set);
if (unclean == 0) {
r = 0;
} else {
lderr(cct) << "could not release all objects from cache: "
<< unclean << " bytes remain" << dendl;
if (r == 0) {
r = -EBUSY;
}
}
on_finish->complete(r);
}
};
template <typename I>
ObjectCacherObjectDispatch<I>::ObjectCacherObjectDispatch(
I* image_ctx, size_t max_dirty, bool writethrough_until_flush)
: m_image_ctx(image_ctx), m_max_dirty(max_dirty),
m_writethrough_until_flush(writethrough_until_flush),
m_cache_lock(ceph::make_mutex(util::unique_lock_name(
"librbd::cache::ObjectCacherObjectDispatch::cache_lock", this))) {
ceph_assert(m_image_ctx->data_ctx.is_valid());
}
template <typename I>
ObjectCacherObjectDispatch<I>::~ObjectCacherObjectDispatch() {
delete m_object_cacher;
delete m_object_set;
delete m_writeback_handler;
}
template <typename I>
void ObjectCacherObjectDispatch<I>::init() {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
m_cache_lock.lock();
ldout(cct, 5) << "enabling caching..." << dendl;
m_writeback_handler = new ObjectCacherWriteback(m_image_ctx, m_cache_lock);
auto init_max_dirty = m_max_dirty;
if (m_writethrough_until_flush) {
init_max_dirty = 0;
}
auto cache_size =
m_image_ctx->config.template get_val<Option::size_t>("rbd_cache_size");
auto target_dirty =
m_image_ctx->config.template get_val<Option::size_t>("rbd_cache_target_dirty");
auto max_dirty_age =
m_image_ctx->config.template get_val<double>("rbd_cache_max_dirty_age");
auto block_writes_upfront =
m_image_ctx->config.template get_val<bool>("rbd_cache_block_writes_upfront");
auto max_dirty_object =
m_image_ctx->config.template get_val<uint64_t>("rbd_cache_max_dirty_object");
ldout(cct, 5) << "Initial cache settings:"
<< " size=" << cache_size
<< " num_objects=" << 10
<< " max_dirty=" << init_max_dirty
<< " target_dirty=" << target_dirty
<< " max_dirty_age=" << max_dirty_age << dendl;
m_object_cacher = new ObjectCacher(cct, m_image_ctx->perfcounter->get_name(),
*m_writeback_handler, m_cache_lock,
nullptr, nullptr, cache_size,
10, /* reset this in init */
init_max_dirty, target_dirty,
max_dirty_age, block_writes_upfront);
// size object cache appropriately
if (max_dirty_object == 0) {
max_dirty_object = std::min<uint64_t>(
2000, std::max<uint64_t>(10, cache_size / 100 /
sizeof(ObjectCacher::Object)));
}
ldout(cct, 5) << " cache bytes " << cache_size
<< " -> about " << max_dirty_object << " objects" << dendl;
m_object_cacher->set_max_objects(max_dirty_object);
m_object_set = new ObjectCacher::ObjectSet(nullptr,
m_image_ctx->data_ctx.get_id(), 0);
m_object_cacher->start();
m_cache_lock.unlock();
// add ourself to the IO object dispatcher chain
if (m_max_dirty > 0) {
m_image_ctx->disable_zero_copy = true;
}
m_image_ctx->io_object_dispatcher->register_dispatch(this);
}
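// Worked example (not part of the tree) of the sizing fallback above: when
// rbd_cache_max_dirty_object is left at 0 the object count is derived from
// the byte budget as
//
//   max_dirty_object = min(2000, max(10, cache_size / 100 / sizeof(Object)))
//
// i.e. roughly 1% of the cache budget is allowed to go to per-object
// bookkeeping, clamped to the [10, 2000] range. The helper below merely
// restates that clamp and is not used by librbd.
inline uint64_t example_object_cache_slots(uint64_t cache_size,
                                           size_t per_object_overhead) {
  return std::min<uint64_t>(
    2000, std::max<uint64_t>(10, cache_size / 100 / per_object_overhead));
}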
template <typename I>
void ObjectCacherObjectDispatch<I>::shut_down(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
// chain shut down in reverse order
// shut down the cache
on_finish = new LambdaContext([this, on_finish](int r) {
m_object_cacher->stop();
on_finish->complete(r);
});
// ensure we aren't holding the cache lock post-flush
on_finish = util::create_async_context_callback(*m_image_ctx, on_finish);
// invalidate any remaining cache entries
on_finish = new C_InvalidateCache(this, true, on_finish);
// flush all pending writeback state
std::lock_guard locker{m_cache_lock};
m_object_cacher->release_set(m_object_set);
m_object_cacher->flush_set(m_object_set, on_finish);
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
// IO chained in reverse order
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << *extents << dendl;
if (extents->size() == 0) {
ldout(cct, 20) << "no extents to read" << dendl;
return false;
}
if (version != nullptr) {
// we currently don't cache read versions
// and don't support reading more than one extent
return false;
}
// ensure we aren't holding the cache lock post-read
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
  // embed the RBD-internal read flags in the generic RADOS op_flags
op_flags = ((op_flags & ~ObjectCacherWriteback::READ_FLAGS_MASK) |
((read_flags << ObjectCacherWriteback::READ_FLAGS_SHIFT) &
ObjectCacherWriteback::READ_FLAGS_MASK));
ceph::bufferlist* bl;
if (extents->size() > 1) {
auto req = new io::ReadResult::C_ObjectReadMergedExtents(
cct, extents, on_dispatched);
on_dispatched = req;
bl = &req->bl;
} else {
bl = &extents->front().bl;
}
m_image_ctx->image_lock.lock_shared();
auto rd = m_object_cacher->prepare_read(
io_context->read_snap().value_or(CEPH_NOSNAP), bl, op_flags);
m_image_ctx->image_lock.unlock_shared();
uint64_t off = 0;
for (auto& read_extent: *extents) {
ObjectExtent extent(data_object_name(m_image_ctx, object_no), object_no,
read_extent.offset, read_extent.length, 0);
extent.oloc.pool = m_image_ctx->data_ctx.get_id();
extent.buffer_extents.push_back({off, read_extent.length});
rd->extents.push_back(extent);
off += read_extent.length;
}
ZTracer::Trace trace(parent_trace);
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
m_cache_lock.lock();
int r = m_object_cacher->readx(rd, m_object_set, on_dispatched, &trace);
m_cache_lock.unlock();
if (r != 0) {
on_dispatched->complete(r);
}
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << object_off << "~"
<< object_len << dendl;
ObjectExtents object_extents;
object_extents.emplace_back(data_object_name(m_image_ctx, object_no),
object_no, object_off, object_len, 0);
// discard the cache state after changes are committed to disk (and to
// prevent races w/ readahead)
auto ctx = *on_finish;
*on_finish = new LambdaContext(
[this, object_extents, ctx](int r) {
m_cache_lock.lock();
m_object_cacher->discard_set(m_object_set, object_extents);
m_cache_lock.unlock();
ctx->complete(r);
});
// ensure we aren't holding the cache lock post-write
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
// ensure any in-flight writeback is complete before advancing
// the discard request
std::lock_guard locker{m_cache_lock};
m_object_cacher->discard_writeback(m_object_set, object_extents,
on_dispatched);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << object_off << "~"
<< data.length() << dendl;
// ensure we aren't holding the cache lock post-write
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
// cache layer does not handle version checking
if (assert_version.has_value() ||
(write_flags & io::OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE) != 0) {
ObjectExtents object_extents;
object_extents.emplace_back(data_object_name(m_image_ctx, object_no),
object_no, object_off, data.length(), 0);
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
// ensure any in-flight writeback is complete before advancing
// the write request
std::lock_guard locker{m_cache_lock};
m_object_cacher->discard_writeback(m_object_set, object_extents,
on_dispatched);
return true;
}
SnapContext snapc;
if (io_context->write_snap_context()) {
auto write_snap_context = *io_context->write_snap_context();
snapc = SnapContext(write_snap_context.first,
{write_snap_context.second.begin(),
write_snap_context.second.end()});
}
m_image_ctx->image_lock.lock_shared();
ObjectCacher::OSDWrite *wr = m_object_cacher->prepare_write(
snapc, data, ceph::real_time::min(), op_flags, *journal_tid);
m_image_ctx->image_lock.unlock_shared();
ObjectExtent extent(data_object_name(m_image_ctx, object_no),
object_no, object_off, data.length(), 0);
extent.oloc.pool = m_image_ctx->data_ctx.get_id();
extent.buffer_extents.push_back({0, data.length()});
wr->extents.push_back(extent);
ZTracer::Trace trace(parent_trace);
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
std::lock_guard locker{m_cache_lock};
m_object_cacher->writex(wr, m_object_set, on_dispatched, &trace);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << object_off << "~"
<< object_len << dendl;
// ObjectCacher doesn't support write-same so convert to regular write
io::LightweightObjectExtent extent(object_no, object_off, object_len, 0);
extent.buffer_extents = std::move(buffer_extents);
bufferlist ws_data;
io::util::assemble_write_same_extent(extent, data, &ws_data, true);
return write(object_no, object_off, std::move(ws_data), io_context, op_flags,
0, std::nullopt, parent_trace, object_dispatch_flags,
journal_tid, dispatch_result, on_finish, on_dispatched);
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << object_off << "~"
<< cmp_data.length() << dendl;
// pass-through the compare-and-write request since it's not a supported
// operation of the ObjectCacher
ObjectExtents object_extents;
object_extents.emplace_back(data_object_name(m_image_ctx, object_no),
object_no, object_off, cmp_data.length(), 0);
// if compare succeeds, discard the cache state after changes are
// committed to disk
auto ctx = *on_finish;
*on_finish = new LambdaContext(
[this, object_extents, ctx](int r) {
// ObjectCacher doesn't provide a way to reliably invalidate
// extents: in case of a racing read (if the bh is in RX state),
// release_set() just returns while discard_set() populates the
// extent with zeroes. Neither is OK but the latter is better
// because it is at least deterministic...
if (r == 0) {
m_cache_lock.lock();
m_object_cacher->discard_set(m_object_set, object_extents);
m_cache_lock.unlock();
}
ctx->complete(r);
});
// ensure we aren't holding the cache lock post-flush
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
// flush any pending writes from the cache before compare
ZTracer::Trace trace(parent_trace);
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
std::lock_guard cache_locker{m_cache_lock};
m_object_cacher->flush_set(m_object_set, object_extents, &trace,
on_dispatched);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
// ensure we aren't holding the cache lock post-flush
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
std::lock_guard locker{m_cache_lock};
if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) {
m_user_flushed = true;
if (m_writethrough_until_flush && m_max_dirty > 0) {
m_object_cacher->set_max_dirty(m_max_dirty);
ldout(cct, 5) << "saw first user flush, enabling writeback" << dendl;
}
}
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
m_object_cacher->flush_set(m_object_set, on_dispatched);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::invalidate_cache(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
// ensure we aren't holding the cache lock post-flush
on_finish = util::create_async_context_callback(*m_image_ctx, on_finish);
// invalidate any remaining cache entries
on_finish = new C_InvalidateCache(this, false, on_finish);
std::lock_guard locker{m_cache_lock};
m_object_cacher->release_set(m_object_set);
m_object_cacher->flush_set(m_object_set, on_finish);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::reset_existence_cache(
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
std::lock_guard locker{m_cache_lock};
m_object_cacher->clear_nonexistence(m_object_set);
return false;
}
} // namespace cache
} // namespace librbd
template class librbd::cache::ObjectCacherObjectDispatch<librbd::ImageCtx>;
| 17,654 | 35.252567 | 83 | cc |
null | ceph-main/src/librbd/cache/ObjectCacherObjectDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
#include "librbd/io/ObjectDispatchInterface.h"
#include "common/ceph_mutex.h"
#include "osdc/ObjectCacher.h"
struct WritebackHandler;
namespace librbd {
class ImageCtx;
namespace cache {
/**
* Facade around the OSDC object cacher to make it align with
* the object dispatcher interface
*/
template <typename ImageCtxT = ImageCtx>
class ObjectCacherObjectDispatch : public io::ObjectDispatchInterface {
public:
static ObjectCacherObjectDispatch* create(ImageCtxT* image_ctx,
size_t max_dirty,
bool writethrough_until_flush) {
return new ObjectCacherObjectDispatch(image_ctx, max_dirty,
writethrough_until_flush);
}
ObjectCacherObjectDispatch(ImageCtxT* image_ctx, size_t max_dirty,
bool writethrough_until_flush);
~ObjectCacherObjectDispatch() override;
io::ObjectDispatchLayer get_dispatch_layer() const override {
return io::OBJECT_DISPATCH_LAYER_CACHE;
}
void init();
void shut_down(Context* on_finish) override;
bool read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool list_snaps(
uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
int list_snap_flags, const ZTracer::Trace &parent_trace,
io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool invalidate_cache(Context* on_finish) override;
bool reset_existence_cache(Context* on_finish) override;
void extent_overwritten(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
uint64_t journal_tid, uint64_t new_journal_tid) {
}
int prepare_copyup(
uint64_t object_no,
io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
return 0;
}
private:
struct C_InvalidateCache;
ImageCtxT* m_image_ctx;
size_t m_max_dirty;
bool m_writethrough_until_flush;
ceph::mutex m_cache_lock;
ObjectCacher *m_object_cacher = nullptr;
ObjectCacher::ObjectSet *m_object_set = nullptr;
WritebackHandler *m_writeback_handler = nullptr;
bool m_user_flushed = false;
};
} // namespace cache
} // namespace librbd
extern template class librbd::cache::ObjectCacherObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
| 4,671 | 34.12782 | 82 | h |
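// Illustrative sketch (not part of the tree): wiring the dispatch layer above
// into an open image, roughly what librbd itself does when the ObjectCacher
// based cache is enabled. `ictx` and `max_dirty` are assumed to come from the
// caller; once init() has registered the layer with the image's object
// dispatcher, teardown happens through the dispatcher, so none is shown here.
#include "librbd/cache/ObjectCacherObjectDispatch.h"
#include "librbd/ImageCtx.h"
void enable_object_cache_example(librbd::ImageCtx* ictx, size_t max_dirty) {
  auto* dispatch =
    librbd::cache::ObjectCacherObjectDispatch<librbd::ImageCtx>::create(
      ictx, max_dirty, true /* writethrough until first flush */);
  dispatch->init();   // registers with ictx->io_object_dispatcher
}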
null | ceph-main/src/librbd/cache/ObjectCacherWriteback.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include "librbd/cache/ObjectCacherWriteback.h"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/ceph_mutex.h"
#include "osdc/Striper.h"
#include "include/Context.h"
#include "include/neorados/RADOS.hpp"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Utils.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::ObjectCacherWriteback: "
using namespace std;
namespace librbd {
namespace cache {
/**
* context to wrap another context in a Mutex
*
* @param cct cct
* @param c context to finish
* @param l mutex to lock
*/
class C_ReadRequest : public Context {
public:
C_ReadRequest(CephContext *cct, Context *c, ceph::mutex *cache_lock)
: m_cct(cct), m_ctx(c), m_cache_lock(cache_lock) {
}
void finish(int r) override {
ldout(m_cct, 20) << "aio_cb completing " << dendl;
{
std::lock_guard cache_locker{*m_cache_lock};
m_ctx->complete(r);
}
ldout(m_cct, 20) << "aio_cb finished" << dendl;
}
private:
CephContext *m_cct;
Context *m_ctx;
ceph::mutex *m_cache_lock;
};
class C_OrderedWrite : public Context {
public:
C_OrderedWrite(CephContext *cct,
ObjectCacherWriteback::write_result_d *result,
const ZTracer::Trace &trace, ObjectCacherWriteback *wb)
: m_cct(cct), m_result(result), m_trace(trace), m_wb_handler(wb) {}
~C_OrderedWrite() override {}
void finish(int r) override {
ldout(m_cct, 20) << "C_OrderedWrite completing " << m_result << dendl;
{
std::lock_guard l{m_wb_handler->m_lock};
ceph_assert(!m_result->done);
m_result->done = true;
m_result->ret = r;
m_wb_handler->complete_writes(m_result->oid);
}
ldout(m_cct, 20) << "C_OrderedWrite finished " << m_result << dendl;
m_trace.event("finish");
}
private:
CephContext *m_cct;
ObjectCacherWriteback::write_result_d *m_result;
ZTracer::Trace m_trace;
ObjectCacherWriteback *m_wb_handler;
};
struct C_CommitIOEventExtent : public Context {
ImageCtx *image_ctx;
uint64_t journal_tid;
uint64_t offset;
uint64_t length;
C_CommitIOEventExtent(ImageCtx *image_ctx, uint64_t journal_tid,
uint64_t offset, uint64_t length)
: image_ctx(image_ctx), journal_tid(journal_tid), offset(offset),
length(length) {
}
void finish(int r) override {
// all IO operations are flushed prior to closing the journal
ceph_assert(image_ctx->journal != nullptr);
image_ctx->journal->commit_io_event_extent(journal_tid, offset, length, r);
}
};
ObjectCacherWriteback::ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock)
: m_tid(0), m_lock(lock), m_ictx(ictx) {
}
void ObjectCacherWriteback::read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc,
uint64_t off, uint64_t len, snapid_t snapid,
bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace,
Context *onfinish)
{
ZTracer::Trace trace;
if (parent_trace.valid()) {
trace.init("", &m_ictx->trace_endpoint, &parent_trace);
trace.copy_name("cache read " + oid.name);
trace.event("start");
}
// on completion, take the mutex and then call onfinish.
onfinish = new C_ReadRequest(m_ictx->cct, onfinish, &m_lock);
// re-use standard object read state machine
auto aio_comp = io::AioCompletion::create_and_start(onfinish, m_ictx,
io::AIO_TYPE_READ);
aio_comp->read_result = io::ReadResult{pbl};
aio_comp->set_request_count(1);
auto req_comp = new io::ReadResult::C_ObjectReadRequest(
aio_comp, {{off, len, {{0, len}}}});
auto io_context = m_ictx->duplicate_data_io_context();
if (snapid != CEPH_NOSNAP) {
io_context->read_snap(snapid);
}
// extract the embedded RBD read flags from the op_flags
int read_flags = (op_flags & READ_FLAGS_MASK) >> READ_FLAGS_SHIFT;
op_flags &= ~READ_FLAGS_MASK;
auto req = io::ObjectDispatchSpec::create_read(
m_ictx, io::OBJECT_DISPATCH_LAYER_CACHE, object_no, &req_comp->extents,
io_context, op_flags, read_flags, trace, nullptr, req_comp);
req->send();
}
bool ObjectCacherWriteback::may_copy_on_write(const object_t& oid,
uint64_t read_off,
uint64_t read_len,
snapid_t snapid)
{
std::shared_lock image_locker(m_ictx->image_lock);
uint64_t raw_overlap = 0;
uint64_t object_overlap = 0;
m_ictx->get_parent_overlap(m_ictx->snap_id, &raw_overlap);
if (raw_overlap > 0) {
uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
auto [parent_extents, area] = io::util::object_to_area_extents(
m_ictx, object_no, {{0, m_ictx->layout.object_size}});
object_overlap = m_ictx->prune_parent_extents(parent_extents, area,
raw_overlap, false);
}
bool may = object_overlap > 0;
ldout(m_ictx->cct, 10) << "may_copy_on_write " << oid << " " << read_off
<< "~" << read_len << " = " << may << dendl;
return may;
}
ceph_tid_t ObjectCacherWriteback::write(const object_t& oid,
const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc,
const bufferlist &bl,
ceph::real_time mtime,
uint64_t trunc_size,
__u32 trunc_seq, ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit)
{
ZTracer::Trace trace;
if (parent_trace.valid()) {
trace.init("", &m_ictx->trace_endpoint, &parent_trace);
trace.copy_name("writeback " + oid.name);
trace.event("start");
}
uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
write_result_d *result = new write_result_d(oid.name, oncommit);
m_writes[oid.name].push(result);
ldout(m_ictx->cct, 20) << "write will wait for result " << result << dendl;
bufferlist bl_copy(bl);
Context *ctx = new C_OrderedWrite(m_ictx->cct, result, trace, this);
ctx = util::create_async_context_callback(*m_ictx, ctx);
auto io_context = m_ictx->duplicate_data_io_context();
if (!snapc.empty()) {
io_context->write_snap_context(
{{snapc.seq, {snapc.snaps.begin(), snapc.snaps.end()}}});
}
auto req = io::ObjectDispatchSpec::create_write(
m_ictx, io::OBJECT_DISPATCH_LAYER_CACHE, object_no, off, std::move(bl_copy),
io_context, 0, 0, std::nullopt, journal_tid, trace, ctx);
req->object_dispatch_flags = (
io::OBJECT_DISPATCH_FLAG_FLUSH |
io::OBJECT_DISPATCH_FLAG_WILL_RETRY_ON_ERROR);
req->send();
return ++m_tid;
}
void ObjectCacherWriteback::overwrite_extent(const object_t& oid, uint64_t off,
uint64_t len,
ceph_tid_t original_journal_tid,
ceph_tid_t new_journal_tid) {
ldout(m_ictx->cct, 20) << __func__ << ": " << oid << " "
<< off << "~" << len << " "
<< "journal_tid=" << original_journal_tid << ", "
<< "new_journal_tid=" << new_journal_tid << dendl;
uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
// all IO operations are flushed prior to closing the journal
ceph_assert(original_journal_tid != 0 && m_ictx->journal != NULL);
auto [image_extents, _] = io::util::object_to_area_extents(m_ictx, object_no,
{{off, len}});
for (auto it = image_extents.begin(); it != image_extents.end(); ++it) {
if (new_journal_tid != 0) {
// ensure new journal event is safely committed to disk before
// committing old event
m_ictx->journal->flush_event(
new_journal_tid, new C_CommitIOEventExtent(m_ictx,
original_journal_tid,
it->first, it->second));
} else {
m_ictx->journal->commit_io_event_extent(original_journal_tid, it->first,
it->second, 0);
}
}
}
void ObjectCacherWriteback::complete_writes(const std::string& oid)
{
ceph_assert(ceph_mutex_is_locked(m_lock));
std::queue<write_result_d*>& results = m_writes[oid];
ldout(m_ictx->cct, 20) << "complete_writes() oid " << oid << dendl;
std::list<write_result_d*> finished;
while (!results.empty()) {
write_result_d *result = results.front();
if (!result->done)
break;
finished.push_back(result);
results.pop();
}
if (results.empty())
m_writes.erase(oid);
for (std::list<write_result_d*>::iterator it = finished.begin();
it != finished.end(); ++it) {
write_result_d *result = *it;
ldout(m_ictx->cct, 20) << "complete_writes() completing " << result
<< dendl;
result->oncommit->complete(result->ret);
delete result;
}
}
} // namespace cache
} // namespace librbd
| 10,156 | 34.267361 | 80 | cc |
null | ceph-main/src/librbd/cache/ObjectCacherWriteback.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
#define CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
#include "common/snap_types.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <queue>
class Context;
namespace librbd {
struct ImageCtx;
namespace cache {
class ObjectCacherWriteback : public WritebackHandler {
public:
static const int READ_FLAGS_MASK = 0xF000;
static const int READ_FLAGS_SHIFT = 24;
ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock);
// Note that oloc, trunc_size, and trunc_seq are ignored
void read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc, uint64_t off, uint64_t len,
snapid_t snapid, bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace, Context *onfinish) override;
// Determine whether a read to this extent could be affected by a
// write-triggered copy-on-write
bool may_copy_on_write(const object_t& oid, uint64_t read_off,
uint64_t read_len, snapid_t snapid) override;
// Note that oloc, trunc_size, and trunc_seq are ignored
ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc, const bufferlist &bl,
ceph::real_time mtime, uint64_t trunc_size,
__u32 trunc_seq, ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit) override;
using WritebackHandler::write;
void overwrite_extent(const object_t& oid, uint64_t off,
uint64_t len, ceph_tid_t original_journal_tid,
ceph_tid_t new_journal_tid) override;
struct write_result_d {
bool done;
int ret;
std::string oid;
Context *oncommit;
write_result_d(const std::string& oid, Context *oncommit) :
done(false), ret(0), oid(oid), oncommit(oncommit) {}
private:
write_result_d(const write_result_d& rhs);
const write_result_d& operator=(const write_result_d& rhs);
};
private:
void complete_writes(const std::string& oid);
ceph_tid_t m_tid;
ceph::mutex& m_lock;
librbd::ImageCtx *m_ictx;
ceph::unordered_map<std::string, std::queue<write_result_d*> > m_writes;
friend class C_OrderedWrite;
};
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
| 2,594 | 31.848101 | 76 | h |
null | ceph-main/src/librbd/cache/ParentCacheObjectDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/errno.h"
#include "include/neorados/RADOS.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/ParentCacheObjectDispatch.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/plugin/Api.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <vector>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::ParentCacheObjectDispatch: " \
<< this << " " << __func__ << ": "
using namespace std;
using namespace ceph::immutable_obj_cache;
using librbd::util::data_object_name;
namespace librbd {
namespace cache {
template <typename I>
ParentCacheObjectDispatch<I>::ParentCacheObjectDispatch(
I* image_ctx, plugin::Api<I>& plugin_api)
: m_image_ctx(image_ctx), m_plugin_api(plugin_api),
m_lock(ceph::make_mutex(
"librbd::cache::ParentCacheObjectDispatch::lock", true, false)) {
ceph_assert(m_image_ctx->data_ctx.is_valid());
auto controller_path = image_ctx->cct->_conf.template get_val<std::string>(
"immutable_object_cache_sock");
m_cache_client = new CacheClient(controller_path.c_str(), m_image_ctx->cct);
}
template <typename I>
ParentCacheObjectDispatch<I>::~ParentCacheObjectDispatch() {
delete m_cache_client;
m_cache_client = nullptr;
}
template <typename I>
void ParentCacheObjectDispatch<I>::init(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
if (m_image_ctx->child == nullptr) {
ldout(cct, 5) << "non-parent image: skipping" << dendl;
if (on_finish != nullptr) {
on_finish->complete(-EINVAL);
}
return;
}
m_image_ctx->io_object_dispatcher->register_dispatch(this);
std::unique_lock locker{m_lock};
create_cache_session(on_finish, false);
}
template <typename I>
bool ParentCacheObjectDispatch<I>::read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << *extents << dendl;
if (version != nullptr) {
// we currently don't cache read versions
return false;
}
string oid = data_object_name(m_image_ctx, object_no);
  /* If the RO daemon has not started yet, has crashed, or the session
   * hit any error, try to re-connect to the daemon. */
std::unique_lock locker{m_lock};
if (!m_cache_client->is_session_work()) {
create_cache_session(nullptr, true);
ldout(cct, 5) << "Parent cache try to re-connect to RO daemon. "
<< "dispatch current request to lower object layer" << dendl;
return false;
}
CacheGenContextURef ctx = make_gen_lambda_context<ObjectCacheRequest*,
std::function<void(ObjectCacheRequest*)>>
([this, extents, dispatch_result, on_dispatched, object_no, io_context,
read_flags, &parent_trace]
(ObjectCacheRequest* ack) {
handle_read_cache(ack, object_no, extents, io_context, read_flags,
parent_trace, dispatch_result, on_dispatched);
});
m_cache_client->lookup_object(m_image_ctx->data_ctx.get_namespace(),
m_image_ctx->data_ctx.get_id(),
io_context->read_snap().value_or(CEPH_NOSNAP),
m_image_ctx->layout.object_size,
oid, std::move(ctx));
return true;
}
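// Handle the lookup reply from the RO cache daemon: a non-read reply
// re-dispatches the request to RADOS, an empty cache path falls back to
// reading from the parent image via the plugin API, and otherwise the
// extents are served from the locally cached file (with a RADOS fallback
// on any file read error).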
template <typename I>
void ParentCacheObjectDispatch<I>::handle_read_cache(
ObjectCacheRequest* ack, uint64_t object_no, io::ReadExtents* extents,
IOContext io_context, int read_flags, const ZTracer::Trace &parent_trace,
io::DispatchResult* dispatch_result, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
  if (ack->type != RBDSC_READ_REPLY) {
// go back to read rados
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
on_dispatched->complete(0);
return;
}
ceph_assert(ack->type == RBDSC_READ_REPLY);
std::string file_path = ((ObjectCacheReadReplyData*)ack)->cache_path;
if (file_path.empty()) {
if ((read_flags & io::READ_FLAG_DISABLE_READ_FROM_PARENT) != 0) {
on_dispatched->complete(-ENOENT);
return;
}
auto ctx = new LambdaContext(
[this, dispatch_result, on_dispatched](int r) {
if (r < 0 && r != -ENOENT) {
lderr(m_image_ctx->cct) << "failed to read parent: "
<< cpp_strerror(r) << dendl;
}
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
on_dispatched->complete(r);
});
m_plugin_api.read_parent(m_image_ctx, object_no, extents,
io_context->read_snap().value_or(CEPH_NOSNAP),
parent_trace, ctx);
return;
}
int read_len = 0;
for (auto& extent: *extents) {
// try to read from parent image cache
int r = read_object(file_path, &extent.bl, extent.offset, extent.length,
on_dispatched);
if (r < 0) {
// cache read error, fall back to read rados
for (auto& read_extent: *extents) {
// clear read bufferlists
if (&read_extent == &extent) {
break;
}
read_extent.bl.clear();
}
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
on_dispatched->complete(0);
return;
}
read_len += r;
}
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
on_dispatched->complete(read_len);
}
template <typename I>
int ParentCacheObjectDispatch<I>::handle_register_client(bool reg) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
if (!reg) {
lderr(cct) << "Parent cache register fails." << dendl;
}
return 0;
}
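// Establish (or re-establish) a session with the RO cache daemon: connect
// to its unix domain socket and then register this client. m_lock must be
// held; concurrent attempts are collapsed via m_connecting.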
template <typename I>
void ParentCacheObjectDispatch<I>::create_cache_session(Context* on_finish,
bool is_reconnect) {
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
if (m_connecting) {
return;
}
m_connecting = true;
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
Context* register_ctx = new LambdaContext([this, cct, on_finish](int ret) {
if (ret < 0) {
lderr(cct) << "Parent cache fail to register client." << dendl;
}
    handle_register_client(ret >= 0);
ceph_assert(m_connecting);
m_connecting = false;
if (on_finish != nullptr) {
on_finish->complete(0);
}
});
Context* connect_ctx = new LambdaContext(
[this, cct, register_ctx](int ret) {
if (ret < 0) {
lderr(cct) << "Parent cache fail to connect RO daemon." << dendl;
register_ctx->complete(ret);
return;
}
ldout(cct, 20) << "Parent cache connected to RO daemon." << dendl;
m_cache_client->register_client(register_ctx);
});
if (m_cache_client != nullptr && is_reconnect) {
    // CacheClient's destruction will clean up all state of the old session.
    delete m_cache_client;
    // create a new CacheClient to connect to the RO daemon.
auto controller_path = cct->_conf.template get_val<std::string>(
"immutable_object_cache_sock");
m_cache_client = new CacheClient(controller_path.c_str(), m_image_ctx->cct);
}
m_cache_client->run();
m_cache_client->connect(connect_ctx);
}
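// Read the requested range directly from the cache file maintained by the
// RO daemon; returns the number of bytes read or a negative error code.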
template <typename I>
int ParentCacheObjectDispatch<I>::read_object(
std::string file_path, ceph::bufferlist* read_data, uint64_t offset,
uint64_t length, Context *on_finish) {
auto *cct = m_image_ctx->cct;
ldout(cct, 20) << "file path: " << file_path << dendl;
std::string error;
int ret = read_data->pread_file(file_path.c_str(), offset, length, &error);
if (ret < 0) {
ldout(cct, 5) << "read from file return error: " << error
<< "file path= " << file_path
<< dendl;
return ret;
}
return read_data->length();
}
} // namespace cache
} // namespace librbd
template class librbd::cache::ParentCacheObjectDispatch<librbd::ImageCtx>;
| 8,334 | 30.812977 | 80 | cc |
null | ceph-main/src/librbd/cache/ParentCacheObjectDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
#include "librbd/io/ObjectDispatchInterface.h"
#include "common/ceph_mutex.h"
#include "librbd/cache/TypeTraits.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "tools/immutable_object_cache/Types.h"
namespace librbd {
class ImageCtx;
namespace plugin { template <typename> struct Api; }
namespace cache {
template <typename ImageCtxT = ImageCtx>
class ParentCacheObjectDispatch : public io::ObjectDispatchInterface {
// mock unit testing support
typedef cache::TypeTraits<ImageCtxT> TypeTraits;
typedef typename TypeTraits::CacheClient CacheClient;
public:
static ParentCacheObjectDispatch* create(ImageCtxT* image_ctx,
plugin::Api<ImageCtxT>& plugin_api) {
return new ParentCacheObjectDispatch(image_ctx, plugin_api);
}
ParentCacheObjectDispatch(ImageCtxT* image_ctx,
plugin::Api<ImageCtxT>& plugin_api);
~ParentCacheObjectDispatch() override;
io::ObjectDispatchLayer get_dispatch_layer() const override {
return io::OBJECT_DISPATCH_LAYER_PARENT_CACHE;
}
void init(Context* on_finish = nullptr);
void shut_down(Context* on_finish) {
m_image_ctx->op_work_queue->queue(on_finish, 0);
}
bool read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return false;
}
bool write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return false;
}
bool write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return false;
}
bool compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
return false;
}
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_id, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return false;
}
bool list_snaps(
uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
int list_snap_flags, const ZTracer::Trace &parent_trace,
io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool invalidate_cache(Context* on_finish) {
return false;
}
bool reset_existence_cache(Context* on_finish) {
return false;
}
void extent_overwritten(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
uint64_t journal_tid, uint64_t new_journal_tid) {
}
int prepare_copyup(
uint64_t object_no,
io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
return 0;
}
ImageCtxT* get_image_ctx() {
return m_image_ctx;
}
CacheClient* get_cache_client() {
return m_cache_client;
}
private:
int read_object(std::string file_path, ceph::bufferlist* read_data,
uint64_t offset, uint64_t length, Context *on_finish);
void handle_read_cache(ceph::immutable_obj_cache::ObjectCacheRequest* ack,
uint64_t object_no, io::ReadExtents* extents,
IOContext io_context, int read_flags,
const ZTracer::Trace &parent_trace,
io::DispatchResult* dispatch_result,
Context* on_dispatched);
int handle_register_client(bool reg);
void create_cache_session(Context* on_finish, bool is_reconnect);
ImageCtxT* m_image_ctx;
plugin::Api<ImageCtxT>& m_plugin_api;
ceph::mutex m_lock;
CacheClient *m_cache_client = nullptr;
bool m_connecting = false;
};
} // namespace cache
} // namespace librbd
extern template class librbd::cache::ParentCacheObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
| 5,482 | 32.845679 | 81 | h |
null | ceph-main/src/librbd/cache/TypeTraits.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_TYPE_TRAITS_H
#define CEPH_LIBRBD_CACHE_TYPE_TRAITS_H
namespace ceph {
namespace immutable_obj_cache {
class CacheClient;
} // namespace immutable_obj_cache
} // namespace ceph
namespace librbd {
namespace cache {
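// Indirection point for the cache client type so that unit tests can
// specialize TypeTraits with a mock CacheClient.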
template <typename ImageCtxT>
struct TypeTraits {
typedef ceph::immutable_obj_cache::CacheClient CacheClient;
};
} // namespace cache
} // namespace librbd
#endif
| 515 | 18.111111 | 70 | h |
null | ceph-main/src/librbd/cache/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_TYPES_H
#define CEPH_LIBRBD_CACHE_TYPES_H
#include <list>
#include <string>
class Context;
namespace librbd {
namespace cache {
enum ImageCacheType {
IMAGE_CACHE_TYPE_RWL = 1,
IMAGE_CACHE_TYPE_SSD,
IMAGE_CACHE_TYPE_UNKNOWN
};
typedef std::list<Context *> Contexts;
const std::string PERSISTENT_CACHE_STATE = ".rbd_persistent_cache_state";
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_TYPES_H
| 557 | 18.241379 | 73 | h |
null | ceph-main/src/librbd/cache/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_UTILS_H
#define CEPH_LIBRBD_CACHE_UTILS_H
#include "acconfig.h"
#include <string>
class Context;
namespace librbd {
struct ImageCtx;
namespace cache {
namespace util {
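// Returns whether the persistent write-back cache (pwl) is enabled for the
// image, i.e. the build includes RWL/SSD cache support and
// "rbd_persistent_cache_mode" is not "disabled".
//
// A minimal usage sketch (hypothetical caller, for illustration only):
//
//   if (librbd::cache::util::is_pwl_enabled(*image_ctx)) {
//     // wire up the persistent write-back cache dispatch layer
//   }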
template <typename T>
bool is_pwl_enabled(T& image_ctx) {
#if defined(WITH_RBD_RWL) || defined(WITH_RBD_SSD_CACHE)
auto value = image_ctx.config.template get_val<std::string>("rbd_persistent_cache_mode");
return value == "disabled" ? false : true;
#else
return false;
#endif // WITH_RBD_RWL || WITH_RBD_SSD_CACHE
}
} // namespace util
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_UTILS_H
| 699 | 19.588235 | 91 | h |
null | ceph-main/src/librbd/cache/WriteAroundObjectDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/WriteAroundObjectDispatch.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::WriteAroundObjectDispatch: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
using librbd::util::data_object_name;
template <typename I>
WriteAroundObjectDispatch<I>::WriteAroundObjectDispatch(
I* image_ctx, size_t max_dirty, bool writethrough_until_flush)
: m_image_ctx(image_ctx), m_init_max_dirty(max_dirty), m_max_dirty(max_dirty),
m_lock(ceph::make_mutex(util::unique_lock_name(
"librbd::cache::WriteAroundObjectDispatch::lock", this))) {
if (writethrough_until_flush) {
m_max_dirty = 0;
}
}
template <typename I>
WriteAroundObjectDispatch<I>::~WriteAroundObjectDispatch() {
}
template <typename I>
void WriteAroundObjectDispatch<I>::init() {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
// add ourself to the IO object dispatcher chain
if (m_init_max_dirty > 0) {
m_image_ctx->disable_zero_copy = true;
}
m_image_ctx->io_object_dispatcher->register_dispatch(this);
}
template <typename I>
void WriteAroundObjectDispatch<I>::shut_down(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
on_finish->complete(0);
}
template <typename I>
bool WriteAroundObjectDispatch<I>::read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
bool handled = false;
for (auto& extent: *extents) {
handled |= dispatch_unoptimized_io(object_no, extent.offset, extent.length,
dispatch_result, on_dispatched);
}
return handled;
}
template <typename I>
bool WriteAroundObjectDispatch<I>::discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
<< object_off << "~" << object_len << dendl;
return dispatch_io(object_no, object_off, object_len, 0, dispatch_result,
on_finish, on_dispatched);
}
template <typename I>
bool WriteAroundObjectDispatch<I>::write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context**on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
<< object_off << "~" << data.length() << dendl;
return dispatch_io(object_no, object_off, data.length(), op_flags,
dispatch_result, on_finish, on_dispatched);
}
template <typename I>
bool WriteAroundObjectDispatch<I>::write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context**on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
<< object_off << "~" << object_len << dendl;
return dispatch_io(object_no, object_off, object_len, op_flags,
dispatch_result, on_finish, on_dispatched);
}
template <typename I>
bool WriteAroundObjectDispatch<I>::compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
return dispatch_unoptimized_io(object_no, object_off, cmp_data.length(),
dispatch_result, on_dispatched);
}
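// Flushes must not complete ahead of previously dispatched writes: if any
// optimized IO is still queued or blocked the flush is queued behind it,
// otherwise it is dispatched immediately and its completion is deferred
// until the preceding in-flight IO finishes. The first user flush also
// switches from write-through to write-around when so configured.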
template <typename I>
bool WriteAroundObjectDispatch<I>::flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
std::lock_guard locker{m_lock};
if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) {
m_user_flushed = true;
if (m_max_dirty == 0 && m_init_max_dirty > 0) {
ldout(cct, 5) << "first user flush: enabling write-around" << dendl;
m_max_dirty = m_init_max_dirty;
}
}
if (m_in_flight_io_tids.empty()) {
// no in-flight IO (also implies no queued/blocked IO)
return false;
}
auto tid = ++m_last_tid;
auto ctx = util::create_async_context_callback(*m_image_ctx, *on_finish);
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
*on_finish = new LambdaContext([this, tid](int r) {
handle_in_flight_flush_complete(r, tid);
});
if (m_queued_ios.empty() && m_blocked_ios.empty()) {
// immediately allow the flush to be dispatched
ldout(cct, 20) << "dispatching: tid=" << tid << dendl;
m_in_flight_flushes.emplace(tid, ctx);
return false;
}
  // cannot dispatch the flush until after preceding IO is dispatched
ldout(cct, 20) << "queueing: tid=" << tid << dendl;
m_queued_flushes.emplace(tid, QueuedFlush{ctx, on_dispatched});
return true;
}
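// An unoptimized IO (read, compare-and-write) cannot be acked early. If it
// overlaps an in-flight optimized write it is blocked until that write
// completes; otherwise it passes straight through to the next layer.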
template <typename I>
bool WriteAroundObjectDispatch<I>::dispatch_unoptimized_io(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::DispatchResult* dispatch_result, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
m_lock.lock();
auto in_flight_extents_it = m_in_flight_extents.find(object_no);
if (in_flight_extents_it == m_in_flight_extents.end() ||
!in_flight_extents_it->second.intersects(object_off, object_len)) {
// no IO in-flight to the specified extent
m_lock.unlock();
return false;
}
// write IO is in-flight -- it needs to complete before the unoptimized
// IO can be dispatched
auto tid = ++m_last_tid;
ldout(cct, 20) << "blocked by in-flight IO: tid=" << tid << dendl;
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
m_blocked_unoptimized_ios[object_no].emplace(
tid, BlockedIO{object_off, object_len, nullptr, on_dispatched});
m_lock.unlock();
return true;
}
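// Optimized writes are acked back to the caller as soon as they are tracked,
// provided write-around is active (not write-through, no FUA hint) and the
// in-flight byte limit allows it; otherwise they are blocked behind
// overlapping extents or queued until capacity frees up.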
template <typename I>
bool WriteAroundObjectDispatch<I>::dispatch_io(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
int op_flags, io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
m_lock.lock();
if (m_max_dirty == 0) {
// write-through mode is active -- no-op the cache
m_lock.unlock();
return false;
}
if ((op_flags & LIBRADOS_OP_FLAG_FADVISE_FUA) != 0) {
// force unit access flag is set -- disable write-around
m_lock.unlock();
return dispatch_unoptimized_io(object_no, object_off, object_len,
dispatch_result, on_dispatched);
}
auto tid = ++m_last_tid;
auto ctx = util::create_async_context_callback(*m_image_ctx, *on_finish);
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
*on_finish = new LambdaContext(
[this, tid, object_no, object_off, object_len](int r) {
handle_in_flight_io_complete(r, tid, object_no, object_off, object_len);
});
bool blocked = block_overlapping_io(&m_in_flight_extents[object_no],
object_off, object_len);
if (blocked) {
ldout(cct, 20) << "blocked on overlap: tid=" << tid << dendl;
m_queued_or_blocked_io_tids.insert(tid);
m_blocked_ios[object_no].emplace(tid, BlockedIO{object_off, object_len, ctx,
on_dispatched});
m_lock.unlock();
} else if (can_dispatch_io(tid, object_len)) {
m_lock.unlock();
ldout(cct, 20) << "dispatching: tid=" << tid << dendl;
on_dispatched->complete(0);
ctx->complete(0);
} else {
ldout(cct, 20) << "queueing: tid=" << tid << dendl;
m_queued_or_blocked_io_tids.insert(tid);
m_queued_ios.emplace(tid, QueuedIO{object_len, ctx, on_dispatched});
m_lock.unlock();
}
return true;
}
template <typename I>
bool WriteAroundObjectDispatch<I>::block_overlapping_io(
InFlightObjectExtents* in_flight_object_extents, uint64_t object_off,
uint64_t object_len) {
if (in_flight_object_extents->intersects(object_off, object_len)) {
return true;
}
in_flight_object_extents->insert(object_off, object_len);
return false;
}
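// Called after an in-flight write completes: release its extent reservation,
// collect any unoptimized IOs that are no longer blocked and move newly
// unblocked optimized IOs to the queued list for dispatch once capacity
// allows.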
template <typename I>
void WriteAroundObjectDispatch<I>::unblock_overlapping_ios(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
Contexts* unoptimized_io_dispatches) {
auto cct = m_image_ctx->cct;
ceph_assert(ceph_mutex_is_locked(m_lock));
auto in_flight_extents_it = m_in_flight_extents.find(object_no);
ceph_assert(in_flight_extents_it != m_in_flight_extents.end());
auto& in_flight_object_extents = in_flight_extents_it->second;
in_flight_object_extents.erase(object_off, object_len);
// handle unoptimized IOs that were blocked by in-flight IO
InFlightObjectExtents blocked_unoptimized_ios;
auto blocked_unoptimized_ios_it = m_blocked_unoptimized_ios.find(object_no);
if (blocked_unoptimized_ios_it != m_blocked_unoptimized_ios.end()) {
auto& blocked_unoptimized_object_ios = blocked_unoptimized_ios_it->second;
for (auto it = blocked_unoptimized_object_ios.begin();
it != blocked_unoptimized_object_ios.end();) {
auto& blocked_io = it->second;
if (!in_flight_object_extents.intersects(blocked_io.offset,
blocked_io.length)) {
unoptimized_io_dispatches->emplace(it->first, blocked_io.on_dispatched);
it = blocked_unoptimized_object_ios.erase(it);
} else {
blocked_unoptimized_ios.union_insert(blocked_io.offset,
blocked_io.length);
++it;
}
}
if (blocked_unoptimized_object_ios.empty()) {
m_blocked_unoptimized_ios.erase(blocked_unoptimized_ios_it);
}
}
// handle optimized IOs that were blocked
auto blocked_io_it = m_blocked_ios.find(object_no);
if (blocked_io_it != m_blocked_ios.end()) {
auto& blocked_object_ios = blocked_io_it->second;
auto blocked_object_ios_it = blocked_object_ios.begin();
while (blocked_object_ios_it != blocked_object_ios.end()) {
auto next_blocked_object_ios_it = blocked_object_ios_it;
++next_blocked_object_ios_it;
auto& blocked_io = blocked_object_ios_it->second;
if (blocked_unoptimized_ios.intersects(blocked_io.offset,
blocked_io.length) ||
block_overlapping_io(&in_flight_object_extents, blocked_io.offset,
blocked_io.length)) {
break;
}
// move unblocked IO to the queued list, which will get processed when
// there is capacity
auto tid = blocked_object_ios_it->first;
ldout(cct, 20) << "queueing unblocked: tid=" << tid << dendl;
m_queued_ios.emplace(tid, blocked_io);
blocked_object_ios.erase(blocked_object_ios_it);
blocked_object_ios_it = next_blocked_object_ios_it;
}
if (blocked_object_ios.empty()) {
m_blocked_ios.erase(blocked_io_it);
}
}
if (in_flight_object_extents.empty()) {
m_in_flight_extents.erase(in_flight_extents_it);
}
}
template <typename I>
bool WriteAroundObjectDispatch<I>::can_dispatch_io(
uint64_t tid, uint64_t length) {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_in_flight_bytes == 0 || m_in_flight_bytes + length <= m_max_dirty) {
// no in-flight IO or still under max write-around in-flight limit.
// allow the dispatcher to proceed to send the IO but complete it back
// to the invoker.
m_in_flight_bytes += length;
m_in_flight_io_tids.insert(tid);
return true;
}
return false;
}
template <typename I>
void WriteAroundObjectDispatch<I>::handle_in_flight_io_complete(
int r, uint64_t tid, uint64_t object_no, uint64_t object_off,
uint64_t object_len) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl;
m_lock.lock();
m_in_flight_io_tids.erase(tid);
ceph_assert(m_in_flight_bytes >= object_len);
m_in_flight_bytes -= object_len;
if (r < 0) {
lderr(cct) << "IO error encountered: tid=" << tid << ": "
<< cpp_strerror(r) << dendl;
if (m_pending_flush_error == 0) {
m_pending_flush_error = r;
}
}
// any overlapping blocked IOs can be queued now
Contexts unoptimized_io_dispatches;
unblock_overlapping_ios(object_no, object_off, object_len,
&unoptimized_io_dispatches);
// collect any flushes that are ready for completion
int pending_flush_error = 0;
auto finished_flushes = collect_finished_flushes();
if (!finished_flushes.empty()) {
std::swap(pending_flush_error, m_pending_flush_error);
}
// collect any queued IOs that are ready for dispatch
auto ready_ios = collect_ready_ios();
// collect any queued flushes that were tied to queued IOs
auto ready_flushes = collect_ready_flushes();
m_lock.unlock();
// dispatch any ready unoptimized IOs
for (auto& it : unoptimized_io_dispatches) {
ldout(cct, 20) << "dispatching unoptimized IO: tid=" << it.first << dendl;
it.second->complete(0);
}
// complete flushes that were waiting on in-flight IO
// (and propagate any IO error to first flush)
for (auto& it : finished_flushes) {
ldout(cct, 20) << "completing flush: tid=" << it.first << ", "
<< "r=" << pending_flush_error << dendl;
it.second->complete(pending_flush_error);
}
// dispatch any ready queued IOs
for (auto& it : ready_ios) {
ldout(cct, 20) << "dispatching IO: tid=" << it.first << dendl;
it.second.on_dispatched->complete(0);
it.second.on_finish->complete(0);
}
// dispatch any ready flushes
for (auto& it : ready_flushes) {
ldout(cct, 20) << "dispatching flush: tid=" << it.first << dendl;
it.second->complete(0);
}
}
template <typename I>
void WriteAroundObjectDispatch<I>::handle_in_flight_flush_complete(
int r, uint64_t tid) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl;
m_lock.lock();
// move the in-flight flush to the pending completion list
auto it = m_in_flight_flushes.find(tid);
ceph_assert(it != m_in_flight_flushes.end());
m_pending_flushes.emplace(it->first, it->second);
m_in_flight_flushes.erase(it);
// collect any flushes that are ready for completion
int pending_flush_error = 0;
auto finished_flushes = collect_finished_flushes();
if (!finished_flushes.empty()) {
std::swap(pending_flush_error, m_pending_flush_error);
}
m_lock.unlock();
// complete flushes that were waiting on in-flight IO
// (and propagate any IO errors)
for (auto& it : finished_flushes) {
ldout(cct, 20) << "completing flush: tid=" << it.first << dendl;
it.second->complete(pending_flush_error);
pending_flush_error = 0;
}
}
template <typename I>
typename WriteAroundObjectDispatch<I>::QueuedIOs
WriteAroundObjectDispatch<I>::collect_ready_ios() {
ceph_assert(ceph_mutex_is_locked(m_lock));
QueuedIOs queued_ios;
while (true) {
auto it = m_queued_ios.begin();
if (it == m_queued_ios.end() ||
!can_dispatch_io(it->first, it->second.length)) {
break;
}
queued_ios.emplace(it->first, it->second);
m_queued_or_blocked_io_tids.erase(it->first);
m_queued_ios.erase(it);
}
return queued_ios;
}
template <typename I>
typename WriteAroundObjectDispatch<I>::Contexts
WriteAroundObjectDispatch<I>::collect_ready_flushes() {
ceph_assert(ceph_mutex_is_locked(m_lock));
Contexts ready_flushes;
auto io_tid_it = m_queued_or_blocked_io_tids.begin();
while (true) {
auto it = m_queued_flushes.begin();
if (it == m_queued_flushes.end() ||
(io_tid_it != m_queued_or_blocked_io_tids.end() &&
*io_tid_it < it->first)) {
break;
}
m_in_flight_flushes.emplace(it->first, it->second.on_finish);
ready_flushes.emplace(it->first, it->second.on_dispatched);
m_queued_flushes.erase(it);
}
return ready_flushes;
}
template <typename I>
typename WriteAroundObjectDispatch<I>::Contexts
WriteAroundObjectDispatch<I>::collect_finished_flushes() {
ceph_assert(ceph_mutex_is_locked(m_lock));
Contexts finished_flushes;
auto io_tid_it = m_in_flight_io_tids.begin();
while (true) {
auto it = m_pending_flushes.begin();
if (it == m_pending_flushes.end() ||
(io_tid_it != m_in_flight_io_tids.end() && *io_tid_it < it->first)) {
break;
}
finished_flushes.emplace(it->first, it->second);
m_pending_flushes.erase(it);
}
return finished_flushes;
}
} // namespace cache
} // namespace librbd
template class librbd::cache::WriteAroundObjectDispatch<librbd::ImageCtx>;
| 18,026 | 33.271863 | 80 | cc |
null | ceph-main/src/librbd/cache/WriteAroundObjectDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_WRITE_AROUND_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_WRITE_AROUND_OBJECT_DISPATCH_H
#include "librbd/io/ObjectDispatchInterface.h"
#include "include/interval_set.h"
#include "common/ceph_mutex.h"
#include "librbd/io/Types.h"
#include <map>
#include <set>
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
namespace cache {
template <typename ImageCtxT = ImageCtx>
class WriteAroundObjectDispatch : public io::ObjectDispatchInterface {
public:
static WriteAroundObjectDispatch* create(ImageCtxT* image_ctx,
size_t max_dirty,
bool writethrough_until_flush) {
return new WriteAroundObjectDispatch(image_ctx, max_dirty,
writethrough_until_flush);
}
WriteAroundObjectDispatch(ImageCtxT* image_ctx, size_t max_dirty,
bool writethrough_until_flush);
~WriteAroundObjectDispatch() override;
io::ObjectDispatchLayer get_dispatch_layer() const override {
return io::OBJECT_DISPATCH_LAYER_CACHE;
}
void init();
void shut_down(Context* on_finish) override;
bool read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context**on_finish, Context* on_dispatched) override;
bool write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context**on_finish, Context* on_dispatched) override;
bool write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context**on_finish, Context* on_dispatched) override;
bool compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool list_snaps(
uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
int list_snap_flags, const ZTracer::Trace &parent_trace,
io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool invalidate_cache(Context* on_finish) override {
return false;
}
bool reset_existence_cache(Context* on_finish) override {
return false;
}
void extent_overwritten(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
uint64_t journal_tid, uint64_t new_journal_tid) override {
}
int prepare_copyup(
uint64_t object_no,
io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
return 0;
}
private:
struct QueuedIO {
QueuedIO(uint64_t length, Context* on_finish, Context* on_dispatched)
: length(length), on_finish(on_finish), on_dispatched(on_dispatched) {
}
uint64_t length;
Context* on_finish;
Context* on_dispatched;
};
struct QueuedFlush {
QueuedFlush(Context* on_finish, Context* on_dispatched)
: on_finish(on_finish), on_dispatched(on_dispatched) {
}
Context* on_finish;
Context* on_dispatched;
};
struct BlockedIO : public QueuedIO {
BlockedIO(uint64_t offset, uint64_t length, Context* on_finish,
Context* on_dispatched)
: QueuedIO(length, on_finish, on_dispatched), offset(offset) {
}
uint64_t offset;
};
typedef std::map<uint64_t, QueuedIO> QueuedIOs;
typedef std::map<uint64_t, QueuedFlush> QueuedFlushes;
typedef std::map<uint64_t, BlockedIO> BlockedObjectIOs;
typedef std::map<uint64_t, BlockedObjectIOs> BlockedIOs;
typedef std::map<uint64_t, Context*> Contexts;
typedef std::set<uint64_t> Tids;
typedef interval_set<uint64_t> InFlightObjectExtents;
typedef std::map<uint64_t, InFlightObjectExtents> InFlightExtents;
ImageCtxT* m_image_ctx;
size_t m_init_max_dirty;
size_t m_max_dirty;
ceph::mutex m_lock;
bool m_user_flushed = false;
uint64_t m_last_tid = 0;
uint64_t m_in_flight_bytes = 0;
Tids m_in_flight_io_tids;
InFlightExtents m_in_flight_extents;
BlockedIOs m_blocked_ios;
QueuedIOs m_queued_ios;
Tids m_queued_or_blocked_io_tids;
BlockedIOs m_blocked_unoptimized_ios;
QueuedFlushes m_queued_flushes;
Contexts m_in_flight_flushes;
Contexts m_pending_flushes;
int m_pending_flush_error = 0;
bool dispatch_unoptimized_io(uint64_t object_no, uint64_t object_off,
uint64_t object_len,
io::DispatchResult* dispatch_result,
Context* on_dispatched);
bool dispatch_io(uint64_t object_no, uint64_t object_off,
uint64_t object_len, int op_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatch);
bool block_overlapping_io(InFlightObjectExtents* in_flight_object_extents,
uint64_t object_off, uint64_t object_len);
void unblock_overlapping_ios(uint64_t object_no, uint64_t object_off,
uint64_t object_len,
Contexts* unoptimized_io_dispatches);
bool can_dispatch_io(uint64_t tid, uint64_t length);
void handle_in_flight_io_complete(int r, uint64_t tid, uint64_t object_no,
uint64_t object_off, uint64_t object_len);
void handle_in_flight_flush_complete(int r, uint64_t tid);
QueuedIOs collect_ready_ios();
Contexts collect_ready_flushes();
Contexts collect_finished_flushes();
};
} // namespace cache
} // namespace librbd
extern template class librbd::cache::WriteAroundObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_WRITE_AROUND_OBJECT_DISPATCH_H
| 7,237 | 32.981221 | 81 | h |
null | ceph-main/src/librbd/cache/WriteLogImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/dout.h"
#include "include/neorados/RADOS.hpp"
#include "librbd/cache/pwl/AbstractWriteLog.h"
#include "librbd/cache/pwl/ShutdownRequest.h"
#include "librbd/cache/WriteLogImageDispatch.h"
#include "librbd/ImageCtx.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/Utils.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::WriteLogImageDispatch: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace cache {
template <typename I>
void WriteLogImageDispatch<I>::shut_down(Context* on_finish) {
ceph_assert(m_image_cache != nullptr);
Context* ctx = new LambdaContext(
[this, on_finish](int r) {
m_image_cache = nullptr;
on_finish->complete(r);
});
cache::pwl::ShutdownRequest<I> *req = cache::pwl::ShutdownRequest<I>::create(
*m_image_ctx, m_image_cache, m_plugin_api, ctx);
req->send();
}
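// The IO handlers below route requests into the write log cache. Requests
// tagged with the crypto header flag, and reads of a snapshot, bypass the
// cache and fall through to the next dispatch layer.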
template <typename I>
bool WriteLogImageDispatch<I>::read(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
io::ReadResult &&read_result, IOContext io_context,
int op_flags, int read_flags,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
if (*image_dispatch_flags & io::IMAGE_DISPATCH_FLAG_CRYPTO_HEADER) {
return false;
}
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
if (io_context->read_snap().value_or(CEPH_NOSNAP) != CEPH_NOSNAP) {
return false;
}
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
if (preprocess_length(aio_comp, image_extents)) {
return true;
}
m_plugin_api.update_aio_comp(aio_comp, 1, read_result, image_extents);
auto *req_comp = m_plugin_api.create_image_read_request(aio_comp, 0, image_extents);
m_image_cache->read(std::move(image_extents),
&req_comp->bl, op_flags,
req_comp);
return true;
}
template <typename I>
bool WriteLogImageDispatch<I>::write(
io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
if (*image_dispatch_flags & io::IMAGE_DISPATCH_FLAG_CRYPTO_HEADER) {
return false;
}
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
if (preprocess_length(aio_comp, image_extents)) {
return true;
}
m_plugin_api.update_aio_comp(aio_comp, 1);
io::C_AioRequest *req_comp = m_plugin_api.create_aio_request(aio_comp);
m_image_cache->write(std::move(image_extents),
std::move(bl), op_flags, req_comp);
return true;
}
template <typename I>
bool WriteLogImageDispatch<I>::discard(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
if (*image_dispatch_flags & io::IMAGE_DISPATCH_FLAG_CRYPTO_HEADER) {
return false;
}
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
if (preprocess_length(aio_comp, image_extents)) {
return true;
}
m_plugin_api.update_aio_comp(aio_comp, image_extents.size());
for (auto &extent : image_extents) {
io::C_AioRequest *req_comp = m_plugin_api.create_aio_request(aio_comp);
m_image_cache->discard(extent.first, extent.second,
discard_granularity_bytes,
req_comp);
}
return true;
}
template <typename I>
bool WriteLogImageDispatch<I>::write_same(
io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
if (*image_dispatch_flags & io::IMAGE_DISPATCH_FLAG_CRYPTO_HEADER) {
return false;
}
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
if (preprocess_length(aio_comp, image_extents)) {
return true;
}
m_plugin_api.update_aio_comp(aio_comp, image_extents.size());
for (auto &extent : image_extents) {
io::C_AioRequest *req_comp = m_plugin_api.create_aio_request(aio_comp);
m_image_cache->writesame(extent.first, extent.second,
std::move(bl), op_flags,
req_comp);
}
return true;
}
template <typename I>
bool WriteLogImageDispatch<I>::compare_and_write(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
if (*image_dispatch_flags & io::IMAGE_DISPATCH_FLAG_CRYPTO_HEADER) {
return false;
}
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
if (preprocess_length(aio_comp, image_extents)) {
return true;
}
m_plugin_api.update_aio_comp(aio_comp, 1);
io::C_AioRequest *req_comp = m_plugin_api.create_aio_request(aio_comp);
m_image_cache->compare_and_write(
std::move(image_extents), std::move(cmp_bl), std::move(bl),
mismatch_offset, op_flags, req_comp);
return true;
}
template <typename I>
bool WriteLogImageDispatch<I>::flush(
io::AioCompletion* aio_comp, io::FlushSource flush_source,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "tid=" << tid << dendl;
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
m_plugin_api.update_aio_comp(aio_comp, 1);
io::C_AioRequest *req_comp = m_plugin_api.create_aio_request(aio_comp);
m_image_cache->flush(flush_source, req_comp);
return true;
}
template <typename I>
bool WriteLogImageDispatch<I>::list_snaps(
io::AioCompletion* aio_comp, io::Extents&& image_extents,
io::SnapIds&& snap_ids,
int list_snaps_flags, io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
return false;
}
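// Zero-length requests are completed immediately without touching the
// cache; returns true when the request was handled here.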
template <typename I>
bool WriteLogImageDispatch<I>::preprocess_length(
io::AioCompletion* aio_comp, io::Extents &image_extents) const {
auto total_bytes = io::util::get_extents_length(image_extents);
if (total_bytes == 0) {
m_plugin_api.update_aio_comp(aio_comp, 0);
return true;
}
return false;
}
template <typename I>
bool WriteLogImageDispatch<I>::invalidate_cache(Context* on_finish) {
m_image_cache->invalidate(on_finish);
return true;
}
} // namespace cache
} // namespace librbd
template class librbd::cache::WriteLogImageDispatch<librbd::ImageCtx>;
| 7,769 | 31.923729 | 87 | cc |
null | ceph-main/src/librbd/cache/WriteLogImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WRITELOG_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_WRITELOG_IMAGE_DISPATCH_H
#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include "librbd/plugin/Api.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace cache {
namespace pwl { template <typename> class AbstractWriteLog; }
template <typename ImageCtxT>
class WriteLogImageDispatch : public io::ImageDispatchInterface {
public:
WriteLogImageDispatch(ImageCtxT* image_ctx,
pwl::AbstractWriteLog<ImageCtx> *image_cache,
plugin::Api<ImageCtxT>& plugin_api) :
m_image_ctx(image_ctx), m_image_cache(image_cache),
m_plugin_api(plugin_api) {
}
io::ImageDispatchLayer get_dispatch_layer() const override {
return io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE;
}
void shut_down(Context* on_finish) override;
bool read(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
io::ReadResult &&read_result, IOContext io_context,
int op_flags, int read_flags,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool write(
io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool discard(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool write_same(
io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool compare_and_write(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool flush(
io::AioCompletion* aio_comp, io::FlushSource flush_source,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool list_snaps(
io::AioCompletion* aio_comp, io::Extents&& image_extents,
io::SnapIds&& snap_ids, int list_snaps_flags,
io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool invalidate_cache(Context* on_finish) override;
private:
ImageCtxT* m_image_ctx;
pwl::AbstractWriteLog<ImageCtx> *m_image_cache;
plugin::Api<ImageCtxT>& m_plugin_api;
bool preprocess_length(
io::AioCompletion* aio_comp, io::Extents &image_extents) const;
};
} // namespace cache
} // namespace librbd
extern template class librbd::cache::WriteLogImageDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_WRITELOG_IMAGE_DISPATCH_H
| 3,942 | 36.198113 | 80 | h |
null | ceph-main/src/librbd/cache/pwl/AbstractWriteLog.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "AbstractWriteLog.h"
#include "include/buffer.h"
#include "include/Context.h"
#include "include/ceph_assert.h"
#include "common/deleter.h"
#include "common/dout.h"
#include "common/environment.h"
#include "common/errno.h"
#include "common/hostname.h"
#include "common/WorkQueue.h"
#include "common/Timer.h"
#include "common/perf_counters.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/pwl/ImageCacheState.h"
#include "librbd/cache/pwl/LogEntry.h"
#include "librbd/plugin/Api.h"
#include <map>
#include <vector>
#undef dout_subsys
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::AbstractWriteLog: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
using namespace std;
using namespace librbd::cache::pwl;
typedef AbstractWriteLog<ImageCtx>::Extent Extent;
typedef AbstractWriteLog<ImageCtx>::Extents Extents;
template <typename I>
AbstractWriteLog<I>::AbstractWriteLog(
I &image_ctx, librbd::cache::pwl::ImageCacheState<I>* cache_state,
Builder<This> *builder, cache::ImageWritebackInterface& image_writeback,
plugin::Api<I>& plugin_api)
: m_builder(builder),
m_write_log_guard(image_ctx.cct),
m_flush_guard(image_ctx.cct),
m_flush_guard_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::AbstractWriteLog::m_flush_guard_lock", this))),
m_deferred_dispatch_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::AbstractWriteLog::m_deferred_dispatch_lock", this))),
m_blockguard_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::AbstractWriteLog::m_blockguard_lock", this))),
m_thread_pool(
image_ctx.cct, "librbd::cache::pwl::AbstractWriteLog::thread_pool",
"tp_pwl", 4, ""),
m_cache_state(cache_state),
m_image_ctx(image_ctx),
m_log_pool_size(DEFAULT_POOL_SIZE),
m_image_writeback(image_writeback),
m_plugin_api(plugin_api),
m_log_retire_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::AbstractWriteLog::m_log_retire_lock", this))),
m_entry_reader_lock("librbd::cache::pwl::AbstractWriteLog::m_entry_reader_lock"),
m_log_append_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::AbstractWriteLog::m_log_append_lock", this))),
m_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::AbstractWriteLog::m_lock", this))),
m_blocks_to_log_entries(image_ctx.cct),
m_work_queue("librbd::cache::pwl::ReplicatedWriteLog::work_queue",
ceph::make_timespan(
image_ctx.config.template get_val<uint64_t>(
"rbd_op_thread_timeout")),
&m_thread_pool)
{
CephContext *cct = m_image_ctx.cct;
m_plugin_api.get_image_timer_instance(cct, &m_timer, &m_timer_lock);
}
template <typename I>
AbstractWriteLog<I>::~AbstractWriteLog() {
ldout(m_image_ctx.cct, 15) << "enter" << dendl;
{
std::lock_guard timer_locker(*m_timer_lock);
std::lock_guard locker(m_lock);
m_timer->cancel_event(m_timer_ctx);
m_thread_pool.stop();
ceph_assert(m_deferred_ios.size() == 0);
ceph_assert(m_ops_to_flush.size() == 0);
ceph_assert(m_ops_to_append.size() == 0);
ceph_assert(m_flush_ops_in_flight == 0);
delete m_cache_state;
m_cache_state = nullptr;
}
ldout(m_image_ctx.cct, 15) << "exit" << dendl;
}
template <typename I>
void AbstractWriteLog<I>::perf_start(std::string name) {
PerfCountersBuilder plb(m_image_ctx.cct, name, l_librbd_pwl_first,
l_librbd_pwl_last);
// Latency axis configuration for op histograms, values are in nanoseconds
PerfHistogramCommon::axis_config_d op_hist_x_axis_config{
"Latency (nsec)",
PerfHistogramCommon::SCALE_LOG2, ///< Latency in logarithmic scale
0, ///< Start at 0
5000, ///< Quantization unit is 5usec
16, ///< Ranges into the mS
};
// Syncpoint logentry number x-axis configuration for op histograms
PerfHistogramCommon::axis_config_d sp_logentry_number_config{
"logentry number",
PerfHistogramCommon::SCALE_LINEAR, // log entry number in linear scale
0, // Start at 0
1, // Quantization unit is 1
260, // Up to 260 > (MAX_WRITES_PER_SYNC_POINT)
};
// Syncpoint bytes number y-axis configuration for op histogram
PerfHistogramCommon::axis_config_d sp_bytes_number_config{
"Number of SyncPoint",
PerfHistogramCommon::SCALE_LOG2, // Request size in logarithmic scale
0, // Start at 0
512, // Quantization unit is 512
17, // Writes up to 8M >= MAX_BYTES_PER_SYNC_POINT
};
// Op size axis configuration for op histogram y axis, values are in bytes
PerfHistogramCommon::axis_config_d op_hist_y_axis_config{
"Request size (bytes)",
PerfHistogramCommon::SCALE_LOG2, ///< Request size in logarithmic scale
0, ///< Start at 0
512, ///< Quantization unit is 512 bytes
16, ///< Writes up to >32k
};
// Num items configuration for op histogram y axis, values are in items
PerfHistogramCommon::axis_config_d op_hist_y_axis_count_config{
"Number of items",
PerfHistogramCommon::SCALE_LINEAR, ///< Request size in linear scale
0, ///< Start at 0
1, ///< Quantization unit is 1
    32,                                ///< Up to 32 items
};
plb.add_u64_counter(l_librbd_pwl_rd_req, "rd", "Reads");
plb.add_u64_counter(l_librbd_pwl_rd_bytes, "rd_bytes", "Data size in reads");
plb.add_time_avg(l_librbd_pwl_rd_latency, "rd_latency", "Latency of reads");
plb.add_u64_counter(l_librbd_pwl_rd_hit_req, "hit_rd", "Reads completely hitting RWL");
plb.add_u64_counter(l_librbd_pwl_rd_hit_bytes, "rd_hit_bytes", "Bytes read from RWL");
plb.add_time_avg(l_librbd_pwl_rd_hit_latency, "hit_rd_latency", "Latency of read hits");
  plb.add_u64_counter(l_librbd_pwl_rd_part_hit_req, "part_hit_rd", "Reads partially hitting RWL");
plb.add_u64_counter_histogram(
l_librbd_pwl_syncpoint_hist, "syncpoint_logentry_bytes_histogram",
sp_logentry_number_config, sp_bytes_number_config,
"Histogram of syncpoint's logentry numbers vs bytes number");
plb.add_u64_counter(l_librbd_pwl_wr_req, "wr", "Writes");
plb.add_u64_counter(l_librbd_pwl_wr_bytes, "wr_bytes", "Data size in writes");
plb.add_u64_counter(l_librbd_pwl_wr_req_def, "wr_def", "Writes deferred for resources");
plb.add_u64_counter(l_librbd_pwl_wr_req_def_lanes, "wr_def_lanes", "Writes deferred for lanes");
plb.add_u64_counter(l_librbd_pwl_wr_req_def_log, "wr_def_log", "Writes deferred for log entries");
plb.add_u64_counter(l_librbd_pwl_wr_req_def_buf, "wr_def_buf", "Writes deferred for buffers");
plb.add_u64_counter(l_librbd_pwl_wr_req_overlap, "wr_overlap", "Writes overlapping with prior in-progress writes");
plb.add_u64_counter(l_librbd_pwl_wr_req_queued, "wr_q_barrier", "Writes queued for prior barriers (aio_flush)");
plb.add_u64_counter(l_librbd_pwl_log_ops, "log_ops", "Log appends");
plb.add_u64_avg(l_librbd_pwl_log_op_bytes, "log_op_bytes", "Average log append bytes");
plb.add_time_avg(
l_librbd_pwl_req_arr_to_all_t, "req_arr_to_all_t",
"Average arrival to allocation time (time deferred for overlap)");
plb.add_time_avg(
l_librbd_pwl_req_arr_to_dis_t, "req_arr_to_dis_t",
"Average arrival to dispatch time (includes time deferred for overlaps and allocation)");
plb.add_time_avg(
l_librbd_pwl_req_all_to_dis_t, "req_all_to_dis_t",
"Average allocation to dispatch time (time deferred for log resources)");
plb.add_time_avg(
l_librbd_pwl_wr_latency, "wr_latency",
"Latency of writes (persistent completion)");
plb.add_u64_counter_histogram(
l_librbd_pwl_wr_latency_hist, "wr_latency_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of write request latency (nanoseconds) vs. bytes written");
plb.add_time_avg(
l_librbd_pwl_wr_caller_latency, "caller_wr_latency",
"Latency of write completion to caller");
plb.add_time_avg(
l_librbd_pwl_nowait_req_arr_to_all_t, "req_arr_to_all_nw_t",
"Average arrival to allocation time (time deferred for overlap)");
plb.add_time_avg(
l_librbd_pwl_nowait_req_arr_to_dis_t, "req_arr_to_dis_nw_t",
"Average arrival to dispatch time (includes time deferred for overlaps and allocation)");
plb.add_time_avg(
l_librbd_pwl_nowait_req_all_to_dis_t, "req_all_to_dis_nw_t",
"Average allocation to dispatch time (time deferred for log resources)");
plb.add_time_avg(
l_librbd_pwl_nowait_wr_latency, "wr_latency_nw",
"Latency of writes (persistent completion) not deferred for free space");
plb.add_u64_counter_histogram(
l_librbd_pwl_nowait_wr_latency_hist, "wr_latency_nw_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of write request latency (nanoseconds) vs. bytes written for writes not deferred for free space");
plb.add_time_avg(
l_librbd_pwl_nowait_wr_caller_latency, "caller_wr_latency_nw",
"Latency of write completion to callerfor writes not deferred for free space");
plb.add_time_avg(l_librbd_pwl_log_op_alloc_t, "op_alloc_t", "Average buffer pmemobj_reserve() time");
plb.add_u64_counter_histogram(
l_librbd_pwl_log_op_alloc_t_hist, "op_alloc_t_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of buffer pmemobj_reserve() time (nanoseconds) vs. bytes written");
plb.add_time_avg(l_librbd_pwl_log_op_dis_to_buf_t, "op_dis_to_buf_t", "Average dispatch to buffer persist time");
plb.add_time_avg(l_librbd_pwl_log_op_dis_to_app_t, "op_dis_to_app_t", "Average dispatch to log append time");
plb.add_time_avg(l_librbd_pwl_log_op_dis_to_cmp_t, "op_dis_to_cmp_t", "Average dispatch to persist completion time");
plb.add_u64_counter_histogram(
l_librbd_pwl_log_op_dis_to_cmp_t_hist, "op_dis_to_cmp_t_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of op dispatch to persist complete time (nanoseconds) vs. bytes written");
plb.add_time_avg(
l_librbd_pwl_log_op_buf_to_app_t, "op_buf_to_app_t",
"Average buffer persist to log append time (write data persist/replicate + wait for append time)");
plb.add_time_avg(
l_librbd_pwl_log_op_buf_to_bufc_t, "op_buf_to_bufc_t",
"Average buffer persist time (write data persist/replicate time)");
plb.add_u64_counter_histogram(
l_librbd_pwl_log_op_buf_to_bufc_t_hist, "op_buf_to_bufc_t_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of write buffer persist time (nanoseconds) vs. bytes written");
plb.add_time_avg(
l_librbd_pwl_log_op_app_to_cmp_t, "op_app_to_cmp_t",
"Average log append to persist complete time (log entry append/replicate + wait for complete time)");
plb.add_time_avg(
l_librbd_pwl_log_op_app_to_appc_t, "op_app_to_appc_t",
"Average log append to persist complete time (log entry append/replicate time)");
plb.add_u64_counter_histogram(
l_librbd_pwl_log_op_app_to_appc_t_hist, "op_app_to_appc_t_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of log append persist time (nanoseconds) (vs. op bytes)");
plb.add_u64_counter(l_librbd_pwl_discard, "discard", "Discards");
plb.add_u64_counter(l_librbd_pwl_discard_bytes, "discard_bytes", "Bytes discarded");
plb.add_time_avg(l_librbd_pwl_discard_latency, "discard_lat", "Discard latency");
plb.add_u64_counter(l_librbd_pwl_aio_flush, "aio_flush", "AIO flush (flush to RWL)");
plb.add_u64_counter(l_librbd_pwl_aio_flush_def, "aio_flush_def", "AIO flushes deferred for resources");
plb.add_time_avg(l_librbd_pwl_aio_flush_latency, "aio_flush_lat", "AIO flush latency");
plb.add_u64_counter(l_librbd_pwl_ws,"ws", "Write Sames");
plb.add_u64_counter(l_librbd_pwl_ws_bytes, "ws_bytes", "Write Same bytes to image");
plb.add_time_avg(l_librbd_pwl_ws_latency, "ws_lat", "Write Same latency");
plb.add_u64_counter(l_librbd_pwl_cmp, "cmp", "Compare and Write requests");
plb.add_u64_counter(l_librbd_pwl_cmp_bytes, "cmp_bytes", "Compare and Write bytes compared/written");
plb.add_time_avg(l_librbd_pwl_cmp_latency, "cmp_lat", "Compare and Write latency");
plb.add_u64_counter(l_librbd_pwl_cmp_fails, "cmp_fails", "Compare and Write compare fails");
plb.add_u64_counter(l_librbd_pwl_internal_flush, "internal_flush", "Flush RWL (write back to OSD)");
plb.add_time_avg(l_librbd_pwl_writeback_latency, "writeback_lat", "write back to OSD latency");
plb.add_u64_counter(l_librbd_pwl_invalidate_cache, "invalidate", "Invalidate RWL");
plb.add_u64_counter(l_librbd_pwl_invalidate_discard_cache, "discard", "Discard and invalidate RWL");
plb.add_time_avg(l_librbd_pwl_append_tx_t, "append_tx_lat", "Log append transaction latency");
plb.add_u64_counter_histogram(
l_librbd_pwl_append_tx_t_hist, "append_tx_lat_histogram",
op_hist_x_axis_config, op_hist_y_axis_count_config,
"Histogram of log append transaction time (nanoseconds) vs. entries appended");
plb.add_time_avg(l_librbd_pwl_retire_tx_t, "retire_tx_lat", "Log retire transaction latency");
plb.add_u64_counter_histogram(
l_librbd_pwl_retire_tx_t_hist, "retire_tx_lat_histogram",
op_hist_x_axis_config, op_hist_y_axis_count_config,
"Histogram of log retire transaction time (nanoseconds) vs. entries retired");
m_perfcounter = plb.create_perf_counters();
m_image_ctx.cct->get_perfcounters_collection()->add(m_perfcounter);
}
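/* Unregister the PWL perf counters registered by perf_start() and free them. */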
template <typename I>
void AbstractWriteLog<I>::perf_stop() {
ceph_assert(m_perfcounter);
m_image_ctx.cct->get_perfcounters_collection()->remove(m_perfcounter);
delete m_perfcounter;
}
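/* Dump this process's perf counters and histograms as JSON into the debug log. */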
template <typename I>
void AbstractWriteLog<I>::log_perf() {
bufferlist bl;
Formatter *f = Formatter::create("json-pretty");
bl.append("Perf dump follows\n--- Begin perf dump ---\n");
bl.append("{\n");
stringstream ss;
utime_t now = ceph_clock_now();
ss << "\"test_time\": \"" << now << "\",";
ss << "\"image\": \"" << m_image_ctx.name << "\",";
bl.append(ss);
bl.append("\"stats\": ");
m_image_ctx.cct->get_perfcounters_collection()->dump_formatted(f, false, false);
f->flush(bl);
bl.append(",\n\"histograms\": ");
m_image_ctx.cct->get_perfcounters_collection()->dump_formatted_histograms(f, 0);
f->flush(bl);
delete f;
bl.append("}\n--- End perf dump ---\n");
bl.append('\0');
ldout(m_image_ctx.cct, 1) << bl.c_str() << dendl;
}
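/* Log a summary of cache occupancy (entries, bytes allocated/cached/dirty) and
 * persist the current image cache state. Invoked from the stats timer. */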
template <typename I>
void AbstractWriteLog<I>::periodic_stats() {
std::unique_lock locker(m_lock);
ldout(m_image_ctx.cct, 5) << "STATS: m_log_entries=" << m_log_entries.size()
<< ", m_dirty_log_entries=" << m_dirty_log_entries.size()
<< ", m_free_log_entries=" << m_free_log_entries
<< ", m_bytes_allocated=" << m_bytes_allocated
<< ", m_bytes_cached=" << m_bytes_cached
<< ", m_bytes_dirty=" << m_bytes_dirty
<< ", bytes available=" << m_bytes_allocated_cap - m_bytes_allocated
<< ", m_first_valid_entry=" << m_first_valid_entry
<< ", m_first_free_entry=" << m_first_free_entry
<< ", m_current_sync_gen=" << m_current_sync_gen
<< ", m_flushed_sync_gen=" << m_flushed_sync_gen
<< dendl;
update_image_cache_state();
write_image_cache_state(locker);
}
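/* Schedule the next periodic_stats() run LOG_STATS_INTERVAL_SECONDS from now.
 * Caller must hold *m_timer_lock; the timer callback re-arms itself. */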
template <typename I>
void AbstractWriteLog<I>::arm_periodic_stats() {
ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
m_timer_ctx = new LambdaContext([this](int r) {
/* m_timer_lock is held */
periodic_stats();
arm_periodic_stats();
});
m_timer->add_event_after(LOG_STATS_INTERVAL_SECONDS, m_timer_ctx);
}
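/* Reconstruct one in-memory log entry from a cache entry found while reloading
 * an existing log. Sync point entries advance m_current_sync_gen; writer entries
 * whose sync point has not been seen yet are recorded in missing_sync_points. */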
template <typename I>
void AbstractWriteLog<I>::update_entries(std::shared_ptr<GenericLogEntry> *log_entry,
WriteLogCacheEntry *cache_entry, std::map<uint64_t, bool> &missing_sync_points,
std::map<uint64_t, std::shared_ptr<SyncPointLogEntry>> &sync_point_entries,
uint64_t entry_index) {
bool writer = cache_entry->is_writer();
if (cache_entry->is_sync_point()) {
ldout(m_image_ctx.cct, 20) << "Entry " << entry_index
<< " is a sync point. cache_entry=[" << *cache_entry << "]" << dendl;
auto sync_point_entry = std::make_shared<SyncPointLogEntry>(cache_entry->sync_gen_number);
*log_entry = sync_point_entry;
sync_point_entries[cache_entry->sync_gen_number] = sync_point_entry;
missing_sync_points.erase(cache_entry->sync_gen_number);
m_current_sync_gen = cache_entry->sync_gen_number;
} else if (cache_entry->is_write()) {
ldout(m_image_ctx.cct, 20) << "Entry " << entry_index
<< " is a write. cache_entry=[" << *cache_entry << "]" << dendl;
auto write_entry =
m_builder->create_write_log_entry(nullptr, cache_entry->image_offset_bytes, cache_entry->write_bytes);
write_data_to_buffer(write_entry, cache_entry);
*log_entry = write_entry;
} else if (cache_entry->is_writesame()) {
ldout(m_image_ctx.cct, 20) << "Entry " << entry_index
<< " is a write same. cache_entry=[" << *cache_entry << "]" << dendl;
auto ws_entry =
m_builder->create_writesame_log_entry(nullptr, cache_entry->image_offset_bytes,
cache_entry->write_bytes, cache_entry->ws_datalen);
write_data_to_buffer(ws_entry, cache_entry);
*log_entry = ws_entry;
} else if (cache_entry->is_discard()) {
ldout(m_image_ctx.cct, 20) << "Entry " << entry_index
<< " is a discard. cache_entry=[" << *cache_entry << "]" << dendl;
auto discard_entry =
std::make_shared<DiscardLogEntry>(nullptr, cache_entry->image_offset_bytes, cache_entry->write_bytes,
m_discard_granularity_bytes);
*log_entry = discard_entry;
} else {
lderr(m_image_ctx.cct) << "Unexpected entry type in entry " << entry_index
<< ", cache_entry=[" << *cache_entry << "]" << dendl;
}
if (writer) {
ldout(m_image_ctx.cct, 20) << "Entry " << entry_index
<< " writes. cache_entry=[" << *cache_entry << "]" << dendl;
if (!sync_point_entries[cache_entry->sync_gen_number]) {
missing_sync_points[cache_entry->sync_gen_number] = true;
}
}
}
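/* After reloading log entries, create any sync points that writer entries
 * reference but that were not found in the log, then link each write entry to
 * its sync point, rebuild the write log map, and classify entries as dirty or
 * already flushed based on m_flushed_sync_gen. */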
template <typename I>
void AbstractWriteLog<I>::update_sync_points(std::map<uint64_t, bool> &missing_sync_points,
std::map<uint64_t, std::shared_ptr<SyncPointLogEntry>> &sync_point_entries,
DeferredContexts &later) {
/* Create missing sync points. These must not be appended until the
* entry reload is complete and the write map is up to
* date. Currently this is handled by the deferred contexts object
* passed to new_sync_point(). These contexts won't be completed
* until this function returns. */
for (auto &kv : missing_sync_points) {
ldout(m_image_ctx.cct, 5) << "Adding sync point " << kv.first << dendl;
if (0 == m_current_sync_gen) {
/* The unlikely case where the log contains writing entries, but no sync
* points (e.g. because they were all retired) */
m_current_sync_gen = kv.first-1;
}
ceph_assert(kv.first == m_current_sync_gen+1);
init_flush_new_sync_point(later);
ceph_assert(kv.first == m_current_sync_gen);
sync_point_entries[kv.first] = m_current_sync_point->log_entry;
}
/*
* Iterate over the log entries again (this time via the global
* entries list), connecting write entries to their sync points and
* updating the sync point stats.
*
* Add writes to the write log map.
*/
std::shared_ptr<SyncPointLogEntry> previous_sync_point_entry = nullptr;
for (auto &log_entry : m_log_entries) {
if ((log_entry->write_bytes() > 0) || (log_entry->bytes_dirty() > 0)) {
/* This entry is one of the types that write */
auto gen_write_entry = static_pointer_cast<GenericWriteLogEntry>(log_entry);
if (gen_write_entry) {
auto sync_point_entry = sync_point_entries[gen_write_entry->ram_entry.sync_gen_number];
if (!sync_point_entry) {
lderr(m_image_ctx.cct) << "Sync point missing for entry=[" << *gen_write_entry << "]" << dendl;
ceph_assert(false);
} else {
gen_write_entry->sync_point_entry = sync_point_entry;
sync_point_entry->writes++;
sync_point_entry->bytes += gen_write_entry->ram_entry.write_bytes;
sync_point_entry->writes_completed++;
m_blocks_to_log_entries.add_log_entry(gen_write_entry);
/* This entry is only dirty if its sync gen number is > the flushed
* sync gen number from the root object. */
if (gen_write_entry->ram_entry.sync_gen_number > m_flushed_sync_gen) {
m_dirty_log_entries.push_back(log_entry);
m_bytes_dirty += gen_write_entry->bytes_dirty();
} else {
gen_write_entry->set_flushed(true);
sync_point_entry->writes_flushed++;
}
/* calc m_bytes_allocated & m_bytes_cached */
inc_allocated_cached_bytes(log_entry);
}
}
} else {
/* This entry is sync point entry */
auto sync_point_entry = static_pointer_cast<SyncPointLogEntry>(log_entry);
if (sync_point_entry) {
if (previous_sync_point_entry) {
previous_sync_point_entry->next_sync_point_entry = sync_point_entry;
if (previous_sync_point_entry->ram_entry.sync_gen_number > m_flushed_sync_gen) {
sync_point_entry->prior_sync_point_flushed = false;
ceph_assert(!previous_sync_point_entry->prior_sync_point_flushed ||
(0 == previous_sync_point_entry->writes) ||
(previous_sync_point_entry->writes >= previous_sync_point_entry->writes_flushed));
} else {
sync_point_entry->prior_sync_point_flushed = true;
ceph_assert(previous_sync_point_entry->prior_sync_point_flushed);
ceph_assert(previous_sync_point_entry->writes == previous_sync_point_entry->writes_flushed);
}
} else {
/* There are no previous sync points, so we'll consider them flushed */
sync_point_entry->prior_sync_point_flushed = true;
}
previous_sync_point_entry = sync_point_entry;
ldout(m_image_ctx.cct, 10) << "Loaded to sync point=[" << *sync_point_entry << dendl;
}
}
}
if (0 == m_current_sync_gen) {
/* If a re-opened log was completely flushed, we'll have found no sync point entries here,
* and not advanced m_current_sync_gen. Here we ensure it starts past the last flushed sync
* point recorded in the log. */
m_current_sync_gen = m_flushed_sync_gen;
}
}
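/* Common PWL initialization: determine the cache pool file path and size (from
 * image config for a new cache), reconcile the pool file on disk with the
 * recorded cache state, initialize the pool, then start the first sync point
 * and arm the periodic stats timer. */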
template <typename I>
void AbstractWriteLog<I>::pwl_init(Context *on_finish, DeferredContexts &later) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
ceph_assert(m_cache_state);
std::lock_guard locker(m_lock);
ceph_assert(!m_initialized);
ldout(cct,5) << "image name: " << m_image_ctx.name << " id: " << m_image_ctx.id << dendl;
if (!m_cache_state->present) {
m_cache_state->host = ceph_get_short_hostname();
m_cache_state->size = m_image_ctx.config.template get_val<uint64_t>(
"rbd_persistent_cache_size");
string path = m_image_ctx.config.template get_val<string>(
"rbd_persistent_cache_path");
std::string pool_name = m_image_ctx.md_ctx.get_pool_name();
m_cache_state->path = path + "/rbd-pwl." + pool_name + "." + m_image_ctx.id + ".pool";
}
ldout(cct,5) << "pwl_size: " << m_cache_state->size << dendl;
ldout(cct,5) << "pwl_path: " << m_cache_state->path << dendl;
m_log_pool_name = m_cache_state->path;
m_log_pool_size = max(m_cache_state->size, MIN_POOL_SIZE);
m_log_pool_size = p2align(m_log_pool_size, POOL_SIZE_ALIGN);
ldout(cct, 5) << "pool " << m_log_pool_name << " size " << m_log_pool_size
<< " (adjusted from " << m_cache_state->size << ")" << dendl;
if ((!m_cache_state->present) &&
(access(m_log_pool_name.c_str(), F_OK) == 0)) {
ldout(cct, 5) << "There's an existing pool file " << m_log_pool_name
<< ", While there's no cache in the image metadata." << dendl;
if (remove(m_log_pool_name.c_str()) != 0) {
lderr(cct) << "failed to remove the pool file " << m_log_pool_name
<< dendl;
on_finish->complete(-errno);
return;
} else {
ldout(cct, 5) << "Removed the existing pool file." << dendl;
}
} else if ((m_cache_state->present) &&
(access(m_log_pool_name.c_str(), F_OK) != 0)) {
lderr(cct) << "can't find the existed pool file: " << m_log_pool_name
<< ". error: " << cpp_strerror(-errno) << dendl;
on_finish->complete(-errno);
return;
}
bool succeeded = initialize_pool(on_finish, later);
if (!succeeded) {
return;
}
ldout(cct,1) << "pool " << m_log_pool_name << " has " << m_total_log_entries
<< " log entries, " << m_free_log_entries << " of which are free."
<< " first_valid=" << m_first_valid_entry
<< ", first_free=" << m_first_free_entry
<< ", flushed_sync_gen=" << m_flushed_sync_gen
<< ", m_current_sync_gen=" << m_current_sync_gen << dendl;
if (m_first_free_entry == m_first_valid_entry) {
ldout(cct,1) << "write log is empty" << dendl;
m_cache_state->empty = true;
}
/* Start the sync point following the last one seen in the
* log. Flush the last sync point created during the loading of the
* existing log entries. */
init_flush_new_sync_point(later);
ldout(cct,20) << "new sync point = [" << m_current_sync_point << "]" << dendl;
m_initialized = true;
// Start the thread
m_thread_pool.start();
/* Do these after we drop lock */
later.add(new LambdaContext([this](int r) {
/* Log stats for the first time */
periodic_stats();
/* Arm periodic stats logging for the first time */
std::lock_guard timer_locker(*m_timer_lock);
arm_periodic_stats();
}));
m_image_ctx.op_work_queue->queue(on_finish, 0);
}
template <typename I>
void AbstractWriteLog<I>::write_image_cache_state(std::unique_lock<ceph::mutex>& locker) {
using klass = AbstractWriteLog<I>;
Context *ctx = util::create_context_callback<
klass, &klass::handle_write_image_cache_state>(this);
m_cache_state->write_image_cache_state(locker, ctx);
}
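/* Refresh the persisted-state snapshot (allocated/cached/dirty/free bytes and
 * read hit/miss counters) from the current counters. Caller must hold m_lock. */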
template <typename I>
void AbstractWriteLog<I>::update_image_cache_state() {
ldout(m_image_ctx.cct, 10) << dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
m_cache_state->allocated_bytes = m_bytes_allocated;
m_cache_state->cached_bytes = m_bytes_cached;
m_cache_state->dirty_bytes = m_bytes_dirty;
m_cache_state->free_bytes = m_bytes_allocated_cap - m_bytes_allocated;
m_cache_state->hits_full = m_perfcounter->get(l_librbd_pwl_rd_hit_req);
m_cache_state->hits_partial = m_perfcounter->get(l_librbd_pwl_rd_part_hit_req);
m_cache_state->misses = m_perfcounter->get(l_librbd_pwl_rd_req) -
m_cache_state->hits_full - m_cache_state->hits_partial;
m_cache_state->hit_bytes = m_perfcounter->get(l_librbd_pwl_rd_hit_bytes);
m_cache_state->miss_bytes = m_perfcounter->get(l_librbd_pwl_rd_bytes) -
m_cache_state->hit_bytes;
}
template <typename I>
void AbstractWriteLog<I>::handle_write_image_cache_state(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to update image cache state: " << cpp_strerror(r)
<< dendl;
return;
}
}
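/* Public init entry point: start perf counters, run pwl_init(), and on success
 * persist the updated image cache state before completing on_finish. */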
template <typename I>
void AbstractWriteLog<I>::init(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
auto pname = std::string("librbd-pwl-") + m_image_ctx.id +
std::string("-") + m_image_ctx.md_ctx.get_pool_name() +
std::string("-") + m_image_ctx.name;
perf_start(pname);
ceph_assert(!m_initialized);
Context *ctx = new LambdaContext(
[this, on_finish](int r) {
if (r >= 0) {
std::unique_lock locker(m_lock);
update_image_cache_state();
m_cache_state->write_image_cache_state(locker, on_finish);
} else {
on_finish->complete(r);
}
});
DeferredContexts later;
pwl_init(ctx, later);
}
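/* Shut down the write log: flush in-flight and dirty entries, wait for
 * outstanding operations, remove the pool file, persist the final cache state,
 * and stop perf counters. The LambdaContexts below run in reverse order of
 * construction. */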
template <typename I>
void AbstractWriteLog<I>::shut_down(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
ldout(cct,5) << "image name: " << m_image_ctx.name << " id: " << m_image_ctx.id << dendl;
Context *ctx = new LambdaContext(
[this, on_finish](int r) {
if (m_perfcounter) {
perf_stop();
}
ldout(m_image_ctx.cct, 6) << "shutdown complete" << dendl;
m_image_ctx.op_work_queue->queue(on_finish, r);
});
ctx = new LambdaContext(
[this, ctx](int r) {
ldout(m_image_ctx.cct, 6) << "image cache cleaned" << dendl;
Context *next_ctx = override_ctx(r, ctx);
periodic_stats();
std::unique_lock locker(m_lock);
check_image_cache_state_clean();
m_wake_up_enabled = false;
m_log_entries.clear();
m_cache_state->clean = true;
m_cache_state->empty = true;
remove_pool_file();
update_image_cache_state();
m_cache_state->write_image_cache_state(locker, next_ctx);
});
ctx = new LambdaContext(
[this, ctx](int r) {
Context *next_ctx = override_ctx(r, ctx);
ldout(m_image_ctx.cct, 6) << "waiting for in flight operations" << dendl;
// Wait for in progress IOs to complete
next_ctx = util::create_async_context_callback(&m_work_queue, next_ctx);
m_async_op_tracker.wait_for_ops(next_ctx);
});
ctx = new LambdaContext(
[this, ctx](int r) {
Context *next_ctx = override_ctx(r, ctx);
{
/* Sync with process_writeback_dirty_entries() */
RWLock::WLocker entry_reader_wlocker(m_entry_reader_lock);
m_shutting_down = true;
/* Flush all writes to OSDs (unless disabled) and wait for all
in-progress flush writes to complete */
ldout(m_image_ctx.cct, 6) << "flushing" << dendl;
periodic_stats();
}
flush_dirty_entries(next_ctx);
});
ctx = new LambdaContext(
[this, ctx](int r) {
ldout(m_image_ctx.cct, 6) << "Done internal_flush in shutdown" << dendl;
m_work_queue.queue(ctx, r);
});
/* Complete all in-flight writes before shutting down */
ldout(m_image_ctx.cct, 6) << "internal_flush in shutdown" << dendl;
internal_flush(false, ctx);
}
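/* Read path: split the request into cache hits and misses using the write log
 * map, serve hits from log entry buffers (discard hits read as zeros), and pass
 * the miss extents down to the image below the cache. */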
template <typename I>
void AbstractWriteLog<I>::read(Extents&& image_extents,
ceph::bufferlist* bl,
int fadvise_flags, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
utime_t now = ceph_clock_now();
on_finish = new LambdaContext(
[this, on_finish](int r) {
m_async_op_tracker.finish_op();
on_finish->complete(r);
});
C_ReadRequest *read_ctx = m_builder->create_read_request(
cct, now, m_perfcounter, bl, on_finish);
ldout(cct, 20) << "name: " << m_image_ctx.name << " id: " << m_image_ctx.id
<< "image_extents=" << image_extents
<< ", bl=" << bl
<< ", on_finish=" << on_finish << dendl;
ceph_assert(m_initialized);
bl->clear();
m_perfcounter->inc(l_librbd_pwl_rd_req, 1);
std::vector<std::shared_ptr<GenericWriteLogEntry>> log_entries_to_read;
std::vector<bufferlist*> bls_to_read;
m_async_op_tracker.start_op();
Context *ctx = new LambdaContext(
[this, read_ctx, fadvise_flags](int r) {
if (read_ctx->miss_extents.empty()) {
/* All of this read comes from RWL */
read_ctx->complete(0);
} else {
/* Pass the read misses on to the layer below RWL */
m_image_writeback.aio_read(
std::move(read_ctx->miss_extents), &read_ctx->miss_bl,
fadvise_flags, read_ctx);
}
});
/*
* The strategy here is to look up all the WriteLogMapEntries that overlap
* this read, and iterate through those to separate this read into hits and
* misses. A new Extents object is produced here with Extents for each miss
* region. The miss Extents is then passed on to the read cache below RWL. We
* also produce an ImageExtentBufs for all the extents (hit or miss) in this
* read. When the read from the lower cache layer completes, we iterate
* through the ImageExtentBufs and insert buffers for each cache hit at the
* appropriate spot in the bufferlist returned from below for the miss
* read. The buffers we insert here refer directly to regions of various
* write log entry data buffers.
*
* Locking: These buffer objects hold a reference on the write log entries
* they refer to. Log entries can't be retired until there are no references.
* The GenericWriteLogEntry references are released by the buffer destructor.
*/
for (auto &extent : image_extents) {
uint64_t extent_offset = 0;
RWLock::RLocker entry_reader_locker(m_entry_reader_lock);
WriteLogMapEntries map_entries = m_blocks_to_log_entries.find_map_entries(
block_extent(extent));
for (auto &map_entry : map_entries) {
Extent entry_image_extent(pwl::image_extent(map_entry.block_extent));
/* If this map entry starts after the current image extent offset ... */
if (entry_image_extent.first > extent.first + extent_offset) {
/* ... add range before map_entry to miss extents */
uint64_t miss_extent_start = extent.first + extent_offset;
uint64_t miss_extent_length = entry_image_extent.first -
miss_extent_start;
Extent miss_extent(miss_extent_start, miss_extent_length);
read_ctx->miss_extents.push_back(miss_extent);
/* Add miss range to read extents */
auto miss_extent_buf = std::make_shared<ImageExtentBuf>(miss_extent);
read_ctx->read_extents.push_back(miss_extent_buf);
extent_offset += miss_extent_length;
}
ceph_assert(entry_image_extent.first <= extent.first + extent_offset);
uint64_t entry_offset = 0;
/* If this map entry starts before the current image extent offset ... */
if (entry_image_extent.first < extent.first + extent_offset) {
/* ... compute offset into log entry for this read extent */
entry_offset = (extent.first + extent_offset) - entry_image_extent.first;
}
/* This read hit ends at the end of the extent or the end of the log
entry, whichever is less. */
uint64_t entry_hit_length = min(entry_image_extent.second - entry_offset,
extent.second - extent_offset);
Extent hit_extent(entry_image_extent.first, entry_hit_length);
if (0 == map_entry.log_entry->write_bytes() &&
0 < map_entry.log_entry->bytes_dirty()) {
/* discard log entry */
ldout(cct, 20) << "discard log entry" << dendl;
auto discard_entry = map_entry.log_entry;
ldout(cct, 20) << "read hit on discard entry: log_entry="
<< *discard_entry
<< dendl;
/* Discards read as zero, so we'll construct a bufferlist of zeros */
bufferlist zero_bl;
zero_bl.append_zero(entry_hit_length);
/* Add hit extent to read extents */
auto hit_extent_buf = std::make_shared<ImageExtentBuf>(
hit_extent, zero_bl);
read_ctx->read_extents.push_back(hit_extent_buf);
} else {
ldout(cct, 20) << "write or writesame log entry" << dendl;
/* write and writesame log entry */
/* Offset of the map entry into the log entry's buffer */
uint64_t map_entry_buffer_offset = entry_image_extent.first -
map_entry.log_entry->ram_entry.image_offset_bytes;
/* Offset into the log entry buffer of this read hit */
uint64_t read_buffer_offset = map_entry_buffer_offset + entry_offset;
/* Create buffer object referring to pmem pool for this read hit */
collect_read_extents(
read_buffer_offset, map_entry, log_entries_to_read, bls_to_read,
entry_hit_length, hit_extent, read_ctx);
}
/* Exclude RWL hit range from buffer and extent */
extent_offset += entry_hit_length;
ldout(cct, 20) << map_entry << dendl;
}
/* If the last map entry didn't consume the entire image extent ... */
if (extent.second > extent_offset) {
/* ... add the rest of this extent to miss extents */
uint64_t miss_extent_start = extent.first + extent_offset;
uint64_t miss_extent_length = extent.second - extent_offset;
Extent miss_extent(miss_extent_start, miss_extent_length);
read_ctx->miss_extents.push_back(miss_extent);
/* Add miss range to read extents */
auto miss_extent_buf = std::make_shared<ImageExtentBuf>(miss_extent);
read_ctx->read_extents.push_back(miss_extent_buf);
extent_offset += miss_extent_length;
}
}
ldout(cct, 20) << "miss_extents=" << read_ctx->miss_extents
<< ", miss_bl=" << read_ctx->miss_bl << dendl;
complete_read(log_entries_to_read, bls_to_read, ctx);
}
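/* Write path: optionally split large extents, build a write request, and
 * allocate/dispatch it once the block guard for the affected range is
 * acquired. */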
template <typename I>
void AbstractWriteLog<I>::write(Extents &&image_extents,
bufferlist&& bl,
int fadvise_flags,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "aio_write" << dendl;
utime_t now = ceph_clock_now();
m_perfcounter->inc(l_librbd_pwl_wr_req, 1);
ceph_assert(m_initialized);
/* Split image extents larger than 1M. This isn't strictly necessary but
* makes libpmemobj allocator's job easier and reduces pmemobj_defrag() cost.
* We plan to manage pmem space and allocation by ourselves in the future.
*/
Extents split_image_extents;
uint64_t max_extent_size = get_max_extent();
if (max_extent_size != 0) {
for (auto extent : image_extents) {
if (extent.second > max_extent_size) {
uint64_t off = extent.first;
uint64_t extent_bytes = extent.second;
for (int i = 0; extent_bytes != 0; ++i) {
Extent _ext;
_ext.first = off + i * max_extent_size;
_ext.second = std::min(max_extent_size, extent_bytes);
extent_bytes -= _ext.second;
split_image_extents.emplace_back(_ext);
}
} else {
split_image_extents.emplace_back(extent);
}
}
} else {
split_image_extents = image_extents;
}
C_WriteRequestT *write_req =
m_builder->create_write_request(*this, now, std::move(split_image_extents),
std::move(bl), fadvise_flags, m_lock,
m_perfcounter, on_finish);
m_perfcounter->inc(l_librbd_pwl_wr_bytes,
write_req->image_extents_summary.total_bytes);
/* The lambda below will be called when the block guard for all
* blocks affected by this write is obtained */
GuardedRequestFunctionContext *guarded_ctx =
new GuardedRequestFunctionContext([this,
write_req](GuardedRequestFunctionContext &guard_ctx) {
write_req->blockguard_acquired(guard_ctx);
alloc_and_dispatch_io_req(write_req);
});
detain_guarded_request(write_req, guarded_ctx, false);
}
template <typename I>
void AbstractWriteLog<I>::discard(uint64_t offset, uint64_t length,
uint32_t discard_granularity_bytes,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
utime_t now = ceph_clock_now();
m_perfcounter->inc(l_librbd_pwl_discard, 1);
Extents discard_extents = {{offset, length}};
m_discard_granularity_bytes = discard_granularity_bytes;
ceph_assert(m_initialized);
auto *discard_req =
new C_DiscardRequestT(*this, now, std::move(discard_extents), discard_granularity_bytes,
m_lock, m_perfcounter, on_finish);
/* The lambda below will be called when the block guard for all
* blocks affected by this discard is obtained */
GuardedRequestFunctionContext *guarded_ctx =
new GuardedRequestFunctionContext([this, discard_req](GuardedRequestFunctionContext &guard_ctx) {
discard_req->blockguard_acquired(guard_ctx);
alloc_and_dispatch_io_req(discard_req);
});
detain_guarded_request(discard_req, guarded_ctx, false);
}
/**
* Aio_flush completes when all previously completed writes are
* flushed to persistent cache. We make a best-effort attempt to also
* defer until all in-progress writes complete, but we may not know
* about all of the writes the application considers in-progress yet,
* due to uncertainty in the IO submission workq (multiple WQ threads
* may allow out-of-order submission).
*
* This flush operation will not wait for writes deferred for overlap
* in the block guard.
*/
template <typename I>
void AbstractWriteLog<I>::flush(io::FlushSource flush_source, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "on_finish=" << on_finish << " flush_source=" << flush_source << dendl;
if (io::FLUSH_SOURCE_SHUTDOWN == flush_source || io::FLUSH_SOURCE_INTERNAL == flush_source ||
io::FLUSH_SOURCE_WRITE_BLOCK == flush_source) {
internal_flush(false, on_finish);
return;
}
m_perfcounter->inc(l_librbd_pwl_aio_flush, 1);
/* May be called even if initialization fails */
if (!m_initialized) {
ldout(cct, 05) << "never initialized" << dendl;
/* Deadlock if completed here */
m_image_ctx.op_work_queue->queue(on_finish, 0);
return;
}
{
std::shared_lock image_locker(m_image_ctx.image_lock);
if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only) {
on_finish->complete(-EROFS);
return;
}
}
auto flush_req = make_flush_req(on_finish);
GuardedRequestFunctionContext *guarded_ctx =
new GuardedRequestFunctionContext([this, flush_req](GuardedRequestFunctionContext &guard_ctx) {
ldout(m_image_ctx.cct, 20) << "flush_req=" << flush_req << " cell=" << guard_ctx.cell << dendl;
ceph_assert(guard_ctx.cell);
flush_req->detained = guard_ctx.state.detained;
/* We don't call flush_req->set_cell(), because the block guard will be released here */
{
DeferredContexts post_unlock; /* Do these when the lock below is released */
std::lock_guard locker(m_lock);
if (!m_persist_on_flush && m_persist_on_write_until_flush) {
m_persist_on_flush = true;
ldout(m_image_ctx.cct, 5) << "now persisting on flush" << dendl;
}
/*
* Create a new sync point if there have been writes since the last
* one.
*
* We do not flush the caches below the RWL here.
*/
flush_new_sync_point_if_needed(flush_req, post_unlock);
}
release_guarded_request(guard_ctx.cell);
});
detain_guarded_request(flush_req, guarded_ctx, true);
}
template <typename I>
void AbstractWriteLog<I>::writesame(uint64_t offset, uint64_t length,
bufferlist&& bl, int fadvise_flags,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "aio_writesame" << dendl;
utime_t now = ceph_clock_now();
Extents ws_extents = {{offset, length}};
m_perfcounter->inc(l_librbd_pwl_ws, 1);
ceph_assert(m_initialized);
/* A write same request is also a write request. The key difference is the
* write same data buffer is shorter than the extent of the request. The full
* extent will be used in the block guard, and appear in
* m_blocks_to_log_entries_map. The data buffer allocated for the WS is only
* as long as the length of the bl here, which is the pattern that's repeated
* in the image for the entire length of this WS. Read hits and flushing of
* write sames are different than normal writes. */
C_WriteSameRequestT *ws_req =
m_builder->create_writesame_request(*this, now, std::move(ws_extents), std::move(bl),
fadvise_flags, m_lock, m_perfcounter, on_finish);
m_perfcounter->inc(l_librbd_pwl_ws_bytes, ws_req->image_extents_summary.total_bytes);
/* The lambda below will be called when the block guard for all
* blocks affected by this write is obtained */
GuardedRequestFunctionContext *guarded_ctx =
new GuardedRequestFunctionContext([this, ws_req](GuardedRequestFunctionContext &guard_ctx) {
ws_req->blockguard_acquired(guard_ctx);
alloc_and_dispatch_io_req(ws_req);
});
detain_guarded_request(ws_req, guarded_ctx, false);
}
template <typename I>
void AbstractWriteLog<I>::compare_and_write(Extents &&image_extents,
bufferlist&& cmp_bl,
bufferlist&& bl,
uint64_t *mismatch_offset,
int fadvise_flags,
Context *on_finish) {
ldout(m_image_ctx.cct, 20) << dendl;
utime_t now = ceph_clock_now();
m_perfcounter->inc(l_librbd_pwl_cmp, 1);
ceph_assert(m_initialized);
/* A compare and write request is also a write request. We only allocate
* resources and dispatch this write request if the compare phase
* succeeds. */
C_WriteRequestT *cw_req =
m_builder->create_comp_and_write_request(
*this, now, std::move(image_extents), std::move(cmp_bl), std::move(bl),
mismatch_offset, fadvise_flags, m_lock, m_perfcounter, on_finish);
m_perfcounter->inc(l_librbd_pwl_cmp_bytes, cw_req->image_extents_summary.total_bytes);
/* The lambda below will be called when the block guard for all
* blocks affected by this write is obtained */
GuardedRequestFunctionContext *guarded_ctx =
new GuardedRequestFunctionContext([this, cw_req](GuardedRequestFunctionContext &guard_ctx) {
cw_req->blockguard_acquired(guard_ctx);
auto read_complete_ctx = new LambdaContext(
[this, cw_req](int r) {
ldout(m_image_ctx.cct, 20) << "name: " << m_image_ctx.name << " id: " << m_image_ctx.id
<< "cw_req=" << cw_req << dendl;
/* Compare read_bl to cmp_bl to determine if this will produce a write */
ceph_assert(cw_req->read_bl.length() <= cw_req->cmp_bl.length());
ceph_assert(cw_req->read_bl.length() == cw_req->image_extents_summary.total_bytes);
bufferlist sub_cmp_bl;
sub_cmp_bl.substr_of(cw_req->cmp_bl, 0, cw_req->read_bl.length());
if (sub_cmp_bl.contents_equal(cw_req->read_bl)) {
/* Compare phase succeeds. Begin write */
ldout(m_image_ctx.cct, 5) << " cw_req=" << cw_req << " compare matched" << dendl;
cw_req->compare_succeeded = true;
*cw_req->mismatch_offset = 0;
/* Continue with this request as a write. Blockguard release and
* user request completion handled as if this were a plain
* write. */
alloc_and_dispatch_io_req(cw_req);
} else {
/* Compare phase fails. Compare-and-write ends now. */
ldout(m_image_ctx.cct, 15) << " cw_req=" << cw_req << " compare failed" << dendl;
/* Bufferlist doesn't tell us where they differed, so we'll have to determine that here */
uint64_t bl_index = 0;
for (bl_index = 0; bl_index < sub_cmp_bl.length(); bl_index++) {
if (sub_cmp_bl[bl_index] != cw_req->read_bl[bl_index]) {
ldout(m_image_ctx.cct, 15) << " cw_req=" << cw_req << " mismatch at " << bl_index << dendl;
break;
}
}
cw_req->compare_succeeded = false;
*cw_req->mismatch_offset = bl_index;
cw_req->complete_user_request(-EILSEQ);
cw_req->release_cell();
cw_req->complete(0);
}
});
/* Read phase of comp-and-write must read through RWL */
Extents image_extents_copy = cw_req->image_extents;
read(std::move(image_extents_copy), &cw_req->read_bl, cw_req->fadvise_flags, read_complete_ctx);
});
detain_guarded_request(cw_req, guarded_ctx, false);
}
template <typename I>
void AbstractWriteLog<I>::flush(Context *on_finish) {
internal_flush(false, on_finish);
}
template <typename I>
void AbstractWriteLog<I>::invalidate(Context *on_finish) {
internal_flush(true, on_finish);
}
template <typename I>
CephContext *AbstractWriteLog<I>::get_context() {
return m_image_ctx.cct;
}
template <typename I>
BlockGuardCell* AbstractWriteLog<I>::detain_guarded_request_helper(GuardedRequest &req)
{
CephContext *cct = m_image_ctx.cct;
BlockGuardCell *cell;
ceph_assert(ceph_mutex_is_locked_by_me(m_blockguard_lock));
ldout(cct, 20) << dendl;
int r = m_write_log_guard.detain(req.block_extent, &req, &cell);
ceph_assert(r >= 0);
if (r > 0) {
ldout(cct, 20) << "detaining guarded request due to in-flight requests: "
<< "req=" << req << dendl;
return nullptr;
}
ldout(cct, 20) << "in-flight request cell: " << cell << dendl;
return cell;
}
template <typename I>
BlockGuardCell* AbstractWriteLog<I>::detain_guarded_request_barrier_helper(
GuardedRequest &req)
{
BlockGuardCell *cell = nullptr;
ceph_assert(ceph_mutex_is_locked_by_me(m_blockguard_lock));
ldout(m_image_ctx.cct, 20) << dendl;
if (m_barrier_in_progress) {
req.guard_ctx->state.queued = true;
m_awaiting_barrier.push_back(req);
} else {
bool barrier = req.guard_ctx->state.barrier;
if (barrier) {
m_barrier_in_progress = true;
req.guard_ctx->state.current_barrier = true;
}
cell = detain_guarded_request_helper(req);
if (barrier) {
/* Only non-null if the barrier acquires the guard now */
m_barrier_cell = cell;
}
}
return cell;
}
template <typename I>
void AbstractWriteLog<I>::detain_guarded_request(
C_BlockIORequestT *request,
GuardedRequestFunctionContext *guarded_ctx,
bool is_barrier)
{
BlockExtent extent;
if (request) {
extent = request->image_extents_summary.block_extent();
} else {
extent = block_extent(whole_volume_extent());
}
auto req = GuardedRequest(extent, guarded_ctx, is_barrier);
BlockGuardCell *cell = nullptr;
ldout(m_image_ctx.cct, 20) << dendl;
{
std::lock_guard locker(m_blockguard_lock);
cell = detain_guarded_request_barrier_helper(req);
}
if (cell) {
req.guard_ctx->cell = cell;
req.guard_ctx->complete(0);
}
}
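/* Release a block guard cell and re-dispatch any requests that were detained
 * behind it, including requests queued while a barrier was in progress. */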
template <typename I>
void AbstractWriteLog<I>::release_guarded_request(BlockGuardCell *released_cell)
{
CephContext *cct = m_image_ctx.cct;
WriteLogGuard::BlockOperations block_reqs;
ldout(cct, 20) << "released_cell=" << released_cell << dendl;
{
std::lock_guard locker(m_blockguard_lock);
m_write_log_guard.release(released_cell, &block_reqs);
for (auto &req : block_reqs) {
req.guard_ctx->state.detained = true;
BlockGuardCell *detained_cell = detain_guarded_request_helper(req);
if (detained_cell) {
if (req.guard_ctx->state.current_barrier) {
/* The current barrier is acquiring the block guard, so now we know its cell */
m_barrier_cell = detained_cell;
/* detained_cell could be == released_cell here */
ldout(cct, 20) << "current barrier cell=" << detained_cell << " req=" << req << dendl;
}
req.guard_ctx->cell = detained_cell;
m_work_queue.queue(req.guard_ctx);
}
}
if (m_barrier_in_progress && (released_cell == m_barrier_cell)) {
ldout(cct, 20) << "current barrier released cell=" << released_cell << dendl;
/* The released cell is the current barrier request */
m_barrier_in_progress = false;
m_barrier_cell = nullptr;
/* Move waiting requests into the blockguard. Stop if there's another barrier */
while (!m_barrier_in_progress && !m_awaiting_barrier.empty()) {
auto &req = m_awaiting_barrier.front();
ldout(cct, 20) << "submitting queued request to blockguard: " << req << dendl;
BlockGuardCell *detained_cell = detain_guarded_request_barrier_helper(req);
if (detained_cell) {
req.guard_ctx->cell = detained_cell;
m_work_queue.queue(req.guard_ctx);
}
m_awaiting_barrier.pop_front();
}
}
}
ldout(cct, 20) << "exit" << dendl;
}
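/* Move up to a batch of scheduled log operations from m_ops_to_append into ops
 * for appending, making this thread the appender if no other thread is already
 * appending. */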
template <typename I>
void AbstractWriteLog<I>::append_scheduled(GenericLogOperations &ops, bool &ops_remain,
bool &appending, bool isRWL)
{
const unsigned long int OPS_APPENDED = isRWL ? MAX_ALLOC_PER_TRANSACTION
: MAX_WRITES_PER_SYNC_POINT;
{
std::lock_guard locker(m_lock);
if (!appending && m_appending) {
/* Another thread is appending */
ldout(m_image_ctx.cct, 15) << "Another thread is appending" << dendl;
return;
}
if (m_ops_to_append.size()) {
appending = true;
m_appending = true;
auto last_in_batch = m_ops_to_append.begin();
unsigned int ops_to_append = m_ops_to_append.size();
if (ops_to_append > OPS_APPENDED) {
ops_to_append = OPS_APPENDED;
}
std::advance(last_in_batch, ops_to_append);
ops.splice(ops.end(), m_ops_to_append, m_ops_to_append.begin(), last_in_batch);
ops_remain = true; /* Always check again before leaving */
ldout(m_image_ctx.cct, 20) << "appending " << ops.size() << ", remain "
<< m_ops_to_append.size() << dendl;
} else if (isRWL) {
ops_remain = false;
if (appending) {
appending = false;
m_appending = false;
}
}
}
}
template <typename I>
void AbstractWriteLog<I>::schedule_append(GenericLogOperationsVector &ops, C_BlockIORequestT *req)
{
GenericLogOperations to_append(ops.begin(), ops.end());
schedule_append_ops(to_append, req);
}
template <typename I>
void AbstractWriteLog<I>::schedule_append(GenericLogOperationSharedPtr op, C_BlockIORequestT *req)
{
GenericLogOperations to_append { op };
schedule_append_ops(to_append, req);
}
/*
* Complete a set of write ops with the result of append_op_entries.
*/
template <typename I>
void AbstractWriteLog<I>::complete_op_log_entries(GenericLogOperations &&ops,
const int result)
{
GenericLogEntries dirty_entries;
int published_reserves = 0;
ldout(m_image_ctx.cct, 20) << __func__ << ": completing" << dendl;
for (auto &op : ops) {
utime_t now = ceph_clock_now();
auto log_entry = op->get_log_entry();
log_entry->completed = true;
if (op->is_writing_op()) {
op->mark_log_entry_completed();
dirty_entries.push_back(log_entry);
}
if (log_entry->is_write_entry()) {
release_ram(log_entry);
}
if (op->reserved_allocated()) {
published_reserves++;
}
{
std::lock_guard locker(m_lock);
m_unpublished_reserves -= published_reserves;
m_dirty_log_entries.splice(m_dirty_log_entries.end(), dirty_entries);
}
op->complete(result);
m_perfcounter->tinc(l_librbd_pwl_log_op_dis_to_app_t,
op->log_append_start_time - op->dispatch_time);
m_perfcounter->tinc(l_librbd_pwl_log_op_dis_to_cmp_t, now - op->dispatch_time);
m_perfcounter->hinc(l_librbd_pwl_log_op_dis_to_cmp_t_hist,
utime_t(now - op->dispatch_time).to_nsec(),
log_entry->ram_entry.write_bytes);
utime_t app_lat = op->log_append_comp_time - op->log_append_start_time;
m_perfcounter->tinc(l_librbd_pwl_log_op_app_to_appc_t, app_lat);
m_perfcounter->hinc(l_librbd_pwl_log_op_app_to_appc_t_hist, app_lat.to_nsec(),
log_entry->ram_entry.write_bytes);
m_perfcounter->tinc(l_librbd_pwl_log_op_app_to_cmp_t, now - op->log_append_start_time);
}
// New entries may be flushable
{
std::lock_guard locker(m_lock);
wake_up();
}
}
/**
* Dispatch as many deferred writes as possible
*/
template <typename I>
void AbstractWriteLog<I>::dispatch_deferred_writes(void)
{
C_BlockIORequestT *front_req = nullptr; /* req still on front of deferred list */
C_BlockIORequestT *allocated_req = nullptr; /* req that was allocated, and is now off the list */
bool allocated = false; /* front_req allocate succeeded */
bool cleared_dispatching_flag = false;
/* If we can't become the dispatcher, we'll exit */
{
std::lock_guard locker(m_lock);
if (m_dispatching_deferred_ops ||
!m_deferred_ios.size()) {
return;
}
m_dispatching_deferred_ops = true;
}
/* There are ops to dispatch, and this should be the only thread dispatching them */
{
std::lock_guard deferred_dispatch(m_deferred_dispatch_lock);
do {
{
std::lock_guard locker(m_lock);
ceph_assert(m_dispatching_deferred_ops);
if (allocated) {
/* On the 2..n-1 th time we get lock, front_req->alloc_resources() will
* have succeeded, and we'll need to pop it off the deferred ops list
* here. */
ceph_assert(front_req);
ceph_assert(!allocated_req);
m_deferred_ios.pop_front();
allocated_req = front_req;
front_req = nullptr;
allocated = false;
}
ceph_assert(!allocated);
if (!allocated && front_req) {
/* front_req->alloc_resources() failed on the last iteration.
* We'll stop dispatching. */
wake_up();
front_req = nullptr;
ceph_assert(!cleared_dispatching_flag);
m_dispatching_deferred_ops = false;
cleared_dispatching_flag = true;
} else {
ceph_assert(!front_req);
if (m_deferred_ios.size()) {
/* New allocation candidate */
front_req = m_deferred_ios.front();
} else {
ceph_assert(!cleared_dispatching_flag);
m_dispatching_deferred_ops = false;
cleared_dispatching_flag = true;
}
}
}
/* Try allocating for front_req before we decide what to do with allocated_req
* (if any) */
if (front_req) {
ceph_assert(!cleared_dispatching_flag);
allocated = front_req->alloc_resources();
}
if (allocated_req && front_req && allocated) {
/* Push dispatch of the first allocated req to a wq */
m_work_queue.queue(new LambdaContext(
[allocated_req](int r) {
allocated_req->dispatch();
}), 0);
allocated_req = nullptr;
}
ceph_assert(!(allocated_req && front_req && allocated));
/* Continue while we're still considering the front of the deferred ops list */
} while (front_req);
ceph_assert(!allocated);
}
ceph_assert(cleared_dispatching_flag);
/* If any deferred requests were allocated, the last one will still be in allocated_req */
if (allocated_req) {
allocated_req->dispatch();
}
}
/**
* Returns the lanes used by this write, and attempts to dispatch the next
* deferred write
*/
template <typename I>
void AbstractWriteLog<I>::release_write_lanes(C_BlockIORequestT *req)
{
{
std::lock_guard locker(m_lock);
m_free_lanes += req->image_extents.size();
}
dispatch_deferred_writes();
}
/**
* Attempts to allocate log resources for a write. Write is dispatched if
* resources are available, or queued if they aren't.
*/
template <typename I>
void AbstractWriteLog<I>::alloc_and_dispatch_io_req(C_BlockIORequestT *req)
{
bool dispatch_here = false;
{
/* If there are already deferred writes, queue behind them for resources */
{
std::lock_guard locker(m_lock);
dispatch_here = m_deferred_ios.empty();
// Only a flush req has total_bytes set to the max uint64
if (req->image_extents_summary.total_bytes ==
std::numeric_limits<uint64_t>::max() &&
static_cast<C_FlushRequestT *>(req)->internal == true) {
dispatch_here = true;
}
}
if (dispatch_here) {
dispatch_here = req->alloc_resources();
}
if (dispatch_here) {
ldout(m_image_ctx.cct, 20) << "dispatching" << dendl;
req->dispatch();
} else {
req->deferred();
{
std::lock_guard locker(m_lock);
m_deferred_ios.push_back(req);
}
ldout(m_image_ctx.cct, 20) << "deferred IOs: " << m_deferred_ios.size() << dendl;
dispatch_deferred_writes();
}
}
}
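/* Check whether the log has enough free lanes, free log entries and buffer
 * space for this request, and reserve them if so. On a "no space" failure,
 * flushing/retiring is expedited via m_alloc_failed_since_retire. */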
template <typename I>
bool AbstractWriteLog<I>::check_allocation(
C_BlockIORequestT *req, uint64_t bytes_cached, uint64_t bytes_dirtied,
uint64_t bytes_allocated, uint32_t num_lanes, uint32_t num_log_entries,
uint32_t num_unpublished_reserves) {
bool alloc_succeeds = true;
bool no_space = false;
{
std::lock_guard locker(m_lock);
if (m_free_lanes < num_lanes) {
ldout(m_image_ctx.cct, 20) << "not enough free lanes (need "
<< num_lanes
<< ", have " << m_free_lanes << ") "
<< *req << dendl;
alloc_succeeds = false;
/* This isn't considered a "no space" alloc fail. Lanes are a throttling mechanism. */
}
if (m_free_log_entries < num_log_entries) {
ldout(m_image_ctx.cct, 20) << "not enough free entries (need "
<< num_log_entries
<< ", have " << m_free_log_entries << ") "
<< *req << dendl;
alloc_succeeds = false;
no_space = true; /* Entries must be retired */
}
/* Don't attempt buffer allocate if we've exceeded the "full" threshold */
if (m_bytes_allocated + bytes_allocated > m_bytes_allocated_cap) {
ldout(m_image_ctx.cct, 20) << "Waiting for allocation cap (cap="
<< m_bytes_allocated_cap
<< ", allocated=" << m_bytes_allocated
<< ") in write [" << *req << "]" << dendl;
alloc_succeeds = false;
no_space = true; /* Entries must be retired */
}
}
if (alloc_succeeds) {
reserve_cache(req, alloc_succeeds, no_space);
}
if (alloc_succeeds) {
std::unique_lock locker(m_lock);
/* We need one free log entry per extent (each is a separate entry), and
* one free "lane" for remote replication. */
if ((m_free_lanes >= num_lanes) &&
(m_free_log_entries >= num_log_entries) &&
(m_bytes_allocated_cap >= m_bytes_allocated + bytes_allocated)) {
m_free_lanes -= num_lanes;
m_free_log_entries -= num_log_entries;
m_unpublished_reserves += num_unpublished_reserves;
m_bytes_allocated += bytes_allocated;
m_bytes_cached += bytes_cached;
m_bytes_dirty += bytes_dirtied;
if (m_cache_state->clean && bytes_dirtied > 0) {
m_cache_state->clean = false;
update_image_cache_state();
write_image_cache_state(locker);
}
} else {
alloc_succeeds = false;
}
}
if (!alloc_succeeds && no_space) {
/* Expedite flushing and/or retiring */
std::lock_guard locker(m_lock);
m_alloc_failed_since_retire = true;
m_last_alloc_fail = ceph_clock_now();
}
return alloc_succeeds;
}
template <typename I>
C_FlushRequest<AbstractWriteLog<I>>* AbstractWriteLog<I>::make_flush_req(Context *on_finish) {
utime_t flush_begins = ceph_clock_now();
bufferlist bl;
auto *flush_req =
new C_FlushRequestT(*this, flush_begins, Extents({whole_volume_extent()}),
std::move(bl), 0, m_lock, m_perfcounter, on_finish);
return flush_req;
}
template <typename I>
void AbstractWriteLog<I>::wake_up() {
CephContext *cct = m_image_ctx.cct;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
if (!m_wake_up_enabled) {
// wake_up is disabled during shutdown after flushing completes
ldout(m_image_ctx.cct, 6) << "deferred processing disabled" << dendl;
return;
}
if (m_wake_up_requested && m_wake_up_scheduled) {
return;
}
ldout(cct, 20) << dendl;
/* Wake-up can be requested while it's already scheduled */
m_wake_up_requested = true;
/* Wake-up cannot be scheduled if it's already scheduled */
if (m_wake_up_scheduled) {
return;
}
m_wake_up_scheduled = true;
m_async_process_work++;
m_async_op_tracker.start_op();
m_work_queue.queue(new LambdaContext(
[this](int r) {
process_work();
m_async_op_tracker.finish_op();
m_async_process_work--;
}), 0);
}
template <typename I>
bool AbstractWriteLog<I>::can_flush_entry(std::shared_ptr<GenericLogEntry> log_entry) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "" << dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
if (m_invalidating) {
return true;
}
/* For OWB we can flush entries with the same sync gen number (write between
* aio_flush() calls) concurrently. Here we'll consider an entry flushable if
* its sync gen number is <= the lowest sync gen number carried by all the
* entries currently flushing.
*
* If the entry considered here bears a sync gen number lower than a
* previously flushed entry, the application had to have submitted the write
* bearing the higher gen number before the write with the lower gen number
* completed. So, flushing these concurrently is OK.
*
* If the entry considered here bears a sync gen number higher than a
* currently flushing entry, the write with the lower gen number may have
* completed to the application before the write with the higher sync gen
* number was submitted, and the application may rely on that completion
* order for volume consistency. In this case the entry will not be
* considered flushable until all the entries bearing lower sync gen numbers
* finish flushing.
*/
if (m_flush_ops_in_flight &&
(log_entry->ram_entry.sync_gen_number > m_lowest_flushing_sync_gen)) {
return false;
}
return (log_entry->can_writeback() &&
(m_flush_ops_in_flight <= IN_FLIGHT_FLUSH_WRITE_LIMIT) &&
(m_flush_bytes_in_flight <= IN_FLIGHT_FLUSH_BYTES_LIMIT));
}
template <typename I>
void AbstractWriteLog<I>::detain_flush_guard_request(std::shared_ptr<GenericLogEntry> log_entry,
GuardedRequestFunctionContext *guarded_ctx) {
ldout(m_image_ctx.cct, 20) << dendl;
BlockExtent extent;
if (log_entry->is_sync_point()) {
extent = block_extent(whole_volume_extent());
} else {
extent = log_entry->ram_entry.block_extent();
}
auto req = GuardedRequest(extent, guarded_ctx, false);
BlockGuardCell *cell = nullptr;
{
std::lock_guard locker(m_flush_guard_lock);
m_flush_guard.detain(req.block_extent, &req, &cell);
}
if (cell) {
req.guard_ctx->cell = cell;
m_image_ctx.op_work_queue->queue(req.guard_ctx, 0);
}
}
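/* Build the completion chain used when a dirty log entry is written back to the
 * image: release the entry's flush guard cell, flush the layer below, then mark
 * the entry flushed (or requeue it on error). */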
template <typename I>
Context* AbstractWriteLog<I>::construct_flush_entry(std::shared_ptr<GenericLogEntry> log_entry,
bool invalidating) {
ldout(m_image_ctx.cct, 20) << "" << dendl;
/* Flush write completion action */
utime_t writeback_start_time = ceph_clock_now();
Context *ctx = new LambdaContext(
[this, log_entry, writeback_start_time, invalidating](int r) {
utime_t writeback_comp_time = ceph_clock_now();
m_perfcounter->tinc(l_librbd_pwl_writeback_latency,
writeback_comp_time - writeback_start_time);
{
std::lock_guard locker(m_lock);
if (r < 0) {
lderr(m_image_ctx.cct) << "failed to flush log entry"
<< cpp_strerror(r) << dendl;
m_dirty_log_entries.push_front(log_entry);
} else {
ceph_assert(m_bytes_dirty >= log_entry->bytes_dirty());
log_entry->set_flushed(true);
m_bytes_dirty -= log_entry->bytes_dirty();
sync_point_writer_flushed(log_entry->get_sync_point_entry());
ldout(m_image_ctx.cct, 20) << "flushed: " << log_entry
<< " invalidating=" << invalidating
<< dendl;
}
m_flush_ops_in_flight -= 1;
m_flush_bytes_in_flight -= log_entry->ram_entry.write_bytes;
wake_up();
}
});
/* Flush through lower cache before completing */
ctx = new LambdaContext(
[this, ctx, log_entry](int r) {
{
WriteLogGuard::BlockOperations block_reqs;
BlockGuardCell *detained_cell = nullptr;
std::lock_guard locker{m_flush_guard_lock};
m_flush_guard.release(log_entry->m_cell, &block_reqs);
for (auto &req : block_reqs) {
m_flush_guard.detain(req.block_extent, &req, &detained_cell);
if (detained_cell) {
req.guard_ctx->cell = detained_cell;
m_image_ctx.op_work_queue->queue(req.guard_ctx, 0);
}
}
}
if (r < 0) {
lderr(m_image_ctx.cct) << "failed to flush log entry"
<< cpp_strerror(r) << dendl;
ctx->complete(r);
} else {
m_image_writeback.aio_flush(io::FLUSH_SOURCE_WRITEBACK, ctx);
}
});
return ctx;
}
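/* Writeback pass: take flushable entries off the dirty list (up to the
 * in-flight limits), construct their flush contexts, and complete any waiting
 * flush contexts once everything is clean. */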
template <typename I>
void AbstractWriteLog<I>::process_writeback_dirty_entries() {
CephContext *cct = m_image_ctx.cct;
bool all_clean = false;
int flushed = 0;
bool has_write_entry = false;
bool need_update_state = false;
ldout(cct, 20) << "Look for dirty entries" << dendl;
{
DeferredContexts post_unlock;
GenericLogEntries entries_to_flush;
std::shared_lock entry_reader_locker(m_entry_reader_lock);
std::lock_guard locker(m_lock);
while (flushed < IN_FLIGHT_FLUSH_WRITE_LIMIT) {
if (m_shutting_down) {
ldout(cct, 5) << "Flush during shutdown suppressed" << dendl;
/* Do flush complete only when all flush ops are finished */
all_clean = !m_flush_ops_in_flight;
break;
}
if (m_dirty_log_entries.empty()) {
ldout(cct, 20) << "Nothing new to flush" << dendl;
/* Do flush complete only when all flush ops are finished */
all_clean = !m_flush_ops_in_flight;
if (!m_cache_state->clean && all_clean) {
m_cache_state->clean = true;
update_image_cache_state();
need_update_state = true;
}
break;
}
auto candidate = m_dirty_log_entries.front();
bool flushable = can_flush_entry(candidate);
if (flushable) {
entries_to_flush.push_back(candidate);
flushed++;
if (!has_write_entry)
has_write_entry = candidate->is_write_entry();
m_dirty_log_entries.pop_front();
// To track candidate, we should add m_flush_ops_in_flight in here
{
if (!m_flush_ops_in_flight ||
(candidate->ram_entry.sync_gen_number < m_lowest_flushing_sync_gen)) {
m_lowest_flushing_sync_gen = candidate->ram_entry.sync_gen_number;
}
m_flush_ops_in_flight += 1;
/* For write same this is the bytes affected by the flush op, not the bytes transferred */
m_flush_bytes_in_flight += candidate->ram_entry.write_bytes;
}
} else {
ldout(cct, 20) << "Next dirty entry isn't flushable yet" << dendl;
break;
}
}
construct_flush_entries(entries_to_flush, post_unlock, has_write_entry);
}
if (need_update_state) {
std::unique_lock locker(m_lock);
write_image_cache_state(locker);
}
if (all_clean) {
/* All flushing complete, drain outside lock */
Contexts flush_contexts;
{
std::lock_guard locker(m_lock);
flush_contexts.swap(m_flush_complete_contexts);
}
finish_contexts(m_image_ctx.cct, flush_contexts, 0);
}
}
/* Returns true if the specified SyncPointLogEntry is considered flushed, and
* the log will be updated to reflect this. */
template <typename I>
bool AbstractWriteLog<I>::handle_flushed_sync_point(std::shared_ptr<SyncPointLogEntry> log_entry)
{
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
ceph_assert(log_entry);
if ((log_entry->writes_flushed == log_entry->writes) &&
log_entry->completed && log_entry->prior_sync_point_flushed &&
log_entry->next_sync_point_entry) {
ldout(m_image_ctx.cct, 20) << "All writes flushed up to sync point="
<< *log_entry << dendl;
log_entry->next_sync_point_entry->prior_sync_point_flushed = true;
/* Don't move the flushed sync gen num backwards. */
if (m_flushed_sync_gen < log_entry->ram_entry.sync_gen_number) {
m_flushed_sync_gen = log_entry->ram_entry.sync_gen_number;
}
m_async_op_tracker.start_op();
m_work_queue.queue(new LambdaContext(
[this, next = std::move(log_entry->next_sync_point_entry)](int r) {
bool handled_by_next;
{
std::lock_guard locker(m_lock);
handled_by_next = handle_flushed_sync_point(std::move(next));
}
if (!handled_by_next) {
persist_last_flushed_sync_gen();
}
m_async_op_tracker.finish_op();
}));
return true;
}
return false;
}
template <typename I>
void AbstractWriteLog<I>::sync_point_writer_flushed(std::shared_ptr<SyncPointLogEntry> log_entry)
{
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
ceph_assert(log_entry);
log_entry->writes_flushed++;
/* If this entry might be completely flushed, look closer */
if ((log_entry->writes_flushed == log_entry->writes) && log_entry->completed) {
ldout(m_image_ctx.cct, 15) << "All writes flushed for sync point="
<< *log_entry << dendl;
handle_flushed_sync_point(log_entry);
}
}
/* Make a new sync point and flush the previous during initialization, when there may or may
* not be a previous sync point */
template <typename I>
void AbstractWriteLog<I>::init_flush_new_sync_point(DeferredContexts &later) {
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
ceph_assert(!m_initialized); /* Don't use this after init */
if (!m_current_sync_point) {
/* First sync point since start */
new_sync_point(later);
} else {
flush_new_sync_point(nullptr, later);
}
}
/**
* Begin a new sync point
*/
template <typename I>
void AbstractWriteLog<I>::new_sync_point(DeferredContexts &later) {
CephContext *cct = m_image_ctx.cct;
std::shared_ptr<SyncPoint> old_sync_point = m_current_sync_point;
std::shared_ptr<SyncPoint> new_sync_point;
ldout(cct, 20) << dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
/* The first time this is called, if this is a newly created log,
* this makes the first sync gen number we'll use 1. On the first
* call for a re-opened log m_current_sync_gen will be the highest
* gen number from all the sync point entries found in the re-opened
* log, and this advances to the next sync gen number. */
++m_current_sync_gen;
new_sync_point = std::make_shared<SyncPoint>(m_current_sync_gen, cct);
m_current_sync_point = new_sync_point;
/* If this log has been re-opened, old_sync_point will initially be
* nullptr, but m_current_sync_gen may not be zero. */
if (old_sync_point) {
new_sync_point->setup_earlier_sync_point(old_sync_point, m_last_op_sequence_num);
m_perfcounter->hinc(l_librbd_pwl_syncpoint_hist,
old_sync_point->log_entry->writes,
old_sync_point->log_entry->bytes);
/* This sync point will acquire no more sub-ops. Activation needs
* to acquire m_lock, so defer to later */
later.add(new LambdaContext(
[old_sync_point](int r) {
old_sync_point->prior_persisted_gather_activate();
}));
}
new_sync_point->prior_persisted_gather_set_finisher();
if (old_sync_point) {
ldout(cct,6) << "new sync point = [" << *m_current_sync_point
<< "], prior = [" << *old_sync_point << "]" << dendl;
} else {
ldout(cct,6) << "first sync point = [" << *m_current_sync_point
<< "]" << dendl;
}
}
template <typename I>
void AbstractWriteLog<I>::flush_new_sync_point(C_FlushRequestT *flush_req,
DeferredContexts &later) {
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
if (!flush_req) {
m_async_null_flush_finish++;
m_async_op_tracker.start_op();
Context *flush_ctx = new LambdaContext([this](int r) {
m_async_null_flush_finish--;
m_async_op_tracker.finish_op();
});
flush_req = make_flush_req(flush_ctx);
flush_req->internal = true;
}
/* Add a new sync point. */
new_sync_point(later);
std::shared_ptr<SyncPoint> to_append = m_current_sync_point->earlier_sync_point;
ceph_assert(to_append);
/* This flush request will append/persist the (now) previous sync point */
flush_req->to_append = to_append;
/* When the m_sync_point_persist Gather completes this sync point can be
* appended. The only sub for this Gather is the finisher Context for
* m_prior_log_entries_persisted, which records the result of the Gather in
* the sync point, and completes. TODO: Do we still need both of these
   * Gathers? */
Context * ctx = new LambdaContext([this, flush_req](int r) {
ldout(m_image_ctx.cct, 20) << "Flush req=" << flush_req
<< " sync point =" << flush_req->to_append
<< ". Ready to persist." << dendl;
alloc_and_dispatch_io_req(flush_req);
});
to_append->persist_gather_set_finisher(ctx);
/* The m_sync_point_persist Gather has all the subs it will ever have, and
* now has its finisher. If the sub is already complete, activation will
* complete the Gather. The finisher will acquire m_lock, so we'll activate
* this when we release m_lock.*/
later.add(new LambdaContext([to_append](int r) {
to_append->persist_gather_activate();
}));
/* The flush request completes when the sync point persists */
to_append->add_in_on_persisted_ctxs(flush_req);
}
template <typename I>
void AbstractWriteLog<I>::flush_new_sync_point_if_needed(C_FlushRequestT *flush_req,
DeferredContexts &later) {
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
/* If there have been writes since the last sync point ... */
if (m_current_sync_point->log_entry->writes) {
flush_new_sync_point(flush_req, later);
} else {
/* There have been no writes to the current sync point. */
if (m_current_sync_point->earlier_sync_point) {
/* If previous sync point hasn't completed, complete this flush
* with the earlier sync point. No alloc or dispatch needed. */
m_current_sync_point->earlier_sync_point->on_sync_point_persisted.push_back(flush_req);
} else {
/* The previous sync point has already completed and been
* appended. The current sync point has no writes, so this flush
* has nothing to wait for. This flush completes now. */
later.add(flush_req);
}
}
}
/*
* RWL internal flush - will actually flush the RWL.
*
* User flushes should arrive at aio_flush(), and only flush prior
* writes to all log replicas.
*
* Librbd internal flushes will arrive at flush(invalidate=false,
* discard=false), and traverse the block guard to ensure in-flight writes are
* flushed.
*/
template <typename I>
void AbstractWriteLog<I>::flush_dirty_entries(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
bool all_clean;
bool flushing;
bool stop_flushing;
{
std::unique_lock locker(m_lock);
flushing = (0 != m_flush_ops_in_flight);
all_clean = m_dirty_log_entries.empty();
stop_flushing = (m_shutting_down);
if (!m_cache_state->clean && all_clean && !flushing) {
m_cache_state->clean = true;
update_image_cache_state();
write_image_cache_state(locker);
}
}
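  /* Complete now if there is nothing left to flush (or flushing is being
   * suppressed for shutdown); otherwise queue a context that re-invokes
   * flush_dirty_entries() once the in-progress flush work completes. */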
if (!flushing && (all_clean || stop_flushing)) {
/* Complete without holding m_lock */
if (all_clean) {
ldout(cct, 20) << "no dirty entries" << dendl;
} else {
ldout(cct, 5) << "flush during shutdown suppressed" << dendl;
}
on_finish->complete(0);
} else {
if (all_clean) {
ldout(cct, 5) << "flush ops still in progress" << dendl;
} else {
ldout(cct, 20) << "dirty entries remain" << dendl;
}
std::lock_guard locker(m_lock);
/* on_finish can't be completed yet */
m_flush_complete_contexts.push_back(new LambdaContext(
[this, on_finish](int r) {
flush_dirty_entries(on_finish);
}));
wake_up();
}
}
template <typename I>
void AbstractWriteLog<I>::internal_flush(bool invalidate, Context *on_finish) {
ldout(m_image_ctx.cct, 20) << "invalidate=" << invalidate << dendl;
if (m_perfcounter) {
if (invalidate) {
m_perfcounter->inc(l_librbd_pwl_invalidate_cache, 1);
} else {
m_perfcounter->inc(l_librbd_pwl_internal_flush, 1);
}
}
/* May be called even if initialization fails */
if (!m_initialized) {
    ldout(m_image_ctx.cct, 5) << "never initialized" << dendl;
/* Deadlock if completed here */
m_image_ctx.op_work_queue->queue(on_finish, 0);
return;
}
/* Flush/invalidate must pass through block guard to ensure all layers of
* cache are consistently flush/invalidated. This ensures no in-flight write leaves
* some layers with valid regions, which may later produce inconsistent read
* results. */
GuardedRequestFunctionContext *guarded_ctx =
new GuardedRequestFunctionContext(
[this, on_finish, invalidate](GuardedRequestFunctionContext &guard_ctx) {
DeferredContexts on_exit;
ldout(m_image_ctx.cct, 20) << "cell=" << guard_ctx.cell << dendl;
ceph_assert(guard_ctx.cell);
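      /* The contexts below are chained in reverse order of construction: the
       * flush request created at the end triggers the outermost context
       * (flush_dirty_entries), then the invalidate-or-writeback-flush step,
       * and the innermost context releases the guard cell and completes
       * on_finish. */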
Context *ctx = new LambdaContext(
[this, cell=guard_ctx.cell, invalidate, on_finish](int r) {
std::lock_guard locker(m_lock);
m_invalidating = false;
ldout(m_image_ctx.cct, 6) << "Done flush/invalidating (invalidate="
<< invalidate << ")" << dendl;
if (m_log_entries.size()) {
ldout(m_image_ctx.cct, 1) << "m_log_entries.size()="
<< m_log_entries.size()
<< ", front()=" << *m_log_entries.front()
<< dendl;
}
if (invalidate) {
ceph_assert(m_log_entries.size() == 0);
}
ceph_assert(m_dirty_log_entries.size() == 0);
m_image_ctx.op_work_queue->queue(on_finish, r);
release_guarded_request(cell);
});
ctx = new LambdaContext(
[this, ctx, invalidate](int r) {
Context *next_ctx = ctx;
ldout(m_image_ctx.cct, 6) << "flush_dirty_entries finished" << dendl;
if (r < 0) {
/* Override on_finish status with this error */
next_ctx = new LambdaContext([r, ctx](int _r) {
ctx->complete(r);
});
}
if (invalidate) {
{
std::lock_guard locker(m_lock);
ceph_assert(m_dirty_log_entries.size() == 0);
ceph_assert(!m_invalidating);
ldout(m_image_ctx.cct, 6) << "Invalidating" << dendl;
m_invalidating = true;
}
/* Discards all RWL entries */
while (retire_entries(MAX_ALLOC_PER_TRANSACTION)) { }
next_ctx->complete(0);
} else {
{
std::lock_guard locker(m_lock);
ceph_assert(m_dirty_log_entries.size() == 0);
ceph_assert(!m_invalidating);
}
m_image_writeback.aio_flush(io::FLUSH_SOURCE_WRITEBACK, next_ctx);
}
});
ctx = new LambdaContext(
[this, ctx](int r) {
flush_dirty_entries(ctx);
});
std::lock_guard locker(m_lock);
      /* Even if we're throwing everything away, we want the last entry to
* be a sync point so we can cleanly resume.
*
* Also, the blockguard only guarantees the replication of this op
* can't overlap with prior ops. It doesn't guarantee those are all
* completed and eligible for flush & retire, which we require here.
*/
auto flush_req = make_flush_req(ctx);
flush_new_sync_point_if_needed(flush_req, on_exit);
});
detain_guarded_request(nullptr, guarded_ctx, true);
}
template <typename I>
void AbstractWriteLog<I>::add_into_log_map(GenericWriteLogEntries &log_entries,
C_BlockIORequestT *req) {
req->copy_cache();
m_blocks_to_log_entries.add_log_entries(log_entries);
}
template <typename I>
bool AbstractWriteLog<I>::can_retire_entry(std::shared_ptr<GenericLogEntry> log_entry) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
ceph_assert(log_entry);
return log_entry->can_retire();
}
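/* Sanity-check that no deferred, in-flight, or dirty work remains before the
 * image cache state is considered clean. */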
template <typename I>
void AbstractWriteLog<I>::check_image_cache_state_clean() {
ceph_assert(m_deferred_ios.empty());
ceph_assert(m_ops_to_append.empty());
ceph_assert(m_async_flush_ops == 0);
ceph_assert(m_async_append_ops == 0);
ceph_assert(m_dirty_log_entries.empty());
ceph_assert(m_ops_to_flush.empty());
ceph_assert(m_flush_ops_in_flight == 0);
ceph_assert(m_flush_bytes_in_flight == 0);
ceph_assert(m_bytes_dirty == 0);
ceph_assert(m_work_queue.empty());
}
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx>;
| 86,929 | 38.730347 | 119 | cc |
null | ceph-main/src/librbd/cache/pwl/AbstractWriteLog.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PARENT_WRITE_LOG
#define CEPH_LIBRBD_CACHE_PARENT_WRITE_LOG
#include "common/Timer.h"
#include "common/RWLock.h"
#include "common/WorkQueue.h"
#include "common/AsyncOpTracker.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/Utils.h"
#include "librbd/BlockGuard.h"
#include "librbd/cache/Types.h"
#include "librbd/cache/pwl/LogOperation.h"
#include "librbd/cache/pwl/ReadRequest.h"
#include "librbd/cache/pwl/Request.h"
#include "librbd/cache/pwl/LogMap.h"
#include "librbd/cache/pwl/Builder.h"
#include <functional>
#include <list>
class Context;
namespace librbd {
struct ImageCtx;
namespace plugin { template <typename> struct Api; }
namespace cache {
namespace pwl {
class GenericLogEntry;
class GenericWriteLogEntry;
class SyncPointLogEntry;
class WriteLogEntry;
struct WriteLogCacheEntry;
typedef std::list<std::shared_ptr<WriteLogEntry>> WriteLogEntries;
typedef std::list<std::shared_ptr<GenericLogEntry>> GenericLogEntries;
typedef std::list<std::shared_ptr<GenericWriteLogEntry>> GenericWriteLogEntries;
typedef std::vector<std::shared_ptr<GenericLogEntry>> GenericLogEntriesVector;
typedef LogMapEntries<GenericWriteLogEntry> WriteLogMapEntries;
typedef LogMap<GenericWriteLogEntry> WriteLogMap;
/**** Write log entries end ****/
typedef librbd::BlockGuard<GuardedRequest> WriteLogGuard;
class DeferredContexts;
template <typename>
class ImageCacheState;
template<typename T>
class Builder;
template <typename T>
struct C_BlockIORequest;
template <typename T>
struct C_WriteRequest;
using GenericLogOperations = std::list<GenericLogOperationSharedPtr>;
template <typename ImageCtxT>
class AbstractWriteLog {
public:
typedef io::Extent Extent;
typedef io::Extents Extents;
using This = AbstractWriteLog<ImageCtxT>;
Builder<This> *m_builder;
AbstractWriteLog(ImageCtxT &image_ctx,
librbd::cache::pwl::ImageCacheState<ImageCtxT>* cache_state,
Builder<This> *builder,
cache::ImageWritebackInterface& image_writeback,
plugin::Api<ImageCtxT>& plugin_api);
virtual ~AbstractWriteLog();
AbstractWriteLog(const AbstractWriteLog&) = delete;
AbstractWriteLog &operator=(const AbstractWriteLog&) = delete;
/// IO methods
void read(
Extents&& image_extents, ceph::bufferlist *bl,
int fadvise_flags, Context *on_finish);
void write(
Extents&& image_extents, ceph::bufferlist&& bl,
int fadvise_flags,
Context *on_finish);
void discard(
uint64_t offset, uint64_t length,
uint32_t discard_granularity_bytes,
Context *on_finish);
void flush(
io::FlushSource flush_source, Context *on_finish);
void writesame(
uint64_t offset, uint64_t length,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish);
void compare_and_write(
Extents&& image_extents,
ceph::bufferlist&& cmp_bl, ceph::bufferlist&& bl,
uint64_t *mismatch_offset,int fadvise_flags,
Context *on_finish);
/// internal state methods
void init(Context *on_finish);
void shut_down(Context *on_finish);
void invalidate(Context *on_finish);
void flush(Context *on_finish);
using C_WriteRequestT = pwl::C_WriteRequest<This>;
using C_BlockIORequestT = pwl::C_BlockIORequest<This>;
using C_FlushRequestT = pwl::C_FlushRequest<This>;
using C_DiscardRequestT = pwl::C_DiscardRequest<This>;
using C_WriteSameRequestT = pwl::C_WriteSameRequest<This>;
CephContext * get_context();
void release_guarded_request(BlockGuardCell *cell);
void release_write_lanes(C_BlockIORequestT *req);
virtual bool alloc_resources(C_BlockIORequestT *req) = 0;
virtual void setup_schedule_append(
pwl::GenericLogOperationsVector &ops, bool do_early_flush,
C_BlockIORequestT *req) = 0;
void schedule_append(pwl::GenericLogOperationsVector &ops, C_BlockIORequestT *req = nullptr);
void schedule_append(pwl::GenericLogOperationSharedPtr op, C_BlockIORequestT *req = nullptr);
void flush_new_sync_point(C_FlushRequestT *flush_req,
pwl::DeferredContexts &later);
std::shared_ptr<pwl::SyncPoint> get_current_sync_point() {
return m_current_sync_point;
}
bool get_persist_on_flush() {
return m_persist_on_flush;
}
void inc_last_op_sequence_num() {
m_perfcounter->inc(l_librbd_pwl_log_ops, 1);
++m_last_op_sequence_num;
}
uint64_t get_last_op_sequence_num() {
return m_last_op_sequence_num;
}
uint64_t get_current_sync_gen() {
return m_current_sync_gen;
}
unsigned int get_free_lanes() {
return m_free_lanes;
}
uint32_t get_free_log_entries() {
return m_free_log_entries;
}
void add_into_log_map(pwl::GenericWriteLogEntries &log_entries,
C_BlockIORequestT *req);
virtual void complete_user_request(Context *&user_req, int r) = 0;
virtual void copy_bl_to_buffer(
WriteRequestResources *resources,
std::unique_ptr<WriteLogOperationSet> &op_set) {}
private:
typedef std::list<pwl::C_WriteRequest<This> *> C_WriteRequests;
typedef std::list<pwl::C_BlockIORequest<This> *> C_BlockIORequests;
std::atomic<bool> m_initialized = {false};
uint64_t m_bytes_dirty = 0; /* Total bytes yet to flush to RBD */
utime_t m_last_alloc_fail; /* Entry or buffer allocation fail seen */
pwl::WriteLogGuard m_write_log_guard;
/* Starts at 0 for a new write log. Incremented on every flush. */
uint64_t m_current_sync_gen = 0;
/* Starts at 0 on each sync gen increase. Incremented before applied
to an operation */
uint64_t m_last_op_sequence_num = 0;
bool m_persist_on_write_until_flush = true;
pwl::WriteLogGuard m_flush_guard;
mutable ceph::mutex m_flush_guard_lock;
/* Debug counters for the places m_async_op_tracker is used */
std::atomic<int> m_async_complete_ops = {0};
std::atomic<int> m_async_null_flush_finish = {0};
std::atomic<int> m_async_process_work = {0};
/* Hold m_deferred_dispatch_lock while consuming from m_deferred_ios. */
mutable ceph::mutex m_deferred_dispatch_lock;
/* Used in release/detain to make BlockGuard preserve submission order */
mutable ceph::mutex m_blockguard_lock;
/* Use m_blockguard_lock for the following 3 things */
bool m_barrier_in_progress = false;
BlockGuardCell *m_barrier_cell = nullptr;
bool m_wake_up_enabled = true;
Contexts m_flush_complete_contexts;
std::shared_ptr<pwl::SyncPoint> m_current_sync_point = nullptr;
  bool m_persist_on_flush = false; // If false, persist each write before completion
int m_flush_ops_in_flight = 0;
int m_flush_bytes_in_flight = 0;
uint64_t m_lowest_flushing_sync_gen = 0;
/* Writes that have left the block guard, but are waiting for resources */
C_BlockIORequests m_deferred_ios;
/* Throttle writes concurrently allocating & replicating */
unsigned int m_free_lanes = pwl::MAX_CONCURRENT_WRITES;
SafeTimer *m_timer = nullptr; /* Used with m_timer_lock */
mutable ceph::mutex *m_timer_lock = nullptr; /* Used with and by m_timer */
Context *m_timer_ctx = nullptr;
ThreadPool m_thread_pool;
uint32_t m_discard_granularity_bytes;
BlockGuardCell* detain_guarded_request_helper(pwl::GuardedRequest &req);
BlockGuardCell* detain_guarded_request_barrier_helper(
pwl::GuardedRequest &req);
void detain_guarded_request(C_BlockIORequestT *request,
pwl::GuardedRequestFunctionContext *guarded_ctx,
bool is_barrier);
void perf_start(const std::string name);
void perf_stop();
void log_perf();
void periodic_stats();
void arm_periodic_stats();
void pwl_init(Context *on_finish, pwl::DeferredContexts &later);
void check_image_cache_state_clean();
void flush_dirty_entries(Context *on_finish);
bool can_flush_entry(const std::shared_ptr<pwl::GenericLogEntry> log_entry);
bool handle_flushed_sync_point(
std::shared_ptr<pwl::SyncPointLogEntry> log_entry);
void sync_point_writer_flushed(
std::shared_ptr<pwl::SyncPointLogEntry> log_entry);
void init_flush_new_sync_point(pwl::DeferredContexts &later);
void new_sync_point(pwl::DeferredContexts &later);
pwl::C_FlushRequest<AbstractWriteLog<ImageCtxT>>* make_flush_req(
Context *on_finish);
void flush_new_sync_point_if_needed(C_FlushRequestT *flush_req,
pwl::DeferredContexts &later);
void alloc_and_dispatch_io_req(C_BlockIORequestT *write_req);
void schedule_complete_op_log_entries(pwl::GenericLogOperations &&ops,
const int r);
void internal_flush(bool invalidate, Context *on_finish);
protected:
librbd::cache::pwl::ImageCacheState<ImageCtxT>* m_cache_state = nullptr;
std::atomic<bool> m_shutting_down = {false};
std::atomic<bool> m_invalidating = {false};
ImageCtxT &m_image_ctx;
std::string m_log_pool_name;
uint64_t m_log_pool_size;
uint32_t m_total_log_entries = 0;
uint32_t m_free_log_entries = 0;
std::atomic<uint64_t> m_bytes_allocated = {0}; /* Total bytes allocated in write buffers */
uint64_t m_bytes_cached = 0; /* Total bytes used in write buffers */
uint64_t m_bytes_allocated_cap = 0;
std::atomic<bool> m_alloc_failed_since_retire = {false};
cache::ImageWritebackInterface& m_image_writeback;
plugin::Api<ImageCtxT>& m_plugin_api;
/*
* When m_first_free_entry == m_first_valid_entry, the log is
* empty. There is always at least one free entry, which can't be
* used.
*/
uint64_t m_first_free_entry = 0; /* Entries from here to m_first_valid_entry-1 are free */
uint64_t m_first_valid_entry = 0; /* Entries from here to m_first_free_entry-1 are valid */
/* All writes bearing this and all prior sync gen numbers are flushed */
uint64_t m_flushed_sync_gen = 0;
AsyncOpTracker m_async_op_tracker;
/* Debug counters for the places m_async_op_tracker is used */
std::atomic<int> m_async_flush_ops = {0};
std::atomic<int> m_async_append_ops = {0};
/* Acquire locks in order declared here */
mutable ceph::mutex m_log_retire_lock;
/* Hold a read lock on m_entry_reader_lock to add readers to log entry
* bufs. Hold a write lock to prevent readers from being added (e.g. when
* removing log entries from the map). No lock required to remove readers. */
mutable RWLock m_entry_reader_lock;
/* Hold m_log_append_lock while appending or retiring log entries. */
mutable ceph::mutex m_log_append_lock;
/* Used for most synchronization */
mutable ceph::mutex m_lock;
/* Use m_blockguard_lock for the following 3 things */
pwl::WriteLogGuard::BlockOperations m_awaiting_barrier;
bool m_wake_up_requested = false;
bool m_wake_up_scheduled = false;
bool m_appending = false;
bool m_dispatching_deferred_ops = false;
pwl::GenericLogOperations m_ops_to_flush; /* Write ops needing flush in local log */
pwl::GenericLogOperations m_ops_to_append; /* Write ops needing event append in local log */
pwl::WriteLogMap m_blocks_to_log_entries;
/* New entries are at the back. Oldest at the front */
pwl::GenericLogEntries m_log_entries;
pwl::GenericLogEntries m_dirty_log_entries;
PerfCounters *m_perfcounter = nullptr;
unsigned int m_unpublished_reserves = 0;
ContextWQ m_work_queue;
void wake_up();
void update_entries(
std::shared_ptr<pwl::GenericLogEntry> *log_entry,
pwl::WriteLogCacheEntry *cache_entry,
std::map<uint64_t, bool> &missing_sync_points,
std::map<uint64_t,
std::shared_ptr<pwl::SyncPointLogEntry>> &sync_point_entries,
uint64_t entry_index);
void update_sync_points(
std::map<uint64_t, bool> &missing_sync_points,
std::map<uint64_t,
std::shared_ptr<pwl::SyncPointLogEntry>> &sync_point_entries,
pwl::DeferredContexts &later);
virtual void inc_allocated_cached_bytes(
std::shared_ptr<pwl::GenericLogEntry> log_entry) = 0;
Context *construct_flush_entry(
const std::shared_ptr<pwl::GenericLogEntry> log_entry, bool invalidating);
void detain_flush_guard_request(std::shared_ptr<GenericLogEntry> log_entry,
GuardedRequestFunctionContext *guarded_ctx);
void process_writeback_dirty_entries();
bool can_retire_entry(const std::shared_ptr<pwl::GenericLogEntry> log_entry);
void dispatch_deferred_writes(void);
void complete_op_log_entries(pwl::GenericLogOperations &&ops, const int r);
bool check_allocation(
C_BlockIORequestT *req, uint64_t bytes_cached, uint64_t bytes_dirtied,
uint64_t bytes_allocated, uint32_t num_lanes, uint32_t num_log_entries,
uint32_t num_unpublished_reserves);
void append_scheduled(
pwl::GenericLogOperations &ops, bool &ops_remain, bool &appending,
bool isRWL=false);
virtual void process_work() = 0;
virtual void append_scheduled_ops(void) = 0;
virtual void schedule_append_ops(pwl::GenericLogOperations &ops, C_BlockIORequestT *req) = 0;
virtual void remove_pool_file() = 0;
virtual bool initialize_pool(Context *on_finish,
pwl::DeferredContexts &later) = 0;
virtual void collect_read_extents(
uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
std::vector<bufferlist*> &bls_to_read, uint64_t entry_hit_length,
Extent hit_extent, pwl::C_ReadRequest *read_ctx) = 0;
virtual void complete_read(
std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
std::vector<bufferlist*> &bls_to_read, Context *ctx) = 0;
virtual void write_data_to_buffer(
std::shared_ptr<pwl::WriteLogEntry> ws_entry,
pwl::WriteLogCacheEntry *cache_entry) {}
virtual void release_ram(
const std::shared_ptr<pwl::GenericLogEntry> log_entry) {}
virtual void alloc_op_log_entries(pwl::GenericLogOperations &ops) {}
virtual bool retire_entries(const unsigned long int frees_per_tx) {
return false;
}
virtual void schedule_flush_and_append(
pwl::GenericLogOperationsVector &ops) {}
virtual void persist_last_flushed_sync_gen() {}
virtual void reserve_cache(C_BlockIORequestT *req, bool &alloc_succeeds,
bool &no_space) {}
virtual void construct_flush_entries(pwl::GenericLogEntries entries_to_flush,
DeferredContexts &post_unlock,
bool has_write_entry) = 0;
virtual uint64_t get_max_extent() {
return 0;
}
void update_image_cache_state(void);
void write_image_cache_state(std::unique_lock<ceph::mutex>& locker);
void handle_write_image_cache_state(int r);
};
} // namespace pwl
} // namespace cache
} // namespace librbd
extern template class librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_PARENT_WRITE_LOG
| 14,906 | 35.270073 | 95 | h |
null | ceph-main/src/librbd/cache/pwl/Builder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_BUILDER_H
#define CEPH_LIBRBD_CACHE_PWL_BUILDER_H
namespace librbd {
namespace cache {
namespace pwl {
template <typename T>
class Builder {
public:
virtual ~Builder() {}
virtual std::shared_ptr<WriteLogEntry> create_write_log_entry(
uint64_t image_offset_bytes, uint64_t write_bytes) = 0;
virtual std::shared_ptr<WriteLogEntry> create_write_log_entry(
std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes) = 0;
virtual std::shared_ptr<WriteLogEntry> create_writesame_log_entry(
uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length) = 0;
virtual std::shared_ptr<WriteLogEntry> create_writesame_log_entry(
std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length) = 0;
virtual C_WriteRequest<T> *create_write_request(
T &pwl, utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req) = 0;
virtual C_WriteSameRequest<T> *create_writesame_request(
T &pwl, utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req) = 0;
virtual C_WriteRequest<T> *create_comp_and_write_request(
T &pwl, utime_t arrived, io::Extents &&image_extents,
bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req) = 0;
virtual std::shared_ptr<WriteLogOperation> create_write_log_operation(
WriteLogOperationSet &set, uint64_t image_offset_bytes,
uint64_t write_bytes, CephContext *cct,
std::shared_ptr<WriteLogEntry> write_log_entry) = 0;
virtual std::shared_ptr<WriteLogOperation> create_write_log_operation(
WriteLogOperationSet &set, uint64_t image_offset_bytes,
uint64_t write_bytes, uint32_t data_len, CephContext *cct,
std::shared_ptr<WriteLogEntry> writesame_log_entry) = 0;
virtual std::shared_ptr<pwl::DiscardLogOperation> create_discard_log_operation(
std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
uint64_t write_bytes, uint32_t discard_granularity_bytes,
utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct) = 0;
virtual C_ReadRequest *create_read_request(CephContext *cct, utime_t arrived,
PerfCounters *perfcounter, ceph::bufferlist *bl, Context *on_finish) = 0;
};
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_BUILDER_H
| 2,842 | 44.854839 | 81 | h |
null | ceph-main/src/librbd/cache/pwl/DiscardRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <filesystem>
#include "common/dout.h"
#include "common/errno.h"
#include "common/hostname.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/pwl/DiscardRequest.h"
#include "librbd/cache/pwl/ImageCacheState.h"
#include "librbd/cache/Types.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl:DiscardRequest: " \
<< this << " " << __func__ << ": "
namespace fs = std::filesystem;
namespace librbd {
namespace cache {
namespace pwl {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
template <typename I>
DiscardRequest<I>* DiscardRequest<I>::create(
I &image_ctx,
plugin::Api<I>& plugin_api,
Context *on_finish) {
return new DiscardRequest(image_ctx, plugin_api, on_finish);
}
template <typename I>
DiscardRequest<I>::DiscardRequest(
I &image_ctx,
plugin::Api<I>& plugin_api,
Context *on_finish)
: m_image_ctx(image_ctx),
m_plugin_api(plugin_api),
m_on_finish(create_async_context_callback(image_ctx, on_finish)),
m_error_result(0) {
}
template <typename I>
void DiscardRequest<I>::send() {
delete_image_cache_file();
}
template <typename I>
void DiscardRequest<I>::delete_image_cache_file() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
m_cache_state = ImageCacheState<I>::get_image_cache_state(&m_image_ctx, m_plugin_api);
if (!m_cache_state) {
remove_feature_bit();
return;
}
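  // Only remove the cache file if the recorded host matches this host and the
  // file still exists; a removal failure is logged but not treated as fatal.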
if (m_cache_state->present &&
!m_cache_state->host.compare(ceph_get_short_hostname()) &&
fs::exists(m_cache_state->path)) {
std::error_code ec;
fs::remove(m_cache_state->path, ec);
if (ec) {
lderr(cct) << "failed to remove persistent cache file: " << ec.message()
<< dendl;
// not fatal
}
}
remove_image_cache_state();
}
template <typename I>
void DiscardRequest<I>::remove_image_cache_state() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
using klass = DiscardRequest<I>;
Context *ctx = create_context_callback<klass, &klass::handle_remove_image_cache_state>(
this);
m_cache_state->clear_image_cache_state(ctx);
}
template <typename I>
void DiscardRequest<I>::handle_remove_image_cache_state(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
if (r < 0) {
lderr(cct) << "failed to remove the image cache state: " << cpp_strerror(r)
<< dendl;
save_result(r);
finish();
return;
}
remove_feature_bit();
}
template <typename I>
void DiscardRequest<I>::remove_feature_bit() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
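  // Clear RBD_FEATURE_DIRTY_CACHE so the image no longer records a persistent
  // write-back cache that may hold unflushed data.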
uint64_t new_features = m_image_ctx.features & ~RBD_FEATURE_DIRTY_CACHE;
uint64_t features_mask = RBD_FEATURE_DIRTY_CACHE;
ldout(cct, 10) << "old_features=" << m_image_ctx.features
<< ", new_features=" << new_features
<< ", features_mask=" << features_mask
<< dendl;
int r = librbd::cls_client::set_features(&m_image_ctx.md_ctx, m_image_ctx.header_oid,
new_features, features_mask);
m_image_ctx.features &= ~RBD_FEATURE_DIRTY_CACHE;
using klass = DiscardRequest<I>;
Context *ctx = create_context_callback<klass, &klass::handle_remove_feature_bit>(
this);
ctx->complete(r);
}
template <typename I>
void DiscardRequest<I>::handle_remove_feature_bit(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
if (r < 0) {
lderr(cct) << "failed to remove the feature bit: " << cpp_strerror(r)
<< dendl;
save_result(r);
}
finish();
}
template <typename I>
void DiscardRequest<I>::finish() {
if (m_cache_state) {
delete m_cache_state;
m_cache_state = nullptr;
}
m_on_finish->complete(m_error_result);
delete this;
}
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::DiscardRequest<librbd::ImageCtx>;
| 4,243 | 25.360248 | 89 | cc |
null | ceph-main/src/librbd/cache/pwl/DiscardRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_SHUTDOWN_REQUEST_H
#define CEPH_LIBRBD_CACHE_PWL_SHUTDOWN_REQUEST_H
class Context;
namespace librbd {
class ImageCtx;
namespace plugin { template <typename> struct Api; }
namespace cache {
namespace pwl {
template<typename>
class ImageCacheState;
template <typename ImageCtxT = ImageCtx>
class DiscardRequest {
public:
static DiscardRequest* create(
ImageCtxT &image_ctx,
plugin::Api<ImageCtxT>& plugin_api,
Context *on_finish);
void send();
private:
/**
* @verbatim
*
   * Discard request goes through the following state machine:
*
* <start>
* |
* v
* REMOVE_IMAGE_CACHE_FILE
* |
* v
* REMOVE_IMAGE_CACHE_STATE
* |
* v
* REMOVE_IMAGE_FEATURE_BIT
* |
* v
* <finish>
*
* @endverbatim
*/
DiscardRequest(ImageCtxT &image_ctx,
plugin::Api<ImageCtxT>& plugin_api,
Context *on_finish);
ImageCtxT &m_image_ctx;
ImageCacheState<ImageCtxT>* m_cache_state;
plugin::Api<ImageCtxT>& m_plugin_api;
Context *m_on_finish;
int m_error_result;
void delete_image_cache_file();
void remove_image_cache_state();
void handle_remove_image_cache_state(int r);
void remove_feature_bit();
void handle_remove_feature_bit(int r);
void finish();
void save_result(int result) {
if (m_error_result == 0 && result < 0) {
m_error_result = result;
}
}
};
} // namespace pwl
} // namespace cache
} // namespace librbd
extern template class librbd::cache::pwl::DiscardRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_PWL_SHUTDOWN_REQUEST_H
| 1,719 | 17.901099 | 75 | h |
null | ceph-main/src/librbd/cache/pwl/ImageCacheState.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/Types.h"
#include "librbd/cache/Utils.h"
#include "librbd/cache/pwl/ImageCacheState.h"
#include "librbd/ImageCtx.h"
#include "librbd/Operations.h"
#include "common/config_proxy.h"
#include "common/environment.h"
#include "common/hostname.h"
#include "librbd/plugin/Api.h"
#undef dout_subsys
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::ImageCacheState: " \
<< __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
using namespace std;
template <typename I>
void ImageCacheState<I>::init_from_config() {
ldout(m_image_ctx->cct, 20) << dendl;
present = false;
empty = true;
clean = true;
host = "";
path = "";
ConfigProxy &config = m_image_ctx->config;
mode = config.get_val<std::string>("rbd_persistent_cache_mode");
size = 0;
}
template <typename I>
bool ImageCacheState<I>::init_from_metadata(json_spirit::mValue& json_root) {
ldout(m_image_ctx->cct, 20) << dendl;
try {
auto& o = json_root.get_obj();
present = o["present"].get_bool();
empty = o["empty"].get_bool();
clean = o["clean"].get_bool();
host = o["host"].get_str();
path = o["path"].get_str();
mode = o["mode"].get_str();
size = o["size"].get_uint64();
} catch (std::runtime_error& e) {
lderr(m_image_ctx->cct) << "failed to parse cache state: " << e.what()
<< dendl;
return false;
}
return true;
}
template <typename I>
void ImageCacheState<I>::write_image_cache_state(std::unique_lock<ceph::mutex>& locker,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked_by_me(*locker.mutex()));
stats_timestamp = ceph_clock_now();
json_spirit::mObject o;
o["present"] = present;
o["empty"] = empty;
o["clean"] = clean;
o["host"] = host;
o["path"] = path;
o["mode"] = mode;
o["size"] = size;
o["stats_timestamp"] = stats_timestamp.sec();
o["allocated_bytes"] = allocated_bytes;
o["cached_bytes"] = cached_bytes;
o["dirty_bytes"] = dirty_bytes;
o["free_bytes"] = free_bytes;
o["hits_full"] = hits_full;
o["hits_partial"] = hits_partial;
o["misses"] = misses;
o["hit_bytes"] = hit_bytes;
o["miss_bytes"] = miss_bytes;
std::string image_state_json = json_spirit::write(o);
locker.unlock();
std::shared_lock owner_lock{m_image_ctx->owner_lock};
ldout(m_image_ctx->cct, 20) << __func__ << " Store state: "
<< image_state_json << dendl;
m_plugin_api.execute_image_metadata_set(m_image_ctx, PERSISTENT_CACHE_STATE,
image_state_json, on_finish);
}
template <typename I>
void ImageCacheState<I>::clear_image_cache_state(Context *on_finish) {
std::shared_lock owner_lock{m_image_ctx->owner_lock};
ldout(m_image_ctx->cct, 20) << __func__ << " Remove state: " << dendl;
m_plugin_api.execute_image_metadata_remove(
m_image_ctx, PERSISTENT_CACHE_STATE, on_finish);
}
template <typename I>
ImageCacheState<I>* ImageCacheState<I>::create_image_cache_state(
I* image_ctx, plugin::Api<I>& plugin_api, int &r) {
std::string cache_state_str;
ImageCacheState<I>* cache_state = nullptr;
r = 0;
bool dirty_cache = plugin_api.test_image_features(image_ctx, RBD_FEATURE_DIRTY_CACHE);
if (dirty_cache) {
cls_client::metadata_get(&image_ctx->md_ctx, image_ctx->header_oid,
PERSISTENT_CACHE_STATE, &cache_state_str);
}
ldout(image_ctx->cct, 20) << "image_cache_state: " << cache_state_str << dendl;
bool pwl_enabled = cache::util::is_pwl_enabled(*image_ctx);
bool cache_desired = pwl_enabled;
cache_desired &= !image_ctx->read_only;
cache_desired &= !plugin_api.test_image_features(image_ctx, RBD_FEATURE_MIGRATING);
cache_desired &= !plugin_api.test_image_features(image_ctx, RBD_FEATURE_JOURNALING);
cache_desired &= !image_ctx->old_format;
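  // Four cases: cache neither present nor wanted; a dirty cache left behind
  // while the cache is now disabled (error); a fresh cache configured from
  // scratch; or an existing cache state reloaded from image metadata.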
if (!dirty_cache && !cache_desired) {
ldout(image_ctx->cct, 20) << "Do not desire to use image cache." << dendl;
} else if (dirty_cache && !cache_desired) {
lderr(image_ctx->cct) << "There's a dirty cache, but RWL cache is disabled."
<< dendl;
r = -EINVAL;
  } else if ((!dirty_cache || cache_state_str.empty()) && cache_desired) {
cache_state = new ImageCacheState<I>(image_ctx, plugin_api);
cache_state->init_from_config();
} else {
ceph_assert(!cache_state_str.empty());
json_spirit::mValue json_root;
if (!json_spirit::read(cache_state_str.c_str(), json_root)) {
lderr(image_ctx->cct) << "failed to parse cache state" << dendl;
r = -EINVAL;
return nullptr;
}
cache_state = new ImageCacheState<I>(image_ctx, plugin_api);
if (!cache_state->init_from_metadata(json_root)) {
delete cache_state;
r = -EINVAL;
return nullptr;
}
if (!cache_state->present) {
cache_state->init_from_config();
}
}
return cache_state;
}
template <typename I>
ImageCacheState<I>* ImageCacheState<I>::get_image_cache_state(
I* image_ctx, plugin::Api<I>& plugin_api) {
ImageCacheState<I>* cache_state = nullptr;
string cache_state_str;
cls_client::metadata_get(&image_ctx->md_ctx, image_ctx->header_oid,
PERSISTENT_CACHE_STATE, &cache_state_str);
if (!cache_state_str.empty()) {
// ignore errors, best effort
cache_state = new ImageCacheState<I>(image_ctx, plugin_api);
json_spirit::mValue json_root;
if (!json_spirit::read(cache_state_str.c_str(), json_root)) {
lderr(image_ctx->cct) << "failed to parse cache state" << dendl;
} else {
cache_state->init_from_metadata(json_root);
}
}
return cache_state;
}
template <typename I>
bool ImageCacheState<I>::is_valid() {
if (this->present &&
(host.compare(ceph_get_short_hostname()) != 0)) {
auto cleanstring = "dirty";
if (this->clean) {
cleanstring = "clean";
}
lderr(m_image_ctx->cct) << "An image cache (RWL) remains on another host "
<< host << " which is " << cleanstring
<< ". Flush/close the image there to remove the "
<< "image cache" << dendl;
return false;
}
return true;
}
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::ImageCacheState<librbd::ImageCtx>;
| 6,512 | 32.060914 | 88 | cc |
null | ceph-main/src/librbd/cache/pwl/ImageCacheState.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_RWL_IMAGE_CACHE_STATE_H
#define CEPH_LIBRBD_CACHE_RWL_IMAGE_CACHE_STATE_H
#include "json_spirit/json_spirit.h"
#include "librbd/ImageCtx.h"
#include "librbd/cache/Types.h"
#include <string>
namespace ceph {
class Formatter;
}
namespace librbd {
namespace plugin { template <typename> struct Api; }
namespace cache {
namespace pwl {
template <typename ImageCtxT = ImageCtx>
class ImageCacheState {
private:
ImageCtxT* m_image_ctx;
plugin::Api<ImageCtxT>& m_plugin_api;
public:
bool present = false;
bool empty = true;
bool clean = true;
std::string host;
std::string path;
std::string mode;
uint64_t size = 0;
  /* After reloading, the following data does not need to be read back;
   * it is recalculated. */
utime_t stats_timestamp;
uint64_t allocated_bytes = 0;
uint64_t cached_bytes = 0;
uint64_t dirty_bytes = 0;
uint64_t free_bytes = 0;
uint64_t hits_full = 0;
uint64_t hits_partial = 0;
uint64_t misses = 0;
uint64_t hit_bytes = 0;
uint64_t miss_bytes = 0;
ImageCacheState(ImageCtxT* image_ctx, plugin::Api<ImageCtxT>& plugin_api)
: m_image_ctx(image_ctx), m_plugin_api(plugin_api) {}
~ImageCacheState() {}
ImageCacheType get_image_cache_mode() const {
if (mode == "rwl") {
return IMAGE_CACHE_TYPE_RWL;
} else if (mode == "ssd") {
return IMAGE_CACHE_TYPE_SSD;
}
return IMAGE_CACHE_TYPE_UNKNOWN;
}
void init_from_config();
bool init_from_metadata(json_spirit::mValue& json_root);
void write_image_cache_state(std::unique_lock<ceph::mutex>& locker,
Context *on_finish);
void clear_image_cache_state(Context *on_finish);
static ImageCacheState<ImageCtxT>* create_image_cache_state(
ImageCtxT* image_ctx, plugin::Api<ImageCtxT>& plugin_api, int &r);
static ImageCacheState<ImageCtxT>* get_image_cache_state(
ImageCtxT* image_ctx, plugin::Api<ImageCtxT>& plugin_api);
bool is_valid();
};
} // namespace pwl
} // namespace cache
} // namespace librbd
extern template class librbd::cache::pwl::ImageCacheState<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_RWL_IMAGE_CACHE_STATE_H
| 2,251 | 24.885057 | 76 | h |
null | ceph-main/src/librbd/cache/pwl/InitRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/pwl/InitRequest.h"
#include "librbd/io/ImageDispatcher.h"
#include "librbd/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/pwl/ImageCacheState.h"
#include "librbd/cache/WriteLogImageDispatch.h"
#include "librbd/cache/ImageWriteback.h"
#ifdef WITH_RBD_RWL
#include "librbd/cache/pwl/rwl/WriteLog.h"
#endif
#ifdef WITH_RBD_SSD_CACHE
#include "librbd/cache/pwl/ssd/WriteLog.h"
#endif
#include "librbd/cache/Utils.h"
#include "librbd/ImageCtx.h"
#include "librbd/plugin/Api.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl:InitRequest " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
template <typename I>
InitRequest<I>* InitRequest<I>::create(
I &image_ctx,
cache::ImageWritebackInterface& image_writeback,
plugin::Api<I>& plugin_api,
Context *on_finish) {
return new InitRequest(image_ctx, image_writeback, plugin_api, on_finish);
}
template <typename I>
InitRequest<I>::InitRequest(
I &image_ctx,
cache::ImageWritebackInterface& image_writeback,
plugin::Api<I>& plugin_api,
Context *on_finish)
: m_image_ctx(image_ctx),
m_image_writeback(image_writeback),
m_plugin_api(plugin_api),
m_on_finish(create_async_context_callback(image_ctx, on_finish)),
m_error_result(0) {
}
template <typename I>
void InitRequest<I>::send() {
get_image_cache_state();
}
template <typename I>
void InitRequest<I>::get_image_cache_state() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
int r;
auto cache_state = ImageCacheState<I>::create_image_cache_state(
&m_image_ctx, m_plugin_api, r);
if (r < 0 || !cache_state) {
save_result(r);
finish();
return;
} else if (!cache_state->is_valid()) {
delete cache_state;
cache_state = nullptr;
lderr(cct) << "failed to get image cache state: " << cpp_strerror(r)
<< dendl;
save_result(-ENOENT);
finish();
return;
}
auto mode = cache_state->get_image_cache_mode();
switch (mode) {
#ifdef WITH_RBD_RWL
case cache::IMAGE_CACHE_TYPE_RWL:
m_image_cache =
new librbd::cache::pwl::rwl::WriteLog<I>(m_image_ctx,
cache_state,
m_image_writeback,
m_plugin_api);
break;
#endif
#ifdef WITH_RBD_SSD_CACHE
case cache::IMAGE_CACHE_TYPE_SSD:
m_image_cache =
new librbd::cache::pwl::ssd::WriteLog<I>(m_image_ctx,
cache_state,
m_image_writeback,
m_plugin_api);
break;
#endif
default:
delete cache_state;
cache_state = nullptr;
save_result(-ENOENT);
finish();
return;
}
init_image_cache();
}
template <typename I>
void InitRequest<I>::init_image_cache() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
using klass = InitRequest<I>;
Context *ctx = create_async_context_callback(m_image_ctx,
create_context_callback<klass, &klass::handle_init_image_cache>(this));
m_image_cache->init(ctx);
}
template <typename I>
void InitRequest<I>::handle_init_image_cache(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
if (r < 0) {
lderr(cct) << "failed to init image cache: " << cpp_strerror(r)
<< dendl;
delete m_image_cache;
m_image_cache = nullptr;
save_result(r);
finish();
return;
}
set_feature_bit();
}
template <typename I>
void InitRequest<I>::set_feature_bit() {
CephContext *cct = m_image_ctx.cct;
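  // Set RBD_FEATURE_DIRTY_CACHE on the image so it records that a persistent
  // write-back cache may hold data not yet flushed to the image.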
uint64_t new_features = m_image_ctx.features | RBD_FEATURE_DIRTY_CACHE;
uint64_t features_mask = RBD_FEATURE_DIRTY_CACHE;
ldout(cct, 10) << "old_features=" << m_image_ctx.features
<< ", new_features=" << new_features
<< ", features_mask=" << features_mask
<< dendl;
int r = librbd::cls_client::set_features(&m_image_ctx.md_ctx,
m_image_ctx.header_oid,
new_features, features_mask);
m_image_ctx.features |= RBD_FEATURE_DIRTY_CACHE;
using klass = InitRequest<I>;
Context *ctx = create_context_callback<klass, &klass::handle_set_feature_bit>(
this);
ctx->complete(r);
}
template <typename I>
void InitRequest<I>::handle_set_feature_bit(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to set feature bit: " << cpp_strerror(r)
               << dendl;
    save_result(r);
    shutdown_image_cache();
    // shutdown_image_cache() completes the request asynchronously; don't
    // fall through and register a dispatch layer on a cache being torn down.
    return;
  }
// Register RWL dispatch
auto image_dispatch = new cache::WriteLogImageDispatch<I>(
&m_image_ctx, m_image_cache, m_plugin_api);
m_image_ctx.io_image_dispatcher->register_dispatch(image_dispatch);
finish();
}
template <typename I>
void InitRequest<I>::shutdown_image_cache() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
using klass = InitRequest<I>;
Context *ctx = create_context_callback<
klass, &klass::handle_shutdown_image_cache>(this);
m_image_cache->shut_down(ctx);
}
template <typename I>
void InitRequest<I>::handle_shutdown_image_cache(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
if (r < 0) {
lderr(cct) << "failed to close image cache: " << cpp_strerror(r)
<< dendl;
}
delete m_image_cache;
m_image_cache = nullptr;
finish();
}
template <typename I>
void InitRequest<I>::finish() {
m_on_finish->complete(m_error_result);
delete this;
}
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::InitRequest<librbd::ImageCtx>;
| 6,206 | 26.343612 | 80 | cc |
null | ceph-main/src/librbd/cache/pwl/InitRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_RWL_INIT_REQUEST_H
#define CEPH_LIBRBD_CACHE_RWL_INIT_REQUEST_H
class Context;
namespace librbd {
class ImageCtx;
namespace io { class ImageDispatchInterface; }
namespace plugin { template <typename> struct Api; }
namespace cache {
class ImageWritebackInterface;
namespace pwl {
template<typename>
class AbstractWriteLog;
template<typename>
class ImageCacheState;
template <typename ImageCtxT = ImageCtx>
class InitRequest {
public:
static InitRequest* create(
ImageCtxT &image_ctx,
librbd::cache::ImageWritebackInterface& image_writeback,
plugin::Api<ImageCtxT>& plugin_api,
Context *on_finish);
void send();
private:
/**
* @verbatim
*
* Init request goes through the following state machine:
*
* <start>
* |
* v
* GET_IMAGE_CACHE_STATE
* |
* v
* INIT_IMAGE_CACHE
* |
* v
* SET_FEATURE_BIT * * * > CLOSE_IMAGE_CACHE
* | |
* v |
* <finish> <-------------------/
*
* @endverbatim
*/
InitRequest(ImageCtxT &image_ctx,
librbd::cache::ImageWritebackInterface& image_writeback,
plugin::Api<ImageCtxT>& plugin_api,
Context *on_finish);
ImageCtxT &m_image_ctx;
librbd::cache::ImageWritebackInterface& m_image_writeback;
plugin::Api<ImageCtxT>& m_plugin_api;
AbstractWriteLog<ImageCtxT> *m_image_cache;
Context *m_on_finish;
int m_error_result;
bool is_pwl_enabled();
void get_image_cache_state();
void init_image_cache();
void handle_init_image_cache(int r);
void set_feature_bit();
void handle_set_feature_bit(int r);
void shutdown_image_cache();
void handle_shutdown_image_cache(int r);
void finish();
void save_result(int result) {
if (m_error_result == 0 && result < 0) {
m_error_result = result;
}
}
};
} // namespace pwl
} // namespace cache
} // namespace librbd
extern template class librbd::cache::pwl::InitRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_RWL_INIT_REQUEST_H
| 2,182 | 19.59434 | 72 | h |
null | ceph-main/src/librbd/cache/pwl/LogEntry.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include "LogEntry.h"
#include "librbd/cache/ImageWriteback.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::LogEntry: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
std::ostream& GenericLogEntry::format(std::ostream &os) const {
os << "ram_entry=[" << ram_entry
<< "], cache_entry=" << (void*)cache_entry
<< ", log_entry_index=" << log_entry_index
<< ", completed=" << completed;
return os;
}
std::ostream &operator<<(std::ostream &os,
const GenericLogEntry &entry) {
return entry.format(os);
}
std::ostream& SyncPointLogEntry::format(std::ostream &os) const {
os << "(Sync Point) ";
GenericLogEntry::format(os);
os << ", writes=" << writes
<< ", bytes=" << bytes
<< ", writes_completed=" << writes_completed
<< ", writes_flushed=" << writes_flushed
<< ", prior_sync_point_flushed=" << prior_sync_point_flushed
<< ", next_sync_point_entry=" << next_sync_point_entry;
return os;
}
std::ostream &operator<<(std::ostream &os,
const SyncPointLogEntry &entry) {
return entry.format(os);
}
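/* A write entry becomes eligible for writeback once it has completed and
 * either carries its own sequence number (persist-on-write) or its sync
 * point has completed (persist-on-flush). */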
bool GenericWriteLogEntry::can_writeback() const {
return (this->completed &&
(ram_entry.is_sequenced() ||
(sync_point_entry &&
sync_point_entry->completed)));
}
std::ostream& GenericWriteLogEntry::format(std::ostream &os) const {
GenericLogEntry::format(os);
os << ", sync_point_entry=[";
if (sync_point_entry) {
os << *sync_point_entry;
} else {
os << "nullptr";
}
os << "], referring_map_entries=" << referring_map_entries;
return os;
}
std::ostream &operator<<(std::ostream &os,
const GenericWriteLogEntry &entry) {
return entry.format(os);
}
void WriteLogEntry::init(bool has_data,
uint64_t current_sync_gen,
uint64_t last_op_sequence_num, bool persist_on_flush) {
ram_entry.set_has_data(has_data);
ram_entry.sync_gen_number = current_sync_gen;
if (persist_on_flush) {
/* Persist on flush. Sequence #0 is never used. */
ram_entry.write_sequence_number = 0;
} else {
/* Persist on write */
ram_entry.write_sequence_number = last_op_sequence_num;
ram_entry.set_sequenced(true);
}
ram_entry.set_sync_point(false);
ram_entry.set_discard(false);
}
std::ostream& WriteLogEntry::format(std::ostream &os) const {
os << "(Write) ";
GenericWriteLogEntry::format(os);
os << ", cache_buffer=" << (void*)cache_buffer;
os << ", cache_bp=" << cache_bp;
os << ", bl_refs=" << bl_refs;
return os;
}
std::ostream &operator<<(std::ostream &os,
const WriteLogEntry &entry) {
return entry.format(os);
}
void DiscardLogEntry::writeback(
librbd::cache::ImageWritebackInterface &image_writeback, Context *ctx) {
image_writeback.aio_discard(ram_entry.image_offset_bytes,
ram_entry.write_bytes,
m_discard_granularity_bytes, ctx);
}
void DiscardLogEntry::init(uint64_t current_sync_gen, bool persist_on_flush,
uint64_t last_op_sequence_num) {
ram_entry.sync_gen_number = current_sync_gen;
if (persist_on_flush) {
/* Persist on flush. Sequence #0 is never used. */
ram_entry.write_sequence_number = 0;
} else {
/* Persist on write */
ram_entry.write_sequence_number = last_op_sequence_num;
ram_entry.set_sequenced(true);
}
}
std::ostream &DiscardLogEntry::format(std::ostream &os) const {
os << "(Discard) ";
GenericWriteLogEntry::format(os);
return os;
}
std::ostream &operator<<(std::ostream &os,
const DiscardLogEntry &entry) {
return entry.format(os);
}
} // namespace pwl
} // namespace cache
} // namespace librbd
| 4,025 | 28.602941 | 80 | cc |
null | ceph-main/src/librbd/cache/pwl/LogEntry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_LOG_ENTRY_H
#define CEPH_LIBRBD_CACHE_PWL_LOG_ENTRY_H
#include "common/ceph_mutex.h"
#include "librbd/Utils.h"
#include "librbd/cache/pwl/Types.h"
#include <atomic>
#include <memory>
namespace librbd {
namespace cache {
class ImageWritebackInterface;
namespace pwl {
class SyncPointLogEntry;
class GenericWriteLogEntry;
class WriteLogEntry;
typedef std::list<std::shared_ptr<GenericWriteLogEntry>> GenericWriteLogEntries;
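/* Base class for in-memory log entries. Holds the cache entry image
 * (ram_entry), an optional pointer to the corresponding entry in the cache
 * (cache_entry), and completion/retirement state. */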
class GenericLogEntry {
public:
WriteLogCacheEntry ram_entry;
WriteLogCacheEntry *cache_entry = nullptr;
uint64_t log_entry_index = 0;
bool completed = false;
BlockGuardCell* m_cell = nullptr;
GenericLogEntry(uint64_t image_offset_bytes = 0, uint64_t write_bytes = 0)
: ram_entry(image_offset_bytes, write_bytes) {
};
virtual ~GenericLogEntry() { };
GenericLogEntry(const GenericLogEntry&) = delete;
GenericLogEntry &operator=(const GenericLogEntry&) = delete;
virtual bool can_writeback() const {
return false;
}
virtual bool can_retire() const {
return false;
}
virtual void set_flushed(bool flushed) {
ceph_assert(false);
}
virtual unsigned int write_bytes() const {
return 0;
};
virtual unsigned int bytes_dirty() const {
return 0;
};
virtual std::shared_ptr<SyncPointLogEntry> get_sync_point_entry() {
return nullptr;
}
virtual void writeback(librbd::cache::ImageWritebackInterface &image_writeback,
Context *ctx) {
ceph_assert(false);
};
virtual void writeback_bl(librbd::cache::ImageWritebackInterface &image_writeback,
Context *ctx, ceph::bufferlist &&bl) {
ceph_assert(false);
}
virtual bool is_write_entry() const {
return false;
}
virtual bool is_writesame_entry() const {
return false;
}
virtual bool is_sync_point() const {
return false;
}
virtual unsigned int get_aligned_data_size() const {
return 0;
}
virtual void remove_cache_bl() {}
virtual std::ostream& format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const GenericLogEntry &entry);
};
class SyncPointLogEntry : public GenericLogEntry {
public:
/* Writing entries using this sync gen number */
std::atomic<unsigned int> writes = {0};
/* Total bytes for all writing entries using this sync gen number */
std::atomic<uint64_t> bytes = {0};
/* Writing entries using this sync gen number that have completed */
std::atomic<unsigned int> writes_completed = {0};
/* Writing entries using this sync gen number that have completed flushing to the writeback interface */
std::atomic<unsigned int> writes_flushed = {0};
/* All writing entries using all prior sync gen numbers have been flushed */
std::atomic<bool> prior_sync_point_flushed = {true};
std::shared_ptr<SyncPointLogEntry> next_sync_point_entry = nullptr;
SyncPointLogEntry(uint64_t sync_gen_number) {
ram_entry.sync_gen_number = sync_gen_number;
ram_entry.set_sync_point(true);
};
~SyncPointLogEntry() override {};
SyncPointLogEntry(const SyncPointLogEntry&) = delete;
SyncPointLogEntry &operator=(const SyncPointLogEntry&) = delete;
bool can_retire() const override {
return this->completed;
}
bool is_sync_point() const override {
return true;
}
std::ostream& format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const SyncPointLogEntry &entry);
};
class GenericWriteLogEntry : public GenericLogEntry {
public:
uint32_t referring_map_entries = 0;
std::shared_ptr<SyncPointLogEntry> sync_point_entry;
GenericWriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes)
: GenericLogEntry(image_offset_bytes, write_bytes), sync_point_entry(sync_point_entry) { }
GenericWriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes)
: GenericLogEntry(image_offset_bytes, write_bytes), sync_point_entry(nullptr) { }
~GenericWriteLogEntry() override {};
GenericWriteLogEntry(const GenericWriteLogEntry&) = delete;
GenericWriteLogEntry &operator=(const GenericWriteLogEntry&) = delete;
unsigned int write_bytes() const override {
    /* The valid bytes in this op's data buffer. Discard and WS override. */
return ram_entry.write_bytes;
};
unsigned int bytes_dirty() const override {
/* The bytes in the image this op makes dirty. Discard and WS override. */
return write_bytes();
};
BlockExtent block_extent() {
return ram_entry.block_extent();
}
uint32_t get_map_ref() {
return(referring_map_entries);
}
void inc_map_ref() { referring_map_entries++; }
void dec_map_ref() { referring_map_entries--; }
bool can_writeback() const override;
std::shared_ptr<SyncPointLogEntry> get_sync_point_entry() override {
return sync_point_entry;
}
virtual void copy_cache_bl(bufferlist *out_bl) = 0;
void set_flushed(bool flushed) override {
m_flushed = flushed;
}
bool get_flushed() const {
return m_flushed;
}
std::ostream &format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const GenericWriteLogEntry &entry);
private:
bool m_flushed = false; /* or invalidated */
};
class WriteLogEntry : public GenericWriteLogEntry {
protected:
bool is_writesame = false;
buffer::ptr cache_bp;
buffer::list cache_bl;
std::atomic<int> bl_refs = {0}; /* The refs held on cache_bp by cache_bl */
/* Used in WriteLogEntry::get_cache_bl() to synchronize between threads making entries readable */
mutable ceph::mutex m_entry_bl_lock;
virtual void init_cache_bp() {}
virtual void init_bl(buffer::ptr &bp, buffer::list &bl) {}
public:
uint8_t *cache_buffer = nullptr;
WriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes)
: GenericWriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes),
m_entry_bl_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::WriteLogEntry::m_entry_bl_lock", this)))
{ }
WriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes)
: GenericWriteLogEntry(nullptr, image_offset_bytes, write_bytes),
m_entry_bl_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::WriteLogEntry::m_entry_bl_lock", this)))
{ }
WriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length)
: WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes) {
ram_entry.set_writesame(true);
ram_entry.ws_datalen = data_length;
is_writesame = true;
};
WriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length)
: WriteLogEntry(nullptr, image_offset_bytes, write_bytes) {
ram_entry.set_writesame(true);
ram_entry.ws_datalen = data_length;
is_writesame = true;
};
~WriteLogEntry() override {};
WriteLogEntry(const WriteLogEntry&) = delete;
WriteLogEntry &operator=(const WriteLogEntry&) = delete;
unsigned int write_bytes() const override {
    // The valid bytes in this op's data buffer.
    if (is_writesame) {
return ram_entry.ws_datalen;
}
return ram_entry.write_bytes;
};
unsigned int bytes_dirty() const override {
// The bytes in the image this op makes dirty.
return ram_entry.write_bytes;
};
void init(bool has_data,
uint64_t current_sync_gen, uint64_t last_op_sequence_num, bool persist_on_flush);
virtual void init_cache_buffer(std::vector<WriteBufferAllocation>::iterator allocation) {}
virtual void init_cache_bl(bufferlist &src_bl, uint64_t off, uint64_t len) {}
/* Returns a ref to a bl containing bufferptrs to the entry cache buffer */
virtual buffer::list &get_cache_bl() = 0;
BlockExtent block_extent();
virtual unsigned int reader_count() const = 0;
/* Constructs a new bl containing copies of cache_bp */
bool can_retire() const override {
return (this->completed && this->get_flushed() && (0 == reader_count()));
}
bool is_write_entry() const override {
return true;
}
bool is_writesame_entry() const override {
return is_writesame;
}
std::ostream &format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const WriteLogEntry &entry);
};
class DiscardLogEntry : public GenericWriteLogEntry {
public:
DiscardLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t discard_granularity_bytes)
: GenericWriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes),
m_discard_granularity_bytes(discard_granularity_bytes) {
ram_entry.set_discard(true);
};
DiscardLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes)
: GenericWriteLogEntry(nullptr, image_offset_bytes, write_bytes) {
ram_entry.set_discard(true);
};
DiscardLogEntry(const DiscardLogEntry&) = delete;
DiscardLogEntry &operator=(const DiscardLogEntry&) = delete;
unsigned int write_bytes() const override {
    /* The valid bytes in this op's data buffer. */
return 0;
};
unsigned int bytes_dirty() const override {
/* The bytes in the image this op makes dirty. */
return ram_entry.write_bytes;
};
bool can_retire() const override {
return this->completed;
}
void copy_cache_bl(bufferlist *out_bl) override {
ceph_assert(false);
}
void writeback(librbd::cache::ImageWritebackInterface &image_writeback,
Context *ctx) override;
void init(uint64_t current_sync_gen, bool persist_on_flush, uint64_t last_op_sequence_num);
std::ostream &format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const DiscardLogEntry &entry);
private:
uint32_t m_discard_granularity_bytes;
};
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_LOG_ENTRY_H
| 10,315 | 35.711744 | 106 | h |
null | ceph-main/src/librbd/cache/pwl/LogMap.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "LogMap.h"
#include "include/ceph_assert.h"
#include "librbd/Utils.h"
#include "librbd/cache/pwl/LogEntry.h"
namespace librbd {
namespace cache {
namespace pwl {
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::LogMap: " << this << " " \
<< __func__ << ": "
template <typename T>
std::ostream &operator<<(std::ostream &os,
LogMapEntry<T> &e) {
os << "block_extent=" << e.block_extent
<< ", log_entry=[" << e.log_entry << "]";
return os;
}
template <typename T>
LogMapEntry<T>::LogMapEntry(const BlockExtent block_extent,
std::shared_ptr<T> log_entry)
: block_extent(block_extent) , log_entry(log_entry) {
}
template <typename T>
LogMapEntry<T>::LogMapEntry(std::shared_ptr<T> log_entry)
: block_extent(log_entry->block_extent()) , log_entry(log_entry) {
}
template <typename T>
LogMap<T>::LogMap(CephContext *cct)
: m_cct(cct),
m_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::LogMap::m_lock", this))) {
}
/**
* Add a write log entry to the map. Subsequent queries for blocks
* within this log entry's extent will find this log entry. Portions
* of prior write log entries overlapping with this log entry will
* be replaced in the map by this log entry.
*
* The map_entries field of the log entry object will be updated to
* contain this map entry.
*
 * The map_entries fields of all log entries overlapping with this
 * entry will be updated to remove the regions that overlap with
 * this entry.
*/
template <typename T>
void LogMap<T>::add_log_entry(std::shared_ptr<T> log_entry) {
std::lock_guard locker(m_lock);
add_log_entry_locked(log_entry);
}
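/* A worked example of the adjustment described above (block numbers are
 * hypothetical; block_end is assumed to be exclusive, as implied by
 * LogMapEntryCompare below): if the map holds one entry for [0,100) and a
 * new log entry covering [50,150) is added, the old map entry is shrunk to
 * [0,50) and a new map entry for [50,150) is inserted. If the new entry
 * covers [25,75) instead, the old map entry is split into [0,25) and
 * [75,100). The log entries themselves keep their full extents; only the
 * map entries and the log entries' map reference counts change. */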
template <typename T>
void LogMap<T>::add_log_entries(std::list<std::shared_ptr<T>> &log_entries) {
std::lock_guard locker(m_lock);
ldout(m_cct, 20) << dendl;
for (auto &log_entry : log_entries) {
add_log_entry_locked(log_entry);
}
}
/**
* Remove any map entries that refer to the supplied write log
* entry.
*/
template <typename T>
void LogMap<T>::remove_log_entry(std::shared_ptr<T> log_entry) {
std::lock_guard locker(m_lock);
remove_log_entry_locked(log_entry);
}
template <typename T>
void LogMap<T>::remove_log_entries(std::list<std::shared_ptr<T>> &log_entries) {
std::lock_guard locker(m_lock);
ldout(m_cct, 20) << dendl;
for (auto &log_entry : log_entries) {
remove_log_entry_locked(log_entry);
}
}
/**
* Returns the list of all write log entries that overlap the specified block
* extent. This doesn't tell you which portions of these entries overlap the
* extent, or each other. For that, use find_map_entries(). A log entry may
* appear in the list more than once, if multiple map entries refer to it
* (e.g. the middle of that write log entry has been overwritten).
*/
template <typename T>
std::list<std::shared_ptr<T>> LogMap<T>::find_log_entries(BlockExtent block_extent) {
std::lock_guard locker(m_lock);
ldout(m_cct, 20) << dendl;
return find_log_entries_locked(block_extent);
}
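/* For example (hypothetical extents): if log entry E originally covered
 * [0,100) and a later write overwrote [40,60), E remains referenced by the
 * map entries [0,40) and [60,100), so a query spanning both pieces returns
 * E twice in this list. Callers that need the per-portion breakdown should
 * use find_map_entries() instead. */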
/**
* Returns the list of all write log map entries that overlap the
* specified block extent.
*/
template <typename T>
LogMapEntries<T> LogMap<T>::find_map_entries(BlockExtent block_extent) {
std::lock_guard locker(m_lock);
ldout(m_cct, 20) << dendl;
return find_map_entries_locked(block_extent);
}
template <typename T>
void LogMap<T>::add_log_entry_locked(std::shared_ptr<T> log_entry) {
LogMapEntry<T> map_entry(log_entry);
ldout(m_cct, 20) << "block_extent=" << map_entry.block_extent
<< dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
LogMapEntries<T> overlap_entries = find_map_entries_locked(map_entry.block_extent);
for (auto &entry : overlap_entries) {
ldout(m_cct, 20) << entry << dendl;
if (map_entry.block_extent.block_start <= entry.block_extent.block_start) {
if (map_entry.block_extent.block_end >= entry.block_extent.block_end) {
ldout(m_cct, 20) << "map entry completely occluded by new log entry" << dendl;
remove_map_entry_locked(entry);
} else {
ceph_assert(map_entry.block_extent.block_end < entry.block_extent.block_end);
/* The new entry occludes the beginning of the old entry */
BlockExtent adjusted_extent(map_entry.block_extent.block_end,
entry.block_extent.block_end);
adjust_map_entry_locked(entry, adjusted_extent);
}
} else {
if (map_entry.block_extent.block_end >= entry.block_extent.block_end) {
/* The new entry occludes the end of the old entry */
BlockExtent adjusted_extent(entry.block_extent.block_start,
map_entry.block_extent.block_start);
adjust_map_entry_locked(entry, adjusted_extent);
} else {
/* The new entry splits the old entry */
split_map_entry_locked(entry, map_entry.block_extent);
}
}
}
add_map_entry_locked(map_entry);
}
template <typename T>
void LogMap<T>::remove_log_entry_locked(std::shared_ptr<T> log_entry) {
ldout(m_cct, 20) << "*log_entry=" << *log_entry << dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
LogMapEntries<T> possible_hits = find_map_entries_locked(log_entry->block_extent());
for (auto &possible_hit : possible_hits) {
if (possible_hit.log_entry == log_entry) {
/* This map entry refers to the specified log entry */
remove_map_entry_locked(possible_hit);
}
}
}
template <typename T>
void LogMap<T>::add_map_entry_locked(LogMapEntry<T> &map_entry) {
ceph_assert(map_entry.log_entry);
m_block_to_log_entry_map.insert(map_entry);
map_entry.log_entry->inc_map_ref();
}
template <typename T>
void LogMap<T>::remove_map_entry_locked(LogMapEntry<T> &map_entry) {
auto it = m_block_to_log_entry_map.find(map_entry);
ceph_assert(it != m_block_to_log_entry_map.end());
LogMapEntry<T> erased = *it;
m_block_to_log_entry_map.erase(it);
erased.log_entry->dec_map_ref();
if (0 == erased.log_entry->get_map_ref()) {
ldout(m_cct, 20) << "log entry has zero map entries: " << erased.log_entry << dendl;
}
}
template <typename T>
void LogMap<T>::adjust_map_entry_locked(LogMapEntry<T> &map_entry, BlockExtent &new_extent) {
auto it = m_block_to_log_entry_map.find(map_entry);
ceph_assert(it != m_block_to_log_entry_map.end());
LogMapEntry<T> adjusted = *it;
m_block_to_log_entry_map.erase(it);
m_block_to_log_entry_map.insert(LogMapEntry<T>(new_extent, adjusted.log_entry));
}
template <typename T>
void LogMap<T>::split_map_entry_locked(LogMapEntry<T> &map_entry, BlockExtent &removed_extent) {
auto it = m_block_to_log_entry_map.find(map_entry);
ceph_assert(it != m_block_to_log_entry_map.end());
LogMapEntry<T> split = *it;
m_block_to_log_entry_map.erase(it);
BlockExtent left_extent(split.block_extent.block_start,
removed_extent.block_start);
m_block_to_log_entry_map.insert(LogMapEntry<T>(left_extent, split.log_entry));
BlockExtent right_extent(removed_extent.block_end,
split.block_extent.block_end);
m_block_to_log_entry_map.insert(LogMapEntry<T>(right_extent, split.log_entry));
split.log_entry->inc_map_ref();
}
template <typename T>
std::list<std::shared_ptr<T>> LogMap<T>::find_log_entries_locked(const BlockExtent &block_extent) {
std::list<std::shared_ptr<T>> overlaps;
ldout(m_cct, 20) << "block_extent=" << block_extent << dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
LogMapEntries<T> map_entries = find_map_entries_locked(block_extent);
for (auto &map_entry : map_entries) {
overlaps.emplace_back(map_entry.log_entry);
}
return overlaps;
}
/**
* TODO: Generalize this to do some arbitrary thing to each map
* extent, instead of returning a list.
*/
template <typename T>
LogMapEntries<T> LogMap<T>::find_map_entries_locked(const BlockExtent &block_extent) {
LogMapEntries<T> overlaps;
ldout(m_cct, 20) << "block_extent=" << block_extent << dendl;
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
auto p = m_block_to_log_entry_map.equal_range(LogMapEntry<T>(block_extent));
ldout(m_cct, 20) << "count=" << std::distance(p.first, p.second) << dendl;
for ( auto i = p.first; i != p.second; ++i ) {
LogMapEntry<T> entry = *i;
overlaps.emplace_back(entry);
ldout(m_cct, 20) << entry << dendl;
}
return overlaps;
}
/* We map block extents to write log entries, or portions of write log
* entries. These are both represented by a WriteLogMapEntry. When a
* GenericWriteLogEntry is added to this map, a WriteLogMapEntry is created to
* represent the entire block extent of the GenericWriteLogEntry, and the
* WriteLogMapEntry is added to the set.
*
* The set must not contain overlapping write log entries. Entries
* in the set that overlap with one being added are adjusted (shrunk, split,
* or removed) before the new entry is added.
*
 * This comparison is ambiguous for overlapping extents (neither compares
 * less than the other), which is safe because we ensure the set contains
 * no overlapping entries. The same ambiguity is what lets equal_range()
 * find the entries that overlap a given block extent: the returned range
 * starts at the first entry whose extent doesn't end before the given
 * extent starts, and ends after the last entry whose extent starts before
 * the given extent ends.
*/
template <typename T>
bool LogMap<T>::LogMapEntryCompare::operator()(const LogMapEntry<T> &lhs,
const LogMapEntry<T> &rhs) const {
if (lhs.block_extent.block_end <= rhs.block_extent.block_start) {
return true;
}
return false;
}
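/* Concretely (hypothetical extents, exclusive block_end assumed): [0,10) is
 * "less than" [10,20) because its end does not reach past the other's
 * start, so non-overlapping entries are strictly ordered. For [5,15) and
 * [0,10) neither compares less than the other, so the set treats them as
 * equivalent; equal_range() on a query extent therefore yields exactly the
 * run of stored entries overlapping it, which is what
 * find_map_entries_locked() relies on. */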
} //namespace pwl
} //namespace cache
} //namespace librbd
template class librbd::cache::pwl::LogMap<librbd::cache::pwl::GenericWriteLogEntry>;
| 10,025 | 34.935484 | 99 | cc |
null | ceph-main/src/librbd/cache/pwl/LogMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_RWL_LOG_MAP_H
#define CEPH_LIBRBD_CACHE_RWL_LOG_MAP_H
#include "librbd/BlockGuard.h"
#include <list>
namespace librbd {
namespace cache {
namespace pwl {
/**
* WriteLogMap: maps block extents to GenericWriteLogEntries
*
* A WriteLogMapEntry (based on LogMapEntry) refers to a portion of a GenericWriteLogEntry
*/
template <typename T>
class LogMapEntry {
public:
BlockExtent block_extent;
std::shared_ptr<T> log_entry;
LogMapEntry(BlockExtent block_extent,
std::shared_ptr<T> log_entry = nullptr);
LogMapEntry(std::shared_ptr<T> log_entry);
template <typename U>
friend std::ostream &operator<<(std::ostream &os,
LogMapEntry<U> &e);
};
template <typename T>
using LogMapEntries = std::list<LogMapEntry<T>>;
template <typename T>
class LogMap {
public:
LogMap(CephContext *cct);
LogMap(const LogMap&) = delete;
LogMap &operator=(const LogMap&) = delete;
void add_log_entry(std::shared_ptr<T> log_entry);
void add_log_entries(std::list<std::shared_ptr<T>> &log_entries);
void remove_log_entry(std::shared_ptr<T> log_entry);
void remove_log_entries(std::list<std::shared_ptr<T>> &log_entries);
std::list<std::shared_ptr<T>> find_log_entries(BlockExtent block_extent);
LogMapEntries<T> find_map_entries(BlockExtent block_extent);
private:
void add_log_entry_locked(std::shared_ptr<T> log_entry);
void remove_log_entry_locked(std::shared_ptr<T> log_entry);
void add_map_entry_locked(LogMapEntry<T> &map_entry);
void remove_map_entry_locked(LogMapEntry<T> &map_entry);
void adjust_map_entry_locked(LogMapEntry<T> &map_entry, BlockExtent &new_extent);
void split_map_entry_locked(LogMapEntry<T> &map_entry, BlockExtent &removed_extent);
std::list<std::shared_ptr<T>> find_log_entries_locked(const BlockExtent &block_extent);
LogMapEntries<T> find_map_entries_locked(const BlockExtent &block_extent);
using LogMapEntryT = LogMapEntry<T>;
class LogMapEntryCompare {
public:
bool operator()(const LogMapEntryT &lhs,
const LogMapEntryT &rhs) const;
};
using BlockExtentToLogMapEntries = std::set<LogMapEntryT,
LogMapEntryCompare>;
CephContext *m_cct;
ceph::mutex m_lock;
BlockExtentToLogMapEntries m_block_to_log_entry_map;
};
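/* For illustration only, a hypothetical caller (the names below are made
 * up) might use this interface roughly as follows:
 *
 *   LogMap<GenericWriteLogEntry> map(cct);
 *   map.add_log_entry(write_entry); // index a completed write
 *   for (auto &hit : map.find_map_entries(BlockExtent(0, 8))) {
 *     // hit.block_extent: the overlapping portion
 *     // hit.log_entry: the write log entry that supplies it
 *   }
 *   map.remove_log_entry(write_entry); // e.g. when the entry is retired
 */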
} //namespace pwl
} //namespace cache
} //namespace librbd
#endif //CEPH_LIBRBD_CACHE_RWL_LOG_MAP_H
| 2,540 | 29.987805 | 90 | h |
null | ceph-main/src/librbd/cache/pwl/LogOperation.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include "LogOperation.h"
#include "librbd/cache/pwl/Types.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::LogOperation: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
GenericLogOperation::GenericLogOperation(utime_t dispatch_time,
PerfCounters *perfcounter)
: m_perfcounter(perfcounter), dispatch_time(dispatch_time) {
}
std::ostream& GenericLogOperation::format(std::ostream &os) const {
os << "dispatch_time=[" << dispatch_time
<< "], buf_persist_start_time=[" << buf_persist_start_time
<< "], buf_persist_comp_time=[" << buf_persist_comp_time
<< "], log_append_start_time=[" << log_append_start_time
<< "], log_append_comp_time=[" << log_append_comp_time << "]";
return os;
}
std::ostream &operator<<(std::ostream &os,
const GenericLogOperation &op) {
return op.format(os);
}
SyncPointLogOperation::SyncPointLogOperation(ceph::mutex &lock,
std::shared_ptr<SyncPoint> sync_point,
utime_t dispatch_time,
PerfCounters *perfcounter,
CephContext *cct)
: GenericLogOperation(dispatch_time, perfcounter), m_cct(cct), m_lock(lock),
sync_point(sync_point) {
}
SyncPointLogOperation::~SyncPointLogOperation() { }
std::ostream &SyncPointLogOperation::format(std::ostream &os) const {
os << "(Sync Point) ";
GenericLogOperation::format(os);
os << ", sync_point=[" << *sync_point << "]";
return os;
}
std::ostream &operator<<(std::ostream &os,
const SyncPointLogOperation &op) {
return op.format(os);
}
std::vector<Context*> SyncPointLogOperation::append_sync_point() {
std::vector<Context*> appending_contexts;
std::lock_guard locker(m_lock);
if (!sync_point->appending) {
sync_point->appending = true;
}
appending_contexts.swap(sync_point->on_sync_point_appending);
return appending_contexts;
}
void SyncPointLogOperation::clear_earlier_sync_point() {
std::lock_guard locker(m_lock);
ceph_assert(sync_point->later_sync_point);
ceph_assert(sync_point->later_sync_point->earlier_sync_point == sync_point);
sync_point->later_sync_point->earlier_sync_point = nullptr;
sync_point->later_sync_point = nullptr;
}
std::vector<Context*> SyncPointLogOperation::swap_on_sync_point_persisted() {
std::lock_guard locker(m_lock);
std::vector<Context*> persisted_contexts;
persisted_contexts.swap(sync_point->on_sync_point_persisted);
return persisted_contexts;
}
void SyncPointLogOperation::appending() {
ceph_assert(sync_point);
ldout(m_cct, 20) << "Sync point op=[" << *this
<< "] appending" << dendl;
auto appending_contexts = append_sync_point();
for (auto &ctx : appending_contexts) {
ctx->complete(0);
}
}
void SyncPointLogOperation::complete(int result) {
ceph_assert(sync_point);
ldout(m_cct, 20) << "Sync point op =[" << *this
<< "] completed" << dendl;
clear_earlier_sync_point();
/* Do append now in case completion occurred before the
* normal append callback executed, and to handle
* on_append work that was queued after the sync point
* entered the appending state. */
appending();
auto persisted_contexts = swap_on_sync_point_persisted();
for (auto &ctx : persisted_contexts) {
ctx->complete(result);
}
}
GenericWriteLogOperation::GenericWriteLogOperation(std::shared_ptr<SyncPoint> sync_point,
utime_t dispatch_time,
PerfCounters *perfcounter,
CephContext *cct)
: GenericLogOperation(dispatch_time, perfcounter),
m_lock(ceph::make_mutex(pwl::unique_lock_name(
"librbd::cache::pwl::GenericWriteLogOperation::m_lock", this))),
m_cct(cct),
sync_point(sync_point) {
}
GenericWriteLogOperation::~GenericWriteLogOperation() { }
std::ostream &GenericWriteLogOperation::format(std::ostream &os) const {
GenericLogOperation::format(os);
return os;
}
std::ostream &operator<<(std::ostream &os,
const GenericWriteLogOperation &op) {
return op.format(os);
}
/* Called when the write log operation is appending and its log position is guaranteed */
void GenericWriteLogOperation::appending() {
Context *on_append = nullptr;
ldout(m_cct, 20) << __func__ << " " << this << dendl;
{
std::lock_guard locker(m_lock);
on_append = on_write_append;
on_write_append = nullptr;
}
if (on_append) {
ldout(m_cct, 20) << __func__ << " " << this << " on_append=" << on_append << dendl;
on_append->complete(0);
}
}
/* Called when the write log operation is completed in all log replicas */
void GenericWriteLogOperation::complete(int result) {
appending();
Context *on_persist = nullptr;
ldout(m_cct, 20) << __func__ << " " << this << dendl;
{
std::lock_guard locker(m_lock);
on_persist = on_write_persist;
on_write_persist = nullptr;
}
if (on_persist) {
ldout(m_cct, 20) << __func__ << " " << this << " on_persist=" << on_persist
<< dendl;
on_persist->complete(result);
}
}
WriteLogOperation::WriteLogOperation(
WriteLogOperationSet &set, uint64_t image_offset_bytes,
uint64_t write_bytes, CephContext *cct,
std::shared_ptr<WriteLogEntry> write_log_entry)
: GenericWriteLogOperation(set.sync_point, set.dispatch_time,
set.perfcounter, cct),
log_entry(write_log_entry) {
on_write_append = set.extent_ops_appending->new_sub();
on_write_persist = set.extent_ops_persist->new_sub();
log_entry->sync_point_entry->writes++;
log_entry->sync_point_entry->bytes += write_bytes;
}
WriteLogOperation::WriteLogOperation(WriteLogOperationSet &set,
uint64_t image_offset_bytes,
uint64_t write_bytes,
uint32_t data_len,
CephContext *cct,
std::shared_ptr<WriteLogEntry> writesame_log_entry)
: WriteLogOperation(set, image_offset_bytes, write_bytes, cct,
writesame_log_entry) {
is_writesame = true;
}
WriteLogOperation::~WriteLogOperation() { }
void WriteLogOperation::init(bool has_data, std::vector<WriteBufferAllocation>::iterator allocation,
uint64_t current_sync_gen,
uint64_t last_op_sequence_num,
bufferlist &write_req_bl, uint64_t buffer_offset,
bool persist_on_flush) {
log_entry->init(has_data, current_sync_gen, last_op_sequence_num,
persist_on_flush);
buffer_alloc = &(*allocation);
bl.substr_of(write_req_bl, buffer_offset, log_entry->write_bytes());
log_entry->init_cache_bl(write_req_bl, buffer_offset,
log_entry->write_bytes());
}
std::ostream &WriteLogOperation::format(std::ostream &os) const {
std::string op_name = is_writesame ? "(Write Same) " : "(Write) ";
os << op_name;
GenericWriteLogOperation::format(os);
if (log_entry) {
os << ", log_entry=[" << *log_entry << "]";
} else {
os << ", log_entry=nullptr";
}
os << ", bl=[" << bl << "], buffer_alloc=" << buffer_alloc;
return os;
}
std::ostream &operator<<(std::ostream &os,
const WriteLogOperation &op) {
return op.format(os);
}
void WriteLogOperation::complete(int result) {
GenericWriteLogOperation::complete(result);
m_perfcounter->tinc(l_librbd_pwl_log_op_dis_to_buf_t,
buf_persist_start_time - dispatch_time);
utime_t buf_persist_lat = buf_persist_comp_time - buf_persist_start_time;
m_perfcounter->tinc(l_librbd_pwl_log_op_buf_to_bufc_t, buf_persist_lat);
m_perfcounter->hinc(l_librbd_pwl_log_op_buf_to_bufc_t_hist,
buf_persist_lat.to_nsec(),
log_entry->ram_entry.write_bytes);
m_perfcounter->tinc(l_librbd_pwl_log_op_buf_to_app_t,
log_append_start_time - buf_persist_start_time);
}
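/* A rough sketch of how the completion chain built in the constructor below
 * fits together: each per-extent WriteLogOperation holds one sub of
 * extent_ops_appending and one of extent_ops_persist (see the
 * WriteLogOperation constructor above). When every extent has appended,
 * extent_ops_appending fires and completes on_ops_appending (the sync
 * point's prior-persisted gather sub) plus one reserved sub of
 * extent_ops_persist; when every extent has also persisted,
 * extent_ops_persist fires and completes on_ops_persist (if set) followed
 * by the set's on_finish context. */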
WriteLogOperationSet::WriteLogOperationSet(utime_t dispatched, PerfCounters *perfcounter, std::shared_ptr<SyncPoint> sync_point,
bool persist_on_flush, CephContext *cct, Context *on_finish)
: m_cct(cct), m_on_finish(on_finish),
persist_on_flush(persist_on_flush),
dispatch_time(dispatched),
perfcounter(perfcounter),
sync_point(sync_point) {
on_ops_appending = sync_point->prior_persisted_gather_new_sub();
on_ops_persist = nullptr;
extent_ops_persist =
new C_Gather(m_cct,
new LambdaContext( [this](int r) {
ldout(this->m_cct,20) << __func__ << " " << this << " m_extent_ops_persist completed" << dendl;
if (on_ops_persist) {
on_ops_persist->complete(r);
}
m_on_finish->complete(r);
}));
auto appending_persist_sub = extent_ops_persist->new_sub();
extent_ops_appending =
new C_Gather(m_cct,
new LambdaContext( [this, appending_persist_sub](int r) {
ldout(this->m_cct, 20) << __func__ << " " << this << " m_extent_ops_appending completed" << dendl;
on_ops_appending->complete(r);
appending_persist_sub->complete(r);
}));
}
WriteLogOperationSet::~WriteLogOperationSet() { }
std::ostream &operator<<(std::ostream &os,
const WriteLogOperationSet &s) {
os << "cell=" << (void*)s.cell
<< ", extent_ops_appending=" << s.extent_ops_appending
<< ", extent_ops_persist=" << s.extent_ops_persist;
return os;
}
DiscardLogOperation::DiscardLogOperation(std::shared_ptr<SyncPoint> sync_point,
uint64_t image_offset_bytes,
uint64_t write_bytes,
uint32_t discard_granularity_bytes,
utime_t dispatch_time,
PerfCounters *perfcounter,
CephContext *cct)
: GenericWriteLogOperation(sync_point, dispatch_time, perfcounter, cct),
log_entry(std::make_shared<DiscardLogEntry>(sync_point->log_entry,
image_offset_bytes,
write_bytes,
discard_granularity_bytes)) {
on_write_persist = nullptr;
log_entry->sync_point_entry->writes++;
log_entry->sync_point_entry->bytes += write_bytes;
}
DiscardLogOperation::~DiscardLogOperation() { }
std::ostream &DiscardLogOperation::format(std::ostream &os) const {
os << "(Discard) ";
GenericWriteLogOperation::format(os);
if (log_entry) {
os << ", log_entry=[" << *log_entry << "]";
} else {
os << ", log_entry=nullptr";
}
return os;
}
std::ostream &operator<<(std::ostream &os,
const DiscardLogOperation &op) {
return op.format(os);
}
} // namespace pwl
} // namespace cache
} // namespace librbd
| 11,626 | 36.146965 | 128 | cc |
null | ceph-main/src/librbd/cache/pwl/LogOperation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_RWL_LOG_OPERATION_H
#define CEPH_LIBRBD_CACHE_RWL_LOG_OPERATION_H
#include "include/utime.h"
#include "librbd/cache/pwl/LogEntry.h"
#include "librbd/cache/pwl/SyncPoint.h"
namespace librbd {
namespace cache {
namespace pwl {
struct WriteBufferAllocation;
class WriteLogOperationSet;
class WriteLogOperation;
class GenericWriteLogOperation;
class SyncPointLogOperation;
class GenericLogOperation;
template <typename T>
class AbstractWriteLog;
using GenericLogOperationSharedPtr = std::shared_ptr<GenericLogOperation>;
using GenericLogOperationsVector = std::vector<GenericLogOperationSharedPtr>;
class GenericLogOperation {
protected:
PerfCounters *m_perfcounter = nullptr;
public:
utime_t dispatch_time; // When op created
utime_t buf_persist_start_time; // When buffer persist begins
utime_t buf_persist_comp_time; // When buffer persist completes
utime_t log_append_start_time; // When log append begins
utime_t log_append_comp_time; // When log append completes
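  /* These timestamps are expected to advance in the order listed;
   * WriteLogOperation::complete() turns differences between them into the
   * l_librbd_pwl_log_op_*_t latency perf counters (dispatch, buffer
   * persist, log append). */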
GenericLogOperation(utime_t dispatch_time, PerfCounters *perfcounter);
virtual ~GenericLogOperation() { };
GenericLogOperation(const GenericLogOperation&) = delete;
GenericLogOperation &operator=(const GenericLogOperation&) = delete;
virtual std::ostream &format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const GenericLogOperation &op);
virtual const std::shared_ptr<GenericLogEntry> get_log_entry() = 0;
virtual void appending() = 0;
virtual void complete(int r) = 0;
virtual void mark_log_entry_completed() {};
virtual bool reserved_allocated() const {
return false;
}
virtual bool is_writing_op() const {
return false;
}
virtual void init_op(uint64_t current_sync_gen, bool persist_on_flush,
uint64_t last_op_sequence_num, Context *write_persist,
Context *write_append) {};
virtual void copy_bl_to_cache_buffer(
std::vector<WriteBufferAllocation>::iterator allocation) {};
};
class SyncPointLogOperation : public GenericLogOperation {
private:
CephContext *m_cct;
ceph::mutex &m_lock;
std::vector<Context*> append_sync_point();
void clear_earlier_sync_point();
std::vector<Context*> swap_on_sync_point_persisted();
public:
std::shared_ptr<SyncPoint> sync_point;
SyncPointLogOperation(ceph::mutex &lock,
std::shared_ptr<SyncPoint> sync_point,
utime_t dispatch_time,
PerfCounters *perfcounter,
CephContext *cct);
~SyncPointLogOperation() override;
SyncPointLogOperation(const SyncPointLogOperation&) = delete;
SyncPointLogOperation &operator=(const SyncPointLogOperation&) = delete;
std::ostream &format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const SyncPointLogOperation &op);
const std::shared_ptr<GenericLogEntry> get_log_entry() override {
return sync_point->log_entry;
}
void appending() override;
void complete(int r) override;
};
class GenericWriteLogOperation : public GenericLogOperation {
protected:
ceph::mutex m_lock;
CephContext *m_cct;
public:
std::shared_ptr<SyncPoint> sync_point;
Context *on_write_append = nullptr; /* Completion for things waiting on this
* write's position in the log to be
* guaranteed */
Context *on_write_persist = nullptr; /* Completion for things waiting on this
* write to persist */
GenericWriteLogOperation(std::shared_ptr<SyncPoint> sync_point,
utime_t dispatch_time,
PerfCounters *perfcounter,
CephContext *cct);
~GenericWriteLogOperation() override;
GenericWriteLogOperation(const GenericWriteLogOperation&) = delete;
GenericWriteLogOperation &operator=(const GenericWriteLogOperation&) = delete;
std::ostream &format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const GenericWriteLogOperation &op);
void mark_log_entry_completed() override{
sync_point->log_entry->writes_completed++;
}
bool reserved_allocated() const override {
return true;
}
bool is_writing_op() const override {
return true;
}
void appending() override;
void complete(int r) override;
};
class WriteLogOperation : public GenericWriteLogOperation {
public:
using GenericWriteLogOperation::m_lock;
using GenericWriteLogOperation::sync_point;
using GenericWriteLogOperation::on_write_append;
using GenericWriteLogOperation::on_write_persist;
std::shared_ptr<WriteLogEntry> log_entry;
bufferlist bl;
bool is_writesame = false;
WriteBufferAllocation *buffer_alloc = nullptr;
WriteLogOperation(WriteLogOperationSet &set,
uint64_t image_offset_bytes,
uint64_t write_bytes, CephContext *cct,
std::shared_ptr<WriteLogEntry> write_log_entry);
WriteLogOperation(WriteLogOperationSet &set,
uint64_t image_offset_bytes,
uint64_t write_bytes, uint32_t data_len,
CephContext *cct,
std::shared_ptr<WriteLogEntry> writesame_log_entry);
~WriteLogOperation() override;
WriteLogOperation(const WriteLogOperation&) = delete;
WriteLogOperation &operator=(const WriteLogOperation&) = delete;
void init(bool has_data,
std::vector<WriteBufferAllocation>::iterator allocation,
uint64_t current_sync_gen, uint64_t last_op_sequence_num,
bufferlist &write_req_bl, uint64_t buffer_offset,
bool persist_on_flush);
std::ostream &format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const WriteLogOperation &op);
const std::shared_ptr<GenericLogEntry> get_log_entry() override {
return log_entry;
}
void complete(int r) override;
};
class WriteLogOperationSet {
private:
CephContext *m_cct;
Context *m_on_finish;
public:
bool persist_on_flush;
BlockGuardCell *cell;
C_Gather *extent_ops_appending;
Context *on_ops_appending;
C_Gather *extent_ops_persist;
Context *on_ops_persist;
GenericLogOperationsVector operations;
utime_t dispatch_time; /* When set created */
PerfCounters *perfcounter = nullptr;
std::shared_ptr<SyncPoint> sync_point;
WriteLogOperationSet(utime_t dispatched, PerfCounters *perfcounter,
std::shared_ptr<SyncPoint> sync_point,
const bool persist_on_flush, CephContext *cct,
Context *on_finish);
~WriteLogOperationSet();
WriteLogOperationSet(const WriteLogOperationSet&) = delete;
WriteLogOperationSet &operator=(const WriteLogOperationSet&) = delete;
friend std::ostream &operator<<(std::ostream &os,
const WriteLogOperationSet &s);
};
class DiscardLogOperation : public GenericWriteLogOperation {
public:
using GenericWriteLogOperation::m_lock;
using GenericWriteLogOperation::sync_point;
using GenericWriteLogOperation::on_write_append;
using GenericWriteLogOperation::on_write_persist;
std::shared_ptr<DiscardLogEntry> log_entry;
DiscardLogOperation(std::shared_ptr<SyncPoint> sync_point,
uint64_t image_offset_bytes,
uint64_t write_bytes,
uint32_t discard_granularity_bytes,
utime_t dispatch_time,
PerfCounters *perfcounter,
CephContext *cct);
~DiscardLogOperation() override;
DiscardLogOperation(const DiscardLogOperation&) = delete;
DiscardLogOperation &operator=(const DiscardLogOperation&) = delete;
const std::shared_ptr<GenericLogEntry> get_log_entry() override {
return log_entry;
}
bool reserved_allocated() const override {
return false;
}
std::ostream &format(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const DiscardLogOperation &op);
};
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_RWL_LOG_OPERATION_H
| 8,440 | 36.515556 | 80 | h |
null | ceph-main/src/librbd/cache/pwl/ReadRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_RWL_READ_REQUEST_H
#define CEPH_LIBRBD_CACHE_RWL_READ_REQUEST_H
#include "include/Context.h"
#include "librbd/cache/pwl/Types.h"
namespace librbd {
namespace cache {
namespace pwl {
typedef std::vector<std::shared_ptr<pwl::ImageExtentBuf>> ImageExtentBufs;
class C_ReadRequest : public Context {
public:
io::Extents miss_extents; // move back to caller
ImageExtentBufs read_extents;
bufferlist miss_bl;
C_ReadRequest(
CephContext *cct, utime_t arrived, PerfCounters *perfcounter,
bufferlist *out_bl, Context *on_finish)
: m_cct(cct), m_on_finish(on_finish), m_out_bl(out_bl),
m_arrived_time(arrived), m_perfcounter(perfcounter) {}
~C_ReadRequest() {}
const char *get_name() const {
return "C_ReadRequest";
}
protected:
CephContext *m_cct;
Context *m_on_finish;
bufferlist *m_out_bl;
utime_t m_arrived_time;
PerfCounters *m_perfcounter;
};
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_RWL_READ_REQUEST_H
| 1,132 | 23.630435 | 74 | h |
null | ceph-main/src/librbd/cache/pwl/Request.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Request.h"
#include "librbd/BlockGuard.h"
#include "librbd/cache/pwl/LogEntry.h"
#include "librbd/cache/pwl/AbstractWriteLog.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::Request: " << this << " " \
<< __func__ << ": "
using namespace std;
namespace librbd {
namespace cache {
namespace pwl {
template <typename T>
C_BlockIORequest<T>::C_BlockIORequest(T &pwl, const utime_t arrived, io::Extents &&extents,
bufferlist&& bl, const int fadvise_flags, Context *user_req)
: pwl(pwl), image_extents(std::move(extents)),
bl(std::move(bl)), fadvise_flags(fadvise_flags),
user_req(user_req), image_extents_summary(image_extents), m_arrived_time(arrived) {
ldout(pwl.get_context(), 99) << this << dendl;
}
template <typename T>
C_BlockIORequest<T>::~C_BlockIORequest() {
ldout(pwl.get_context(), 99) << this << dendl;
ceph_assert(m_cell_released || !m_cell);
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_BlockIORequest<T> &req) {
os << "image_extents=" << req.image_extents
<< ", image_extents_summary=[" << req.image_extents_summary
<< "], bl=" << req.bl
<< ", user_req=" << req.user_req
<< ", m_user_req_completed=" << req.m_user_req_completed
<< ", m_deferred=" << req.m_deferred
<< ", detained=" << req.detained;
return os;
}
template <typename T>
void C_BlockIORequest<T>::set_cell(BlockGuardCell *cell) {
ldout(pwl.get_context(), 20) << this << " cell=" << cell << dendl;
ceph_assert(cell);
ceph_assert(!m_cell);
m_cell = cell;
}
template <typename T>
BlockGuardCell *C_BlockIORequest<T>::get_cell(void) {
ldout(pwl.get_context(), 20) << this << " cell=" << m_cell << dendl;
return m_cell;
}
template <typename T>
void C_BlockIORequest<T>::release_cell() {
ldout(pwl.get_context(), 20) << this << " cell=" << m_cell << dendl;
ceph_assert(m_cell);
bool initial = false;
if (m_cell_released.compare_exchange_strong(initial, true)) {
pwl.release_guarded_request(m_cell);
} else {
ldout(pwl.get_context(), 5) << "cell " << m_cell << " already released for " << this << dendl;
}
}
template <typename T>
void C_BlockIORequest<T>::complete_user_request(int r) {
bool initial = false;
if (m_user_req_completed.compare_exchange_strong(initial, true)) {
ldout(pwl.get_context(), 15) << this << " completing user req" << dendl;
m_user_req_completed_time = ceph_clock_now();
pwl.complete_user_request(user_req, r);
} else {
ldout(pwl.get_context(), 20) << this << " user req already completed" << dendl;
}
}
template <typename T>
void C_BlockIORequest<T>::finish(int r) {
ldout(pwl.get_context(), 20) << this << dendl;
complete_user_request(r);
bool initial = false;
if (m_finish_called.compare_exchange_strong(initial, true)) {
ldout(pwl.get_context(), 15) << this << " finishing" << dendl;
finish_req(0);
} else {
ldout(pwl.get_context(), 20) << this << " already finished" << dendl;
ceph_assert(0);
}
}
template <typename T>
void C_BlockIORequest<T>::deferred() {
bool initial = false;
if (m_deferred.compare_exchange_strong(initial, true)) {
deferred_handler();
}
}
template <typename T>
C_WriteRequest<T>::C_WriteRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req)
: C_BlockIORequest<T>(pwl, arrived, std::move(image_extents), std::move(bl), fadvise_flags, user_req),
m_perfcounter(perfcounter), m_lock(lock) {
ldout(pwl.get_context(), 99) << this << dendl;
}
template <typename T>
C_WriteRequest<T>::C_WriteRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents,
bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
int fadvise_flags, ceph::mutex &lock, PerfCounters *perfcounter,
Context *user_req)
: C_BlockIORequest<T>(pwl, arrived, std::move(image_extents), std::move(bl), fadvise_flags, user_req),
mismatch_offset(mismatch_offset), cmp_bl(std::move(cmp_bl)),
m_perfcounter(perfcounter), m_lock(lock) {
is_comp_and_write = true;
ldout(pwl.get_context(), 20) << dendl;
}
template <typename T>
C_WriteRequest<T>::~C_WriteRequest() {
ldout(pwl.get_context(), 99) << this << dendl;
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_WriteRequest<T> &req) {
os << (C_BlockIORequest<T>&)req
<< " m_resources.allocated=" << req.m_resources.allocated;
if (req.op_set) {
os << " op_set=[" << *req.op_set << "]";
}
return os;
}
template <typename T>
void C_WriteRequest<T>::blockguard_acquired(GuardedRequestFunctionContext &guard_ctx) {
ldout(pwl.get_context(), 20) << __func__ << " write_req=" << this << " cell=" << guard_ctx.cell << dendl;
ceph_assert(guard_ctx.cell);
this->detained = guard_ctx.state.detained; /* overlapped */
this->m_queued = guard_ctx.state.queued; /* queued behind at least one barrier */
this->set_cell(guard_ctx.cell);
}
template <typename T>
void C_WriteRequest<T>::finish_req(int r) {
ldout(pwl.get_context(), 15) << "write_req=" << this << " cell=" << this->get_cell() << dendl;
/* Completed to caller by here (in finish(), which calls this) */
utime_t now = ceph_clock_now();
if(is_comp_and_write && !compare_succeeded) {
update_req_stats(now);
return;
}
pwl.release_write_lanes(this);
ceph_assert(m_resources.allocated);
m_resources.allocated = false;
this->release_cell(); /* TODO: Consider doing this in appending state */
update_req_stats(now);
}
template <typename T>
std::shared_ptr<WriteLogOperation> C_WriteRequest<T>::create_operation(
uint64_t offset, uint64_t len) {
return pwl.m_builder->create_write_log_operation(
*op_set, offset, len, pwl.get_context(),
pwl.m_builder->create_write_log_entry(op_set->sync_point->log_entry, offset, len));
}
template <typename T>
void C_WriteRequest<T>::setup_log_operations(DeferredContexts &on_exit) {
GenericWriteLogEntries log_entries;
{
std::lock_guard locker(m_lock);
std::shared_ptr<SyncPoint> current_sync_point = pwl.get_current_sync_point();
if ((!pwl.get_persist_on_flush() && current_sync_point->log_entry->writes_completed) ||
(current_sync_point->log_entry->writes > MAX_WRITES_PER_SYNC_POINT) ||
(current_sync_point->log_entry->bytes > MAX_BYTES_PER_SYNC_POINT)) {
/* Create new sync point and persist the previous one. This sequenced
* write will bear a sync gen number shared with no already completed
* writes. A group of sequenced writes may be safely flushed concurrently
* if they all arrived before any of them completed. We'll insert one on
* an aio_flush() from the application. Here we're inserting one to cap
* the number of bytes and writes per sync point. When the application is
* not issuing flushes, we insert sync points to record some observed
* write concurrency information that enables us to safely issue >1 flush
* write (for writes observed here to have been in flight simultaneously)
* at a time in persist-on-write mode.
*/
pwl.flush_new_sync_point(nullptr, on_exit);
current_sync_point = pwl.get_current_sync_point();
}
uint64_t current_sync_gen = pwl.get_current_sync_gen();
op_set =
make_unique<WriteLogOperationSet>(this->m_dispatched_time,
m_perfcounter,
current_sync_point,
pwl.get_persist_on_flush(),
pwl.get_context(), this);
ldout(pwl.get_context(), 20) << "write_req=[" << *this
<< "], op_set=" << op_set.get() << dendl;
ceph_assert(m_resources.allocated);
/* op_set->operations initialized differently for plain write or write same */
auto allocation = m_resources.buffers.begin();
uint64_t buffer_offset = 0;
for (auto &extent : this->image_extents) {
/* operation->on_write_persist connected to m_prior_log_entries_persisted Gather */
auto operation = this->create_operation(extent.first, extent.second);
this->op_set->operations.emplace_back(operation);
/* A WS is also a write */
ldout(pwl.get_context(), 20) << "write_req=[" << *this
<< "], op_set=" << op_set.get()
<< ", operation=" << operation << dendl;
log_entries.emplace_back(operation->log_entry);
if (!op_set->persist_on_flush) {
pwl.inc_last_op_sequence_num();
}
operation->init(true, allocation, current_sync_gen,
pwl.get_last_op_sequence_num(), this->bl, buffer_offset, op_set->persist_on_flush);
buffer_offset += operation->log_entry->write_bytes();
ldout(pwl.get_context(), 20) << "operation=[" << *operation << "]" << dendl;
allocation++;
}
}
/* All extent ops subs created */
op_set->extent_ops_appending->activate();
op_set->extent_ops_persist->activate();
pwl.add_into_log_map(log_entries, this);
}
template <typename T>
void C_WriteRequest<T>::copy_cache() {
pwl.copy_bl_to_buffer(&m_resources, op_set);
}
template <typename T>
bool C_WriteRequest<T>::append_write_request(std::shared_ptr<SyncPoint> sync_point) {
std::lock_guard locker(m_lock);
auto write_req_sp = this;
if (sync_point->earlier_sync_point) {
Context *schedule_append_ctx = new LambdaContext([write_req_sp](int r) {
write_req_sp->schedule_append();
});
sync_point->earlier_sync_point->on_sync_point_appending.push_back(schedule_append_ctx);
return true;
}
return false;
}
template <typename T>
void C_WriteRequest<T>::schedule_append() {
ceph_assert(++m_appended == 1);
pwl.setup_schedule_append(this->op_set->operations, m_do_early_flush, this);
}
/**
* Attempts to allocate log resources for a write. Returns true if successful.
*
* Resources include 1 lane per extent, 1 log entry per extent, and the payload
* data space for each extent.
*
* Lanes are released after the write persists via release_write_lanes()
*/
template <typename T>
bool C_WriteRequest<T>::alloc_resources() {
this->allocated_time = ceph_clock_now();
return pwl.alloc_resources(this);
}
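/* Illustrative arithmetic only: a write with two extents of 4 KiB and 8 KiB
 * would ask for 2 lanes, 2 log entries, and 4 KiB + 8 KiB of payload buffer
 * space. When alloc_resources() returns false the request is deferred
 * rather than failed (see C_BlockIORequest::deferred() and m_deferred) and
 * is handled again once resources become available. */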
/**
* Takes custody of write_req. Resources must already be allocated.
*
* Locking:
* Acquires lock
*/
template <typename T>
void C_WriteRequest<T>::dispatch()
{
CephContext *cct = pwl.get_context();
DeferredContexts on_exit;
utime_t now = ceph_clock_now();
this->m_dispatched_time = now;
ldout(cct, 15) << "write_req=" << this << " cell=" << this->get_cell() << dendl;
this->setup_log_operations(on_exit);
bool append_deferred = false;
if (!op_set->persist_on_flush &&
append_write_request(op_set->sync_point)) {
/* In persist-on-write mode, we defer the append of this write until the
* previous sync point is appending (meaning all the writes before it are
* persisted and that previous sync point can now appear in the
* log). Since we insert sync points in persist-on-write mode when writes
* have already completed to the current sync point, this limits us to
* one inserted sync point in flight at a time, and gives the next
* inserted sync point some time to accumulate a few writes if they
* arrive soon. Without this we can insert an absurd number of sync
* points, each with one or two writes. That uses a lot of log entries,
* and limits flushing to very few writes at a time. */
m_do_early_flush = false;
append_deferred = true;
} else {
/* The prior sync point is done, so we'll schedule append here. If this is
* persist-on-write, and probably still the caller's thread, we'll use this
* caller's thread to perform the persist & replication of the payload
* buffer. */
m_do_early_flush =
!(this->detained || this->m_queued || this->m_deferred || op_set->persist_on_flush);
}
if (!append_deferred) {
this->schedule_append();
}
}
template <typename T>
C_FlushRequest<T>::C_FlushRequest(T &pwl, const utime_t arrived,
io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags,
ceph::mutex &lock, PerfCounters *perfcounter,
Context *user_req)
: C_BlockIORequest<T>(pwl, arrived, std::move(image_extents), std::move(bl),
fadvise_flags, user_req),
m_lock(lock), m_perfcounter(perfcounter) {
ldout(pwl.get_context(), 20) << this << dendl;
}
template <typename T>
void C_FlushRequest<T>::finish_req(int r) {
ldout(pwl.get_context(), 20) << "flush_req=" << this
<< " cell=" << this->get_cell() << dendl;
/* Block guard already released */
ceph_assert(!this->get_cell());
/* Completed to caller by here */
utime_t now = ceph_clock_now();
m_perfcounter->tinc(l_librbd_pwl_aio_flush_latency, now - this->m_arrived_time);
}
template <typename T>
bool C_FlushRequest<T>::alloc_resources() {
ldout(pwl.get_context(), 20) << "req type=" << get_name()
<< " req=[" << *this << "]" << dendl;
return pwl.alloc_resources(this);
}
template <typename T>
void C_FlushRequest<T>::dispatch() {
utime_t now = ceph_clock_now();
ldout(pwl.get_context(), 20) << "req type=" << get_name()
<< " req=[" << *this << "]" << dendl;
ceph_assert(this->m_resources.allocated);
this->m_dispatched_time = now;
op = std::make_shared<SyncPointLogOperation>(m_lock,
to_append,
now,
m_perfcounter,
pwl.get_context());
m_perfcounter->inc(l_librbd_pwl_log_ops, 1);
pwl.schedule_append(op);
}
template <typename T>
void C_FlushRequest<T>::setup_buffer_resources(
uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated,
uint64_t *number_lanes, uint64_t *number_log_entries,
uint64_t *number_unpublished_reserves) {
*number_log_entries = 1;
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_FlushRequest<T> &req) {
os << (C_BlockIORequest<T>&)req
<< " m_resources.allocated=" << req.m_resources.allocated;
return os;
}
template <typename T>
C_DiscardRequest<T>::C_DiscardRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents,
uint32_t discard_granularity_bytes, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req)
: C_BlockIORequest<T>(pwl, arrived, std::move(image_extents), bufferlist(), 0, user_req),
m_discard_granularity_bytes(discard_granularity_bytes),
m_lock(lock),
m_perfcounter(perfcounter) {
ldout(pwl.get_context(), 20) << this << dendl;
}
template <typename T>
C_DiscardRequest<T>::~C_DiscardRequest() {
ldout(pwl.get_context(), 20) << this << dendl;
}
template <typename T>
bool C_DiscardRequest<T>::alloc_resources() {
ldout(pwl.get_context(), 20) << "req type=" << get_name()
<< " req=[" << *this << "]" << dendl;
return pwl.alloc_resources(this);
}
template <typename T>
void C_DiscardRequest<T>::setup_log_operations() {
std::lock_guard locker(m_lock);
GenericWriteLogEntries log_entries;
for (auto &extent : this->image_extents) {
op = pwl.m_builder->create_discard_log_operation(
pwl.get_current_sync_point(), extent.first, extent.second,
m_discard_granularity_bytes, this->m_dispatched_time, m_perfcounter,
pwl.get_context());
log_entries.emplace_back(op->log_entry);
break;
}
uint64_t current_sync_gen = pwl.get_current_sync_gen();
bool persist_on_flush = pwl.get_persist_on_flush();
if (!persist_on_flush) {
pwl.inc_last_op_sequence_num();
}
auto discard_req = this;
Context *on_write_append = pwl.get_current_sync_point()->prior_persisted_gather_new_sub();
Context *on_write_persist = new LambdaContext(
[this, discard_req](int r) {
ldout(pwl.get_context(), 20) << "discard_req=" << discard_req
<< " cell=" << discard_req->get_cell() << dendl;
ceph_assert(discard_req->get_cell());
discard_req->complete_user_request(r);
discard_req->release_cell();
});
op->init_op(current_sync_gen, persist_on_flush, pwl.get_last_op_sequence_num(),
on_write_persist, on_write_append);
pwl.add_into_log_map(log_entries, this);
}
template <typename T>
void C_DiscardRequest<T>::dispatch() {
utime_t now = ceph_clock_now();
ldout(pwl.get_context(), 20) << "req type=" << get_name()
<< " req=[" << *this << "]" << dendl;
ceph_assert(this->m_resources.allocated);
this->m_dispatched_time = now;
setup_log_operations();
m_perfcounter->inc(l_librbd_pwl_log_ops, 1);
pwl.schedule_append(op);
}
template <typename T>
void C_DiscardRequest<T>::setup_buffer_resources(
uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated,
uint64_t *number_lanes, uint64_t *number_log_entries,
uint64_t *number_unpublished_reserves) {
*number_log_entries = 1;
/* No bytes are allocated for a discard, but we count the discarded bytes
* as dirty. This means it's possible to have more bytes dirty than
* there are bytes cached or allocated. */
for (auto &extent : this->image_extents) {
*bytes_dirtied = extent.second;
break;
}
}
template <typename T>
void C_DiscardRequest<T>::blockguard_acquired(GuardedRequestFunctionContext &guard_ctx) {
ldout(pwl.get_context(), 20) << " cell=" << guard_ctx.cell << dendl;
ceph_assert(guard_ctx.cell);
this->detained = guard_ctx.state.detained; /* overlapped */
this->set_cell(guard_ctx.cell);
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_DiscardRequest<T> &req) {
os << (C_BlockIORequest<T>&)req;
if (req.op) {
os << " op=[" << *req.op << "]";
} else {
os << " op=nullptr";
}
return os;
}
template <typename T>
C_WriteSameRequest<T>::C_WriteSameRequest(
T &pwl, const utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req)
: C_WriteRequest<T>(pwl, arrived, std::move(image_extents), std::move(bl),
fadvise_flags, lock, perfcounter, user_req) {
ldout(pwl.get_context(), 20) << this << dendl;
}
template <typename T>
C_WriteSameRequest<T>::~C_WriteSameRequest() {
ldout(pwl.get_context(), 20) << this << dendl;
}
template <typename T>
void C_WriteSameRequest<T>::update_req_stats(utime_t &now) {
/* Write same stats excluded from most write stats
* because the read phase will make them look like slow writes in
* those histograms. */
ldout(pwl.get_context(), 20) << this << dendl;
utime_t comp_latency = now - this->m_arrived_time;
this->m_perfcounter->tinc(l_librbd_pwl_ws_latency, comp_latency);
}
template <typename T>
std::shared_ptr<WriteLogOperation> C_WriteSameRequest<T>::create_operation(
uint64_t offset, uint64_t len) {
ceph_assert(this->image_extents.size() == 1);
WriteLogOperationSet &set = *this->op_set.get();
return pwl.m_builder->create_write_log_operation(
*this->op_set.get(), offset, len, this->bl.length(), pwl.get_context(),
pwl.m_builder->create_writesame_log_entry(set.sync_point->log_entry, offset,
len, this->bl.length()));
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_WriteSameRequest<T> &req) {
os << (C_WriteRequest<T>&)req;
return os;
}
template <typename T>
void C_WriteRequest<T>::update_req_stats(utime_t &now) {
/* Compare-and-write stats. Compare-and-write excluded from most write
* stats because the read phase will make them look like slow writes in
* those histograms. */
if(is_comp_and_write) {
if (!compare_succeeded) {
this->m_perfcounter->inc(l_librbd_pwl_cmp_fails, 1);
}
utime_t comp_latency = now - this->m_arrived_time;
this->m_perfcounter->tinc(l_librbd_pwl_cmp_latency, comp_latency);
}
}
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::C_BlockIORequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
template class librbd::cache::pwl::C_WriteRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
template class librbd::cache::pwl::C_FlushRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
template class librbd::cache::pwl::C_DiscardRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
template class librbd::cache::pwl::C_WriteSameRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
| 21,501 | 37.191829 | 111 | cc |
null | ceph-main/src/librbd/cache/pwl/Request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_REQUEST_H
#define CEPH_LIBRBD_CACHE_PWL_REQUEST_H
#include "include/Context.h"
#include "librbd/cache/pwl/Types.h"
#include "librbd/cache/pwl/LogOperation.h"
namespace librbd {
class BlockGuardCell;
namespace cache {
namespace pwl {
class GuardedRequestFunctionContext;
struct WriteRequestResources {
bool allocated = false;
std::vector<WriteBufferAllocation> buffers;
};
/**
* A request that can be deferred in a BlockGuard to sequence
* overlapping operations.
* This is the custodian of the BlockGuard cell for this IO, and the
* state information about the progress of this IO. This object lives
* until the IO is persisted in all (live) log replicas. User request
* may be completed from here before the IO persists.
*/
template <typename T>
class C_BlockIORequest : public Context {
public:
T &pwl;
io::Extents image_extents;
bufferlist bl;
int fadvise_flags;
Context *user_req; /* User write request */
ExtentsSummary<io::Extents> image_extents_summary;
bool detained = false; /* Detained in blockguard (overlapped with a prior IO) */
utime_t allocated_time; /* When allocation began */
C_BlockIORequest(T &pwl, const utime_t arrived, io::Extents &&extents,
bufferlist&& bl, const int fadvise_flags, Context *user_req);
~C_BlockIORequest() override;
C_BlockIORequest(const C_BlockIORequest&) = delete;
C_BlockIORequest &operator=(const C_BlockIORequest&) = delete;
void set_cell(BlockGuardCell *cell);
BlockGuardCell *get_cell(void);
void release_cell();
void complete_user_request(int r);
void finish(int r);
virtual void finish_req(int r) = 0;
virtual bool alloc_resources() = 0;
void deferred();
virtual void deferred_handler() = 0;
virtual void dispatch() = 0;
virtual void copy_cache() {};
virtual const char *get_name() const {
return "C_BlockIORequest";
}
uint64_t get_image_extents_size() {
return image_extents.size();
}
std::vector<WriteBufferAllocation>& get_resources_buffers() {
return m_resources.buffers;
}
void set_allocated(bool allocated) {
if (allocated) {
m_resources.allocated = true;
} else {
m_resources.buffers.clear();
}
}
virtual void setup_buffer_resources(
uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated,
uint64_t *number_lanes, uint64_t *number_log_entries,
uint64_t *number_unpublished_reserves) = 0;
protected:
utime_t m_arrived_time;
utime_t m_dispatched_time; /* When dispatch began */
utime_t m_user_req_completed_time;
std::atomic<bool> m_deferred = {false}; /* Deferred because this or a prior IO had to wait for write resources */
WriteRequestResources m_resources;
private:
std::atomic<bool> m_user_req_completed = {false};
std::atomic<bool> m_finish_called = {false};
std::atomic<bool> m_cell_released = {false};
BlockGuardCell* m_cell = nullptr;
template <typename U>
friend std::ostream &operator<<(std::ostream &os,
const C_BlockIORequest<U> &req);
};
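/* A rough sketch of the lifecycle, as exercised by the subclasses in
 * Request.cc: the block guard cell is attached via set_cell(), resources
 * are obtained with alloc_resources() (or the request is deferred()),
 * dispatch() builds and schedules the log operations, the user request may
 * complete early via complete_user_request(), and finish() drives
 * finish_req(), with release_cell() dropping the guard once the IO has
 * persisted. */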
/**
* This is the custodian of the BlockGuard cell for this write. Block
* guard is not released until the write persists everywhere (this is
* how we guarantee to each log replica that they will never see
* overlapping writes).
*/
template <typename T>
class C_WriteRequest : public C_BlockIORequest<T> {
public:
using C_BlockIORequest<T>::pwl;
bool compare_succeeded = false;
uint64_t *mismatch_offset;
bufferlist cmp_bl;
bufferlist read_bl;
bool is_comp_and_write = false;
std::unique_ptr<WriteLogOperationSet> op_set = nullptr;
C_WriteRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req);
C_WriteRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents,
bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
int fadvise_flags, ceph::mutex &lock, PerfCounters *perfcounter,
Context *user_req);
~C_WriteRequest() override;
void blockguard_acquired(GuardedRequestFunctionContext &guard_ctx);
/* Common finish to plain write and compare-and-write (if it writes) */
void finish_req(int r) override;
/* Compare and write will override this */
virtual void update_req_stats(utime_t &now);
bool alloc_resources() override;
void deferred_handler() override { }
void dispatch() override;
void copy_cache() override;
virtual std::shared_ptr<WriteLogOperation> create_operation(uint64_t offset,
uint64_t len);
virtual void setup_log_operations(DeferredContexts &on_exit);
bool append_write_request(std::shared_ptr<SyncPoint> sync_point);
virtual void schedule_append();
const char *get_name() const override {
return "C_WriteRequest";
}
protected:
using C_BlockIORequest<T>::m_resources;
PerfCounters *m_perfcounter = nullptr;
private:
bool m_do_early_flush = false;
std::atomic<int> m_appended = {0};
bool m_queued = false;
ceph::mutex &m_lock;
template <typename U>
friend std::ostream &operator<<(std::ostream &os,
const C_WriteRequest<U> &req);
};
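/* Sketch of the write path as implemented in Request.cc (ordering details
 * omitted): blockguard_acquired() records the cell, alloc_resources()
 * reserves lanes, log entries and buffer space, dispatch() calls
 * setup_log_operations() to build one WriteLogOperation per extent (and may
 * insert a new sync point), copy_cache() stages the payload into the
 * reserved buffers, schedule_append() hands the operations to the log
 * appender, and finish_req() releases the write lanes and the block guard
 * cell once the write has persisted. */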
/**
* This is the custodian of the BlockGuard cell for this
* aio_flush. Block guard is released as soon as the new
* sync point (if required) is created. Subsequent IOs can
* proceed while this flush waits for prior IOs to complete
* and any required sync points to be persisted.
*/
template <typename T>
class C_FlushRequest : public C_BlockIORequest<T> {
public:
using C_BlockIORequest<T>::pwl;
bool internal = false;
std::shared_ptr<SyncPoint> to_append;
C_FlushRequest(T &pwl, const utime_t arrived,
io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags,
ceph::mutex &lock, PerfCounters *perfcounter,
Context *user_req);
~C_FlushRequest() override {}
bool alloc_resources() override;
void dispatch() override;
const char *get_name() const override {
return "C_FlushRequest";
}
void setup_buffer_resources(
uint64_t *bytes_cached, uint64_t *bytes_dirtied,
uint64_t *bytes_allocated, uint64_t *number_lanes,
uint64_t *number_log_entries,
uint64_t *number_unpublished_reserves) override;
private:
std::shared_ptr<SyncPointLogOperation> op;
ceph::mutex &m_lock;
PerfCounters *m_perfcounter = nullptr;
void finish_req(int r) override;
void deferred_handler() override {
m_perfcounter->inc(l_librbd_pwl_aio_flush_def, 1);
}
template <typename U>
friend std::ostream &operator<<(std::ostream &os,
const C_FlushRequest<U> &req);
};
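/* A flush is cheap on resources: in Request.cc its setup_buffer_resources()
 * asks for a single log entry only, and dispatch() turns the request into
 * one SyncPointLogOperation that is appended to the log. */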
/**
* This is the custodian of the BlockGuard cell for this discard. As in the
* case of write, the block guard is not released until the discard persists
* everywhere.
*/
template <typename T>
class C_DiscardRequest : public C_BlockIORequest<T> {
public:
using C_BlockIORequest<T>::pwl;
std::shared_ptr<DiscardLogOperation> op;
C_DiscardRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents,
uint32_t discard_granularity_bytes, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req);
~C_DiscardRequest() override;
void finish_req(int r) override {}
bool alloc_resources() override;
void deferred_handler() override { }
void setup_log_operations();
void dispatch() override;
void blockguard_acquired(GuardedRequestFunctionContext &guard_ctx);
const char *get_name() const override {
return "C_DiscardRequest";
}
void setup_buffer_resources(
uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated,
uint64_t *number_lanes, uint64_t *number_log_entries,
uint64_t *number_unpublished_reserves) override;
private:
uint32_t m_discard_granularity_bytes;
ceph::mutex &m_lock;
PerfCounters *m_perfcounter = nullptr;
template <typename U>
friend std::ostream &operator<<(std::ostream &os,
const C_DiscardRequest<U> &req);
};
/**
* This is the custodian of the BlockGuard cell for this write same.
*
* A writesame allocates and persists a data buffer like a write, but the
* data buffer is usually much shorter than the write same.
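 *
 * For example, a writesame covering 64 KiB with a 512-byte pattern only
 * needs to allocate and persist a 512-byte buffer in the log.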
*/
template <typename T>
class C_WriteSameRequest : public C_WriteRequest<T> {
public:
using C_BlockIORequest<T>::pwl;
C_WriteSameRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req);
~C_WriteSameRequest() override;
void update_req_stats(utime_t &now) override;
std::shared_ptr<WriteLogOperation> create_operation(uint64_t offset, uint64_t len) override;
const char *get_name() const override {
return "C_WriteSameRequest";
}
template<typename U>
friend std::ostream &operator<<(std::ostream &os,
const C_WriteSameRequest<U> &req);
};
struct BlockGuardReqState {
bool barrier = false; /* This is a barrier request */
bool current_barrier = false; /* This is the currently active barrier */
bool detained = false;
bool queued = false; /* Queued for barrier */
friend std::ostream &operator<<(std::ostream &os,
const BlockGuardReqState &r) {
os << "barrier=" << r.barrier
<< ", current_barrier=" << r.current_barrier
<< ", detained=" << r.detained
<< ", queued=" << r.queued;
return os;
}
};
class GuardedRequestFunctionContext : public Context {
public:
BlockGuardCell *cell = nullptr;
BlockGuardReqState state;
GuardedRequestFunctionContext(boost::function<void(GuardedRequestFunctionContext&)> &&callback)
: m_callback(std::move(callback)){ }
~GuardedRequestFunctionContext(void) override { };
GuardedRequestFunctionContext(const GuardedRequestFunctionContext&) = delete;
GuardedRequestFunctionContext &operator=(const GuardedRequestFunctionContext&) = delete;
private:
boost::function<void(GuardedRequestFunctionContext&)> m_callback;
void finish(int r) override {
ceph_assert(cell);
m_callback(*this);
}
};
class GuardedRequest {
public:
const BlockExtent block_extent;
GuardedRequestFunctionContext *guard_ctx; /* Work to do when guard on range obtained */
GuardedRequest(const BlockExtent block_extent,
GuardedRequestFunctionContext *on_guard_acquire, bool barrier = false)
: block_extent(block_extent), guard_ctx(on_guard_acquire) {
guard_ctx->state.barrier = barrier;
}
friend std::ostream &operator<<(std::ostream &os,
const GuardedRequest &r) {
os << "guard_ctx->state=[" << r.guard_ctx->state
<< "], block_extent.block_start=" << r.block_extent.block_start
<< ", block_extent.block_end=" << r.block_extent.block_end;
return os;
}
};
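/* Illustrative construction of a guarded request: `req` stands for any
 * request type exposing blockguard_acquired() (e.g. C_WriteRequest or
 * C_DiscardRequest), [first_byte, last_byte] is an illustrative byte range,
 * and the detaining call at the end is an assumed helper of the owning write
 * log, named here only as a sketch.
 *
 *   GuardedRequestFunctionContext *guard_ctx =
 *     new GuardedRequestFunctionContext(
 *       [req](GuardedRequestFunctionContext &ctx) {
 *         req->blockguard_acquired(ctx);   // ctx.cell is valid at this point
 *       });
 *   GuardedRequest guarded(BlockExtent(first_byte, last_byte), guard_ctx,
 *                          false);         // barrier = false
 *   // write_log.detain_guarded_request(std::move(guarded));   // assumed
 */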
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_REQUEST_H
| 11,409 | 30.519337 | 115 | h |
null | ceph-main/src/librbd/cache/pwl/ShutdownRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/pwl/ShutdownRequest.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/Operations.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/Types.h"
#include "librbd/cache/pwl/AbstractWriteLog.h"
#include "librbd/plugin/Api.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::ShutdownRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
template <typename I>
ShutdownRequest<I>* ShutdownRequest<I>::create(
I &image_ctx,
AbstractWriteLog<I> *image_cache,
plugin::Api<I>& plugin_api,
Context *on_finish) {
return new ShutdownRequest(image_ctx, image_cache, plugin_api, on_finish);
}
template <typename I>
ShutdownRequest<I>::ShutdownRequest(
I &image_ctx,
AbstractWriteLog<I> *image_cache,
plugin::Api<I>& plugin_api,
Context *on_finish)
: m_image_ctx(image_ctx),
m_image_cache(image_cache),
m_plugin_api(plugin_api),
m_on_finish(create_async_context_callback(image_ctx, on_finish)),
m_error_result(0) {
}
template <typename I>
void ShutdownRequest<I>::send() {
send_shutdown_image_cache();
}
template <typename I>
void ShutdownRequest<I>::send_shutdown_image_cache() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
if (m_image_cache == nullptr) {
finish();
return;
}
using klass = ShutdownRequest<I>;
Context *ctx = create_context_callback<klass, &klass::handle_shutdown_image_cache>(
this);
m_image_cache->shut_down(ctx);
}
template <typename I>
void ShutdownRequest<I>::handle_shutdown_image_cache(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
if (r < 0) {
lderr(cct) << "failed to shut down the image cache: " << cpp_strerror(r)
<< dendl;
save_result(r);
finish();
return;
} else {
delete m_image_cache;
m_image_cache = nullptr;
}
send_remove_feature_bit();
}
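/* With the cache shut down cleanly, the image no longer depends on it: clear
 * RBD_FEATURE_DIRTY_CACHE in the on-disk image header and mirror the change
 * in the in-memory feature mask. */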
template <typename I>
void ShutdownRequest<I>::send_remove_feature_bit() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
uint64_t new_features = m_image_ctx.features & ~RBD_FEATURE_DIRTY_CACHE;
uint64_t features_mask = RBD_FEATURE_DIRTY_CACHE;
ldout(cct, 10) << "old_features=" << m_image_ctx.features
<< ", new_features=" << new_features
<< ", features_mask=" << features_mask
<< dendl;
int r = librbd::cls_client::set_features(&m_image_ctx.md_ctx, m_image_ctx.header_oid,
new_features, features_mask);
m_image_ctx.features &= ~RBD_FEATURE_DIRTY_CACHE;
using klass = ShutdownRequest<I>;
Context *ctx = create_context_callback<klass, &klass::handle_remove_feature_bit>(
this);
ctx->complete(r);
}
template <typename I>
void ShutdownRequest<I>::handle_remove_feature_bit(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
if (r < 0) {
lderr(cct) << "failed to remove the feature bit: " << cpp_strerror(r)
<< dendl;
save_result(r);
finish();
return;
}
send_remove_image_cache_state();
}
template <typename I>
void ShutdownRequest<I>::send_remove_image_cache_state() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
using klass = ShutdownRequest<I>;
Context *ctx = create_context_callback<klass, &klass::handle_remove_image_cache_state>(
this);
std::shared_lock owner_lock{m_image_ctx.owner_lock};
m_plugin_api.execute_image_metadata_remove(&m_image_ctx, PERSISTENT_CACHE_STATE, ctx);
}
template <typename I>
void ShutdownRequest<I>::handle_remove_image_cache_state(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
if (r < 0) {
lderr(cct) << "failed to remove the image cache state: " << cpp_strerror(r)
<< dendl;
save_result(r);
}
finish();
}
template <typename I>
void ShutdownRequest<I>::finish() {
m_on_finish->complete(m_error_result);
delete this;
}
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::ShutdownRequest<librbd::ImageCtx>;
| 4,455 | 26.506173 | 89 | cc |
null | ceph-main/src/librbd/cache/pwl/ShutdownRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_RWL_SHUTDOWN_REQUEST_H
#define CEPH_LIBRBD_CACHE_RWL_SHUTDOWN_REQUEST_H
class Context;
namespace librbd {
class ImageCtx;
namespace plugin { template <typename> struct Api; }
namespace cache {
namespace pwl {
template<typename>
class AbstractWriteLog;
template<typename>
class ImageCacheState;
template <typename ImageCtxT = ImageCtx>
class ShutdownRequest {
public:
static ShutdownRequest* create(
ImageCtxT &image_ctx,
AbstractWriteLog<ImageCtxT> *image_cache,
plugin::Api<ImageCtxT>& plugin_api,
Context *on_finish);
void send();
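  /* Intended usage (sketch): the request is fire-and-forget; it completes
   * on_finish with the first error encountered (or 0) and then deletes
   * itself.
   *
   *   auto *req = ShutdownRequest<ImageCtxT>::create(image_ctx, image_cache,
   *                                                  plugin_api, on_finish);
   *   req->send();
   */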
private:
/**
* @verbatim
*
* Shutdown request goes through the following state machine:
*
* <start>
* |
* v
* SHUTDOWN_IMAGE_CACHE
* |
* v
* REMOVE_IMAGE_FEATURE_BIT
* |
* v
* REMOVE_IMAGE_CACHE_STATE
* |
* v
* <finish>
*
* @endverbatim
*/
ShutdownRequest(ImageCtxT &image_ctx,
AbstractWriteLog<ImageCtxT> *image_cache,
plugin::Api<ImageCtxT>& plugin_api,
Context *on_finish);
ImageCtxT &m_image_ctx;
AbstractWriteLog<ImageCtxT> *m_image_cache;
plugin::Api<ImageCtxT>& m_plugin_api;
Context *m_on_finish;
int m_error_result;
void send_shutdown_image_cache();
void handle_shutdown_image_cache(int r);
void send_remove_feature_bit();
void handle_remove_feature_bit(int r);
void send_remove_image_cache_state();
void handle_remove_image_cache_state(int r);
void finish();
void save_result(int result) {
if (m_error_result == 0 && result < 0) {
m_error_result = result;
}
}
};
} // namespace pwl
} // namespace cache
} // namespace librbd
extern template class librbd::cache::pwl::ShutdownRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_RWL_SHUTDOWN_REQUEST_H
| 1,913 | 18.9375 | 76 | h |
null | ceph-main/src/librbd/cache/pwl/SyncPoint.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "SyncPoint.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::SyncPoint: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
SyncPoint::SyncPoint(uint64_t sync_gen_num, CephContext *cct)
: log_entry(std::make_shared<SyncPointLogEntry>(sync_gen_num)), m_cct(cct) {
m_prior_log_entries_persisted = new C_Gather(cct, nullptr);
m_sync_point_persist = new C_Gather(cct, nullptr);
on_sync_point_appending.reserve(MAX_WRITES_PER_SYNC_POINT + 2);
on_sync_point_persisted.reserve(MAX_WRITES_PER_SYNC_POINT + 2);
ldout(m_cct, 20) << "sync point " << sync_gen_num << dendl;
}
SyncPoint::~SyncPoint() {
ceph_assert(on_sync_point_appending.empty());
ceph_assert(on_sync_point_persisted.empty());
ceph_assert(!earlier_sync_point);
}
std::ostream &operator<<(std::ostream &os,
const SyncPoint &p) {
os << "log_entry=[" << *p.log_entry
<< "], earlier_sync_point=" << p.earlier_sync_point
<< ", later_sync_point=" << p.later_sync_point
<< ", m_final_op_sequence_num=" << p.m_final_op_sequence_num
<< ", m_prior_log_entries_persisted=" << p.m_prior_log_entries_persisted
<< ", m_prior_log_entries_persisted_complete=" << p.m_prior_log_entries_persisted_complete
<< ", m_append_scheduled=" << p.m_append_scheduled
<< ", appending=" << p.appending
<< ", on_sync_point_appending=" << p.on_sync_point_appending.size()
<< ", on_sync_point_persisted=" << p.on_sync_point_persisted.size();
return os;
}
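/* Record that this sync point has been scheduled for append and install the
 * context to run once the persist gather completes. The walk over earlier
 * sync points only asserts that they were scheduled first, preserving append
 * order. */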
void SyncPoint::persist_gather_set_finisher(Context *ctx) {
m_append_scheduled = true;
  /* All earlier sync points still linked in the chain must already be scheduled for append */
std::shared_ptr<SyncPoint> previous = earlier_sync_point;
while (previous) {
ceph_assert(previous->m_append_scheduled);
previous = previous->earlier_sync_point;
}
m_sync_point_persist->set_finisher(ctx);
}
void SyncPoint::persist_gather_activate() {
m_sync_point_persist->activate();
}
Context* SyncPoint::persist_gather_new_sub() {
return m_sync_point_persist->new_sub();
}
void SyncPoint::prior_persisted_gather_activate() {
m_prior_log_entries_persisted->activate();
}
Context* SyncPoint::prior_persisted_gather_new_sub() {
return m_prior_log_entries_persisted->new_sub();
}
void SyncPoint::prior_persisted_gather_set_finisher() {
Context *sync_point_persist_ready = persist_gather_new_sub();
std::shared_ptr<SyncPoint> sp = shared_from_this();
m_prior_log_entries_persisted->
set_finisher(new LambdaContext([this, sp, sync_point_persist_ready](int r) {
ldout(m_cct, 20) << "Prior log entries persisted for sync point =["
<< sp << "]" << dendl;
sp->m_prior_log_entries_persisted_result = r;
sp->m_prior_log_entries_persisted_complete = true;
sync_point_persist_ready->complete(r);
}));
}
void SyncPoint::add_in_on_persisted_ctxs(Context* ctx) {
on_sync_point_persisted.push_back(ctx);
}
void SyncPoint::add_in_on_appending_ctxs(Context* ctx) {
on_sync_point_appending.push_back(ctx);
}
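/* Link this sync point behind `sync_point`: the earlier point records this
 * one as its successor along with the final op sequence number it covers. If
 * the earlier point has not started appending yet, a sub-op of this point's
 * prior-log-entries gather is added to the earlier point's appending list,
 * deferring this append until the earlier one is under way. */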
void SyncPoint::setup_earlier_sync_point(std::shared_ptr<SyncPoint> sync_point,
uint64_t last_op_sequence_num) {
earlier_sync_point = sync_point;
log_entry->prior_sync_point_flushed = false;
earlier_sync_point->log_entry->next_sync_point_entry = log_entry;
earlier_sync_point->later_sync_point = shared_from_this();
earlier_sync_point->m_final_op_sequence_num = last_op_sequence_num;
if (!earlier_sync_point->appending) {
/* Append of new sync point deferred until old sync point is appending */
earlier_sync_point->add_in_on_appending_ctxs(prior_persisted_gather_new_sub());
}
}
} // namespace pwl
} // namespace cache
} // namespace librbd
| 4,009 | 35.454545 | 95 | cc |
null | ceph-main/src/librbd/cache/pwl/SyncPoint.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_RWL_SYNC_POINT_H
#define CEPH_LIBRBD_CACHE_RWL_SYNC_POINT_H
#include "librbd/ImageCtx.h"
#include "librbd/cache/pwl/LogEntry.h"
#include "librbd/cache/pwl/Types.h"
namespace librbd {
namespace cache {
namespace pwl {
class SyncPoint: public std::enable_shared_from_this<SyncPoint> {
public:
std::shared_ptr<SyncPointLogEntry> log_entry;
/* Use lock for earlier/later links */
std::shared_ptr<SyncPoint> earlier_sync_point; /* NULL if earlier has completed */
std::shared_ptr<SyncPoint> later_sync_point;
bool appending = false;
/* Signal these when this sync point is appending to the log, and its order
   * of appearance is guaranteed. One of these is a sub-operation of the
* next sync point's m_prior_log_entries_persisted Gather. */
std::vector<Context*> on_sync_point_appending;
/* Signal these when this sync point is appended and persisted. User
* aio_flush() calls are added to this. */
std::vector<Context*> on_sync_point_persisted;
SyncPoint(uint64_t sync_gen_num, CephContext *cct);
~SyncPoint();
SyncPoint(const SyncPoint&) = delete;
SyncPoint &operator=(const SyncPoint&) = delete;
void persist_gather_activate();
Context* persist_gather_new_sub();
void persist_gather_set_finisher(Context *ctx);
void prior_persisted_gather_activate();
Context* prior_persisted_gather_new_sub();
void prior_persisted_gather_set_finisher();
void add_in_on_persisted_ctxs(Context* cxt);
void add_in_on_appending_ctxs(Context* cxt);
void setup_earlier_sync_point(std::shared_ptr<SyncPoint> sync_point,
uint64_t last_op_sequence_num);
private:
CephContext *m_cct;
bool m_append_scheduled = false;
uint64_t m_final_op_sequence_num = 0;
/* A sync point can't appear in the log until all the writes bearing
* it and all the prior sync points have been appended and
* persisted.
*
* Writes bearing this sync gen number and the prior sync point will be
* sub-ops of this Gather. This sync point will not be appended until all
* these complete to the point where their persist order is guaranteed. */
C_Gather *m_prior_log_entries_persisted;
/* The finisher for this will append the sync point to the log. The finisher
* for m_prior_log_entries_persisted will be a sub-op of this. */
C_Gather *m_sync_point_persist;
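  /* The two gathers chain together: the finisher of
   * m_prior_log_entries_persisted completes one sub-op of
   * m_sync_point_persist, so this sync point is only appended once every log
   * entry it depends on has persisted. */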
int m_prior_log_entries_persisted_result = 0;
  bool m_prior_log_entries_persisted_complete = false;
friend std::ostream &operator<<(std::ostream &os,
const SyncPoint &p);
};
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_RWL_SYNC_POINT_H
| 2,776 | 38.671429 | 84 | h |
null | ceph-main/src/librbd/cache/pwl/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include "Types.h"
#include "common/ceph_context.h"
#include "include/Context.h"
#include "include/stringify.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::Types: " << this << " " \
<< __func__ << ": "
using ceph::Formatter;
namespace librbd {
namespace cache {
namespace pwl {
DeferredContexts::~DeferredContexts() {
finish_contexts(nullptr, contexts, 0);
}
void DeferredContexts::add(Context* ctx) {
contexts.push_back(ctx);
}
/*
* A BlockExtent identifies a range by first and last.
*
* An Extent ("image extent") identifies a range by start and length.
*
* The ImageDispatch interface is defined in terms of image extents, and
* requires no alignment of the beginning or end of the extent. We
* convert between image and block extents here using a "block size"
* of 1.
*/
BlockExtent convert_to_block_extent(uint64_t offset_bytes, uint64_t length_bytes)
{
return BlockExtent(offset_bytes,
offset_bytes + length_bytes);
}
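/* For example, the image extent {offset=4096, length=8192} converts to
 * BlockExtent(4096, 12288). */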
BlockExtent WriteLogCacheEntry::block_extent() {
return convert_to_block_extent(image_offset_bytes, write_bytes);
}
uint64_t WriteLogCacheEntry::get_offset_bytes() {
return image_offset_bytes;
}
uint64_t WriteLogCacheEntry::get_write_bytes() {
return write_bytes;
}
#ifdef WITH_RBD_SSD_CACHE
void WriteLogCacheEntry::dump(Formatter *f) const {
f->dump_unsigned("sync_gen_number", sync_gen_number);
f->dump_unsigned("write_sequence_number", write_sequence_number);
f->dump_unsigned("image_offset_bytes", image_offset_bytes);
f->dump_unsigned("write_bytes", write_bytes);
f->dump_unsigned("write_data_pos", write_data_pos);
f->dump_bool("entry_valid", is_entry_valid());
f->dump_bool("sync_point", is_sync_point());
f->dump_bool("sequenced", is_sequenced());
f->dump_bool("has_data", has_data());
f->dump_bool("discard", is_discard());
f->dump_bool("writesame", is_writesame());
f->dump_unsigned("ws_datalen", ws_datalen);
f->dump_unsigned("entry_index", entry_index);
}
void WriteLogCacheEntry::generate_test_instances(std::list<WriteLogCacheEntry*>& ls) {
ls.push_back(new WriteLogCacheEntry());
ls.push_back(new WriteLogCacheEntry);
ls.back()->sync_gen_number = 1;
ls.back()->write_sequence_number = 1;
ls.back()->image_offset_bytes = 1;
ls.back()->write_bytes = 1;
ls.back()->write_data_pos = 1;
ls.back()->set_entry_valid(true);
ls.back()->set_sync_point(true);
ls.back()->set_sequenced(true);
ls.back()->set_has_data(true);
ls.back()->set_discard(true);
ls.back()->set_writesame(true);
ls.back()->ws_datalen = 1;
ls.back()->entry_index = 1;
}
void WriteLogPoolRoot::dump(Formatter *f) const {
f->dump_unsigned("layout_version", layout_version);
f->dump_unsigned("cur_sync_gen", cur_sync_gen);
f->dump_unsigned("pool_size", pool_size);
f->dump_unsigned("flushed_sync_gen", flushed_sync_gen);
f->dump_unsigned("block_size", block_size);
f->dump_unsigned("num_log_entries", num_log_entries);
f->dump_unsigned("first_free_entry", first_free_entry);
f->dump_unsigned("first_valid_entry", first_valid_entry);
}
void WriteLogPoolRoot::generate_test_instances(std::list<WriteLogPoolRoot*>& ls) {
ls.push_back(new WriteLogPoolRoot());
ls.push_back(new WriteLogPoolRoot);
ls.back()->layout_version = 2;
ls.back()->cur_sync_gen = 1;
ls.back()->pool_size = 1024;
ls.back()->flushed_sync_gen = 1;
ls.back()->block_size = 4096;
ls.back()->num_log_entries = 10000000;
ls.back()->first_free_entry = 1;
ls.back()->first_valid_entry = 0;
}
#endif
std::ostream& operator<<(std::ostream& os,
const WriteLogCacheEntry &entry) {
os << "entry_valid=" << entry.is_entry_valid()
<< ", sync_point=" << entry.is_sync_point()
<< ", sequenced=" << entry.is_sequenced()
<< ", has_data=" << entry.has_data()
<< ", discard=" << entry.is_discard()
<< ", writesame=" << entry.is_writesame()
<< ", sync_gen_number=" << entry.sync_gen_number
<< ", write_sequence_number=" << entry.write_sequence_number
<< ", image_offset_bytes=" << entry.image_offset_bytes
<< ", write_bytes=" << entry.write_bytes
<< ", ws_datalen=" << entry.ws_datalen
<< ", entry_index=" << entry.entry_index;
return os;
}
template <typename ExtentsType>
ExtentsSummary<ExtentsType>::ExtentsSummary(const ExtentsType &extents)
: total_bytes(0), first_image_byte(0), last_image_byte(0)
{
if (extents.empty()) return;
/* These extents refer to image offsets between first_image_byte
* and last_image_byte, inclusive, but we don't guarantee here
* that they address all of those bytes. There may be gaps. */
first_image_byte = extents.front().first;
last_image_byte = first_image_byte + extents.front().second;
for (auto &extent : extents) {
/* Ignore zero length extents */
if (extent.second) {
total_bytes += extent.second;
if (extent.first < first_image_byte) {
first_image_byte = extent.first;
}
if ((extent.first + extent.second) > last_image_byte) {
last_image_byte = extent.first + extent.second;
}
}
}
}
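/* For example, the extents {{0, 4096}, {8192, 4096}} summarize to
 * total_bytes=8192, first_image_byte=0, last_image_byte=12288; the gap
 * between the two extents is not reflected in the summary. */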
io::Extent whole_volume_extent() {
return io::Extent({0, std::numeric_limits<uint64_t>::max()});
}
BlockExtent block_extent(const io::Extent& image_extent) {
return convert_to_block_extent(image_extent.first, image_extent.second);
}
Context * override_ctx(int r, Context *ctx) {
if (r < 0) {
/* Override next_ctx status with this error */
return new LambdaContext(
[r, ctx](int _r) {
ctx->complete(r);
});
} else {
return ctx;
}
}
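/* For example, override_ctx(-EIO, ctx) returns a wrapper that completes ctx
 * with -EIO regardless of the result the wrapper itself is completed with,
 * while override_ctx(0, ctx) returns ctx unchanged. */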
std::string unique_lock_name(const std::string &name, void *address) {
return name + " (" + stringify(address) + ")";
}
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::ExtentsSummary<librbd::io::Extents>;
| 6,030 | 31.424731 | 86 | cc |