repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null | ceph-main/src/librbd/image/DetachParentRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_DETACH_PARENT_REQUEST_H
#define CEPH_LIBRBD_IMAGE_DETACH_PARENT_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "librbd/Types.h"
class Context;
namespace librbd {
class ImageCtx;
namespace image {
template <typename ImageCtxT = ImageCtx>
class DetachParentRequest {
public:
// Factory helper returning a heap-allocated request; start it with send().
static DetachParentRequest* create(ImageCtxT& image_ctx, Context* on_finish) {
return new DetachParentRequest(image_ctx, on_finish);
}
DetachParentRequest(ImageCtxT& image_ctx, Context* on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish) {
}
// Begin the asynchronous detach-parent state machine (see diagram below).
void send();
private:
/**
* @verbatim
*
* <start>
* | * * * * * *
* | * * -EOPNOTSUPP
* v v *
* DETACH_PARENT * * *
* |
* v
* <finish>
*
* @endverbatim
*/
ImageCtxT& m_image_ctx;
Context* m_on_finish;
// NOTE(review): presumably flipped when the OSD returns -EOPNOTSUPP (per the
// diagram) so a legacy parent-removal op can be retried -- confirm in the .cc
bool m_legacy_parent = false;
void detach_parent();
void handle_detach_parent(int r);
void finish(int r);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::DetachParentRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_DETACH_PARENT_REQUEST_H
| 1,326 | 18.80597 | 80 | h |
null | ceph-main/src/librbd/image/GetMetadataRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/GetMetadataRequest.h"
#include "cls/rbd/cls_rbd_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include <boost/algorithm/string/predicate.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::GetMetadataRequest: " \
<< this << " " << __func__ << ": "
#define MAX_KEYS 64U
namespace librbd {
namespace image {
namespace {
static const std::string INTERNAL_KEY_PREFIX{".rbd"};
} // anonymous namespace
using util::create_rados_callback;
// Capture the query parameters; m_cct is recovered from the IoCtx so that
// logging works without access to an ImageCtx.
template <typename I>
GetMetadataRequest<I>::GetMetadataRequest(
IoCtx &io_ctx, const std::string &oid, bool filter_internal,
const std::string& filter_key_prefix, const std::string& last_key,
uint32_t max_results, KeyValues* key_values, Context *on_finish)
: m_io_ctx(io_ctx), m_oid(oid), m_filter_internal(filter_internal),
m_filter_key_prefix(filter_key_prefix), m_last_key(last_key),
m_max_results(max_results), m_key_values(key_values),
m_on_finish(on_finish),
m_cct(reinterpret_cast<CephContext*>(m_io_ctx.cct())) {
}
// Kick off the first (possibly only) metadata_list round trip.
template <typename I>
void GetMetadataRequest<I>::send() {
metadata_list();
}
// Issue one paged metadata_list read starting after m_last_key.  Each page
// asks for at most MAX_KEYS entries, clamped further so the accumulated
// results never exceed m_max_results (0 == unlimited).
template <typename I>
void GetMetadataRequest<I>::metadata_list() {
ldout(m_cct, 15) << "start_key=" << m_last_key << dendl;
m_expected_results = MAX_KEYS;
if (m_max_results > 0) {
m_expected_results = std::min<uint32_t>(
m_expected_results, m_max_results - m_key_values->size());
}
librados::ObjectReadOperation op;
cls_client::metadata_list_start(&op, m_last_key, m_expected_results);
auto aio_comp = create_rados_callback<
GetMetadataRequest<I>, &GetMetadataRequest<I>::handle_metadata_list>(this);
// reset the reply buffer before reuse across pages
m_out_bl.clear();
m_io_ctx.aio_operate(m_oid, aio_comp, &op, &m_out_bl);
aio_comp->release();
}
// Decode one page of metadata, apply the configured key filters, and either
// request the next page or complete the request.
template <typename I>
void GetMetadataRequest<I>::handle_metadata_list(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  KeyValues batch;
  if (r == 0) {
    auto iter = m_out_bl.cbegin();
    r = cls_client::metadata_list_finish(&iter, &batch);
  }

  if (r == -ENOENT || r == -EOPNOTSUPP) {
    // missing object / OSD too old: treat as an empty result set
    finish(0);
    return;
  }
  if (r < 0) {
    lderr(m_cct) << "failed to retrieve image metadata: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }

  for (auto& [key, value] : batch) {
    bool is_internal = m_filter_internal &&
                       boost::starts_with(key, INTERNAL_KEY_PREFIX);
    bool prefix_mismatch = !m_filter_key_prefix.empty() &&
                           !boost::starts_with(key, m_filter_key_prefix);
    if (is_internal || prefix_mismatch) {
      continue;
    }
    m_key_values->insert({key, std::move(value)});
  }

  // remember where this page ended so the next page resumes after it
  if (!batch.empty()) {
    m_last_key = batch.rbegin()->first;
  }

  bool page_was_full = (batch.size() == m_expected_results);
  bool want_more = (m_max_results == 0 || m_key_values->size() < m_max_results);
  if (page_was_full && want_more) {
    metadata_list();
    return;
  }
  finish(0);
}
// Complete the caller's context and self-destruct; nothing may touch 'this'
// after delete, so this must be the final step of the state machine.
template <typename I>
void GetMetadataRequest<I>::finish(int r) {
ldout(m_cct, 15) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace image
} // namespace librbd
template class librbd::image::GetMetadataRequest<librbd::ImageCtx>;
| 3,391 | 26.803279 | 79 | cc |
null | ceph-main/src/librbd/image/GetMetadataRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_GET_METADATA_REQUEST_H
#define CEPH_LIBRBD_IMAGE_GET_METADATA_REQUEST_H
#include "include/common_fwd.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include <string>
#include <map>
class Context;
namespace librbd {
struct ImageCtx;
namespace image {
template <typename ImageCtxT = ImageCtx>
class GetMetadataRequest {
public:
typedef std::map<std::string, bufferlist> KeyValues;
// Factory helper; the request is heap-allocated and started via send().
static GetMetadataRequest* create(
IoCtx &io_ctx, const std::string &oid, bool filter_internal,
const std::string& filter_key_prefix, const std::string& last_key,
uint32_t max_results, KeyValues* key_values, Context *on_finish) {
return new GetMetadataRequest(io_ctx, oid, filter_internal,
filter_key_prefix, last_key, max_results,
key_values, on_finish);
}
GetMetadataRequest(
IoCtx &io_ctx, const std::string &oid, bool filter_internal,
const std::string& filter_key_prefix, const std::string& last_key,
uint32_t max_results, KeyValues* key_values, Context *on_finish);
// Start the paged METADATA_LIST loop (see diagram below).
void send();
private:
/**
* @verbatim
*
* <start>
* |
* | /-------\
* | | |
* v v |
* METADATA_LIST ---/
* |
* v
* <finish>
*
* @endverbatim
*/
librados::IoCtx m_io_ctx;
std::string m_oid;
bool m_filter_internal;        // drop keys beginning with the internal prefix
std::string m_filter_key_prefix; // when non-empty, keep only matching keys
std::string m_last_key;        // resume point for the next page
uint32_t m_max_results;        // 0 == unlimited
KeyValues* m_key_values;       // caller-owned accumulator for results
Context* m_on_finish;
CephContext* m_cct;
bufferlist m_out_bl;           // reply buffer, cleared before each page
uint32_t m_expected_results = 0; // page size requested from the OSD
void metadata_list();
void handle_metadata_list(int r);
void finish(int r);
};
} //namespace image
} //namespace librbd
extern template class librbd::image::GetMetadataRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_GET_METADATA_REQUEST_H
| 2,014 | 22.988095 | 75 | h |
null | ceph-main/src/librbd/image/ListWatchersRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ListWatchersRequest.h"
#include "common/RWLock.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Utils.h"
#include <algorithm>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::ListWatchersRequest: " << this \
<< " " << __func__ << ": "
// Render an obj_watch_t as "{addr=..., watcher_id=..., cookie=...}" for logs.
static std::ostream& operator<<(std::ostream& os, const obj_watch_t& watch) {
  return os << "{addr=" << watch.addr << ", "
            << "watcher_id=" << watch.watcher_id << ", "
            << "cookie=" << watch.cookie << "}";
}
namespace librbd {
namespace image {
using librados::IoCtx;
using util::create_rados_callback;
// 'flags' is a combination of the LIST_WATCHERS_* bits; filtering out mirror
// instances and keeping only mirror instances are mutually exclusive.
template<typename I>
ListWatchersRequest<I>::ListWatchersRequest(I &image_ctx, int flags,
std::list<obj_watch_t> *watchers,
Context *on_finish)
: m_image_ctx(image_ctx), m_flags(flags), m_watchers(watchers),
m_on_finish(on_finish), m_cct(m_image_ctx.cct) {
ceph_assert((m_flags & LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES) == 0 ||
(m_flags & LIST_WATCHERS_MIRROR_INSTANCES_ONLY) == 0);
}
// Start the state machine: image watchers first, then (optionally) mirror.
template<typename I>
void ListWatchersRequest<I>::send() {
ldout(m_cct, 20) << dendl;
list_image_watchers();
}
// Query the watchers registered on the image header object.  The per-op
// result lands in m_ret_val; the overall aio result is handled separately.
template<typename I>
void ListWatchersRequest<I>::list_image_watchers() {
ldout(m_cct, 20) << dendl;
librados::ObjectReadOperation op;
op.list_watchers(&m_object_watchers, &m_ret_val);
using klass = ListWatchersRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_list_image_watchers>(this);
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
rados_completion, &op, &m_out_bl);
ceph_assert(r == 0);
rados_completion->release();
}
// Any failure listing the image's own watchers is fatal for the request.
template<typename I>
void ListWatchersRequest<I>::handle_list_image_watchers(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
// prefer the per-op error if the overall operation "succeeded"
if (r == 0 && m_ret_val < 0) {
r = m_ret_val;
}
if (r < 0) {
lderr(m_cct) << "error listing image watchers: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
ldout(m_cct, 20) << "object_watchers=" << m_object_watchers << dendl;
list_mirror_watchers();
}
// Query the watchers on the RBD_MIRRORING object, but only when there are
// image watchers to classify AND the caller asked for mirror-based filtering.
template<typename I>
void ListWatchersRequest<I>::list_mirror_watchers() {
if ((m_object_watchers.empty()) ||
(m_flags & (LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES |
LIST_WATCHERS_MIRROR_INSTANCES_ONLY)) == 0) {
finish(0);
return;
}
ldout(m_cct, 20) << dendl;
librados::ObjectReadOperation op;
op.list_watchers(&m_mirror_watchers, &m_ret_val);
using klass = ListWatchersRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_list_mirror_watchers>(this);
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, rados_completion,
&op, &m_out_bl);
ceph_assert(r == 0);
rados_completion->release();
}
// Mirror-watcher failures are non-fatal: they are logged and the request
// still completes successfully (with an empty mirror list).
template<typename I>
void ListWatchersRequest<I>::handle_list_mirror_watchers(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
if (r == 0 && m_ret_val < 0) {
r = m_ret_val;
}
if (r < 0 && r != -ENOENT) {
ldout(m_cct, 1) << "error listing mirror watchers: " << cpp_strerror(r)
<< dendl;
}
ldout(m_cct, 20) << "mirror_watchers=" << m_mirror_watchers << dendl;
finish(0);
}
// Apply the requested filters to the raw watcher list, publish the result to
// the caller's list, complete the context and self-destruct.
//
// Filters (all driven by m_flags):
//   * FILTER_OUT_MY_INSTANCE      - drop the watch owned by this ImageCtx
//                                   (matched by watch cookie).
//   * FILTER_OUT_MIRROR_INSTANCES - drop watchers whose address also appears
//                                   on the RBD_MIRRORING object.
//   * MIRROR_INSTANCES_ONLY       - keep only such watchers.
template<typename I>
void ListWatchersRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r == 0) {
    m_watchers->clear();

    if (m_object_watchers.size() > 0) {
      std::shared_lock owner_locker{m_image_ctx.owner_lock};
      // cookie of our own watch (0 if we are not watching)
      uint64_t watch_handle = m_image_ctx.image_watcher != nullptr ?
        m_image_ctx.image_watcher->get_watch_handle() : 0;

      for (auto &w : m_object_watchers) {
        if ((m_flags & LIST_WATCHERS_FILTER_OUT_MY_INSTANCE) != 0) {
          if (w.cookie == watch_handle) {
            ldout(m_cct, 20) << "filtering out my instance: " << w << dendl;
            continue;
          }
        }

        // Is this watcher also a mirror-daemon instance?  Match by address.
        // Fixed from the original: capture 'w' by reference and take the
        // element by const reference -- the old code copied the entire
        // obj_watch_t (including its fixed-size address buffer) into the
        // lambda for every watcher.
        auto it = std::find_if(m_mirror_watchers.begin(),
                               m_mirror_watchers.end(),
                               [&w] (const obj_watch_t &watcher) {
                                 return (strncmp(w.addr, watcher.addr,
                                                 sizeof(w.addr)) == 0);
                               });
        if ((m_flags & LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES) != 0) {
          if (it != m_mirror_watchers.end()) {
            ldout(m_cct, 20) << "filtering out mirror instance: " << w << dendl;
            continue;
          }
        } else if ((m_flags & LIST_WATCHERS_MIRROR_INSTANCES_ONLY) != 0) {
          if (it == m_mirror_watchers.end()) {
            ldout(m_cct, 20) << "filtering out non-mirror instance: " << w
                             << dendl;
            continue;
          }
        }

        m_watchers->push_back(w);
      }
    }
  }

  // complete the caller and self-destruct; nothing may follow the delete
  m_on_finish->complete(r);
  delete this;
}
} // namespace image
} // namespace librbd
template class librbd::image::ListWatchersRequest<librbd::ImageCtx>;
| 5,382 | 29.76 | 80 | cc |
null | ceph-main/src/librbd/image/ListWatchersRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_LIST_WATCHERS_REQUEST_H
#define CEPH_LIBRBD_IMAGE_LIST_WATCHERS_REQUEST_H
#include "include/rados/rados_types.hpp"
#include <list>
class Context;
namespace librbd {
class ImageCtx;
namespace image {
// Bitmask flags controlling which watchers ListWatchersRequest reports.
// NOTE(review): bit 2 (1 << 2) is skipped -- presumably reserved or removed
// historically; confirm before reusing it.
enum {
  LIST_WATCHERS_FILTER_OUT_MY_INSTANCE = 1 << 0,
  LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES = 1 << 1,
  LIST_WATCHERS_MIRROR_INSTANCES_ONLY = 1 << 3,
};
template<typename ImageCtxT = ImageCtx>
class ListWatchersRequest {
public:
  // Factory helper; the request is heap-allocated and started via send().
  static ListWatchersRequest *create(ImageCtxT &image_ctx, int flags,
                                     std::list<obj_watch_t> *watchers,
                                     Context *on_finish) {
    return new ListWatchersRequest(image_ctx, flags, watchers, on_finish);
  }
  // Begin the asynchronous state machine (see diagram below).
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   * |
   * v
   * LIST_IMAGE_WATCHERS
   * |
   * v
   * LIST_MIRROR_WATCHERS (skip if not needed)
   * |
   * v
   * <finish>
   *
   * @endverbatim
   */
  ListWatchersRequest(ImageCtxT &image_ctx, int flags, std::list<obj_watch_t> *watchers,
                      Context *on_finish);
  ImageCtxT& m_image_ctx;
  int m_flags;                        // LIST_WATCHERS_* bitmask
  std::list<obj_watch_t> *m_watchers; // caller-owned output list
  Context *m_on_finish;
  CephContext *m_cct;
  // Per-op return value filled in by librados' list_watchers.  Zero-initialized
  // (fix: previously left indeterminate until the op completed).
  int m_ret_val = 0;
  bufferlist m_out_bl;
  std::list<obj_watch_t> m_object_watchers; // watchers on the image header
  std::list<obj_watch_t> m_mirror_watchers; // watchers on RBD_MIRRORING
  void list_image_watchers();
  void handle_list_image_watchers(int r);
  void list_mirror_watchers();
  void handle_list_mirror_watchers(int r);
  void finish(int r);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::ListWatchersRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_LIST_WATCHERS_REQUEST_H
| 1,824 | 20.987952 | 88 | h |
null | ceph-main/src/librbd/image/OpenRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/OpenRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ConfigWatcher.h"
#include "librbd/ImageCtx.h"
#include "librbd/PluginRegistry.h"
#include "librbd/Utils.h"
#include "librbd/cache/ObjectCacherObjectDispatch.h"
#include "librbd/cache/WriteAroundObjectDispatch.h"
#include "librbd/image/CloseRequest.h"
#include "librbd/image/RefreshRequest.h"
#include "librbd/image/SetSnapRequest.h"
#include "librbd/io/SimpleSchedulerObjectDispatch.h"
#include <boost/algorithm/string/predicate.hpp>
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::OpenRequest: "
namespace librbd {
namespace image {
using util::create_context_callback;
using util::create_rados_callback;
// 'flags' is a bitmask of OPEN_FLAG_* values: force v1 format detection,
// skip opening the parent image, or ignore an in-progress migration.
template <typename I>
OpenRequest<I>::OpenRequest(I *image_ctx, uint64_t flags,
Context *on_finish)
: m_image_ctx(image_ctx),
m_skip_open_parent_image(flags & OPEN_FLAG_SKIP_OPEN_PARENT),
m_on_finish(on_finish), m_error_result(0) {
if ((flags & OPEN_FLAG_OLD_FORMAT) != 0) {
m_image_ctx->old_format = true;
}
if ((flags & OPEN_FLAG_IGNORE_MIGRATING) != 0) {
m_image_ctx->ignore_migrating = true;
}
}
// Dispatch to the v1 or v2 header-detection path.
template <typename I>
void OpenRequest<I>::send() {
if (m_image_ctx->old_format) {
send_v1_detect_header();
} else {
send_v2_detect_header();
}
}
// Probe for a format-1 header object by stat'ing the old-style header name.
template <typename I>
void OpenRequest<I>::send_v1_detect_header() {
librados::ObjectReadOperation op;
op.stat(NULL, NULL, NULL);
using klass = OpenRequest<I>;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_v1_detect_header>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(util::old_header_name(m_image_ctx->name),
comp, &op, &m_out_bl);
comp->release();
}
// On success, configure the context for a (deprecated) v1 image and refresh;
// on any error, abort the open via send_close_image.
template <typename I>
Context *OpenRequest<I>::handle_v1_detect_header(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
if (*result < 0) {
// ENOENT (no v1 header either) is expected and not worth an error log
if (*result != -ENOENT) {
lderr(cct) << "failed to stat image header: " << cpp_strerror(*result)
<< dendl;
}
send_close_image(*result);
} else {
ldout(cct, 1) << "RBD image format 1 is deprecated. "
<< "Please copy this image to image format 2." << dendl;
m_image_ctx->old_format = true;
m_image_ctx->header_oid = util::old_header_name(m_image_ctx->name);
m_image_ctx->apply_metadata({}, true);
send_refresh();
}
return nullptr;
}
// Probe for a v2 image: if only the name is known, stat the id object to
// learn whether a v2 image with this name exists; if the id is already known,
// resolve the name instead.
template <typename I>
void OpenRequest<I>::send_v2_detect_header() {
if (m_image_ctx->id.empty()) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
librados::ObjectReadOperation op;
op.stat(NULL, NULL, NULL);
using klass = OpenRequest<I>;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_v2_detect_header>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(util::id_obj_name(m_image_ctx->name),
comp, &op, &m_out_bl);
comp->release();
} else {
send_v2_get_name();
}
}
// ENOENT means "not a v2 image" -> fall back to the v1 probe.
template <typename I>
Context *OpenRequest<I>::handle_v2_detect_header(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
if (*result == -ENOENT) {
send_v1_detect_header();
} else if (*result < 0) {
lderr(cct) << "failed to stat v2 image header: " << cpp_strerror(*result)
<< dendl;
send_close_image(*result);
} else {
m_image_ctx->old_format = false;
send_v2_get_id();
}
return nullptr;
}
// Read the image id from the per-image id object (name -> id mapping).
template <typename I>
void OpenRequest<I>::send_v2_get_id() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
librados::ObjectReadOperation op;
cls_client::get_id_start(&op);
using klass = OpenRequest<I>;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_v2_get_id>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(util::id_obj_name(m_image_ctx->name),
comp, &op, &m_out_bl);
comp->release();
}
// Decode the id and proceed to the initial metadata read; any failure aborts.
template <typename I>
Context *OpenRequest<I>::handle_v2_get_id(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
if (*result == 0) {
auto it = m_out_bl.cbegin();
*result = cls_client::get_id_finish(&it, &m_image_ctx->id);
}
if (*result < 0) {
lderr(cct) << "failed to retrieve image id: " << cpp_strerror(*result)
<< dendl;
send_close_image(*result);
} else {
send_v2_get_initial_metadata();
}
return nullptr;
}
// Resolve the image name from its id via the pool's rbd directory object.
template <typename I>
void OpenRequest<I>::send_v2_get_name() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
librados::ObjectReadOperation op;
cls_client::dir_get_name_start(&op, m_image_ctx->id);
using klass = OpenRequest<I>;
librados::AioCompletion *comp = create_rados_callback<
klass, &klass::handle_v2_get_name>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(RBD_DIRECTORY, comp, &op, &m_out_bl);
comp->release();
}
// If the id isn't in the directory the image may be in the trash, so fall
// through to a trash lookup rather than failing immediately.
template <typename I>
Context *OpenRequest<I>::handle_v2_get_name(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
if (*result == 0) {
auto it = m_out_bl.cbegin();
*result = cls_client::dir_get_name_finish(&it, &m_image_ctx->name);
}
if (*result < 0 && *result != -ENOENT) {
lderr(cct) << "failed to retrieve name: "
<< cpp_strerror(*result) << dendl;
send_close_image(*result);
} else if (*result == -ENOENT) {
// image does not exist in directory, look in the trash bin
ldout(cct, 10) << "image id " << m_image_ctx->id << " does not exist in "
<< "rbd directory, searching in rbd trash..." << dendl;
send_v2_get_name_from_trash();
} else {
send_v2_get_initial_metadata();
}
return nullptr;
}
// Fallback name resolution: look the image id up in the pool's trash object.
template <typename I>
void OpenRequest<I>::send_v2_get_name_from_trash() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
librados::ObjectReadOperation op;
cls_client::trash_get_start(&op, m_image_ctx->id);
using klass = OpenRequest<I>;
librados::AioCompletion *comp = create_rados_callback<
klass, &klass::handle_v2_get_name_from_trash>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(RBD_TRASH, comp, &op, &m_out_bl);
comp->release();
}
// EOPNOTSUPP (cluster predates trash support) is normalized to ENOENT so
// callers see a consistent "image not found" error.
template <typename I>
Context *OpenRequest<I>::handle_v2_get_name_from_trash(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
cls::rbd::TrashImageSpec trash_spec;
if (*result == 0) {
auto it = m_out_bl.cbegin();
*result = cls_client::trash_get_finish(&it, &trash_spec);
m_image_ctx->name = trash_spec.name;
}
if (*result < 0) {
if (*result == -EOPNOTSUPP) {
*result = -ENOENT;
}
if (*result == -ENOENT) {
ldout(cct, 5) << "failed to retrieve name for image id "
<< m_image_ctx->id << dendl;
} else {
lderr(cct) << "failed to retrieve name from trash: "
<< cpp_strerror(*result) << dendl;
}
send_close_image(*result);
} else {
send_v2_get_initial_metadata();
}
return nullptr;
}
// Batch-read the v2 header basics (size/order, object prefix, features) in a
// single compound read op against the header object.
template <typename I>
void OpenRequest<I>::send_v2_get_initial_metadata() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_image_ctx->old_format = false;
m_image_ctx->header_oid = util::header_name(m_image_ctx->id);
librados::ObjectReadOperation op;
cls_client::get_size_start(&op, CEPH_NOSNAP);
cls_client::get_object_prefix_start(&op);
cls_client::get_features_start(&op, true);
using klass = OpenRequest<I>;
librados::AioCompletion *comp = create_rados_callback<
klass, &klass::handle_v2_get_initial_metadata>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op,
&m_out_bl);
comp->release();
}
// Decode the three replies in the order they were queued; stop at the first
// decode error.  Striping metadata is only fetched when the feature is set.
template <typename I>
Context *OpenRequest<I>::handle_v2_get_initial_metadata(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
auto it = m_out_bl.cbegin();
if (*result >= 0) {
// size is discarded here; only the order is persisted at this stage
uint64_t size;
*result = cls_client::get_size_finish(&it, &size, &m_image_ctx->order);
}
if (*result >= 0) {
*result = cls_client::get_object_prefix_finish(&it,
&m_image_ctx->object_prefix);
}
if (*result >= 0) {
uint64_t incompatible_features;
*result = cls_client::get_features_finish(&it, &m_image_ctx->features,
&incompatible_features);
}
if (*result < 0) {
lderr(cct) << "failed to retrieve initial metadata: "
<< cpp_strerror(*result) << dendl;
send_close_image(*result);
return nullptr;
}
if (m_image_ctx->test_features(RBD_FEATURE_STRIPINGV2)) {
send_v2_get_stripe_unit_count();
} else {
send_v2_get_create_timestamp();
}
return nullptr;
}
// Fetch the stripe unit/count (only reached when STRIPINGV2 is enabled).
template <typename I>
void OpenRequest<I>::send_v2_get_stripe_unit_count() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
librados::ObjectReadOperation op;
cls_client::get_stripe_unit_count_start(&op);
using klass = OpenRequest<I>;
librados::AioCompletion *comp = create_rados_callback<
klass, &klass::handle_v2_get_stripe_unit_count>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op,
&m_out_bl);
comp->release();
}
// ENOEXEC/EINVAL are tolerated (treated as "no striping metadata"); any
// other error aborts the open.
template <typename I>
Context *OpenRequest<I>::handle_v2_get_stripe_unit_count(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
if (*result == 0) {
auto it = m_out_bl.cbegin();
*result = cls_client::get_stripe_unit_count_finish(
&it, &m_image_ctx->stripe_unit, &m_image_ctx->stripe_count);
}
if (*result == -ENOEXEC || *result == -EINVAL) {
*result = 0;
}
if (*result < 0) {
lderr(cct) << "failed to read striping metadata: " << cpp_strerror(*result)
<< dendl;
send_close_image(*result);
return nullptr;
}
send_v2_get_create_timestamp();
return nullptr;
}
// Fetch the image creation timestamp from the header object.
template <typename I>
void OpenRequest<I>::send_v2_get_create_timestamp() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
librados::ObjectReadOperation op;
cls_client::get_create_timestamp_start(&op);
using klass = OpenRequest<I>;
librados::AioCompletion *comp = create_rados_callback<
klass, &klass::handle_v2_get_create_timestamp>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op,
&m_out_bl);
comp->release();
}
// EOPNOTSUPP (older OSDs without this cls method) is tolerated; any other
// error aborts the open.
template <typename I>
Context *OpenRequest<I>::handle_v2_get_create_timestamp(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
if (*result == 0) {
auto it = m_out_bl.cbegin();
*result = cls_client::get_create_timestamp_finish(&it,
&m_image_ctx->create_timestamp);
}
if (*result < 0 && *result != -EOPNOTSUPP) {
lderr(cct) << "failed to retrieve create_timestamp: "
<< cpp_strerror(*result)
<< dendl;
send_close_image(*result);
return nullptr;
}
send_v2_get_access_modify_timestamp();
return nullptr;
}
// Fetch access and modify timestamps in one compound op.
template <typename I>
void OpenRequest<I>::send_v2_get_access_modify_timestamp() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
librados::ObjectReadOperation op;
cls_client::get_access_timestamp_start(&op);
cls_client::get_modify_timestamp_start(&op);
//TODO: merge w/ create timestamp query after luminous EOLed
using klass = OpenRequest<I>;
librados::AioCompletion *comp = create_rados_callback<
klass, &klass::handle_v2_get_access_modify_timestamp>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op,
&m_out_bl);
comp->release();
}
// Decode both timestamps in queue order; EOPNOTSUPP is tolerated as above.
template <typename I>
Context *OpenRequest<I>::handle_v2_get_access_modify_timestamp(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
if (*result == 0) {
auto it = m_out_bl.cbegin();
*result = cls_client::get_access_timestamp_finish(&it,
&m_image_ctx->access_timestamp);
if (*result == 0)
*result = cls_client::get_modify_timestamp_finish(&it,
&m_image_ctx->modify_timestamp);
}
if (*result < 0 && *result != -EOPNOTSUPP) {
lderr(cct) << "failed to retrieve access/modify_timestamp: "
<< cpp_strerror(*result)
<< dendl;
send_close_image(*result);
return nullptr;
}
send_v2_get_data_pool();
return nullptr;
}
// Discover whether the image stores data in a separate pool.
template <typename I>
void OpenRequest<I>::send_v2_get_data_pool() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
librados::ObjectReadOperation op;
cls_client::get_data_pool_start(&op);
using klass = OpenRequest<I>;
librados::AioCompletion *comp = create_rados_callback<
klass, &klass::handle_v2_get_data_pool>(this);
m_out_bl.clear();
m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op,
&m_out_bl);
comp->release();
}
// Wire up the data IoCtx (falling back to the metadata pool when no separate
// data pool is configured), initialize the file layout, then refresh.
template <typename I>
Context *OpenRequest<I>::handle_v2_get_data_pool(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
int64_t data_pool_id = -1;
if (*result == 0) {
auto it = m_out_bl.cbegin();
*result = cls_client::get_data_pool_finish(&it, &data_pool_id);
} else if (*result == -EOPNOTSUPP) {
// older OSDs: no data-pool support, use the metadata pool
*result = 0;
}
if (*result < 0) {
lderr(cct) << "failed to read data pool: " << cpp_strerror(*result)
<< dendl;
send_close_image(*result);
return nullptr;
}
if (data_pool_id != -1) {
*result = util::create_ioctx(m_image_ctx->md_ctx, "data pool", data_pool_id,
{}, &m_image_ctx->data_ctx);
if (*result < 0) {
if (*result != -ENOENT) {
send_close_image(*result);
return nullptr;
}
// ENOENT: data pool was deleted -- continue with an invalid data ctx
m_image_ctx->data_ctx.close();
} else {
m_image_ctx->rebuild_data_io_context();
}
} else {
data_pool_id = m_image_ctx->md_ctx.get_id();
}
m_image_ctx->init_layout(data_pool_id);
send_refresh();
return nullptr;
}
// Initialize the ImageCtx and config watcher, then run a full refresh to
// load snapshots, parent info, etc.
template <typename I>
void OpenRequest<I>::send_refresh() {
m_image_ctx->init();
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_image_ctx->config_watcher = ConfigWatcher<I>::create(*m_image_ctx);
m_image_ctx->config_watcher->init();
using klass = OpenRequest<I>;
RefreshRequest<I> *req = RefreshRequest<I>::create(
*m_image_ctx, false, m_skip_open_parent_image,
create_context_callback<klass, &klass::handle_refresh>(this));
req->send();
}
// A refresh failure aborts the open; otherwise continue with plugins.
template <typename I>
Context *OpenRequest<I>::handle_refresh(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
if (*result < 0) {
lderr(cct) << "failed to refresh image: " << cpp_strerror(*result)
<< dendl;
send_close_image(*result);
return nullptr;
}
send_init_plugin_registry();
return nullptr;
}
// Initialize any plugins named by the "rbd_plugins" config option.
template <typename I>
void OpenRequest<I>::send_init_plugin_registry() {
CephContext *cct = m_image_ctx->cct;
auto plugins = m_image_ctx->config.template get_val<std::string>(
"rbd_plugins");
ldout(cct, 10) << __func__ << ": plugins=" << plugins << dendl;
auto ctx = create_context_callback<
OpenRequest<I>, &OpenRequest<I>::handle_init_plugin_registry>(this);
m_image_ctx->plugin_registry->init(plugins, ctx);
}
// Plugin initialization failure aborts the open.
template <typename I>
Context* OpenRequest<I>::handle_init_plugin_registry(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
if (*result < 0) {
lderr(cct) << "failed to initialize plugin registry: "
<< cpp_strerror(*result) << dendl;
send_close_image(*result);
return nullptr;
}
return send_init_cache(result);
}
// Set up the client-side object cache according to "rbd_cache_policy":
// "writearound" uses the write-around dispatch (readahead disabled);
// "writethrough"/"writeback" use the ObjectCacher dispatch (writethrough
// simply forces max_dirty to 0).  Skipped for clones' ancestors, when the
// cache is disabled, or when there is no valid data context.
template <typename I>
Context *OpenRequest<I>::send_init_cache(int *result) {
if (!m_image_ctx->cache || m_image_ctx->child != nullptr ||
!m_image_ctx->data_ctx.is_valid()) {
return send_register_watch(result);
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
size_t max_dirty = m_image_ctx->config.template get_val<Option::size_t>(
"rbd_cache_max_dirty");
auto writethrough_until_flush = m_image_ctx->config.template get_val<bool>(
"rbd_cache_writethrough_until_flush");
auto cache_policy = m_image_ctx->config.template get_val<std::string>(
"rbd_cache_policy");
if (cache_policy == "writearound") {
auto cache = cache::WriteAroundObjectDispatch<I>::create(
m_image_ctx, max_dirty, writethrough_until_flush);
cache->init();
m_image_ctx->readahead.set_max_readahead_size(0);
} else if (cache_policy == "writethrough" || cache_policy == "writeback") {
if (cache_policy == "writethrough") {
max_dirty = 0;
}
auto cache = cache::ObjectCacherObjectDispatch<I>::create(
m_image_ctx, max_dirty, writethrough_until_flush);
cache->init();
// readahead requires the object cacher cache
m_image_ctx->readahead.set_trigger_requests(
m_image_ctx->config.template get_val<uint64_t>("rbd_readahead_trigger_requests"));
m_image_ctx->readahead.set_max_readahead_size(
m_image_ctx->config.template get_val<Option::size_t>("rbd_readahead_max_bytes"));
}
return send_register_watch(result);
}
// Register the image watch, unless the image was opened read-only by the
// user (no watch needed, and the user may lack write permission).
template <typename I>
Context *OpenRequest<I>::send_register_watch(int *result) {
if ((m_image_ctx->read_only_flags & IMAGE_READ_ONLY_FLAG_USER) != 0U) {
return send_set_snap(result);
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
using klass = OpenRequest<I>;
Context *ctx = create_context_callback<
klass, &klass::handle_register_watch>(this);
m_image_ctx->register_watch(ctx);
return nullptr;
}
// EPERM is reported gently (the user simply cannot write); both EPERM and
// other errors still abort the open.
template <typename I>
Context *OpenRequest<I>::handle_register_watch(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
if (*result == -EPERM) {
ldout(cct, 5) << "user does not have write permission" << dendl;
send_close_image(*result);
return nullptr;
} else if (*result < 0) {
lderr(cct) << "failed to register watch: " << cpp_strerror(*result)
<< dendl;
send_close_image(*result);
return nullptr;
}
return send_set_snap(result);
}
// If the caller requested a snapshot (by name or by open_snap_id), resolve
// its id and switch the context to it; otherwise the open is finalized here.
template <typename I>
Context *OpenRequest<I>::send_set_snap(int *result) {
if (m_image_ctx->snap_name.empty() &&
m_image_ctx->open_snap_id == CEPH_NOSNAP) {
*result = 0;
return finalize(*result);
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
uint64_t snap_id = CEPH_NOSNAP;
// consume open_snap_id so later refreshes don't re-apply it
std::swap(m_image_ctx->open_snap_id, snap_id);
if (snap_id == CEPH_NOSNAP) {
std::shared_lock image_locker{m_image_ctx->image_lock};
snap_id = m_image_ctx->get_snap_id(m_image_ctx->snap_namespace,
m_image_ctx->snap_name);
}
if (snap_id == CEPH_NOSNAP) {
lderr(cct) << "failed to find snapshot " << m_image_ctx->snap_name << dendl;
send_close_image(-ENOENT);
return nullptr;
}
using klass = OpenRequest<I>;
SetSnapRequest<I> *req = SetSnapRequest<I>::create(
*m_image_ctx, snap_id,
create_context_callback<klass, &klass::handle_set_snap>(this));
req->send();
return nullptr;
}
// Snapshot selection failure aborts the open; success finalizes it.
template <typename I>
Context *OpenRequest<I>::handle_set_snap(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
if (*result < 0) {
lderr(cct) << "failed to set image snapshot: " << cpp_strerror(*result)
<< dendl;
send_close_image(*result);
return nullptr;
}
return finalize(*result);
}
// Last successful step: optionally install the "simple" IO scheduler for
// writable images, then hand control back to the caller's completion.
template <typename I>
Context *OpenRequest<I>::finalize(int r) {
if (r == 0) {
auto io_scheduler_cfg =
m_image_ctx->config.template get_val<std::string>("rbd_io_scheduler");
if (io_scheduler_cfg == "simple" && !m_image_ctx->read_only) {
auto io_scheduler =
io::SimpleSchedulerObjectDispatch<I>::create(m_image_ctx);
io_scheduler->init();
}
}
return m_on_finish;
}
// Error path: remember the failure that aborted the open, then close the
// partially opened image before completing with that original error.
template <typename I>
void OpenRequest<I>::send_close_image(int error_result) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  m_error_result = error_result;
  auto ctx = create_context_callback<
    OpenRequest<I>, &OpenRequest<I>::handle_close_image>(this);
  CloseRequest<I>::create(m_image_ctx, ctx)->send();
}
// Completion of the error-path close; surfaces the original failure that
// triggered the close (m_error_result) in preference to any close error.
template <typename I>
Context *OpenRequest<I>::handle_close_image(int *result) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to close image: " << cpp_strerror(*result) << dendl;
  }
  if (m_error_result < 0) {
    *result = m_error_result;
  }
  return m_on_finish;
}
} // namespace image
} // namespace librbd
template class librbd::image::OpenRequest<librbd::ImageCtx>;
| 22,061 | 29.304945 | 88 | cc |
null | ceph-main/src/librbd/image/OpenRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_OPEN_REQUEST_H
#define CEPH_LIBRBD_IMAGE_OPEN_REQUEST_H
#include "include/buffer.h"
#include <map>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace image {
// Asynchronous state machine that opens an RBD image (v1 or v2 format):
// it detects the header format, loads the immutable v2 metadata, performs
// a refresh, initializes the plugin registry and cache, and (for writable
// images) registers a watch and applies the requested snapshot.  On any
// error the partially opened image is closed before completing.  The step
// ordering is captured in the diagram below.
template <typename ImageCtxT = ImageCtx>
class OpenRequest {
public:
  static OpenRequest *create(ImageCtxT *image_ctx, uint64_t flags,
                             Context *on_finish) {
    return new OpenRequest(image_ctx, flags, on_finish);
  }
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    | (v1)
   *    |-----> V1_DETECT_HEADER
   *    |           |
   *    |           \-------------------------------\
   *    | (v2)                                      |
   *    \-----> V2_DETECT_HEADER                    |
   *                |                               |
   *                v                               |
   *            V2_GET_ID|NAME                      |
   *                |                               |
   *                v (skip if have name)           |
   *            V2_GET_NAME_FROM_TRASH              |
   *                |                               |
   *                v                               |
   *            V2_GET_INITIAL_METADATA             |
   *                |                               |
   *                v                               |
   *            V2_GET_STRIPE_UNIT_COUNT (skip if   |
   *                |                     disabled) |
   *                v                               |
   *            V2_GET_CREATE_TIMESTAMP             |
   *                |                               |
   *                v                               |
   *            V2_GET_ACCESS_MODIFY_TIMESTAMP      |
   *                |                               |
   *                v                               |
   *            V2_GET_DATA_POOL --------------> REFRESH
   *                                                |
   *                                                v
   *                                            INIT_PLUGIN_REGISTRY
   *                                                |
   *                                                v
   *                                            INIT_CACHE
   *                                                |
   *                                                v
   *                                            REGISTER_WATCH (skip if
   *                                                |           read-only)
   *                                                v
   *                                            SET_SNAP (skip if no snap)
   *                                                |
   *                                                v
   *                                             <finish>
   *                                                ^
   *     (on error)                                 |
   *  * * * * * * > CLOSE ------------------------/
   *
   * @endverbatim
   */
  OpenRequest(ImageCtxT *image_ctx, uint64_t flags, Context *on_finish);
  ImageCtxT *m_image_ctx;
  bool m_skip_open_parent_image;
  Context *m_on_finish;
  // scratch buffer for rados read results; first error seen (returned
  // after the error-path CLOSE completes)
  bufferlist m_out_bl;
  int m_error_result;
  void send_v1_detect_header();
  Context *handle_v1_detect_header(int *result);
  void send_v2_detect_header();
  Context *handle_v2_detect_header(int *result);
  void send_v2_get_id();
  Context *handle_v2_get_id(int *result);
  void send_v2_get_name();
  Context *handle_v2_get_name(int *result);
  void send_v2_get_name_from_trash();
  Context *handle_v2_get_name_from_trash(int *result);
  void send_v2_get_initial_metadata();
  Context *handle_v2_get_initial_metadata(int *result);
  void send_v2_get_stripe_unit_count();
  Context *handle_v2_get_stripe_unit_count(int *result);
  void send_v2_get_create_timestamp();
  Context *handle_v2_get_create_timestamp(int *result);
  void send_v2_get_access_modify_timestamp();
  Context *handle_v2_get_access_modify_timestamp(int *result);
  void send_v2_get_data_pool();
  Context *handle_v2_get_data_pool(int *result);
  void send_refresh();
  Context *handle_refresh(int *result);
  void send_init_plugin_registry();
  Context* handle_init_plugin_registry(int *result);
  Context *send_init_cache(int *result);
  Context *send_register_watch(int *result);
  Context *handle_register_watch(int *result);
  Context *send_set_snap(int *result);
  Context *handle_set_snap(int *result);
  Context *finalize(int r);
  void send_close_image(int error_result);
  Context *handle_close_image(int *result);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::OpenRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_OPEN_REQUEST_H
| 4,830 | 31.206667 | 75 | h |
null | ceph-main/src/librbd/image/PreRemoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/PreRemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/Utils.h"
#include "librbd/exclusive_lock/StandardPolicy.h"
#include "librbd/image/ListWatchersRequest.h"
#include "librbd/journal/DisabledPolicy.h"
#include "librbd/operation/SnapshotRemoveRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::PreRemoveRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace image {
namespace {
// Returns true for snapshots in the trash namespace, which the removal
// state machine queues for automatic pruning.
bool auto_delete_snapshot(const SnapInfo& snap_info) {
  auto ns_type = cls::rbd::get_snap_namespace_type(snap_info.snap_namespace);
  return (ns_type == cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH);
}
// Returns true for snapshots in the mirror namespace; these are skipped
// entirely when deciding whether the image can be removed.
bool ignore_snapshot(const SnapInfo& snap_info) {
  auto ns_type = cls::rbd::get_snap_namespace_type(snap_info.snap_namespace);
  return (ns_type == cls::rbd::SNAPSHOT_NAMESPACE_TYPE_MIRROR);
}
} // anonymous namespace
using util::create_context_callback;
using util::create_rados_callback;
// Entry point: refuses removal outright when image operations are
// disabled, otherwise starts by acquiring the exclusive lock (if present).
template <typename I>
void PreRemoveRequest<I>::send() {
  auto cct = m_image_ctx->cct;
  if (m_image_ctx->operations_disabled) {
    lderr(cct) << "image operations disabled due to unsupported op features"
               << dendl;
    finish(-EROFS);
    return;
  }
  acquire_exclusive_lock();
}
// Acquire the image's exclusive lock so no other client can use the image
// while removal proceeds; also pins the lock and journal policies so the
// lock is not transparently released (and the journal is not opened)
// mid-removal.  Skipped entirely when the feature is disabled.
template <typename I>
void PreRemoveRequest<I>::acquire_exclusive_lock() {
  // lock for write for set_exclusive_lock_policy()
  std::unique_lock owner_locker{m_image_ctx->owner_lock};
  if (m_image_ctx->exclusive_lock == nullptr) {
    owner_locker.unlock();
    validate_image_removal();
    return;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  // refuse to release exclusive lock when (in the midst of) removing
  // the image
  m_image_ctx->set_exclusive_lock_policy(
    new exclusive_lock::StandardPolicy<I>(m_image_ctx));
  // do not attempt to open the journal when removing the image in case
  // it's corrupt
  if (m_image_ctx->test_features(RBD_FEATURE_JOURNALING)) {
    std::unique_lock image_locker{m_image_ctx->image_lock};
    m_image_ctx->set_journal_policy(new journal::DisabledPolicy());
  }
  // cache the lock pointer so the callback survives a concurrent refresh
  m_exclusive_lock = m_image_ctx->exclusive_lock;
  auto ctx = create_context_callback<
    PreRemoveRequest<I>,
    &PreRemoveRequest<I>::handle_exclusive_lock>(this, m_exclusive_lock);
  m_exclusive_lock->acquire_lock(ctx);
}
// If the lock could not be acquired the image is in use elsewhere; fail
// with -EBUSY unless the caller passed 'force', in which case the lock is
// shut down and removal proceeds anyway.
template <typename I>
void PreRemoveRequest<I>::handle_exclusive_lock(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0 || !m_image_ctx->exclusive_lock->is_lock_owner()) {
    if (!m_force) {
      lderr(cct) << "cannot obtain exclusive lock - not removing" << dendl;
      finish(-EBUSY);
    } else {
      ldout(cct, 5) << "cannot obtain exclusive lock - "
                    << "proceeding due to force flag set" << dendl;
      shut_down_exclusive_lock();
    }
    return;
  }
  validate_image_removal();
}
// Force path: disable the exclusive-lock state machine so removal can
// continue without owning the lock.  Skips ahead when the lock has already
// disappeared (e.g. via a concurrent refresh).
template <typename I>
void PreRemoveRequest<I>::shut_down_exclusive_lock() {
  std::shared_lock owner_locker{m_image_ctx->owner_lock};
  if (m_image_ctx->exclusive_lock == nullptr) {
    owner_locker.unlock();
    validate_image_removal();
    return;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  auto ctx = create_context_callback<
    PreRemoveRequest<I>,
    &PreRemoveRequest<I>::handle_shut_down_exclusive_lock>(this);
  m_exclusive_lock = m_image_ctx->exclusive_lock;
  m_exclusive_lock->shut_down(ctx);
}
// Completion of the exclusive-lock shutdown: drop the cached reference and
// either abort on failure or continue validating the removal.
template <typename I>
void PreRemoveRequest<I>::handle_shut_down_exclusive_lock(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;
  m_exclusive_lock->put();
  m_exclusive_lock = nullptr;
  if (r < 0) {
    lderr(cct) << "error shutting down exclusive lock: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }
  ceph_assert(m_image_ctx->exclusive_lock == nullptr);
  validate_image_removal();
}
// An image in the middle of a migration cannot be removed (unless the
// caller explicitly asked to ignore the migrating state); otherwise move
// on to inspecting the image's snapshots.
template <typename I>
void PreRemoveRequest<I>::validate_image_removal() {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  bool migrating = (!m_image_ctx->ignore_migrating &&
                    m_image_ctx->test_features(RBD_FEATURE_MIGRATING));
  if (migrating) {
    lderr(cct) << "image in migration state - not removing" << dendl;
    finish(-EBUSY);
    return;
  }
  check_image_snaps();
}
// Scan the snapshot list under the image lock: trash-namespace snapshots
// are queued for auto-pruning, mirror-namespace snapshots are ignored, and
// any other snapshot aborts the removal with -ENOTEMPTY.
template <typename I>
void PreRemoveRequest<I>::check_image_snaps() {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  // manual lock/unlock: the early-return path must release before finish()
  m_image_ctx->image_lock.lock_shared();
  for (auto& snap_info : m_image_ctx->snap_info) {
    if (auto_delete_snapshot(snap_info.second)) {
      m_snap_infos.insert(snap_info);
    } else if (!ignore_snapshot(snap_info.second)) {
      m_image_ctx->image_lock.unlock_shared();
      ldout(cct, 5) << "image has snapshots - not removing" << dendl;
      finish(-ENOTEMPTY);
      return;
    }
  }
  m_image_ctx->image_lock.unlock_shared();
  list_image_watchers();
}
// Collect the image's watchers, excluding this client's own watch and any
// rbd-mirror instance watches, so only "foreign" users block removal.
template <typename I>
void PreRemoveRequest<I>::list_image_watchers() {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  int flags = LIST_WATCHERS_FILTER_OUT_MY_INSTANCE |
              LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES;
  auto ctx = create_context_callback<
    PreRemoveRequest<I>,
    &PreRemoveRequest<I>::handle_list_image_watchers>(this);
  auto req = ListWatchersRequest<I>::create(*m_image_ctx, flags, &m_watchers,
                                            ctx);
  req->send();
}
// Completion of the watcher listing: abort on error, otherwise evaluate
// the collected watcher set.
template <typename I>
void PreRemoveRequest<I>::handle_list_image_watchers(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "error listing image watchers: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  check_image_watchers();
}
// Any remaining (foreign) watcher means the image is still in use, so
// removal is refused with -EBUSY; otherwise continue to the group check.
template <typename I>
void PreRemoveRequest<I>::check_image_watchers() {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  if (m_watchers.empty()) {
    check_group();
    return;
  }
  lderr(cct) << "image has watchers - not removing" << dendl;
  finish(-EBUSY);
}
// v2 images may belong to a consistency group; fetch the group spec from
// the image header.  v1 images cannot be grouped, so the pre-removal
// checks finish here for the old format.
template <typename I>
void PreRemoveRequest<I>::check_group() {
  if (m_image_ctx->old_format) {
    finish(0);
    return;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  librados::ObjectReadOperation op;
  librbd::cls_client::image_group_get_start(&op);
  auto rados_completion = create_rados_callback<
    PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_check_group>(this);
  m_out_bl.clear();
  int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid,
                                          rados_completion, &op, &m_out_bl);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Decode the group spec; an image that is a member of a group cannot be
// removed (-EMLINK).  -EOPNOTSUPP is tolerated -- presumably from OSDs
// that predate the group cls method (TODO confirm).
template <typename I>
void PreRemoveRequest<I>::handle_check_group(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;
  cls::rbd::GroupSpec s;
  if (r == 0) {
    auto it = m_out_bl.cbegin();
    r = librbd::cls_client::image_group_get_finish(&it, &s);
  }
  if (r < 0 && r != -EOPNOTSUPP) {
    lderr(cct) << "error fetching group for image: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }
  if (s.is_valid()) {
    lderr(cct) << "image is in a group - not removing" << dendl;
    finish(-EMLINK);
    return;
  }
  remove_snapshot();
}
// Auto-prune the next queued trash-namespace snapshot; the request
// completes once the queue collected by check_image_snaps() is empty.
template <typename I>
void PreRemoveRequest<I>::remove_snapshot() {
  if (m_snap_infos.empty()) {
    finish(0);
    return;
  }
  auto cct = m_image_ctx->cct;
  auto snap_id = m_snap_infos.begin()->first;
  auto& snap_info = m_snap_infos.begin()->second;
  ldout(cct, 20) << "snap_id=" << snap_id << ", "
                 << "snap_name=" << snap_info.name << dendl;
  // SnapshotRemoveRequest requires the owner lock to be held
  std::shared_lock owner_lock{m_image_ctx->owner_lock};
  auto ctx = create_context_callback<
    PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_remove_snapshot>(this);
  auto req = librbd::operation::SnapshotRemoveRequest<I>::create(
    *m_image_ctx, snap_info.snap_namespace, snap_info.name,
    snap_id, ctx);
  req->send();
}
// Per-snapshot prune completion.  -EBUSY indicates the snapshot still has
// an attached clone child: record -ECHILD (first error wins) and keep
// pruning the rest.  -ENOENT (already gone) is harmless; any other error
// aborts the request.
template <typename I>
void PreRemoveRequest<I>::handle_remove_snapshot(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r == -EBUSY) {
    ldout(cct, 5) << "skipping attached child" << dendl;
    if (m_ret_val == 0) {
      m_ret_val = -ECHILD;
    }
  } else if (r < 0 && r != -ENOENT) {
    auto snap_id = m_snap_infos.begin()->first;
    lderr(cct) << "failed to auto-prune snapshot " << snap_id << ": "
               << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  ceph_assert(!m_snap_infos.empty());
  m_snap_infos.erase(m_snap_infos.begin());
  remove_snapshot();
}
// Complete the request and self-destruct; the first recorded error (e.g.
// -ECHILD from snapshot pruning) takes precedence over 'r'.
template <typename I>
void PreRemoveRequest<I>::finish(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;
  int ret_val = (m_ret_val != 0 ? m_ret_val : r);
  m_on_finish->complete(ret_val);
  delete this;
}
} // namespace image
} // namespace librbd
template class librbd::image::PreRemoveRequest<librbd::ImageCtx>;
| 9,251 | 25.510029 | 79 | cc |
null | ceph-main/src/librbd/image/PreRemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_PRE_REMOVE_REQUEST_H
#define CEPH_LIBRBD_IMAGE_PRE_REMOVE_REQUEST_H
#include "include/rados/librados.hpp"
#include "include/buffer.h"
#include "librbd/ImageCtx.h"
#include <list>
#include <map>
class Context;
namespace librbd {
namespace image {
// Asynchronous state machine that validates an image can be removed and
// performs the pre-removal work: acquiring (or force-shutting-down) the
// exclusive lock, rejecting images with foreign watchers, user snapshots,
// in-flight migrations or group membership, and auto-pruning any
// trash-namespace snapshots.
template <typename ImageCtxT>
class PreRemoveRequest {
public:
  static PreRemoveRequest *create(ImageCtxT *image_ctx, bool force,
                                  Context *on_finish) {
    return new PreRemoveRequest(image_ctx, force, on_finish);
  }
  PreRemoveRequest(ImageCtxT *image_ctx, bool force, Context *on_finish)
    : m_image_ctx(image_ctx), m_force(force), m_on_finish(on_finish) {
  }
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |              (skip if
   *    v               not needed)          (error)
   * ACQUIRE EXCLUSIVE LOCK  * * * * * * > SHUT DOWN EXCLUSIVE LOCK
   *    |                                         |
   *    v                                         |
   * CHECK IMAGE WATCHERS <------------------/
   *    |
   *    v
   * CHECK GROUP
   *    |
   *    |     /------\
   *    |     |      |
   *    v     v      |
   * REMOVE SNAPS ----/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  ImageCtxT* m_image_ctx;
  bool m_force;
  Context* m_on_finish;
  decltype(m_image_ctx->exclusive_lock) m_exclusive_lock = nullptr;
  bufferlist m_out_bl;
  std::list<obj_watch_t> m_watchers;
  // trash-namespace snapshots queued for auto-pruning
  std::map<uint64_t, SnapInfo> m_snap_infos;
  // first recorded error (takes precedence when completing)
  int m_ret_val = 0;
  void acquire_exclusive_lock();
  void handle_exclusive_lock(int r);
  void shut_down_exclusive_lock();
  void handle_shut_down_exclusive_lock(int r);
  void validate_image_removal();
  void check_image_snaps();
  void list_image_watchers();
  void handle_list_image_watchers(int r);
  void check_image_watchers();
  void check_group();
  void handle_check_group(int r);
  void remove_snapshot();
  void handle_remove_snapshot(int r);
  void finish(int r);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::PreRemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_PRE_REMOVE_REQUEST_H
| 2,292 | 21.70297 | 72 | h |
null | ceph-main/src/librbd/image/RefreshParentRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/RefreshParentRequest.h"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/migration/OpenSourceImageRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::RefreshParentRequest: "
namespace librbd {
namespace image {
using util::create_async_context_callback;
using util::create_context_callback;
// Construct a request that (re)opens or closes the child's parent image to
// match the supplied parent metadata / migration info.
template <typename I>
RefreshParentRequest<I>::RefreshParentRequest(
    I &child_image_ctx, const ParentImageInfo &parent_md,
    const MigrationInfo &migration_info, Context *on_finish)
  : m_child_image_ctx(child_image_ctx), m_parent_md(parent_md),
    m_migration_info(migration_info), m_on_finish(on_finish),
    m_parent_image_ctx(nullptr), m_parent_snap_id(CEPH_NOSNAP),
    m_error_result(0) {
}
// True when the child's attached parent no longer matches the on-disk
// parent metadata -- i.e. a parent needs to be opened or closed.  Caller
// must hold the child's image_lock.
template <typename I>
bool RefreshParentRequest<I>::is_refresh_required(
    I &child_image_ctx, const ParentImageInfo &parent_md,
    const MigrationInfo &migration_info) {
  ceph_assert(ceph_mutex_is_locked(child_image_ctx.image_lock));
  return (is_open_required(child_image_ctx, parent_md, migration_info) ||
          is_close_required(child_image_ctx, parent_md, migration_info));
}
// An attached parent must be closed once it no longer exists per the
// latest parent metadata / migration info.
template <typename I>
bool RefreshParentRequest<I>::is_close_required(
    I &child_image_ctx, const ParentImageInfo &parent_md,
    const MigrationInfo &migration_info) {
  if (child_image_ctx.parent == nullptr) {
    return false;
  }
  return !does_parent_exist(child_image_ctx, parent_md, migration_info);
}
// A parent must be (re)opened when one should exist but either none is
// attached or the attached one points at a different pool / namespace /
// image / snapshot than the latest parent metadata.
template <typename I>
bool RefreshParentRequest<I>::is_open_required(
    I &child_image_ctx, const ParentImageInfo &parent_md,
    const MigrationInfo &migration_info) {
  return (does_parent_exist(child_image_ctx, parent_md, migration_info) &&
          (child_image_ctx.parent == nullptr ||
           child_image_ctx.parent->md_ctx.get_id() != parent_md.spec.pool_id ||
           child_image_ctx.parent->md_ctx.get_namespace() !=
             parent_md.spec.pool_namespace ||
           child_image_ctx.parent->id != parent_md.spec.image_id ||
           child_image_ctx.parent->snap_id != parent_md.spec.snap_id));
}
// Decide whether this image logically has a parent: either valid parent
// metadata with a non-zero overlap, or an in-progress migration.
template <typename I>
bool RefreshParentRequest<I>::does_parent_exist(
    I &child_image_ctx, const ParentImageInfo &parent_md,
    const MigrationInfo &migration_info) {
  if (child_image_ctx.child != nullptr &&
      child_image_ctx.child->migration_info.empty() && parent_md.overlap == 0) {
    // intermediate, non-migrating images should only open their parent if they
    // overlap
    return false;
  }
  return (parent_md.spec.pool_id > -1 && parent_md.overlap > 0) ||
         !migration_info.empty();
}
// Entry point: open the new parent when one is required; otherwise there
// is nothing to do until finalize(), which closes any stale parent.
template <typename I>
void RefreshParentRequest<I>::send() {
  if (!is_open_required(m_child_image_ctx, m_parent_md, m_migration_info)) {
    // parent will be closed (if necessary) during finalize
    send_complete(0);
    return;
  }
  send_open_parent();
}
// Swap the newly opened parent into the child image context (caller holds
// the child's image_lock for write); the previous parent, if any, ends up
// in m_parent_image_ctx to be closed later by finalize().
template <typename I>
void RefreshParentRequest<I>::apply() {
  ceph_assert(ceph_mutex_is_wlocked(m_child_image_ctx.image_lock));
  std::swap(m_child_image_ctx.parent, m_parent_image_ctx);
}
// Second phase (after apply()): close the displaced parent image, if any,
// then complete via the provided context.
template <typename I>
void RefreshParentRequest<I>::finalize(Context *on_finish) {
  CephContext *cct = m_child_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  m_on_finish = on_finish;
  if (m_parent_image_ctx != nullptr) {
    send_close_parent();
  } else {
    send_complete(0);
  }
}
// Open the parent image: via the migration open path when migration info
// is present, otherwise as a regular read-only image in the parent's pool
// and namespace.
template <typename I>
void RefreshParentRequest<I>::send_open_parent() {
  ceph_assert(m_parent_md.spec.pool_id >= 0);
  CephContext *cct = m_child_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  if (!m_migration_info.empty()) {
    // migration source images are opened through their own state machine
    auto ctx = create_async_context_callback(
      m_child_image_ctx, create_context_callback<
        RefreshParentRequest<I>,
        &RefreshParentRequest<I>::handle_open_parent, false>(this));
    auto req = migration::OpenSourceImageRequest<I>::create(
      m_child_image_ctx.md_ctx, &m_child_image_ctx, m_parent_md.spec.snap_id,
      m_migration_info, &m_parent_image_ctx, ctx);
    req->send();
    return;
  }
  librados::IoCtx parent_io_ctx;
  int r = util::create_ioctx(m_child_image_ctx.md_ctx, "parent image",
                             m_parent_md.spec.pool_id,
                             m_parent_md.spec.pool_namespace, &parent_io_ctx);
  if (r < 0) {
    send_complete(r);
    return;
  }
  m_parent_image_ctx = new I("", m_parent_md.spec.image_id,
                             m_parent_md.spec.snap_id, parent_io_ctx, true);
  m_parent_image_ctx->child = &m_child_image_ctx;
  // set rados flags for reading the parent image
  if (m_child_image_ctx.config.template get_val<bool>("rbd_balance_parent_reads")) {
    m_parent_image_ctx->set_read_flag(librados::OPERATION_BALANCE_READS);
  } else if (m_child_image_ctx.config.template get_val<bool>("rbd_localize_parent_reads")) {
    m_parent_image_ctx->set_read_flag(librados::OPERATION_LOCALIZE_READS);
  }
  auto ctx = create_async_context_callback(
    m_child_image_ctx, create_context_callback<
      RefreshParentRequest<I>,
      &RefreshParentRequest<I>::handle_open_parent, false>(this));
  m_parent_image_ctx->state->open(0U, ctx);
}
// Completion of the parent open; on failure the open state machine has
// already destroyed the parent context, so only the pointer is cleared.
template <typename I>
Context *RefreshParentRequest<I>::handle_open_parent(int *result) {
  CephContext *cct = m_child_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << " r=" << *result << dendl;
  save_result(result);
  if (*result < 0) {
    lderr(cct) << "failed to open parent image: " << cpp_strerror(*result)
               << dendl;
    // image already closed by open state machine
    m_parent_image_ctx = nullptr;
  }
  return m_on_finish;
}
// Close the displaced (pre-apply) parent image context asynchronously.
template <typename I>
void RefreshParentRequest<I>::send_close_parent() {
  ceph_assert(m_parent_image_ctx != nullptr);
  CephContext *cct = m_child_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  auto ctx = create_async_context_callback(
    m_child_image_ctx, create_context_callback<
      RefreshParentRequest<I>,
      &RefreshParentRequest<I>::handle_close_parent, false>(this));
  m_parent_image_ctx->state->close(ctx);
}
// Completion of the parent close; a close failure is only logged, then the
// child's object-existence cache is reset regardless.
template <typename I>
Context *RefreshParentRequest<I>::handle_close_parent(int *result) {
  CephContext *cct = m_child_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << " r=" << *result << dendl;
  m_parent_image_ctx = nullptr;
  if (*result < 0) {
    lderr(cct) << "failed to close parent image: " << cpp_strerror(*result)
               << dendl;
  }
  send_reset_existence_cache();
  return nullptr;
}
// Invalidate the child's cached object-existence state, which may be stale
// now that the parent changed.
template <typename I>
void RefreshParentRequest<I>::send_reset_existence_cache() {
  CephContext *cct = m_child_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  Context *ctx = create_async_context_callback(
    m_child_image_ctx, create_context_callback<
      RefreshParentRequest<I>,
      &RefreshParentRequest<I>::handle_reset_existence_cache, false>(this));
  m_child_image_ctx.io_object_dispatcher->reset_existence_cache(ctx);
}
// Final completion: cache-reset failures are only logged; any earlier
// error saved while opening the parent takes precedence in the result.
template <typename I>
Context *RefreshParentRequest<I>::handle_reset_existence_cache(int *result) {
  CephContext *cct = m_child_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << " r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to reset object existence cache: "
               << cpp_strerror(*result) << dendl;
  }
  if (m_error_result < 0) {
    // propagate errors from opening the image
    *result = m_error_result;
  } else {
    *result = 0;
  }
  return m_on_finish;
}
// Complete the request immediately with the supplied result code.
template <typename I>
void RefreshParentRequest<I>::send_complete(int r) {
  auto cct = m_child_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  m_on_finish->complete(r);
}
} // namespace image
} // namespace librbd
template class librbd::image::RefreshParentRequest<librbd::ImageCtx>;
| 8,121 | 32.15102 | 92 | cc |
null | ceph-main/src/librbd/image/RefreshParentRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_REFRESH_PARENT_REQUEST_H
#define CEPH_LIBRBD_IMAGE_REFRESH_PARENT_REQUEST_H
#include "include/int_types.h"
#include "librbd/Types.h"
class Context;
namespace librbd {
class ImageCtx;
namespace image {
// Two-phase helper used during image refresh to keep the child's parent
// image attachment in sync with the on-disk parent metadata: send() opens
// the new parent (if needed), apply() swaps it into the child under the
// image lock, and finalize() closes the displaced parent and resets the
// object-existence cache.
template <typename ImageCtxT = ImageCtx>
class RefreshParentRequest {
public:
  static RefreshParentRequest *create(ImageCtxT &child_image_ctx,
                                      const ParentImageInfo &parent_md,
                                      const MigrationInfo &migration_info,
                                      Context *on_finish) {
    return new RefreshParentRequest(child_image_ctx, parent_md, migration_info,
                                    on_finish);
  }
  static bool is_refresh_required(ImageCtxT &child_image_ctx,
                                  const ParentImageInfo &parent_md,
                                  const MigrationInfo &migration_info);
  void send();
  void apply();
  void finalize(Context *on_finish);
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    | (open required)
   *    |----------------> OPEN_PARENT * * * * * * * * * * * * * * *
   *    |                     |                                     *
   *    |                     v                (on error)           *
   *    \----------------> <apply>                                  *
   *                          |                                     *
   *                          | (close required)                    *
   *                          |-----------------> CLOSE_PARENT      *
   *                          |                      |              *
   *                          |                      v              *
   *                          |                   RESET_EXISTENCE   *
   *                          |                      |              *
   *                          |                      v              *
   *                          \-----------------> <finish> < * * * *
   *
   * @endverbatim
   */
  RefreshParentRequest(ImageCtxT &child_image_ctx,
                       const ParentImageInfo &parent_md,
                       const MigrationInfo &migration_info, Context *on_finish);
  ImageCtxT &m_child_image_ctx;
  ParentImageInfo m_parent_md;
  MigrationInfo m_migration_info;
  Context *m_on_finish;
  // newly opened parent before apply(); displaced parent after apply()
  ImageCtxT *m_parent_image_ctx;
  uint64_t m_parent_snap_id;
  // first error observed (wins over later cleanup results)
  int m_error_result;
  static bool is_close_required(ImageCtxT &child_image_ctx,
                                const ParentImageInfo &parent_md,
                                const MigrationInfo &migration_info);
  static bool is_open_required(ImageCtxT &child_image_ctx,
                               const ParentImageInfo &parent_md,
                               const MigrationInfo &migration_info);
  static bool does_parent_exist(ImageCtxT &child_image_ctx,
                                const ParentImageInfo &parent_md,
                                const MigrationInfo &migration_info);
  void send_open_parent();
  Context *handle_open_parent(int *result);
  void send_close_parent();
  Context *handle_close_parent(int *result);
  void send_reset_existence_cache();
  Context *handle_reset_existence_cache(int *result);
  void send_complete(int r);
  void save_result(int *result) {
    if (m_error_result == 0 && *result < 0) {
      m_error_result = *result;
    }
  }
};
} // namespace image
} // namespace librbd
extern template class librbd::image::RefreshParentRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_REFRESH_PARENT_REQUEST_H
| 3,653 | 32.218182 | 80 | h |
null | ceph-main/src/librbd/image/RefreshRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/ceph_assert.h"
#include "librbd/image/RefreshRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/deep_copy/Utils.h"
#include "librbd/image/GetMetadataRequest.h"
#include "librbd/image/RefreshParentRequest.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/journal/Policy.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::RefreshRequest: "
namespace librbd {
namespace image {
using util::create_rados_callback;
using util::create_async_context_callback;
using util::create_context_callback;
// Construct a refresh request; the completion context is wrapped so it is
// always invoked asynchronously.  A duplicate IoCtx with an empty
// namespace is kept for reading pool-level metadata, which lives in the
// pool's default namespace.
template <typename I>
RefreshRequest<I>::RefreshRequest(I &image_ctx, bool acquiring_lock,
                                  bool skip_open_parent, Context *on_finish)
  : m_image_ctx(image_ctx), m_acquiring_lock(acquiring_lock),
    m_skip_open_parent_image(skip_open_parent),
    m_on_finish(create_async_context_callback(m_image_ctx, on_finish)),
    m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
    m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
  m_pool_metadata_io_ctx.dup(image_ctx.md_ctx);
  m_pool_metadata_io_ctx.set_namespace("");
}
// The state machine must have released/closed all transient resources
// before the request is destroyed.
template <typename I>
RefreshRequest<I>::~RefreshRequest() {
  // these require state machine to close
  ceph_assert(m_exclusive_lock == nullptr);
  ceph_assert(m_object_map == nullptr);
  ceph_assert(m_journal == nullptr);
  ceph_assert(m_refresh_parent == nullptr);
  ceph_assert(!m_blocked_writes);
}
// Entry point: dispatch on the image format -- v1 (old format) images
// store metadata in a plain header blob, v2 images use cls_rbd methods.
template <typename I>
void RefreshRequest<I>::send() {
  if (!m_image_ctx.old_format) {
    send_v2_get_mutable_metadata();
  } else {
    send_v1_read_header();
  }
}
// Fetch the migration spec from the image header once a migration flag /
// header was detected; when the caller asked to ignore migrations the spec
// is cleared and the refresh continues on the normal path.
template <typename I>
void RefreshRequest<I>::send_get_migration_header() {
  if (m_image_ctx.ignore_migrating) {
    m_migration_spec = {};
    if (m_image_ctx.old_format) {
      send_v1_get_snapshots();
    } else {
      send_v2_get_metadata();
    }
    return;
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  cls_client::migration_get_start(&op);
  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_get_migration_header>(this);
  m_out_bl.clear();
  m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                 &m_out_bl);
  comp->release();
}
// Decode and validate the migration spec.  A missing header or a
// PREPARING destination means the migration metadata is still being
// written, so the whole refresh is restarted.  Source images and aborting
// migrations may only be opened read-only (-EROFS otherwise).
template <typename I>
Context *RefreshRequest<I>::handle_get_migration_header(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::migration_get_finish(&it, &m_migration_spec);
  } else if (*result == -ENOENT) {
    // header not written yet -- restart the refresh from the beginning
    ldout(cct, 5) << this << " " << __func__ << ": no migration header found"
                  << ", retrying" << dendl;
    send();
    return nullptr;
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve migration header: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }
  switch(m_migration_spec.header_type) {
  case cls::rbd::MIGRATION_HEADER_TYPE_SRC:
    if (!m_read_only) {
      lderr(cct) << "image being migrated" << dendl;
      *result = -EROFS;
      return m_on_finish;
    }
    ldout(cct, 1) << this << " " << __func__ << ": migrating to: "
                  << m_migration_spec << dendl;
    break;
  case cls::rbd::MIGRATION_HEADER_TYPE_DST:
    ldout(cct, 1) << this << " " << __func__ << ": migrating from: "
                  << m_migration_spec << dendl;
    switch (m_migration_spec.state) {
    case cls::rbd::MIGRATION_STATE_PREPARING:
      ldout(cct, 5) << this << " " << __func__ << ": current migration state: "
                    << m_migration_spec.state << ", retrying" << dendl;
      send();
      return nullptr;
    case cls::rbd::MIGRATION_STATE_PREPARED:
    case cls::rbd::MIGRATION_STATE_EXECUTING:
    case cls::rbd::MIGRATION_STATE_EXECUTED:
      break;
    case cls::rbd::MIGRATION_STATE_ABORTING:
      if (!m_read_only) {
        lderr(cct) << this << " " << __func__ << ": migration is being aborted"
                   << dendl;
        *result = -EROFS;
        return m_on_finish;
      }
      break;
    default:
      lderr(cct) << this << " " << __func__ << ": migration is in an "
                 << "unexpected state" << dendl;
      *result = -EINVAL;
      return m_on_finish;
    }
    break;
  default:
    ldout(cct, 1) << this << " " << __func__ << ": migration type "
                  << m_migration_spec.header_type << dendl;
    *result = -EBADMSG;
    return m_on_finish;
  }
  if (m_image_ctx.old_format) {
    send_v1_get_snapshots();
  } else {
    send_v2_get_metadata();
  }
  return nullptr;
}
// Read the entire v1 header object (len=0 reads the whole object) for
// format detection and on-disk metadata parsing.
template <typename I>
void RefreshRequest<I>::send_v1_read_header() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  op.read(0, 0, nullptr, nullptr);
  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_read_header>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Validate the v1 header magic (either the normal or the "migrating"
// marker), snapshot the read-only state, and extract the on-disk order,
// size and object prefix; migrating images continue by fetching the
// migration spec.
template <typename I>
Context *RefreshRequest<I>::handle_v1_read_header(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;
  rbd_obj_header_ondisk v1_header;
  bool migrating = false;
  if (*result < 0) {
    return m_on_finish;
  } else if (m_out_bl.length() < sizeof(v1_header)) {
    lderr(cct) << "v1 header too small" << dendl;
    *result = -EIO;
    return m_on_finish;
  } else if (memcmp(RBD_HEADER_TEXT, m_out_bl.c_str(),
                    sizeof(RBD_HEADER_TEXT)) != 0) {
    if (memcmp(RBD_MIGRATE_HEADER_TEXT, m_out_bl.c_str(),
               sizeof(RBD_MIGRATE_HEADER_TEXT)) == 0) {
      ldout(cct, 1) << this << " " << __func__ << ": migration v1 header detected"
                    << dendl;
      migrating = true;
    } else {
      lderr(cct) << "unrecognized v1 header" << dendl;
      *result = -ENXIO;
      return m_on_finish;
    }
  }
  {
    // capture the image's current read-only state under the image lock
    std::shared_lock image_locker{m_image_ctx.image_lock};
    m_read_only = m_image_ctx.read_only;
    m_read_only_flags = m_image_ctx.read_only_flags;
  }
  memcpy(&v1_header, m_out_bl.c_str(), sizeof(v1_header));
  m_order = v1_header.options.order;
  m_size = v1_header.image_size;
  m_object_prefix = v1_header.block_name;
  if (migrating) {
    send_get_migration_header();
  } else {
    m_migration_spec = {};
    send_v1_get_snapshots();
  }
  return nullptr;
}
// Fetch the v1 snapshot list (names, sizes, snap context) from the header
// object via the legacy cls call.
template <typename I>
void RefreshRequest<I>::send_v1_get_snapshots() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  cls_client::old_snapshot_list_start(&op);
  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_get_snapshots>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Decode the v1 snapshot list, validate the snap context and normalize the
// per-snapshot data into SnapInfo records (v1 supports only the user
// snapshot namespace).
template <typename I>
Context *RefreshRequest<I>::handle_v1_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;
  std::vector<std::string> snap_names;
  std::vector<uint64_t> snap_sizes;
  if (*result >= 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::old_snapshot_list_finish(&it, &snap_names,
                                                   &snap_sizes, &m_snapc);
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }
  if (!m_snapc.is_valid()) {
    lderr(cct) << "v1 image snap context is invalid" << dendl;
    *result = -EIO;
    return m_on_finish;
  }
  m_snap_infos.clear();
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    m_snap_infos.push_back({m_snapc.snaps[i],
                            {cls::rbd::UserSnapshotNamespace{}},
                            snap_names[i], snap_sizes[i], {}, 0});
  }
  send_v1_get_locks();
  return nullptr;
}
// Issue an async read of the advisory lock state (cls_lock) stored on the
// v1 image header object.
template <typename I>
void RefreshRequest<I>::send_v1_get_locks() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_get_locks>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);  // queuing the async op cannot fail
  comp->release();
}
// Decode the cls_lock lockers/tag for the v1 header and record whether the
// image is exclusively locked, then move on to the apply step.
template <typename I>
Context *RefreshRequest<I>::handle_v1_get_locks(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;
  if (*result >= 0) {
    auto it = m_out_bl.cbegin();
    ClsLockType lock_type;
    *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
                                                     &lock_type, &m_lock_tag);
    if (*result >= 0) {
      m_exclusive_locked = (lock_type == ClsLockType::EXCLUSIVE);
    }
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve locks: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }
  send_v1_apply();
  return nullptr;
}
// Bounce through the op work queue before committing v1 refresh state so
// that apply() never executes from inside a librados callback thread.
template <typename I>
void RefreshRequest<I>::send_v1_apply() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  auto ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v1_apply>(this);
  m_image_ctx.op_work_queue->queue(ctx, 0);
}
// Now on the work queue: commit the cached v1 state into the ImageCtx and
// finish with an AIO flush (v1 images have no dynamic features to tear down).
template <typename I>
Context *RefreshRequest<I>::handle_v1_apply(int *result) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  apply();
  return send_flush_aio();
}
// Batch-read all mutable v2 header state (size, features, flags, snap
// context and lock info) in a single compound ObjectReadOperation.
template <typename I>
void RefreshRequest<I>::send_v2_get_mutable_metadata() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  uint64_t snap_id;
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    snap_id = m_image_ctx.snap_id;
    m_read_only = m_image_ctx.read_only;
    m_read_only_flags = m_image_ctx.read_only_flags;
  }
  // mask out the non-primary read-only flag since its state can change
  bool read_only = (
    ((m_read_only_flags & ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY) != 0) ||
    (snap_id != CEPH_NOSNAP));
  librados::ObjectReadOperation op;
  cls_client::get_size_start(&op, CEPH_NOSNAP);
  // read_only hints the OSD not to record a feature-read for R/W purposes
  cls_client::get_features_start(&op, read_only);
  cls_client::get_flags_start(&op, CEPH_NOSNAP);
  cls_client::get_snapcontext_start(&op);
  rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_mutable_metadata>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);  // queuing the async op cannot fail
  comp->release();
}
// Decode the compound mutable-metadata reply in the same order the sub-ops
// were queued, validate features/snap context, and reconcile the
// non-primary read-only flag before moving on to the parent query.
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_mutable_metadata(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;
  auto it = m_out_bl.cbegin();
  if (*result >= 0) {
    uint8_t order;
    *result = cls_client::get_size_finish(&it, &m_size, &order);
  }
  if (*result >= 0) {
    *result = cls_client::get_features_finish(&it, &m_features,
                                              &m_incompatible_features);
  }
  if (*result >= 0) {
    *result = cls_client::get_flags_finish(&it, &m_flags);
  }
  if (*result >= 0) {
    *result = cls_client::get_snapcontext_finish(&it, &m_snapc);
  }
  if (*result >= 0) {
    ClsLockType lock_type;
    *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
                                                     &lock_type, &m_lock_tag);
    if (*result >= 0) {
      m_exclusive_locked = (lock_type == ClsLockType::EXCLUSIVE);
    }
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve mutable metadata: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }
  // refuse to proceed if the image requires features this client lacks
  uint64_t unsupported = m_incompatible_features & ~RBD_FEATURES_ALL;
  if (unsupported != 0ULL) {
    lderr(cct) << "Image uses unsupported features: " << unsupported << dendl;
    *result = -ENOSYS;
    return m_on_finish;
  }
  if (!m_snapc.is_valid()) {
    lderr(cct) << "image snap context is invalid!" << dendl;
    *result = -EIO;
    return m_on_finish;
  }
  if (m_acquiring_lock && (m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
    // lock acquisition is in-flight while the feature was concurrently
    // disabled: pretend it is still enabled and flag a partial refresh so
    // the caller retries (-ERESTART in send_flush_aio)
    ldout(cct, 5) << "ignoring dynamically disabled exclusive lock" << dendl;
    m_features |= RBD_FEATURE_EXCLUSIVE_LOCK;
    m_incomplete_update = true;
  } else {
    m_incomplete_update = false;
  }
  // reconcile the locally-tracked non-primary read-only flag with the
  // on-disk NON_PRIMARY incompatible feature and the configured mask
  if (((m_incompatible_features & RBD_FEATURE_NON_PRIMARY) != 0U) &&
      ((m_read_only_flags & IMAGE_READ_ONLY_FLAG_NON_PRIMARY) == 0U) &&
      ((m_image_ctx.read_only_mask & IMAGE_READ_ONLY_FLAG_NON_PRIMARY) != 0U)) {
    // implies we opened a non-primary image in R/W mode
    ldout(cct, 5) << "adding non-primary read-only image flag" << dendl;
    m_read_only_flags |= IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
  } else if ((((m_incompatible_features & RBD_FEATURE_NON_PRIMARY) == 0U) ||
              ((m_image_ctx.read_only_mask &
                  IMAGE_READ_ONLY_FLAG_NON_PRIMARY) == 0U)) &&
             ((m_read_only_flags & IMAGE_READ_ONLY_FLAG_NON_PRIMARY) != 0U)) {
    ldout(cct, 5) << "removing non-primary read-only image flag" << dendl;
    m_read_only_flags &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
  }
  m_read_only = (m_read_only_flags != 0U);
  m_legacy_parent = false;
  send_v2_get_parent();
  return nullptr;
}
// Read the parent (clone) linkage. Prefers the modern parent_get /
// parent_overlap_get methods; falls back to the legacy get_parent method
// for pre-Nautilus OSDs (m_legacy_parent set by the handler on -EOPNOTSUPP).
template <typename I>
void RefreshRequest<I>::send_v2_get_parent() {
  // NOTE: remove support when Mimic is EOLed
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": legacy=" << m_legacy_parent
                 << dendl;
  librados::ObjectReadOperation op;
  if (!m_legacy_parent) {
    cls_client::parent_get_start(&op);
    cls_client::parent_overlap_get_start(&op, CEPH_NOSNAP);
  } else {
    cls_client::get_parent_start(&op, CEPH_NOSNAP);
  }
  auto aio_comp = create_rados_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_parent>(this);
  m_out_bl.clear();
  m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, aio_comp, &op,
                                 &m_out_bl);
  aio_comp->release();
}
// Decode the parent spec/overlap reply (modern or legacy form); retry once
// with the legacy method on -EOPNOTSUPP. Branches into the migration header
// read if the image is mid-migration.
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_parent(int *result) {
  // NOTE: remove support when Mimic is EOLed
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  auto it = m_out_bl.cbegin();
  if (!m_legacy_parent) {
    if (*result >= 0) {
      *result = cls_client::parent_get_finish(&it, &m_parent_md.spec);
    }
    // the modern method reports "no overlap recorded" via an empty optional
    std::optional<uint64_t> parent_overlap;
    if (*result >= 0) {
      *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap);
    }
    if (*result >= 0) {
      if (parent_overlap) {
        m_parent_md.overlap = *parent_overlap;
        m_head_parent_overlap = true;
      } else {
        m_parent_md.overlap = 0;
        m_head_parent_overlap = false;
      }
    }
  } else if (*result >= 0) {
    *result = cls_client::get_parent_finish(&it, &m_parent_md.spec,
                                            &m_parent_md.overlap);
    m_head_parent_overlap = true;
  }
  if (*result == -EOPNOTSUPP && !m_legacy_parent) {
    // older OSD without parent_get: retry via the legacy class method
    ldout(cct, 10) << "retrying using legacy parent method" << dendl;
    m_legacy_parent = true;
    send_v2_get_parent();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve parent: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }
  if ((m_features & RBD_FEATURE_MIGRATING) != 0) {
    ldout(cct, 1) << "migrating feature set" << dendl;
    send_get_migration_header();
  } else {
    m_migration_spec = {};
    send_v2_get_metadata();
  }
  return nullptr;
}
// Fetch all image-level config overrides (keys under the "conf_" metadata
// prefix) from the image header into m_metadata.
template <typename I>
void RefreshRequest<I>::send_v2_get_metadata() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  auto ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_metadata>(this);
  m_metadata.clear();
  auto req = GetMetadataRequest<I>::create(
    m_image_ctx.md_ctx, m_image_ctx.header_oid, true,
    ImageCtx::METADATA_CONF_PREFIX, ImageCtx::METADATA_CONF_PREFIX, 0U,
    &m_metadata, ctx);
  req->send();
}
// Image-level config overrides have been collected into m_metadata; on
// success continue with the pool-level metadata, otherwise abort the
// refresh with the error.
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_metadata(int *result) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    send_v2_get_pool_metadata();
    return nullptr;
  }
  lderr(cct) << "failed to retrieve metadata: " << cpp_strerror(*result)
             << dendl;
  return m_on_finish;
}
// Fetch pool-level config overrides (stored on the RBD_INFO object) into
// the same m_metadata map collected by send_v2_get_metadata().
template <typename I>
void RefreshRequest<I>::send_v2_get_pool_metadata() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  auto ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_pool_metadata>(this);
  auto req = GetMetadataRequest<I>::create(
    m_pool_metadata_io_ctx, RBD_INFO, true, ImageCtx::METADATA_CONF_PREFIX,
    ImageCtx::METADATA_CONF_PREFIX, 0U, &m_metadata, ctx);
  req->send();
}
// Apply the combined image + pool config overrides to the ImageCtx, then
// proceed to the op-features query.
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_pool_metadata(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to retrieve pool metadata: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }
  // applying metadata is only thread-safe while the watcher is unregistered
  bool thread_safe = m_image_ctx.image_watcher->is_unregistered();
  m_image_ctx.apply_metadata(m_metadata, thread_safe);
  send_v2_get_op_features();
  return nullptr;
}
// Read the 64-bit op-features mask, but only if the OPERATIONS feature bit
// is set -- otherwise the mask is implicitly zero and we skip the round trip.
template <typename I>
void RefreshRequest<I>::send_v2_get_op_features() {
  if ((m_features & RBD_FEATURE_OPERATIONS) == 0LL) {
    m_op_features = 0;
    send_v2_get_group();
    return;
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  cls_client::op_features_get_start(&op);
  librados::AioCompletion *comp = create_rados_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_op_features>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);  // queuing the async op cannot fail
  comp->release();
}
// Decode the op-features mask and continue to the group query.
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_op_features(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;
  // -EOPNOTSUPP handler not required since feature bit implies OSD
  // supports the method
  if (*result >= 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::op_features_get_finish(&it, &m_op_features);
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve op features: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }
  send_v2_get_group();
  return nullptr;
}
// Read the consistency-group membership spec from the image header.
template <typename I>
void RefreshRequest<I>::send_v2_get_group() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  cls_client::image_group_get_start(&op);
  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_group>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);  // queuing the async op cannot fail
  comp->release();
}
// Decode the group spec; an -EOPNOTSUPP from an older OSD simply means
// "no group support", so treat it as no membership rather than an error.
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_group(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;
  if (*result >= 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::image_group_get_finish(&it, &m_group_spec);
  }
  if (*result == -EOPNOTSUPP) {
    m_group_spec = {};
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve group: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }
  // start optimistically with the modern snapshot_get method; the snapshot
  // handler downgrades this on -EOPNOTSUPP
  m_legacy_snapshot = LEGACY_SNAPSHOT_DISABLED;
  send_v2_get_snapshots();
  return nullptr;
}
// Batch-read per-snapshot state (info, parent overlap, flags, protection)
// for every snapshot in the current snap context using a single compound
// op. Supports two legacy fallback modes for older OSDs.
template <typename I>
void RefreshRequest<I>::send_v2_get_snapshots() {
  m_snap_infos.resize(m_snapc.snaps.size());
  m_snap_flags.resize(m_snapc.snaps.size());
  m_snap_parents.resize(m_snapc.snaps.size());
  m_snap_protection.resize(m_snapc.snaps.size());
  if (m_snapc.snaps.empty()) {
    // no snapshots: nothing to read, go straight to the parent refresh
    send_v2_refresh_parent();
    return;
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  for (auto snap_id : m_snapc.snaps) {
    if (m_legacy_snapshot != LEGACY_SNAPSHOT_DISABLED) {
      /// NOTE: remove after Luminous is retired
      cls_client::get_snapshot_name_start(&op, snap_id);
      cls_client::get_size_start(&op, snap_id);
      if (m_legacy_snapshot != LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP) {
        cls_client::get_snapshot_timestamp_start(&op, snap_id);
      }
    } else {
      cls_client::snapshot_get_start(&op, snap_id);
    }
    if (m_legacy_parent) {
      cls_client::get_parent_start(&op, snap_id);
    } else {
      cls_client::parent_overlap_get_start(&op, snap_id);
    }
    cls_client::get_flags_start(&op, snap_id);
    cls_client::get_protection_status_start(&op, snap_id);
  }
  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snapshots>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);  // queuing the async op cannot fail
  comp->release();
}
// Decode the per-snapshot compound reply in queue order. Handles three
// retry paths: -ENOENT means a snapshot raced away mid-refresh (restart
// from mutable metadata, bounded by MAX_ENOENT_RETRIES); -EOPNOTSUPP
// progressively downgrades to the Luminous and then Jewel legacy methods.
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;
  auto it = m_out_bl.cbegin();
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    if (m_legacy_snapshot != LEGACY_SNAPSHOT_DISABLED) {
      /// NOTE: remove after Luminous is retired
      std::string snap_name;
      if (*result >= 0) {
        *result = cls_client::get_snapshot_name_finish(&it, &snap_name);
      }
      uint64_t snap_size;
      if (*result >= 0) {
        uint8_t order;
        *result = cls_client::get_size_finish(&it, &snap_size, &order);
      }
      utime_t snap_timestamp;
      if (*result >= 0 &&
          m_legacy_snapshot != LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP) {
        /// NOTE: remove after Jewel is retired
        *result = cls_client::get_snapshot_timestamp_finish(&it,
                                                            &snap_timestamp);
      }
      if (*result >= 0) {
        // legacy snapshots are always user snapshots with no child count
        m_snap_infos[i] = {m_snapc.snaps[i],
                           {cls::rbd::UserSnapshotNamespace{}},
                           snap_name, snap_size, snap_timestamp, 0};
      }
    } else if (*result >= 0) {
      *result = cls_client::snapshot_get_finish(&it, &m_snap_infos[i]);
    }
    if (*result >= 0) {
      if (m_legacy_parent) {
        *result = cls_client::get_parent_finish(&it, &m_snap_parents[i].spec,
                                                &m_snap_parents[i].overlap);
      } else {
        std::optional<uint64_t> parent_overlap;
        *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap);
        if (*result >= 0) {
          // modern method only returns the overlap; combine with the
          // HEAD parent spec (valid pool id implies the image is a clone)
          if (parent_overlap && m_parent_md.spec.pool_id > -1) {
            m_snap_parents[i].spec = m_parent_md.spec;
            m_snap_parents[i].overlap = *parent_overlap;
          } else {
            m_snap_parents[i] = {};
          }
        }
      }
    }
    if (*result >= 0) {
      *result = cls_client::get_flags_finish(&it, &m_snap_flags[i]);
    }
    if (*result >= 0) {
      *result = cls_client::get_protection_status_finish(
        &it, &m_snap_protection[i]);
    }
    if (*result < 0) {
      break;
    }
  }
  if (*result == -ENOENT && m_enoent_retries++ < MAX_ENOENT_RETRIES) {
    ldout(cct, 10) << "out-of-sync snapshot state detected, retrying" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (m_legacy_snapshot == LEGACY_SNAPSHOT_DISABLED &&
             *result == -EOPNOTSUPP) {
    ldout(cct, 10) << "retrying using legacy snapshot methods" << dendl;
    m_legacy_snapshot = LEGACY_SNAPSHOT_ENABLED;
    send_v2_get_snapshots();
    return nullptr;
  } else if (m_legacy_snapshot == LEGACY_SNAPSHOT_ENABLED &&
             *result == -EOPNOTSUPP) {
    ldout(cct, 10) << "retrying using legacy snapshot methods (jewel)" << dendl;
    m_legacy_snapshot = LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP;
    send_v2_get_snapshots();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }
  send_v2_refresh_parent();
  return nullptr;
}
// Decide (under the image lock) whether the parent image needs to be
// (re)opened or closed, and if so create a RefreshParentRequest; the
// request is sent after dropping the lock.
template <typename I>
void RefreshRequest<I>::send_v2_refresh_parent() {
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    ParentImageInfo parent_md;
    MigrationInfo migration_info;
    int r = get_parent_info(m_image_ctx.snap_id, &parent_md, &migration_info);
    if (!m_skip_open_parent_image && (r < 0 ||
        RefreshParentRequest<I>::is_refresh_required(m_image_ctx, parent_md,
                                                     migration_info))) {
      CephContext *cct = m_image_ctx.cct;
      ldout(cct, 10) << this << " " << __func__ << dendl;
      using klass = RefreshRequest<I>;
      Context *ctx = create_context_callback<
        klass, &klass::handle_v2_refresh_parent>(this);
      m_refresh_parent = RefreshParentRequest<I>::create(
        m_image_ctx, parent_md, migration_info, ctx);
    }
  }
  // send outside the lock scope; skip straight to exclusive-lock init when
  // no parent refresh is needed
  if (m_refresh_parent != nullptr) {
    m_refresh_parent->send();
  } else {
    send_v2_init_exclusive_lock();
  }
}
// Handle completion of the parent refresh: -ENOENT means the parent raced
// away (restart the refresh, bounded by MAX_ENOENT_RETRIES); other errors
// are saved and the apply step still runs so state is committed/cleaned up.
template <typename I>
Context *RefreshRequest<I>::handle_v2_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result == -ENOENT && m_enoent_retries++ < MAX_ENOENT_RETRIES) {
    ldout(cct, 10) << "out-of-sync parent info detected, retrying" << dendl;
    ceph_assert(m_refresh_parent != nullptr);
    delete m_refresh_parent;
    m_refresh_parent = nullptr;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to refresh parent image: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
    send_v2_apply();
    return nullptr;
  }
  send_v2_init_exclusive_lock();
  return nullptr;
}
// Create and initialize an ExclusiveLock instance when the feature is
// enabled, the image is writable, HEAD is open and no instance exists yet
// (i.e. the feature was just dynamically enabled or the image is opening).
template <typename I>
void RefreshRequest<I>::send_v2_init_exclusive_lock() {
  if ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0 ||
      m_read_only || !m_image_ctx.snap_name.empty() ||
      m_image_ctx.exclusive_lock != nullptr) {
    send_v2_open_object_map();
    return;
  }
  // implies exclusive lock dynamically enabled or image open in-progress
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  // TODO need safe shut down
  m_exclusive_lock = m_image_ctx.create_exclusive_lock();
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_init_exclusive_lock>(this);
  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  m_exclusive_lock->init(m_features, ctx);
}
// Record any exclusive-lock init failure and continue directly to apply;
// object map and journal are deferred until the lock is actually acquired.
template <typename I>
Context *RefreshRequest<I>::handle_v2_init_exclusive_lock(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to initialize exclusive lock: "
               << cpp_strerror(*result) << dendl;
    save_result(result);
  }
  // object map and journal will be opened when exclusive lock is
  // acquired (if features are enabled)
  send_v2_apply();
  return nullptr;
}
// Open the journal when the feature was dynamically enabled while this
// client owns the exclusive lock; otherwise either require the lock for
// future IO (feature enabled but lock not owned) or skip ahead.
template <typename I>
void RefreshRequest<I>::send_v2_open_journal() {
  bool journal_disabled = (
    (m_features & RBD_FEATURE_JOURNALING) == 0 ||
     m_read_only ||
     !m_image_ctx.snap_name.empty() ||
     m_image_ctx.journal != nullptr ||
     m_image_ctx.exclusive_lock == nullptr ||
     !m_image_ctx.exclusive_lock->is_lock_owner());
  bool journal_disabled_by_policy;
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    journal_disabled_by_policy = (
      !journal_disabled &&
      m_image_ctx.get_journal_policy()->journal_disabled());
  }
  if (journal_disabled || journal_disabled_by_policy) {
    // journal dynamically enabled -- doesn't own exclusive lock
    if ((m_features & RBD_FEATURE_JOURNALING) != 0 &&
        !journal_disabled_by_policy &&
        m_image_ctx.exclusive_lock != nullptr &&
        m_image_ctx.journal == nullptr) {
      // force lock acquisition before any further IO so the journal gets
      // opened by the lock-acquire path
      auto ctx = new LambdaContext([this](int) {
          send_v2_block_writes();
        });
      m_image_ctx.exclusive_lock->set_require_lock(
        true, librbd::io::DIRECTION_BOTH, ctx);
      return;
    }
    send_v2_block_writes();
    return;
  }
  // implies journal dynamically enabled since ExclusiveLock will init
  // the journal upon acquiring the lock
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_journal>(this);
  // TODO need safe close
  m_journal = m_image_ctx.create_journal();
  m_journal->open(ctx);
}
// Record any journal open failure and continue the state machine; the
// error is surfaced later via the saved result.
template <typename I>
Context *RefreshRequest<I>::handle_v2_open_journal(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to initialize journal: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
  }
  send_v2_block_writes();
  return nullptr;
}
// When journaling was dynamically disabled while a journal is still open,
// temporarily block writes so no new journal entries can race with the
// upcoming journal close; otherwise go straight to apply.
template <typename I>
void RefreshRequest<I>::send_v2_block_writes() {
  bool disabled_journaling = false;
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
                           (m_features & RBD_FEATURE_JOURNALING) == 0 &&
                           m_image_ctx.journal != nullptr);
  }
  if (!disabled_journaling) {
    send_v2_apply();
    return;
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  // we need to block writes temporarily to avoid in-flight journal
  // writes
  m_blocked_writes = true;  // remembered so handle_v2_close_journal unblocks
  Context *ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this);
  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  m_image_ctx.io_image_dispatcher->block_writes(ctx);
}
// Record any block-writes failure and continue to the apply step.
template <typename I>
Context *RefreshRequest<I>::handle_v2_block_writes(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to block writes: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
  }
  send_v2_apply();
  return nullptr;
}
// Open the object map when the feature was dynamically enabled: for HEAD
// only while owning the exclusive lock on a writable image; for a snapshot
// by resolving the snap name to its id first.
template <typename I>
void RefreshRequest<I>::send_v2_open_object_map() {
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0 ||
      m_image_ctx.object_map != nullptr ||
      (m_image_ctx.snap_name.empty() &&
       (m_read_only ||
        m_image_ctx.exclusive_lock == nullptr ||
        !m_image_ctx.exclusive_lock->is_lock_owner()))) {
    send_v2_open_journal();
    return;
  }
  // implies object map dynamically enabled or image open in-progress
  // since SetSnapRequest loads the object map for a snapshot and
  // ExclusiveLock loads the object map for HEAD
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  if (m_image_ctx.snap_name.empty()) {
    m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP);
  } else {
    // resolve the snap name against the freshly-fetched snapshot list
    for (size_t snap_idx = 0; snap_idx < m_snap_infos.size(); ++snap_idx) {
      if (m_snap_infos[snap_idx].name == m_image_ctx.snap_name) {
        m_object_map = m_image_ctx.create_object_map(
          m_snapc.snaps[snap_idx].val);
        break;
      }
    }
    if (m_object_map == nullptr) {
      // snapshot vanished between fetch and open: continue without a map
      lderr(cct) << "failed to locate snapshot: " << m_image_ctx.snap_name
                 << dendl;
      send_v2_open_journal();
      return;
    }
  }
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_object_map>(this);
  m_object_map->open(ctx);
}
// On object map open failure, discard the instance; -EFBIG (map too large)
// is tolerated without failing the whole refresh, other errors are saved.
template <typename I>
Context *RefreshRequest<I>::handle_v2_open_object_map(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to open object map: " << cpp_strerror(*result)
               << dendl;
    m_object_map->put();
    m_object_map = nullptr;
    if (*result != -EFBIG) {
      save_result(result);
    }
  }
  send_v2_open_journal();
  return nullptr;
}
// Bounce through the op work queue before committing v2 refresh state so
// that apply() never executes from inside a librados callback thread.
template <typename I>
void RefreshRequest<I>::send_v2_apply() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  auto ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_apply>(this);
  m_image_ctx.op_work_queue->queue(ctx, 0);
}
// Now on the work queue: commit all cached v2 state into the ImageCtx, then
// start tearing down any dynamically-disabled feature instances.
template <typename I>
Context *RefreshRequest<I>::handle_v2_apply(int *result) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  apply();
  return send_v2_finalize_refresh_parent();
}
// Finalize (close out) the parent refresh request if one was created;
// otherwise continue to exclusive-lock shutdown.
template <typename I>
Context *RefreshRequest<I>::send_v2_finalize_refresh_parent() {
  if (m_refresh_parent == nullptr) {
    return send_v2_shut_down_exclusive_lock();
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_finalize_refresh_parent>(this);
  m_refresh_parent->finalize(ctx);
  return nullptr;
}
// Destroy the finalized parent refresh request and continue the teardown
// sequence (the finalize result is intentionally not propagated).
template <typename I>
Context *RefreshRequest<I>::handle_v2_finalize_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  ceph_assert(m_refresh_parent != nullptr);
  delete m_refresh_parent;
  m_refresh_parent = nullptr;
  return send_v2_shut_down_exclusive_lock();
}
// Shut down the exclusive lock instance apply() handed back to us when the
// feature was dynamically disabled; no-op when no instance is pending.
template <typename I>
Context *RefreshRequest<I>::send_v2_shut_down_exclusive_lock() {
  if (m_exclusive_lock == nullptr) {
    return send_v2_close_journal();
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  // exclusive lock feature was dynamically disabled. in-flight IO will be
  // flushed and in-flight requests will be canceled before releasing lock
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_shut_down_exclusive_lock>(this);
  m_exclusive_lock->shut_down(ctx);
  return nullptr;
}
// Release the shut-down exclusive lock instance (the ImageCtx pointer must
// already have been cleared by apply()) and continue with the journal.
template <typename I>
Context *RefreshRequest<I>::handle_v2_shut_down_exclusive_lock(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to shut down exclusive lock: "
               << cpp_strerror(*result) << dendl;
    save_result(result);
  }
  {
    std::unique_lock owner_locker{m_image_ctx.owner_lock};
    ceph_assert(m_image_ctx.exclusive_lock == nullptr);
  }
  ceph_assert(m_exclusive_lock != nullptr);
  m_exclusive_lock->put();
  m_exclusive_lock = nullptr;
  return send_v2_close_journal();
}
// Close the journal instance apply() handed back when journaling was
// dynamically disabled; no-op when no instance is pending.
template <typename I>
Context *RefreshRequest<I>::send_v2_close_journal() {
  if (m_journal == nullptr) {
    return send_v2_close_object_map();
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  // journal feature was dynamically disabled
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_close_journal>(this);
  m_journal->close(ctx);
  return nullptr;
}
// Release the closed journal and lift the temporary write block that was
// installed by send_v2_block_writes().
template <typename I>
Context *RefreshRequest<I>::handle_v2_close_journal(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    save_result(result);
    lderr(cct) << "failed to close journal: " << cpp_strerror(*result)
               << dendl;
  }
  ceph_assert(m_journal != nullptr);
  m_journal->put();
  m_journal = nullptr;
  // journal close implies writes were blocked earlier; unblock them now
  ceph_assert(m_blocked_writes);
  m_blocked_writes = false;
  m_image_ctx.io_image_dispatcher->unblock_writes();
  return send_v2_close_object_map();
}
// Close the object map instance apply() handed back when the feature was
// dynamically disabled; no-op when no instance is pending.
template <typename I>
Context *RefreshRequest<I>::send_v2_close_object_map() {
  if (m_object_map == nullptr) {
    return send_flush_aio();
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  // object map was dynamically disabled
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_close_object_map>(this);
  m_object_map->close(ctx);
  return nullptr;
}
// Release the closed object map (close failures are logged but do not fail
// the refresh) and proceed to the final flush.
template <typename I>
Context *RefreshRequest<I>::handle_v2_close_object_map(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to close object map: " << cpp_strerror(*result)
               << dendl;
  }
  ceph_assert(m_object_map != nullptr);
  m_object_map->put();
  m_object_map = nullptr;
  return send_flush_aio();
}
// Final step: flush in-flight AIO if new snapshots were applied (m_flush_aio
// set by apply()), otherwise either surface a saved error asynchronously or
// complete immediately. A partial refresh is converted into -ERESTART here.
template <typename I>
Context *RefreshRequest<I>::send_flush_aio() {
  if (m_incomplete_update && m_error_result == 0) {
    // if this was a partial refresh, notify ImageState
    m_error_result = -ERESTART;
  }
  if (m_flush_aio) {
    CephContext *cct = m_image_ctx.cct;
    ldout(cct, 10) << this << " " << __func__ << dendl;
    std::shared_lock owner_locker{m_image_ctx.owner_lock};
    auto ctx = create_context_callback<
      RefreshRequest<I>, &RefreshRequest<I>::handle_flush_aio>(this);
    auto aio_comp = io::AioCompletion::create_and_start(
      ctx, util::get_image_ctx(&m_image_ctx), io::AIO_TYPE_FLUSH);
    auto req = io::ImageDispatchSpec::create_flush(
      m_image_ctx, io::IMAGE_DISPATCH_LAYER_REFRESH, aio_comp,
      io::FLUSH_SOURCE_REFRESH, {});
    req->send();
    return nullptr;
  } else if (m_error_result < 0) {
    // propagate saved error back to caller
    Context *ctx = create_context_callback<
      RefreshRequest<I>, &RefreshRequest<I>::handle_error>(this);
    m_image_ctx.op_work_queue->queue(ctx, 0);
    return nullptr;
  }
  return m_on_finish;
}
// Log any flush failure, then fall through to the common error/finish path.
template <typename I>
Context *RefreshRequest<I>::handle_flush_aio(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to flush pending AIO: " << cpp_strerror(*result)
               << dendl;
  }
  return handle_error(result);
}
// Common completion path: a previously saved error (save_result) overrides
// the current result before the on-finish context is returned.
template <typename I>
Context *RefreshRequest<I>::handle_error(int *result) {
  if (m_error_result < 0) {
    *result = m_error_result;
    CephContext *cct = m_image_ctx.cct;
    ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  }
  return m_on_finish;
}
template <typename I>
void RefreshRequest<I>::apply() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock};
m_image_ctx.read_only_flags = m_read_only_flags;
m_image_ctx.read_only = m_read_only;
m_image_ctx.size = m_size;
m_image_ctx.lockers = m_lockers;
m_image_ctx.lock_tag = m_lock_tag;
m_image_ctx.exclusive_locked = m_exclusive_locked;
std::map<uint64_t, uint64_t> migration_reverse_snap_seq;
if (m_image_ctx.old_format) {
m_image_ctx.order = m_order;
m_image_ctx.features = 0;
m_image_ctx.flags = 0;
m_image_ctx.op_features = 0;
m_image_ctx.operations_disabled = false;
m_image_ctx.object_prefix = std::move(m_object_prefix);
m_image_ctx.init_layout(m_image_ctx.md_ctx.get_id());
} else {
// HEAD revision doesn't have a defined overlap so it's only
// applicable to snapshots
if (!m_head_parent_overlap) {
m_parent_md = {};
}
m_image_ctx.features = m_features;
m_image_ctx.flags = m_flags;
m_image_ctx.op_features = m_op_features;
m_image_ctx.operations_disabled = (
(m_op_features & ~RBD_OPERATION_FEATURES_ALL) != 0ULL);
m_image_ctx.group_spec = m_group_spec;
bool migration_info_valid;
int r = get_migration_info(&m_image_ctx.parent_md,
&m_image_ctx.migration_info,
&migration_info_valid);
ceph_assert(r == 0); // validated in refresh parent step
if (migration_info_valid) {
for (auto it : m_image_ctx.migration_info.snap_map) {
migration_reverse_snap_seq[it.second.front()] = it.first;
}
} else {
m_image_ctx.parent_md = m_parent_md;
m_image_ctx.migration_info = {};
}
librados::Rados rados(m_image_ctx.md_ctx);
int8_t require_osd_release;
r = rados.get_min_compatible_osd(&require_osd_release);
if (r == 0 && require_osd_release >= CEPH_RELEASE_OCTOPUS) {
m_image_ctx.enable_sparse_copyup = true;
}
}
for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
std::vector<librados::snap_t>::const_iterator it = std::find(
m_image_ctx.snaps.begin(), m_image_ctx.snaps.end(),
m_snapc.snaps[i].val);
if (it == m_image_ctx.snaps.end()) {
m_flush_aio = true;
ldout(cct, 20) << "new snapshot id=" << m_snapc.snaps[i].val
<< " name=" << m_snap_infos[i].name
<< " size=" << m_snap_infos[i].image_size
<< dendl;
}
}
m_image_ctx.snaps.clear();
m_image_ctx.snap_info.clear();
m_image_ctx.snap_ids.clear();
auto overlap = m_image_ctx.parent_md.overlap;
for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
uint64_t flags = m_image_ctx.old_format ? 0 : m_snap_flags[i];
uint8_t protection_status = m_image_ctx.old_format ?
static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED) :
m_snap_protection[i];
ParentImageInfo parent;
if (!m_image_ctx.old_format) {
if (!m_image_ctx.migration_info.empty()) {
parent = m_image_ctx.parent_md;
auto it = migration_reverse_snap_seq.find(m_snapc.snaps[i].val);
if (it != migration_reverse_snap_seq.end()) {
parent.spec.snap_id = it->second;
parent.overlap = m_snap_infos[i].image_size;
} else {
overlap = std::min(overlap, m_snap_infos[i].image_size);
parent.overlap = overlap;
}
} else {
parent = m_snap_parents[i];
}
}
m_image_ctx.add_snap(m_snap_infos[i].snapshot_namespace,
m_snap_infos[i].name, m_snapc.snaps[i].val,
m_snap_infos[i].image_size, parent,
protection_status, flags,
m_snap_infos[i].timestamp);
}
m_image_ctx.parent_md.overlap = std::min(overlap, m_image_ctx.size);
m_image_ctx.snapc = m_snapc;
if (m_image_ctx.snap_id != CEPH_NOSNAP &&
m_image_ctx.get_snap_id(m_image_ctx.snap_namespace,
m_image_ctx.snap_name) != m_image_ctx.snap_id) {
lderr(cct) << "tried to read from a snapshot that no longer exists: "
<< m_image_ctx.snap_name << dendl;
m_image_ctx.snap_exists = false;
}
if (m_refresh_parent != nullptr) {
m_refresh_parent->apply();
}
if (m_image_ctx.data_ctx.is_valid()) {
m_image_ctx.data_ctx.selfmanaged_snap_set_write_ctx(m_image_ctx.snapc.seq,
m_image_ctx.snaps);
m_image_ctx.rebuild_data_io_context();
}
// handle dynamically enabled / disabled features
if (m_image_ctx.exclusive_lock != nullptr &&
!m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK,
m_image_ctx.image_lock)) {
// disabling exclusive lock will automatically handle closing
// object map and journaling
ceph_assert(m_exclusive_lock == nullptr);
m_exclusive_lock = m_image_ctx.exclusive_lock;
} else {
if (m_exclusive_lock != nullptr) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr);
std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
}
if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
m_image_ctx.image_lock)) {
if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) {
m_image_ctx.exclusive_lock->unset_require_lock(io::DIRECTION_READ);
}
std::swap(m_journal, m_image_ctx.journal);
} else if (m_journal != nullptr) {
std::swap(m_journal, m_image_ctx.journal);
}
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock) ||
m_object_map != nullptr) {
std::swap(m_object_map, m_image_ctx.object_map);
}
}
}
template <typename I>
int RefreshRequest<I>::get_parent_info(uint64_t snap_id,
                                       ParentImageInfo *parent_md,
                                       MigrationInfo *migration_info) {
  // A valid migration source (if any) takes precedence over the parent
  // recorded in the image header.
  bool valid_migration = false;
  int r = get_migration_info(parent_md, migration_info, &valid_migration);
  if (r < 0) {
    return r;
  }
  if (valid_migration) {
    return 0;
  }

  if (snap_id == CEPH_NOSNAP) {
    // HEAD revision: use the parent from the mutable metadata
    *parent_md = m_parent_md;
    *migration_info = {};
    return 0;
  }

  // otherwise locate the snapshot and return its recorded parent spec
  for (size_t idx = 0; idx < m_snapc.snaps.size(); ++idx) {
    if (m_snapc.snaps[idx].val != snap_id) {
      continue;
    }
    *parent_md = m_snap_parents[idx];
    *migration_info = {};
    return 0;
  }
  return -ENOENT;
}
// Synthesize parent metadata from the image's migration spec.  When the image
// is a migration destination in a state where the source must still act as a
// parent, *parent_md / *migration_info are populated and
// *migration_info_valid is set to true; otherwise it is set to false and the
// out-params are left untouched.  Returns 0 on success, -EINVAL on an
// inconsistent migration spec.
template <typename I>
int RefreshRequest<I>::get_migration_info(ParentImageInfo *parent_md,
                                          MigrationInfo *migration_info,
                                          bool* migration_info_valid) {
  CephContext *cct = m_image_ctx.cct;
  // only a destination image in PREPARED/EXECUTING/ABORTING state exposes the
  // migration source as a parent
  if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST ||
      (m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
       m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING &&
       m_migration_spec.state != cls::rbd::MIGRATION_STATE_ABORTING)) {
    // a non-empty spec that is neither a source header nor a fully executed
    // migration is inconsistent
    if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_SRC &&
        m_migration_spec.pool_id != -1 &&
        m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTED) {
      lderr(cct) << this << " " << __func__ << ": invalid migration spec"
                 << dendl;
      return -EINVAL;
    }
    *migration_info_valid = false;
    return 0;
  }
  if (!m_migration_spec.source_spec.empty()) {
    // use special pool id just to indicate a parent (migration source image)
    // exists
    parent_md->spec.pool_id = std::numeric_limits<int64_t>::max();
    parent_md->spec.pool_namespace = "";
    parent_md->spec.image_id = "";
  } else {
    parent_md->spec.pool_id = m_migration_spec.pool_id;
    parent_md->spec.pool_namespace = m_migration_spec.pool_namespace;
    parent_md->spec.image_id = m_migration_spec.image_id;
  }
  parent_md->spec.snap_id = CEPH_NOSNAP;
  parent_md->overlap = std::min(m_size, m_migration_spec.overlap);
  auto snap_seqs = m_migration_spec.snap_seqs;
  // If new snapshots have been created on destination image after
  // migration started, map the source CEPH_NOSNAP to the earliest of
  // these snapshots.
  snapid_t snap_id = snap_seqs.empty() ? 0 : snap_seqs.rbegin()->second;
  // m_snapc.snaps is listed newest-first; iterating the reversed range scans
  // the ids in ascending order so upper_bound finds the first id > snap_id
  auto it = std::upper_bound(m_snapc.snaps.rbegin(), m_snapc.snaps.rend(),
                             snap_id);
  if (it != m_snapc.snaps.rend()) {
    snap_seqs[CEPH_NOSNAP] = *it;
  } else {
    snap_seqs[CEPH_NOSNAP] = CEPH_NOSNAP;
  }
  // collect destination snap ids referenced by the mapping; the overlap is
  // the max of the mapped snapshots' sizes (or the HEAD overlap if HEAD is
  // part of the mapping)
  std::set<uint64_t> snap_ids;
  for (auto& it : snap_seqs) {
    snap_ids.insert(it.second);
  }
  uint64_t overlap = snap_ids.find(CEPH_NOSNAP) != snap_ids.end() ?
    parent_md->overlap : 0;
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    if (snap_ids.find(m_snapc.snaps[i].val) != snap_ids.end()) {
      overlap = std::max(overlap, m_snap_infos[i].image_size);
    }
  }
  *migration_info = {m_migration_spec.pool_id, m_migration_spec.pool_namespace,
                     m_migration_spec.image_name, m_migration_spec.image_id,
                     m_migration_spec.source_spec, {}, overlap,
                     m_migration_spec.flatten};
  *migration_info_valid = true;
  deep_copy::util::compute_snap_map(m_image_ctx.cct, 0, CEPH_NOSNAP, {},
                                    snap_seqs, &migration_info->snap_map);
  return 0;
}
} // namespace image
} // namespace librbd
template class librbd::image::RefreshRequest<librbd::ImageCtx>;
| 50,603 | 31.109137 | 82 | cc |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_REFRESH_REQUEST_H
#define CEPH_LIBRBD_IMAGE_REFRESH_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/utime.h"
#include "common/snap_types.h"
#include "cls/lock/cls_lock_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include <string>
#include <vector>
class Context;
namespace librbd {
class ImageCtx;
namespace image {
template<typename> class RefreshParentRequest;
// State machine that (re-)reads an image's on-disk metadata (v1 or v2
// format) and applies it to the in-memory ImageCtx, opening/closing the
// exclusive lock, object map and journal as dictated by the refreshed
// feature set.
template<typename ImageCtxT = ImageCtx>
class RefreshRequest {
public:
  // number of times a transient -ENOENT (e.g. raced snapshot removal) is
  // retried before giving up
  static constexpr int MAX_ENOENT_RETRIES = 10;
  static RefreshRequest *create(ImageCtxT &image_ctx, bool acquiring_lock,
                                bool skip_open_parent, Context *on_finish) {
    return new RefreshRequest(image_ctx, acquiring_lock, skip_open_parent,
                              on_finish);
  }
  RefreshRequest(ImageCtxT &image_ctx, bool acquiring_lock,
                 bool skip_open_parent, Context *on_finish);
  ~RefreshRequest();
  void send();
private:
  /**
   * @verbatim
   *
   * <start> < * * * * * * * * * * * * * * * * * * * * * * * * * * (ENOENT)
   *    ^ |                                                        *
   *    * | (v1)                                                   *
   *    * |-----> V1_READ_HEADER -------------> GET_MIGRATION_HEADER (skip if not
   *    * |                                        |             migrating)
   *    * | (v2)                                   v
   *    * \-----> V2_GET_MUTABLE_METADATA      V1_GET_SNAPSHOTS
   *    *                 |                        |
   *    *                 | -EOPNOTSUPP            v
   *    *                 | * * *              V1_GET_LOCKS
   *    *                 | *   *                  |
   *    *                 v v   *                  v
   *    *         V2_GET_PARENT *               <apply>
   *    *                 |     *                  |
   *    *                 v     *                  |
   *    * * * * GET_MIGRATION_HEADER (skip if not  |
   * (ENOENT)             |          migrating)    |
   *                      v                        |
   *  * * * * *  V2_GET_METADATA                   |
   *  *                   |                        |
   *  *                   v                        |
   *  *          V2_GET_POOL_METADATA              |
   *  *                   |                        |
   *  *                   v (skip if not enabled)  |
   *  *          V2_GET_OP_FEATURES                |
   *  *                   |                        |
   *  *                   v                        |
   *  *          V2_GET_GROUP                      |
   *  *                   |                        |
   *  *                   | -EOPNOTSUPP            |
   *  *                   | * * *                  |
   *  *                   | *   *                  |
   *  *                   v v   *                  |
   *  * * * * * V2_GET_SNAPSHOTS (skip if no snaps)|
   * (ENOENT)             |                        |
   *  *                   v                        |
   *  * * * * * V2_REFRESH_PARENT (skip if no parent or
   * (ENOENT)             |        refresh not needed)
   *                      v                        |
   *        V2_INIT_EXCLUSIVE_LOCK (skip if lock   |
   *                      |        active or disabled)
   *                      v                        |
   *        V2_OPEN_OBJECT_MAP (skip if map        |
   *                      |     active or disabled)|
   *                      v                        |
   *        V2_OPEN_JOURNAL (skip if journal       |
   *                      |  active or disabled)   |
   *                      v                        |
   *        V2_BLOCK_WRITES (skip if journal not   |
   *                      |  disabled)             |
   *                      v                        |
   *                   <apply>                     |
   *                      |                        |
   *                      v                        |
   *        V2_FINALIZE_REFRESH_PARENT (skip if refresh
   *                      |            not needed) |
   *            (error)   v                        |
   *  * * * * > V2_SHUT_DOWN_EXCLUSIVE_LOCK (skip if lock
   *                      |                  active or enabled)
   *                      v                        |
   *        V2_CLOSE_JOURNAL (skip if journal inactive
   *                      |   or enabled)          |
   *                      v                        |
   *        V2_CLOSE_OBJECT_MAP (skip if map inactive
   *                      |      or enabled)       |
   *                      |                        |
   *                      \--------\/--------------/
   *                               |
   *                               v
   *                             FLUSH (skip if no new
   *                               |    snapshots)
   *                               v
   *                            <finish>
   *
   * @endverbatim
   */
  // how pre-mimic snapshot metadata must be fetched (no namespace and/or no
  // timestamp support on older OSDs)
  enum LegacySnapshot {
    LEGACY_SNAPSHOT_DISABLED,
    LEGACY_SNAPSHOT_ENABLED,
    LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP
  };
  ImageCtxT &m_image_ctx;
  bool m_acquiring_lock;
  bool m_skip_open_parent_image;
  Context *m_on_finish;
  cls::rbd::MigrationSpec m_migration_spec;
  int m_error_result;       // first error observed (sticky; see save_result)
  bool m_flush_aio;         // set when new snapshots require an I/O flush
  // objects to be torn down after apply() swaps them out of the ImageCtx
  decltype(m_image_ctx.exclusive_lock) m_exclusive_lock;
  decltype(m_image_ctx.object_map) m_object_map;
  decltype(m_image_ctx.journal) m_journal;
  RefreshParentRequest<ImageCtxT> *m_refresh_parent;
  bufferlist m_out_bl;      // scratch buffer for rados read replies
  bool m_legacy_parent = false;
  LegacySnapshot m_legacy_snapshot = LEGACY_SNAPSHOT_DISABLED;
  int m_enoent_retries = 0;
  // staged copies of the on-disk metadata, applied to the ImageCtx in apply()
  uint8_t m_order = 0;
  uint64_t m_size = 0;
  uint64_t m_features = 0;
  uint64_t m_incompatible_features = 0;
  uint64_t m_flags = 0;
  uint64_t m_op_features = 0;
  uint32_t m_read_only_flags = 0U;
  bool m_read_only = false;
  librados::IoCtx m_pool_metadata_io_ctx;
  std::map<std::string, bufferlist> m_metadata;
  std::string m_object_prefix;
  ParentImageInfo m_parent_md;
  bool m_head_parent_overlap = false;
  cls::rbd::GroupSpec m_group_spec;
  ::SnapContext m_snapc;
  std::vector<cls::rbd::SnapshotInfo> m_snap_infos;
  std::vector<ParentImageInfo> m_snap_parents;
  std::vector<uint8_t> m_snap_protection;
  std::vector<uint64_t> m_snap_flags;
  // v1-format lock state
  std::map<rados::cls::lock::locker_id_t,
           rados::cls::lock::locker_info_t> m_lockers;
  std::string m_lock_tag;
  bool m_exclusive_locked = false;
  bool m_blocked_writes = false;
  bool m_incomplete_update = false;
  // state-machine steps; each send_* issues an async op and the paired
  // handle_* consumes its result
  void send_get_migration_header();
  Context *handle_get_migration_header(int *result);
  void send_v1_read_header();
  Context *handle_v1_read_header(int *result);
  void send_v1_get_snapshots();
  Context *handle_v1_get_snapshots(int *result);
  void send_v1_get_locks();
  Context *handle_v1_get_locks(int *result);
  void send_v1_apply();
  Context *handle_v1_apply(int *result);
  void send_v2_get_mutable_metadata();
  Context *handle_v2_get_mutable_metadata(int *result);
  void send_v2_get_parent();
  Context *handle_v2_get_parent(int *result);
  void send_v2_get_metadata();
  Context *handle_v2_get_metadata(int *result);
  void send_v2_get_pool_metadata();
  Context *handle_v2_get_pool_metadata(int *result);
  void send_v2_get_op_features();
  Context *handle_v2_get_op_features(int *result);
  void send_v2_get_group();
  Context *handle_v2_get_group(int *result);
  void send_v2_get_snapshots();
  Context *handle_v2_get_snapshots(int *result);
  void send_v2_get_snapshots_legacy();
  Context *handle_v2_get_snapshots_legacy(int *result);
  void send_v2_refresh_parent();
  Context *handle_v2_refresh_parent(int *result);
  void send_v2_init_exclusive_lock();
  Context *handle_v2_init_exclusive_lock(int *result);
  void send_v2_open_journal();
  Context *handle_v2_open_journal(int *result);
  void send_v2_block_writes();
  Context *handle_v2_block_writes(int *result);
  void send_v2_open_object_map();
  Context *handle_v2_open_object_map(int *result);
  void send_v2_apply();
  Context *handle_v2_apply(int *result);
  Context *send_v2_finalize_refresh_parent();
  Context *handle_v2_finalize_refresh_parent(int *result);
  Context *send_v2_shut_down_exclusive_lock();
  Context *handle_v2_shut_down_exclusive_lock(int *result);
  Context *send_v2_close_journal();
  Context *handle_v2_close_journal(int *result);
  Context *send_v2_close_object_map();
  Context *handle_v2_close_object_map(int *result);
  Context *send_flush_aio();
  Context *handle_flush_aio(int *result);
  Context *handle_error(int *result);
  // record only the first failure so later cleanup steps don't mask it
  void save_result(int *result) {
    if (m_error_result == 0 && *result < 0) {
      m_error_result = *result;
    }
  }
  void apply();
  int get_parent_info(uint64_t snap_id, ParentImageInfo *parent_md,
                      MigrationInfo *migration_info);
  int get_migration_info(ParentImageInfo *parent_md,
                         MigrationInfo *migration_info,
                         bool* migration_info_valid);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::RefreshRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_REFRESH_REQUEST_H
| 10,041 | 35.384058 | 79 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/RemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/internal.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/image/DetachChildRequest.h"
#include "librbd/image/PreRemoveRequest.h"
#include "librbd/journal/RemoveRequest.h"
#include "librbd/journal/TypeTraits.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/operation/TrimRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::RemoveRequest: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace image {
using librados::IoCtx;
using util::create_context_callback;
using util::create_async_context_callback;
using util::create_rados_callback;
// Construct a removal request for an image that is not yet open; the image
// will be opened (or its absence tolerated) by the state machine itself.
template<typename I>
RemoveRequest<I>::RemoveRequest(IoCtx &ioctx, const std::string &image_name,
                                const std::string &image_id, bool force,
                                bool from_trash_remove,
                                ProgressContext &prog_ctx,
                                ContextWQ *op_work_queue, Context *on_finish)
  : m_ioctx(ioctx), m_image_name(image_name), m_image_id(image_id),
    m_force(force), m_from_trash_remove(from_trash_remove),
    m_prog_ctx(prog_ctx), m_op_work_queue(op_work_queue),
    m_on_finish(on_finish) {
  m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
}
// Construct a removal request for an already-open image; identity fields are
// seeded from the supplied ImageCtx so the open step can be skipped.
template<typename I>
RemoveRequest<I>::RemoveRequest(IoCtx &ioctx, I *image_ctx, bool force,
                                bool from_trash_remove,
                                ProgressContext &prog_ctx,
                                ContextWQ *op_work_queue, Context *on_finish)
  : m_ioctx(ioctx), m_image_name(image_ctx->name), m_image_id(image_ctx->id),
    m_image_ctx(image_ctx), m_force(force),
    m_from_trash_remove(from_trash_remove), m_prog_ctx(prog_ctx),
    m_op_work_queue(op_work_queue), m_on_finish(on_finish),
    m_cct(image_ctx->cct), m_header_oid(image_ctx->header_oid),
    m_old_format(image_ctx->old_format), m_unknown_format(false) {
}
// Entry point: kick off the state machine by (potentially) opening the image.
template<typename I>
void RemoveRequest<I>::send() {
  ldout(m_cct, 20) << dendl;
  open_image();
}
// Open the image unless an already-open ImageCtx was supplied; the parent is
// intentionally not opened since removal never touches parent data.
template<typename I>
void RemoveRequest<I>::open_image() {
  if (m_image_ctx != nullptr) {
    pre_remove_image();
    return;
  }
  // prefer lookup by id when available; otherwise fall back to the name
  m_image_ctx = I::create(m_image_id.empty() ? m_image_name : "", m_image_id,
                          nullptr, m_ioctx, false);
  ldout(m_cct, 20) << dendl;
  using klass = RemoveRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_open_image>(
    this);
  m_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT, ctx);
}
// Handle the result of opening the image: cache its identity on success,
// fall back to raw metadata removal when the header is already gone.
template<typename I>
void RemoveRequest<I>::handle_open_image(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r == -ENOENT) {
    // header already gone: proceed straight to cleaning up the remaining
    // on-disk artifacts
    m_image_ctx = nullptr;
    remove_image();
    return;
  } else if (r < 0) {
    m_image_ctx = nullptr;
    lderr(m_cct) << "error opening image: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  // cache the identity of the now-open image before removal begins
  m_image_id = m_image_ctx->id;
  m_image_name = m_image_ctx->name;
  m_header_oid = m_image_ctx->header_oid;
  m_old_format = m_image_ctx->old_format;
  m_unknown_format = false;
  pre_remove_image();
}
// Run the pre-removal validation/preparation sub-request (watcher checks,
// snapshot cleanup, exclusive lock acquisition, etc.).
template<typename I>
void RemoveRequest<I>::pre_remove_image() {
  ldout(m_cct, 5) << dendl;
  auto ctx = create_context_callback<
    RemoveRequest<I>, &RemoveRequest<I>::handle_pre_remove_image>(this);
  auto req = PreRemoveRequest<I>::create(m_image_ctx, m_force, ctx);
  req->send();
}
// Handle pre-removal result; on success continue with trimming the image
// data (or skip straight to child detach when the data pool is unusable).
template<typename I>
void RemoveRequest<I>::handle_pre_remove_image(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    // surface "image still has children" as the conventional -ENOTEMPTY
    send_close_image(r == -ECHILD ? -ENOTEMPTY : r);
    return;
  }

  if (m_image_ctx->data_ctx.is_valid()) {
    trim_image();
  } else {
    // no usable data pool: nothing to trim
    detach_child();
  }
}
// Delete all of the image's data objects via an async trim over [0, size).
template<typename I>
void RemoveRequest<I>::trim_image() {
  ldout(m_cct, 20) << dendl;
  using klass = RemoveRequest<I>;
  Context *ctx = create_async_context_callback(
    *m_image_ctx, create_context_callback<
      klass, &klass::handle_trim_image>(this));
  // TrimRequest expects the owner lock to be held while it is constructed
  // and dispatched
  std::shared_lock owner_lock{m_image_ctx->owner_lock};
  auto req = librbd::operation::TrimRequest<I>::create(
    *m_image_ctx, ctx, m_image_ctx->size, 0, m_prog_ctx);
  req->send();
}
// Handle the trim result; v1 images have no child references to detach, so
// they close immediately while v2 images continue with child detachment.
template<typename I>
void RemoveRequest<I>::handle_trim_image(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to remove some object(s): "
                 << cpp_strerror(r) << dendl;
    send_close_image(r);
  } else if (m_old_format) {
    send_close_image(r);
  } else {
    detach_child();
  }
}
// Remove this image's child registration from its parent (v2 clones only).
template<typename I>
void RemoveRequest<I>::detach_child() {
  ldout(m_cct, 20) << dendl;
  auto ctx = create_context_callback<
    RemoveRequest<I>, &RemoveRequest<I>::handle_detach_child>(this);
  auto req = DetachChildRequest<I>::create(*m_image_ctx, ctx);
  req->send();
}
// Handle child detach result; failure aborts the removal (image is closed
// with the error preserved).
template<typename I>
void RemoveRequest<I>::handle_detach_child(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_cct) << "failed to detach child from parent: "
                 << cpp_strerror(r) << dendl;
    send_close_image(r);
    return;
  }
  send_disable_mirror();
}
// Disable mirroring for the image before it disappears; with --force the
// remote demotion/cleanup step is skipped.
template<typename I>
void RemoveRequest<I>::send_disable_mirror() {
  ldout(m_cct, 20) << dendl;
  using klass = RemoveRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_disable_mirror>(this);
  mirror::DisableRequest<I> *req =
    mirror::DisableRequest<I>::create(m_image_ctx, m_force, !m_force, ctx);
  req->send();
}
// Handle the mirroring-disable result.  -EOPNOTSUPP (mirroring not supported)
// is tolerated; other errors are logged but do not abort the removal.  As a
// final safety net, refuse removal if snapshots reappeared in the meantime.
template<typename I>
void RemoveRequest<I>::handle_disable_mirror(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r == -EOPNOTSUPP) {
    r = 0;
  } else if (r < 0) {
    lderr(m_cct) << "error disabling image mirroring: "
                 << cpp_strerror(r) << dendl;
  }

  // one last chance to ensure all snapshots have been deleted
  {
    // hold the image lock via RAII instead of manual lock/unlock so it is
    // always released, even on an unexpected exception
    std::shared_lock image_locker{m_image_ctx->image_lock};
    if (!m_image_ctx->snap_info.empty()) {
      ldout(m_cct, 5) << "image has snapshots - not removing" << dendl;
      m_ret_val = -ENOTEMPTY;
    }
  }

  send_close_image(r);
}
// Close the image, stashing the current result so a failure observed before
// the close is not lost.
template<typename I>
void RemoveRequest<I>::send_close_image(int r) {
  ldout(m_cct, 20) << dendl;
  m_ret_val = r;
  using klass = RemoveRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_send_close_image>(this);
  m_image_ctx->state->close(ctx);
}
// Handle image close; a failure recorded earlier in the state machine takes
// precedence over the close result.
template<typename I>
void RemoveRequest<I>::handle_send_close_image(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "error encountered while closing image: "
                 << cpp_strerror(r) << dendl;
  }
  m_image_ctx = nullptr;

  if (m_ret_val < 0) {
    finish(m_ret_val);
    return;
  }
  remove_header();
}
// Asynchronously delete the image header object.
template<typename I>
void RemoveRequest<I>::remove_header() {
  ldout(m_cct, 20) << dendl;
  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_remove_header>(this);
  int r = m_ioctx.aio_remove(m_header_oid, rados_completion);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Handle header removal; a missing header (-ENOENT) is fine, other errors
// are remembered but removal of the remaining metadata still proceeds.
template<typename I>
void RemoveRequest<I>::handle_remove_header(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "error removing header: " << cpp_strerror(r) << dendl;
    m_ret_val = r;
  }
  remove_image();
}
// Delete the v2 header object, deriving its name from the image id when the
// image was never opened.
template<typename I>
void RemoveRequest<I>::remove_header_v2() {
  ldout(m_cct, 20) << dendl;
  if (m_header_oid.empty()) {
    m_header_oid = util::header_name(m_image_id);
  }
  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_remove_header_v2>(this);
  int r = m_ioctx.aio_remove(m_header_oid, rados_completion);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Handle v2 header removal; -ENOENT is tolerated, any other error aborts.
template<typename I>
void RemoveRequest<I>::handle_remove_header_v2(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "error removing header: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  send_journal_remove();
}
// Remove the image's journal metadata/objects (no-op error if journaling was
// never enabled -- handled by the callback).
template<typename I>
void RemoveRequest<I>::send_journal_remove() {
  ldout(m_cct, 20) << dendl;
  using klass = RemoveRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_journal_remove>(this);
  typename journal::TypeTraits<I>::ContextWQ* context_wq;
  Journal<I>::get_work_queue(m_cct, &context_wq);
  journal::RemoveRequest<I> *req = journal::RemoveRequest<I>::create(
    m_ioctx, m_image_id, Journal<>::IMAGE_CLIENT_ID, context_wq, ctx);
  req->send();
}
// Handle journal removal; a missing journal (-ENOENT) is expected when the
// feature was never enabled.
template<typename I>
void RemoveRequest<I>::handle_journal_remove(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "failed to remove image journal: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  } else {
    r = 0;
  }
  send_object_map_remove();
}
// Remove the image's object map objects (HEAD and per-snapshot).
template<typename I>
void RemoveRequest<I>::send_object_map_remove() {
  ldout(m_cct, 20) << dendl;
  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_object_map_remove>(this);
  int r = ObjectMap<>::aio_remove(m_ioctx,
                                  m_image_id,
                                  rados_completion);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Handle object map removal; a missing object map (-ENOENT) is expected when
// the feature was never enabled, anything else aborts the removal.
template<typename I>
void RemoveRequest<I>::handle_object_map_remove(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    // fixed copy-paste from the journal handler: this step removes the
    // object map, not the journal
    lderr(m_cct) << "failed to remove image object map: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  // (dead "r = 0" reassignment removed -- r is unused past this point)
  mirror_image_remove();
}
// Remove the image's entry from the pool's rbd_mirroring object.
template<typename I>
void RemoveRequest<I>::mirror_image_remove() {
  ldout(m_cct, 20) << dendl;
  librados::ObjectWriteOperation op;
  cls_client::mirror_image_remove(&op, m_image_id);
  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_mirror_image_remove>(this);
  int r = m_ioctx.aio_operate(RBD_MIRRORING, rados_completion, &op);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Handle mirror-state removal; missing entry or unsupported OSD class method
// is tolerated.  Trash-originated removals are done at this point.
template<typename I>
void RemoveRequest<I>::handle_mirror_image_remove(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT && r != -EOPNOTSUPP) {
    lderr(m_cct) << "failed to remove mirror image state: "
                 << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  if (m_from_trash_remove) {
    // both the id object and the directory entry have been removed in
    // a previous call to trash_move.
    finish(0);
    return;
  }
  remove_id_object();
}
// Dispatch metadata removal to the v1 or v2 path.  Unknown-format images are
// probed through the v1 path first; it falls back to v2 removal when the
// image turns out not to be old format.
template<typename I>
void RemoveRequest<I>::remove_image() {
  ldout(m_cct, 20) << dendl;

  const bool try_v1 = (m_old_format || m_unknown_format);
  if (try_v1) {
    remove_v1_image();
  } else {
    remove_v2_image();
  }
}
// Remove the image's entry from the legacy (v1) tmap directory.  tmap_rm is
// a synchronous call, so it is deferred to the op work queue.
template<typename I>
void RemoveRequest<I>::remove_v1_image() {
  ldout(m_cct, 20) << dendl;
  Context *ctx = new LambdaContext([this] (int r) {
      r = tmap_rm(m_ioctx, m_image_name);
      handle_remove_v1_image(r);
    });
  m_op_work_queue->queue(ctx, 0);
}
// Handle v1 directory removal.  Success proves this really was an old-format
// image; failure on a known-v1 image completes with the error, while failure
// on an unknown-format image falls back to the v2 removal path.
template<typename I>
void RemoveRequest<I>::handle_remove_v1_image(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  // a successful tmap_rm confirms the image was old format
  m_old_format = (r == 0);
  if (r == 0 || (r < 0 && !m_unknown_format)) {
    if (r < 0 && r != -ENOENT) {
      lderr(m_cct) << "error removing image from v1 directory: "
                   << cpp_strerror(r) << dendl;
    }
    // complete through the common exit path instead of duplicating
    // m_on_finish->complete() + delete this
    finish(r);
    return;
  }

  // reaching here means r != 0 and the format was unknown, so m_old_format
  // is necessarily false -- try removing it as a v2 image
  remove_v2_image();
}
// Begin v2 metadata removal.  Both the image id and name are required, so
// whichever one is missing is first resolved from the pool directory.
template<typename I>
void RemoveRequest<I>::remove_v2_image() {
  ldout(m_cct, 20) << dendl;

  if (m_image_id.empty()) {
    dir_get_image_id();
  } else if (m_image_name.empty()) {
    dir_get_image_name();
  } else {
    remove_header_v2();
  }
}
// Look up the image id by name in the v2 pool directory.
template<typename I>
void RemoveRequest<I>::dir_get_image_id() {
  ldout(m_cct, 20) << dendl;
  librados::ObjectReadOperation op;
  librbd::cls_client::dir_get_id_start(&op, m_image_name);
  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_dir_get_image_id>(this);
  m_out_bl.clear();
  int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op, &m_out_bl);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Decode the directory lookup result; a missing entry (-ENOENT) simply
// leaves m_image_id empty and removal continues with what is known.
template<typename I>
void RemoveRequest<I>::handle_dir_get_image_id(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "error fetching image id: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  if (r == 0) {
    auto iter = m_out_bl.cbegin();
    r = librbd::cls_client::dir_get_id_finish(&iter, &m_image_id);
    if (r < 0) {
      finish(r);
      return;
    }
  }
  remove_header_v2();
}
// Look up the image name by id in the v2 pool directory.
template<typename I>
void RemoveRequest<I>::dir_get_image_name() {
  ldout(m_cct, 20) << dendl;
  librados::ObjectReadOperation op;
  librbd::cls_client::dir_get_name_start(&op, m_image_id);
  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_dir_get_image_name>(this);
  m_out_bl.clear();
  int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op, &m_out_bl);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Decode the directory lookup result; a missing entry (-ENOENT) simply
// leaves m_image_name empty and removal continues with what is known.
template<typename I>
void RemoveRequest<I>::handle_dir_get_image_name(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "error fetching image name: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  if (r == 0) {
    auto iter = m_out_bl.cbegin();
    r = librbd::cls_client::dir_get_name_finish(&iter, &m_image_name);
    if (r < 0) {
      finish(r);
      return;
    }
  }
  remove_header_v2();
}
// Remove the per-image "rbd_id.<name>" object.
template<typename I>
void RemoveRequest<I>::remove_id_object() {
  ldout(m_cct, 20) << dendl;
  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_remove_id_object>(this);
  int r = m_ioctx.aio_remove(util::id_obj_name(m_image_name), rados_completion);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Handle id object removal; -ENOENT is tolerated, other errors abort.
template<typename I>
void RemoveRequest<I>::handle_remove_id_object(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "error removing id object: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  dir_remove_image();
}
// Remove the image's entry from the v2 pool directory (the final step).
template<typename I>
void RemoveRequest<I>::dir_remove_image() {
  ldout(m_cct, 20) << dendl;
  librados::ObjectWriteOperation op;
  librbd::cls_client::dir_remove_image(&op, m_image_name, m_image_id);
  using klass = RemoveRequest<I>;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_dir_remove_image>(this);
  int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Handle the directory removal result and complete the request with it
// (-ENOENT included, so callers can distinguish a partially removed image).
template<typename I>
void RemoveRequest<I>::handle_dir_remove_image(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "error removing image from v2 directory: "
                 << cpp_strerror(r) << dendl;
  }
  finish(r);
}
// Complete the user callback and destroy this self-managed request.
template<typename I>
void RemoveRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace image
} // namespace librbd
template class librbd::image::RemoveRequest<librbd::ImageCtx>;
| 15,781 | 24.537217 | 80 | cc |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_REMOVE_REQUEST_H
#define CEPH_LIBRBD_IMAGE_REMOVE_REQUEST_H
#include "include/rados/librados.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/image/TypeTraits.h"
#include "common/Timer.h"
#include <list>
class Context;
namespace librbd {
class ProgressContext;
namespace image {
// Async state machine that removes an RBD image: trims data objects, detaches
// it from any parent, disables mirroring and deletes every piece of on-disk
// metadata (header, journal, object map, mirror state, id object, directory
// entry).  The request is self-deleting once on_finish is completed.
template<typename ImageCtxT = ImageCtx>
class RemoveRequest {
private:
  // mock unit testing support
  typedef ::librbd::image::TypeTraits<ImageCtxT> TypeTraits;
  typedef typename TypeTraits::ContextWQ ContextWQ;
public:
  // create a request for an image identified by name and/or id (it will be
  // opened by the state machine if it still exists)
  static RemoveRequest *create(librados::IoCtx &ioctx,
                               const std::string &image_name,
                               const std::string &image_id,
                               bool force, bool from_trash_remove,
                               ProgressContext &prog_ctx,
                               ContextWQ *op_work_queue,
                               Context *on_finish) {
    return new RemoveRequest(ioctx, image_name, image_id, force,
                             from_trash_remove, prog_ctx, op_work_queue,
                             on_finish);
  }
  // create a request for an already-open image
  static RemoveRequest *create(librados::IoCtx &ioctx, ImageCtxT *image_ctx,
                               bool force, bool from_trash_remove,
                               ProgressContext &prog_ctx,
                               ContextWQ *op_work_queue,
                               Context *on_finish) {
    return new RemoveRequest(ioctx, image_ctx, force, from_trash_remove,
                             prog_ctx, op_work_queue, on_finish);
  }
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * (skip if already opened) OPEN IMAGE------------------\
   *                            |                         |
   *                            v                         |
   *                     PRE REMOVE IMAGE * * *           |
   *                            |              *          |
   *                            v              *          |
   * (skip if invalid data pool) TRIM IMAGE * * *         |
   *                            |              *          |
   *                            v              *          |
   *                      DETACH CHILD         *          |
   *                            |              *          |
   *                            v              *          v
   *                      CLOSE IMAGE < * * * *           |
   *                            |                         |
   *             error          v                         |
   *             /------<--------\      REMOVE HEADER<----/
   *             |               |       /      |
   *             |               |---<--/       |
   *             |               |              v
   *             |               |       REMOVE JOURNAL
   *             |               |       /      |
   *             |               |---<--/       |
   *             |               |              v
   *             v               ^       REMOVE OBJECTMAP
   *             |               |       /      |
   *             |               |---<--/       |
   *             |               |              v
   *             |               |       REMOVE MIRROR IMAGE
   *             |               |       /      |
   *             |               |---<--/       |
   *             |               |              v
   *             |               |       REMOVE ID OBJECT
   *             |               |       /      |
   *             |               |---<--/       |
   *             |               |              v
   *             |               |       REMOVE IMAGE
   *             |               |       /      |
   *             |               \---<--/       |
   *             |                              v
   *             \------------------>------------<finish>
   *
   * @endverbatim
   */
  RemoveRequest(librados::IoCtx &ioctx, const std::string &image_name,
                const std::string &image_id, bool force, bool from_trash_remove,
                ProgressContext &prog_ctx, ContextWQ *op_work_queue,
                Context *on_finish);
  RemoveRequest(librados::IoCtx &ioctx, ImageCtxT *image_ctx, bool force,
                bool from_trash_remove, ProgressContext &prog_ctx,
                ContextWQ *op_work_queue, Context *on_finish);
  librados::IoCtx &m_ioctx;
  std::string m_image_name;
  std::string m_image_id;
  ImageCtxT *m_image_ctx = nullptr;
  bool m_force;                  // remove despite watchers/mirroring
  bool m_from_trash_remove;      // id object/dir entry already removed
  ProgressContext &m_prog_ctx;
  ContextWQ *m_op_work_queue;
  Context *m_on_finish;
  CephContext *m_cct;
  std::string m_header_oid;
  bool m_old_format = false;     // confirmed v1 image
  bool m_unknown_format = true;  // format not yet determined
  librados::IoCtx m_parent_io_ctx;
  decltype(m_image_ctx->exclusive_lock) m_exclusive_lock = nullptr;
  int m_ret_val = 0;             // first failure recorded during teardown
  bufferlist m_out_bl;           // scratch buffer for rados read replies
  std::list<obj_watch_t> m_watchers;
  std::map<uint64_t, SnapInfo> m_snap_infos;
  void open_image();
  void handle_open_image(int r);
  void send_journal_remove();
  void handle_journal_remove(int r);
  void send_object_map_remove();
  void handle_object_map_remove(int r);
  void mirror_image_remove();
  void handle_mirror_image_remove(int r);
  void pre_remove_image();
  void handle_pre_remove_image(int r);
  void trim_image();
  void handle_trim_image(int r);
  void detach_child();
  void handle_detach_child(int r);
  void send_disable_mirror();
  void handle_disable_mirror(int r);
  void send_close_image(int r);
  void handle_send_close_image(int r);
  void remove_header();
  void handle_remove_header(int r);
  void remove_header_v2();
  void handle_remove_header_v2(int r);
  void remove_image();
  void remove_v1_image();
  void handle_remove_v1_image(int r);
  void remove_v2_image();
  void dir_get_image_id();
  void handle_dir_get_image_id(int r);
  void dir_get_image_name();
  void handle_dir_get_image_name(int r);
  void remove_id_object();
  void handle_remove_id_object(int r);
  void dir_remove_image();
  void handle_dir_remove_image(int r);
  void finish(int r);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::RemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_REMOVE_REQUEST_H
| 6,373 | 31.191919 | 80 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/SetFlagsRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::SetFlagsRequest: "
namespace librbd {
namespace image {
using util::create_context_callback;
using util::create_rados_callback;
// Request that applies (flags & mask) to the HEAD revision and every
// snapshot of the image.
template <typename I>
SetFlagsRequest<I>::SetFlagsRequest(I *image_ctx, uint64_t flags,
                                    uint64_t mask, Context *on_finish)
  : m_image_ctx(image_ctx), m_flags(flags), m_mask(mask),
    m_on_finish(on_finish) {
}
// Entry point: issue the flag updates.
template <typename I>
void SetFlagsRequest<I>::send() {
  send_set_flags();
}
// Issue one set_flags op per revision (HEAD + every snapshot), gathered into
// a single completion that fires handle_set_flags with the first error.
template <typename I>
void SetFlagsRequest<I>::send_set_flags() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << __func__ << dendl;

  // hold the image lock while snapshotting the list of snap ids
  std::unique_lock image_locker{m_image_ctx->image_lock};
  std::vector<uint64_t> snap_ids;
  snap_ids.push_back(CEPH_NOSNAP);
  // iterate by const reference -- the previous by-value form copied the
  // whole (snap id, SnapInfo) pair on every iteration
  for (const auto& it : m_image_ctx->snap_info) {
    snap_ids.push_back(it.first);
  }

  Context *ctx = create_context_callback<
    SetFlagsRequest<I>, &SetFlagsRequest<I>::handle_set_flags>(this);
  C_Gather *gather_ctx = new C_Gather(cct, ctx);

  for (auto snap_id : snap_ids) {
    librados::ObjectWriteOperation op;
    cls_client::set_flags(&op, snap_id, m_flags, m_mask);
    librados::AioCompletion *comp =
      create_rados_callback(gather_ctx->new_sub());
    int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op);
    ceph_assert(r == 0);
    comp->release();
  }
  // no new subs may be added after activation
  gather_ctx->activate();
}
// Completion handler for the gathered per-snapshot flag updates: log any
// failure and hand the (untouched) result back via the caller's completion.
template <typename I>
Context *SetFlagsRequest<I>::handle_set_flags(int *result) {
  auto *cct = m_image_ctx->cct;
  int r = *result;
  ldout(cct, 20) << __func__ << ": r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "set_flags failed: " << cpp_strerror(r) << dendl;
  }
  return m_on_finish;
}
} // namespace image
} // namespace librbd
template class librbd::image::SetFlagsRequest<librbd::ImageCtx>;
| 2,169 | 26.468354 | 80 | cc |
null | ceph-main/src/librbd/image/SetFlagsRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H
#define CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H
#include "include/buffer.h"
#include <map>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace image {
// Async state machine that applies a masked flag update to the image HEAD and
// to every snapshot.  Self-deleting; completes on_finish when done.
template <typename ImageCtxT = ImageCtx>
class SetFlagsRequest {
public:
  static SetFlagsRequest *create(ImageCtxT *image_ctx, uint64_t flags,
                                 uint64_t mask, Context *on_finish) {
    return new SetFlagsRequest(image_ctx, flags, mask, on_finish);
  }

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |    . . .
   *    v    v   .
   * SET_FLAGS   . (for every snapshot)
   *    |    .   .
   *    v    . . .
   * <finish>
   *
   * @endverbatim
   */
  SetFlagsRequest(ImageCtxT *image_ctx, uint64_t flags, uint64_t mask,
                  Context *on_finish);

  ImageCtxT *m_image_ctx;
  uint64_t m_flags;      // desired flag bit values
  uint64_t m_mask;       // bits that are allowed to change
  Context *m_on_finish;  // completed with the aggregate result

  void send_set_flags();
  Context *handle_set_flags(int *result);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::SetFlagsRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H
| 1,235 | 18.935484 | 71 | h |
null | ceph-main/src/librbd/image/SetSnapRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/SetSnapRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/image/RefreshParentRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::SetSnapRequest: "
namespace librbd {
namespace image {
using util::create_context_callback;
// snap_id == CEPH_NOSNAP means "return to HEAD"; any other id selects a
// read-only snapshot context.
template <typename I>
SetSnapRequest<I>::SetSnapRequest(I &image_ctx, uint64_t snap_id,
                                  Context *on_finish)
  : m_image_ctx(image_ctx), m_snap_id(snap_id), m_on_finish(on_finish),
    m_exclusive_lock(nullptr), m_object_map(nullptr), m_refresh_parent(nullptr),
    m_writes_blocked(false) {
}

// Releases any lock/object-map references that were not handed over to the
// ImageCtx in apply().  Writes must already have been unblocked.
template <typename I>
SetSnapRequest<I>::~SetSnapRequest() {
  ceph_assert(!m_writes_blocked);
  delete m_refresh_parent;
  if (m_object_map) {
    m_object_map->put();
  }
  if (m_exclusive_lock) {
    m_exclusive_lock->put();
  }
}
// Entry point.  Switching to a snapshot must quiesce in-flight writes first;
// returning to HEAD instead begins by (re)establishing the exclusive lock.
template <typename I>
void SetSnapRequest<I>::send() {
  if (m_snap_id != CEPH_NOSNAP) {
    send_block_writes();
    return;
  }
  send_init_exclusive_lock();
}
// HEAD path: create and initialize an exclusive lock unless one already
// exists, the image is read-only, or the feature is disabled.
template <typename I>
void SetSnapRequest<I>::send_init_exclusive_lock() {
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    if (m_image_ctx.exclusive_lock != nullptr) {
      // lock already active, so the image must already be at HEAD
      ceph_assert(m_image_ctx.snap_id == CEPH_NOSNAP);
      send_complete();
      return;
    }
  }

  if (m_image_ctx.read_only ||
      !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
    int r = 0;
    // a non-null return means the refresh completed synchronously
    if (send_refresh_parent(&r) != nullptr) {
      send_complete();
    }
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << dendl;

  m_exclusive_lock = ExclusiveLock<I>::create(m_image_ctx);

  using klass = SetSnapRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_init_exclusive_lock>(this);

  // owner_lock must be held (shared) across ExclusiveLock::init
  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  m_exclusive_lock->init(m_image_ctx.features, ctx);
}

template <typename I>
Context *SetSnapRequest<I>::handle_init_exclusive_lock(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to initialize exclusive lock: "
               << cpp_strerror(*result) << dendl;
    finalize();
    return m_on_finish;
  }
  return send_refresh_parent(result);
}

// Snapshot path: quiesce new writes before the snap context is switched.
template <typename I>
void SetSnapRequest<I>::send_block_writes() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << dendl;

  // remembered so finalize() can undo the block on every exit path
  m_writes_blocked = true;

  using klass = SetSnapRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_block_writes>(this);

  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  m_image_ctx.io_image_dispatcher->block_writes(ctx);
}
// Writes are now blocked: validate the requested snapshot still exists and
// then tear down the exclusive lock (a snapshot context is read-only).
template <typename I>
Context *SetSnapRequest<I>::handle_block_writes(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to block writes: " << cpp_strerror(*result)
               << dendl;
    finalize();
    return m_on_finish;
  }

  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    auto it = m_image_ctx.snap_info.find(m_snap_id);
    if (it == m_image_ctx.snap_info.end()) {
      ldout(cct, 5) << "failed to locate snapshot '" << m_snap_id << "'"
                    << dendl;

      *result = -ENOENT;
      finalize();
      return m_on_finish;
    }
  }

  return send_shut_down_exclusive_lock(result);
}

// Shuts down the image's exclusive lock if one is active; otherwise falls
// straight through to the parent refresh step.
template <typename I>
Context *SetSnapRequest<I>::send_shut_down_exclusive_lock(int *result) {
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    // take over the ImageCtx's reference for the shut down
    m_exclusive_lock = m_image_ctx.exclusive_lock;
  }

  if (m_exclusive_lock == nullptr) {
    return send_refresh_parent(result);
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << dendl;

  using klass = SetSnapRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_shut_down_exclusive_lock>(this);
  m_exclusive_lock->shut_down(ctx);
  return nullptr;
}

template <typename I>
Context *SetSnapRequest<I>::handle_shut_down_exclusive_lock(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to shut down exclusive lock: "
               << cpp_strerror(*result) << dendl;
    finalize();
    return m_on_finish;
  }

  return send_refresh_parent(result);
}
// Refreshes the parent image association for the target snap context when
// required.  Returns m_on_finish on synchronous completion, nullptr when an
// async callback will continue the state machine.
template <typename I>
Context *SetSnapRequest<I>::send_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;

  ParentImageInfo parent_md;
  bool refresh_parent;
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};

    const auto parent_info = m_image_ctx.get_parent_info(m_snap_id);
    if (parent_info == nullptr) {
      *result = -ENOENT;
      lderr(cct) << "failed to retrieve snapshot parent info" << dendl;
      finalize();
      return m_on_finish;
    }

    parent_md = *parent_info;
    refresh_parent = RefreshParentRequest<I>::is_refresh_required(
        m_image_ctx, parent_md, m_image_ctx.migration_info);
  }

  if (!refresh_parent) {
    if (m_snap_id == CEPH_NOSNAP) {
      // object map is loaded when exclusive lock is acquired
      *result = apply();
      finalize();
      return m_on_finish;
    } else {
      // load snapshot object map
      return send_open_object_map(result);
    }
  }

  ldout(cct, 10) << __func__ << dendl;

  using klass = SetSnapRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_refresh_parent>(this);
  m_refresh_parent = RefreshParentRequest<I>::create(m_image_ctx, parent_md,
                                                     m_image_ctx.migration_info,
                                                     ctx);
  m_refresh_parent->send();
  return nullptr;
}

template <typename I>
Context *SetSnapRequest<I>::handle_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to refresh snapshot parent: " << cpp_strerror(*result)
               << dendl;
    finalize();
    return m_on_finish;
  }

  if (m_snap_id == CEPH_NOSNAP) {
    // object map is loaded when exclusive lock is acquired
    *result = apply();
    if (*result < 0) {
      finalize();
      return m_on_finish;
    }

    return send_finalize_refresh_parent(result);
  } else {
    // load snapshot object map
    return send_open_object_map(result);
  }
}
// Loads the object map for the target snapshot (feature permitting), then
// applies the new snap context to the ImageCtx.
template <typename I>
Context *SetSnapRequest<I>::send_open_object_map(int *result) {
  if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
    *result = apply();
    if (*result < 0) {
      finalize();
      return m_on_finish;
    }

    return send_finalize_refresh_parent(result);
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << dendl;

  using klass = SetSnapRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_open_object_map>(this);
  m_object_map = ObjectMap<I>::create(m_image_ctx, m_snap_id);
  m_object_map->open(ctx);
  return nullptr;
}

template <typename I>
Context *SetSnapRequest<I>::handle_open_object_map(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    // open failure is non-fatal: continue without an object map
    lderr(cct) << "failed to open object map: " << cpp_strerror(*result)
               << dendl;
    m_object_map->put();
    m_object_map = nullptr;
  }

  *result = apply();
  if (*result < 0) {
    finalize();
    return m_on_finish;
  }

  return send_finalize_refresh_parent(result);
}

// Closes out the parent refresh (releases the previous parent), if one was
// performed earlier in the state machine.
template <typename I>
Context *SetSnapRequest<I>::send_finalize_refresh_parent(int *result) {
  if (m_refresh_parent == nullptr) {
    finalize();
    return m_on_finish;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  using klass = SetSnapRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_finalize_refresh_parent>(this);
  m_refresh_parent->finalize(ctx);
  return nullptr;
}

template <typename I>
Context *SetSnapRequest<I>::handle_finalize_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to close parent image: " << cpp_strerror(*result)
               << dendl;
  }
  finalize();
  return m_on_finish;
}
// Atomically installs the new snap context into the ImageCtx: either switches
// to the snapshot or returns to HEAD, handing over the lock/object-map
// references that were prepared by earlier steps.
template <typename I>
int SetSnapRequest<I>::apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << __func__ << dendl;

  std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock};
  if (m_snap_id != CEPH_NOSNAP) {
    // exclusive lock was shut down before a snapshot can be applied
    ceph_assert(m_image_ctx.exclusive_lock == nullptr);
    int r = m_image_ctx.snap_set(m_snap_id);
    if (r < 0) {
      return r;
    }
  } else {
    // hand the newly initialized lock (if any) over to the ImageCtx
    std::swap(m_image_ctx.exclusive_lock, m_exclusive_lock);
    m_image_ctx.snap_unset();
  }

  if (m_refresh_parent != nullptr) {
    m_refresh_parent->apply();
  }

  // swap keeps the previous object map in m_object_map for cleanup in dtor
  std::swap(m_object_map, m_image_ctx.object_map);
  return 0;
}

// Re-enables writes if this request blocked them.
template <typename I>
void SetSnapRequest<I>::finalize() {
  if (m_writes_blocked) {
    m_image_ctx.io_image_dispatcher->unblock_writes();
    m_writes_blocked = false;
  }
}

// Synchronous success path: clean up, complete the caller, self-destruct.
template <typename I>
void SetSnapRequest<I>::send_complete() {
  finalize();
  m_on_finish->complete(0);
  delete this;
}
} // namespace image
} // namespace librbd
template class librbd::image::SetSnapRequest<librbd::ImageCtx>;
| 9,795 | 25.547425 | 80 | cc |
null | ceph-main/src/librbd/image/SetSnapRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_SNAP_SET_REQUEST_H
#define CEPH_LIBRBD_IMAGE_SNAP_SET_REQUEST_H
#include "cls/rbd/cls_rbd_client.h"
#include <string>
class Context;
namespace librbd {
template <typename> class ExclusiveLock;
class ImageCtx;
template <typename> class ObjectMap;
namespace image {
template <typename> class RefreshParentRequest;
// Async state machine that switches an open image between HEAD and a
// snapshot context.  Self-deleting; completes on_finish when done.
template <typename ImageCtxT = ImageCtx>
class SetSnapRequest {
public:
  static SetSnapRequest *create(ImageCtxT &image_ctx, uint64_t snap_id,
                                Context *on_finish) {
    return new SetSnapRequest(image_ctx, snap_id, on_finish);
  }

  ~SetSnapRequest();

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    | (set snap)
   *    |-----------> BLOCK_WRITES
   *    |                  |
   *    |                  v
   *    |             SHUTDOWN_EXCLUSIVE_LOCK (skip if lock inactive
   *    |                  |                   or disabled)
   *    |                  v
   *    |             REFRESH_PARENT (skip if no parent
   *    |                  |          or refresh not needed)
   *    |                  v
   *    |             OPEN_OBJECT_MAP (skip if map disabled)
   *    |                  |
   *    |                  v
   *    |             <apply>
   *    |                  |
   *    |                  v
   *    |             FINALIZE_REFRESH_PARENT (skip if no parent
   *    |                  |                   or refresh not needed)
   *    |                  v
   *    |             <finish>
   *    |
   *    \-----------> INIT_EXCLUSIVE_LOCK (skip if active or
   *                       |               disabled)
   *                       v
   *                  REFRESH_PARENT (skip if no parent
   *                       |          or refresh not needed)
   *                       v
   *                  <apply>
   *                       |
   *                       v
   *                  FINALIZE_REFRESH_PARENT (skip if no parent
   *                       |                   or refresh not needed)
   *                       v
   *                  <finish>
   *
   * @endverbatim
   */
  SetSnapRequest(ImageCtxT &image_ctx, uint64_t snap_id, Context *on_finish);

  ImageCtxT &m_image_ctx;
  uint64_t m_snap_id;    // CEPH_NOSNAP selects the HEAD revision
  Context *m_on_finish;

  // resources staged for hand-over to (or taken over from) the ImageCtx
  ExclusiveLock<ImageCtxT> *m_exclusive_lock;
  ObjectMap<ImageCtxT> *m_object_map;
  RefreshParentRequest<ImageCtxT> *m_refresh_parent;

  bool m_writes_blocked;  // true while the image dispatcher blocks writes

  void send_block_writes();
  Context *handle_block_writes(int *result);

  void send_init_exclusive_lock();
  Context *handle_init_exclusive_lock(int *result);

  Context *send_shut_down_exclusive_lock(int *result);
  Context *handle_shut_down_exclusive_lock(int *result);

  Context *send_refresh_parent(int *result);
  Context *handle_refresh_parent(int *result);

  Context *send_open_object_map(int *result);
  Context *handle_open_object_map(int *result);

  Context *send_finalize_refresh_parent(int *result);
  Context *handle_finalize_refresh_parent(int *result);

  int apply();
  void finalize();
  void send_complete();
};
} // namespace image
} // namespace librbd
extern template class librbd::image::SetSnapRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_SNAP_SET_REQUEST_H
| 3,340 | 27.07563 | 77 | h |
null | ceph-main/src/librbd/image/TypeTraits.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_TYPE_TRAITS_H
#define CEPH_LIBRBD_IMAGE_TYPE_TRAITS_H
namespace librbd {
namespace asio { struct ContextWQ; }
namespace image {
// Maps an ImageCtx type to its work-queue type; tests can specialize this
// to substitute a mock queue.
template <typename ImageCtxT>
struct TypeTraits {
  typedef asio::ContextWQ ContextWQ;
};
} // namespace image
} // namespace librbd
#endif // CEPH_LIBRBD_IMAGE_TYPE_TRAITS_H
| 434 | 18.772727 | 70 | h |
null | ceph-main/src/librbd/image/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_IMAGE_TYPES_H
#define LIBRBD_IMAGE_TYPES_H
namespace librbd {
namespace image {
// Bit flags controlling whether mirroring is enabled on image creation.
enum {
  CREATE_FLAG_SKIP_MIRROR_ENABLE  = 1 << 0,
  CREATE_FLAG_FORCE_MIRROR_ENABLE = 1 << 1,
  // mask covering the mutually exclusive skip/force bits
  CREATE_FLAG_MIRROR_ENABLE_MASK  = (CREATE_FLAG_SKIP_MIRROR_ENABLE |
                                     CREATE_FLAG_FORCE_MIRROR_ENABLE),
};
} // namespace image
} // librbd
#endif // LIBRBD_IMAGE_TYPES_H
| 499 | 22.809524 | 70 | h |
null | ceph-main/src/librbd/image/ValidatePoolRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/ValidatePoolRequest.h"
#include "include/rados/librados.hpp"
#include "include/ceph_assert.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::ValidatePoolRequest: " \
<< __func__ << ": "
namespace librbd {
namespace image {
namespace {
const std::string OVERWRITE_VALIDATED("overwrite validated");
const std::string VALIDATE("validate");
} // anonymous namespace
using util::create_rados_callback;
using util::create_context_callback;
using util::create_async_context_callback;
// Duplicates the caller's IoCtx so the probe always runs in the pool's
// default namespace.
template <typename I>
ValidatePoolRequest<I>::ValidatePoolRequest(librados::IoCtx& io_ctx,
                                            Context *on_finish)
  : m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
    m_on_finish(on_finish) {
  // validation should occur in the default namespace
  m_io_ctx.dup(io_ctx);
  m_io_ctx.set_namespace("");
}

// Entry point: check whether the pool was already validated.
template <typename I>
void ValidatePoolRequest<I>::send() {
  read_rbd_info();
}

// Reads the RBD_INFO sentinel object to see how far a previous validation
// attempt progressed.
template <typename I>
void ValidatePoolRequest<I>::read_rbd_info() {
  ldout(m_cct, 5) << dendl;

  auto comp = create_rados_callback<
    ValidatePoolRequest<I>,
    &ValidatePoolRequest<I>::handle_read_rbd_info>(this);

  librados::ObjectReadOperation op;
  op.read(0, 0, nullptr, nullptr);

  m_out_bl.clear();
  int r = m_io_ctx.aio_operate(RBD_INFO, comp, &op, &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Dispatches on the sentinel's contents: fully validated pools short-circuit,
// partially validated pools resume at the overwrite check, and missing or
// unrecognized sentinels restart the validation from the snapshot probe.
template <typename I>
void ValidatePoolRequest<I>::handle_read_rbd_info(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;

  if (r >= 0) {
    bufferlist validated_bl;
    validated_bl.append(OVERWRITE_VALIDATED);

    bufferlist validate_bl;
    validate_bl.append(VALIDATE);

    if (m_out_bl.contents_equal(validated_bl)) {
      // already validated pool
      finish(0);
      return;
    } else if (m_out_bl.contents_equal(validate_bl)) {
      // implies snapshot was already successfully created
      overwrite_rbd_info();
      return;
    }
  } else if (r != -ENOENT) {
    // r < 0 is implied by the failed `r >= 0` check above; -ENOENT simply
    // means the sentinel doesn't exist yet and validation should proceed
    lderr(m_cct) << "failed to read RBD info: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  create_snapshot();
}
// Probes for self-managed snapshot support by allocating a snapshot id.
template <typename I>
void ValidatePoolRequest<I>::create_snapshot() {
  ldout(m_cct, 5) << dendl;

  // allocate a self-managed snapshot id if this is a new pool to force
  // self-managed snapshot mode
  auto comp = create_rados_callback<
    ValidatePoolRequest<I>,
    &ValidatePoolRequest<I>::handle_create_snapshot>(this);
  m_io_ctx.aio_selfmanaged_snap_create(&m_snap_id, comp);
  comp->release();
}

template <typename I>
void ValidatePoolRequest<I>::handle_create_snapshot(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;

  if (r == -EINVAL) {
    // pool is stuck in pool-snapshot mode
    lderr(m_cct) << "pool not configured for self-managed RBD snapshot support"
                 << dendl;
    finish(r);
    return;
  } else if (r < 0) {
    lderr(m_cct) << "failed to allocate self-managed snapshot: "
                 << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  write_rbd_info();
}

// Records the snapshot-validation milestone in the RBD_INFO sentinel.
template <typename I>
void ValidatePoolRequest<I>::write_rbd_info() {
  ldout(m_cct, 5) << dendl;

  bufferlist bl;
  bl.append(VALIDATE);

  librados::ObjectWriteOperation op;
  op.create(true);
  op.write(0, bl);

  auto comp = create_rados_callback<
    ValidatePoolRequest<I>,
    &ValidatePoolRequest<I>::handle_write_rbd_info>(this);
  int r = m_io_ctx.aio_operate(RBD_INFO, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// An -EOPNOTSUPP write indicates an EC pool without overwrite support; the
// error is deferred so the probe snapshot can still be cleaned up.
template <typename I>
void ValidatePoolRequest<I>::handle_write_rbd_info(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;

  if (r == -EOPNOTSUPP) {
    lderr(m_cct) << "pool missing required overwrite support" << dendl;
    m_ret_val = -EINVAL;
  } else if (r < 0 && r != -EEXIST) {
    lderr(m_cct) << "failed to write RBD info: " << cpp_strerror(r) << dendl;
    m_ret_val = r;
  }

  remove_snapshot();
}

// Releases the probe snapshot id allocated by create_snapshot().
template <typename I>
void ValidatePoolRequest<I>::remove_snapshot() {
  ldout(m_cct, 5) << dendl;

  auto comp = create_rados_callback<
    ValidatePoolRequest<I>,
    &ValidatePoolRequest<I>::handle_remove_snapshot>(this);
  m_io_ctx.aio_selfmanaged_snap_remove(m_snap_id, comp);
  comp->release();
}

template <typename I>
void ValidatePoolRequest<I>::handle_remove_snapshot(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    // not a fatal error
    lderr(m_cct) << "failed to remove validation snapshot: " << cpp_strerror(r)
                 << dendl;
  }

  // surface any error deferred from the write step
  if (m_ret_val < 0) {
    finish(m_ret_val);
    return;
  }

  overwrite_rbd_info();
}

// Overwrites the sentinel to prove (and record) overwrite support.
template <typename I>
void ValidatePoolRequest<I>::overwrite_rbd_info() {
  ldout(m_cct, 5) << dendl;

  bufferlist bl;
  bl.append(OVERWRITE_VALIDATED);

  librados::ObjectWriteOperation op;
  op.write(0, bl);

  auto comp = create_rados_callback<
    ValidatePoolRequest<I>,
    &ValidatePoolRequest<I>::handle_overwrite_rbd_info>(this);
  int r = m_io_ctx.aio_operate(RBD_INFO, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
template <typename I>
void ValidatePoolRequest<I>::handle_overwrite_rbd_info(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;

  if (r == -EOPNOTSUPP) {
    // EC pool without the overwrite flag enabled
    lderr(m_cct) << "pool missing required overwrite support" << dendl;
    finish(-EINVAL);
    return;
  } else if (r < 0) {
    lderr(m_cct) << "failed to validate overwrite support: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }

  finish(0);
}

// Completes the caller's context and destroys this self-managed request.
template <typename I>
void ValidatePoolRequest<I>::finish(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace image
} // namespace librbd
template class librbd::image::ValidatePoolRequest<librbd::ImageCtx>;
| 5,952 | 24.331915 | 79 | cc |
null | ceph-main/src/librbd/image/ValidatePoolRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_VALIDATE_POOL_REQUEST_H
#define CEPH_LIBRBD_IMAGE_VALIDATE_POOL_REQUEST_H
#include "include/common_fwd.h"
#include "include/rados/librados.hpp"
#include "include/buffer.h"
class Context;
namespace librbd {
struct ImageCtx;
namespace asio { struct ContextWQ; }
namespace image {
// Async state machine verifying that a pool supports self-managed snapshots
// and object overwrites before RBD images are created in it.  Results are
// cached in the RBD_INFO sentinel object.  Self-deleting.
template <typename ImageCtxT>
class ValidatePoolRequest {
public:
  static ValidatePoolRequest* create(librados::IoCtx& io_ctx,
                                     Context *on_finish) {
    return new ValidatePoolRequest(io_ctx, on_finish);
  }

  ValidatePoolRequest(librados::IoCtx& io_ctx, Context *on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v  (overwrites validated)
   * READ RBD INFO . . . . . . . . .
   *    |   .                      .
   *    |   . (snapshots validated).
   *    |   . . . . . . . .        .
   *    v                 .        .
   * CREATE SNAPSHOT      .        .
   *    |                 .        .
   *    v                 .        .
   * WRITE RBD INFO       .        .
   *    |                 .        .
   *    v                 .        .
   * REMOVE SNAPSHOT      .        .
   *    |                 .        .
   *    v                 .        .
   * OVERWRITE RBD INFO < .        .
   *    |                          .
   *    v                          .
   * <finish> < . . . . . . . . . .
   *
   * @endverbatim
   */
  librados::IoCtx m_io_ctx;  // dup of caller's ctx, forced to default namespace
  CephContext* m_cct;
  Context* m_on_finish;

  int m_ret_val = 0;         // deferred error from the write step
  bufferlist m_out_bl;       // contents of the RBD_INFO sentinel
  uint64_t m_snap_id = 0;    // probe self-managed snapshot id

  void read_rbd_info();
  void handle_read_rbd_info(int r);

  void create_snapshot();
  void handle_create_snapshot(int r);

  void write_rbd_info();
  void handle_write_rbd_info(int r);

  void remove_snapshot();
  void handle_remove_snapshot(int r);

  void overwrite_rbd_info();
  void handle_overwrite_rbd_info(int r);

  void finish(int r);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::ValidatePoolRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_VALIDATE_POOL_REQUEST_H
| 2,228 | 22.712766 | 75 | h |
null | ceph-main/src/librbd/image_watcher/NotifyLockOwner.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image_watcher/NotifyLockOwner.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/WatchNotifyTypes.h"
#include "librbd/watcher/Notifier.h"
#include <map>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image_watcher::NotifyLockOwner: " \
<< this << " " << __func__
namespace librbd {
namespace image_watcher {
using namespace watch_notify;
using util::create_context_callback;
// Takes ownership of the encoded notification payload; on_finish receives the
// lock owner's decoded response code.
NotifyLockOwner::NotifyLockOwner(ImageCtx &image_ctx,
                                 watcher::Notifier &notifier,
                                 bufferlist &&bl, Context *on_finish)
  : m_image_ctx(image_ctx), m_notifier(notifier), m_bl(std::move(bl)),
    m_on_finish(on_finish) {
}

// Entry point.
void NotifyLockOwner::send() {
  send_notify();
}

// Broadcasts the payload to all image watchers; caller must hold owner_lock.
void NotifyLockOwner::send_notify() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << dendl;

  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  m_notifier.notify(m_bl, &m_notify_response, create_context_callback<
    NotifyLockOwner, &NotifyLockOwner::handle_notify>(this));
}
// Scans the watcher acks for the single non-empty payload (the lock owner's
// reply), decodes it, and forwards its embedded result code.
void NotifyLockOwner::handle_notify(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": r=" << r << dendl;

  // -ETIMEDOUT is tolerated here; a missing owner reply is detected below
  if (r < 0 && r != -ETIMEDOUT) {
    lderr(cct) << ": lock owner notification failed: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }

  bufferlist response;
  bool lock_owner_responded = false;
  for (auto &it : m_notify_response.acks) {
    if (it.second.length() > 0) {
      if (lock_owner_responded) {
        // at most one watcher (the owner) may send a payload
        lderr(cct) << ": duplicate lock owners detected" << dendl;
        finish(-EINVAL);
        return;
      }
      lock_owner_responded = true;
      response = std::move(it.second);
    }
  }

  if (!lock_owner_responded) {
    ldout(cct, 1) << ": no lock owners detected" << dendl;
    finish(-ETIMEDOUT);
    return;
  }

  try {
    auto iter = response.cbegin();

    ResponseMessage response_message;
    using ceph::decode;
    decode(response_message, iter);
    r = response_message.result;
    ldout(cct, 20) << " client responded with r=" << r << dendl;
  } catch (const buffer::error &err) {
    // malformed response payload
    r = -EINVAL;
  }
  finish(r);
}

// Completes the caller's context and destroys this self-managed request.
void NotifyLockOwner::finish(int r) {
  m_on_finish->complete(r);
  delete this;
}
} // namespace image_watcher
} // namespace librbd
| 2,508 | 24.865979 | 74 | cc |
null | ceph-main/src/librbd/image_watcher/NotifyLockOwner.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_WATCHER_NOTIFY_LOCK_OWNER_H
#define CEPH_LIBRBD_IMAGE_WATCHER_NOTIFY_LOCK_OWNER_H
#include "include/buffer.h"
#include "librbd/watcher/Types.h"
class Context;
namespace librbd {
struct ImageCtx;
namespace watcher { class Notifier; }
namespace image_watcher {
// One-shot helper that notifies the exclusive-lock owner with an encoded
// request and reports back the owner's decoded result.  Self-deleting.
class NotifyLockOwner {
public:
  static NotifyLockOwner *create(ImageCtx &image_ctx,
                                 watcher::Notifier &notifier,
                                 bufferlist &&bl, Context *on_finish) {
    return new NotifyLockOwner(image_ctx, notifier, std::move(bl), on_finish);
  }

  NotifyLockOwner(ImageCtx &image_ctx, watcher::Notifier &notifier,
                  bufferlist &&bl, Context *on_finish);

  void send();

private:
  ImageCtx &m_image_ctx;
  watcher::Notifier &m_notifier;

  bufferlist m_bl;                            // encoded request payload
  watcher::NotifyResponse m_notify_response;  // per-watcher acks
  Context *m_on_finish;

  void send_notify();
  void handle_notify(int r);

  void finish(int r);
};
} // namespace image_watcher
} // namespace librbd
#endif // CEPH_LIBRBD_IMAGE_WATCHER_NOTIFY_LOCK_OWNER_H
| 1,179 | 22.137255 | 78 | h |
null | ceph-main/src/librbd/io/AioCompletion.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/AioCompletion.h"
#include <errno.h>
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/Journal.h"
#include "librbd/Types.h"
#include <boost/asio/dispatch.hpp>
#include <boost/asio/post.hpp>
#ifdef WITH_LTTNG
#include "tracing/librbd.h"
#else
#define tracepoint(...)
#endif
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::AioCompletion: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
// Blocks the calling thread until the completion reaches AIO_STATE_COMPLETE.
int AioCompletion::wait_for_complete() {
  tracepoint(librbd, aio_wait_for_complete_enter, this);
  {
    std::unique_lock<std::mutex> locker(lock);
    while (state != AIO_STATE_COMPLETE) {
      cond.wait(locker);
    }
  }
  tracepoint(librbd, aio_wait_for_complete_exit, 0);
  return 0;
}

// Collapses any deferred error into rval and assembles read results; called
// once, after the last outstanding request has completed.
void AioCompletion::finalize() {
  ceph_assert(ictx != nullptr);
  CephContext *cct = ictx->cct;

  // finalize any pending error results since we won't be
  // atomically incrementing rval anymore
  int err_r = error_rval;
  if (err_r < 0) {
    rval = err_r;
  }

  ssize_t r = rval;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r >= 0 && aio_type == AIO_TYPE_READ) {
    read_result.assemble_result(cct);
  }
}
// Records per-type latency, invokes the user callback (directly or via the
// external-callback strand), and wakes waiters/event sockets.
void AioCompletion::complete() {
  ceph_assert(ictx != nullptr);

  ssize_t r = rval;
  if ((aio_type == AIO_TYPE_CLOSE) || (aio_type == AIO_TYPE_OPEN && r < 0)) {
    // image context is (or is about to be) torn down -- stop referencing it
    ictx = nullptr;
    external_callback = false;
  } else {
    CephContext *cct = ictx->cct;

    tracepoint(librbd, aio_complete_enter, this, r);
    if (ictx->perfcounter != nullptr) {
      ceph::timespan elapsed = coarse_mono_clock::now() - start_time;
      switch (aio_type) {
      case AIO_TYPE_GENERIC:
      case AIO_TYPE_OPEN:
        break;
      case AIO_TYPE_READ:
        ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed); break;
      case AIO_TYPE_WRITE:
        ictx->perfcounter->tinc(l_librbd_wr_latency, elapsed); break;
      case AIO_TYPE_DISCARD:
        ictx->perfcounter->tinc(l_librbd_discard_latency, elapsed); break;
      case AIO_TYPE_FLUSH:
        ictx->perfcounter->tinc(l_librbd_flush_latency, elapsed); break;
      case AIO_TYPE_WRITESAME:
        ictx->perfcounter->tinc(l_librbd_ws_latency, elapsed); break;
      case AIO_TYPE_COMPARE_AND_WRITE:
        ictx->perfcounter->tinc(l_librbd_cmp_latency, elapsed); break;
      default:
        lderr(cct) << "completed invalid aio_type: " << aio_type << dendl;
        break;
      }
    }
  }

  state = AIO_STATE_CALLBACK;
  if (complete_cb) {
    if (external_callback) {
      complete_external_callback();
    } else {
      complete_cb(rbd_comp, complete_arg);
      complete_event_socket();
      notify_callbacks_complete();
    }
  } else {
    complete_event_socket();
    notify_callbacks_complete();
  }

  tracepoint(librbd, aio_complete_exit);
}
// Lazily binds the completion to an image/operation type and stamps the
// start time; subsequent calls are no-ops.
void AioCompletion::init_time(ImageCtx *i, aio_type_t t) {
  if (ictx == nullptr) {
    ictx = i;
    aio_type = t;
    start_time = coarse_mono_clock::now();
  }
}

// Registers the in-flight operation with the image for tracking purposes.
void AioCompletion::start_op() {
  ceph_assert(ictx != nullptr);

  if (aio_type == AIO_TYPE_OPEN || aio_type == AIO_TYPE_CLOSE) {
    // no need to track async open/close operations
    return;
  }

  ceph_assert(!async_op.started());
  async_op.start_op(*ictx);
}

// Schedules completion on the API strand with a synthetic pending request so
// the callback fires outside any currently-held locks.
void AioCompletion::queue_complete() {
  uint32_t zero = 0;
  pending_count.compare_exchange_strong(zero, 1);
  ceph_assert(zero == 0);

  add_request();

  // ensure completion fires in clean lock context
  boost::asio::post(ictx->asio_engine->get_api_strand(), [this]() {
      complete_request(0);
    });
}
// Adds an artificial pending request (plus a ref) to delay completion until
// a matching unblock().  Must be called before the completion is armed.
void AioCompletion::block(CephContext* cct) {
  ldout(cct, 20) << dendl;
  ceph_assert(!was_armed);

  get();
  ++pending_count;
}

// Drops the artificial pending request added by block(); fires the completion
// if it was the last outstanding one.
void AioCompletion::unblock(CephContext* cct) {
  ldout(cct, 20) << dendl;
  ceph_assert(was_armed);

  uint32_t previous_pending_count = pending_count--;
  ceph_assert(previous_pending_count > 0);
  if (previous_pending_count == 1) {
    queue_complete();
  }
  put();
}

// Arms the completion directly with a failure result (no requests issued).
void AioCompletion::fail(int r)
{
  ceph_assert(ictx != nullptr);
  ceph_assert(r < 0);

  bool queue_required = true;
  if (aio_type == AIO_TYPE_CLOSE || aio_type == AIO_TYPE_OPEN) {
    // executing from a safe context and the ImageCtx has been destructed
    queue_required = false;
  } else {
    CephContext *cct = ictx->cct;
    lderr(cct) << cpp_strerror(r) << dendl;
  }

  ceph_assert(!was_armed);
  was_armed = true;

  rval = r;

  // only complete now if block() didn't leave a pending count behind
  uint32_t previous_pending_count = pending_count.load();
  if (previous_pending_count == 0) {
    if (queue_required) {
      queue_complete();
    } else {
      complete();
    }
  }
}
// Arms the completion with the number of sub-requests that will report in via
// complete_request(); a zero count (with no block() outstanding) completes
// immediately through the strand.
void AioCompletion::set_request_count(uint32_t count) {
  ceph_assert(ictx != nullptr);
  CephContext *cct = ictx->cct;

  ceph_assert(!was_armed);
  was_armed = true;

  ldout(cct, 20) << "pending=" << count << dendl;
  uint32_t previous_pending_count = pending_count.fetch_add(count);
  if (previous_pending_count == 0 && count == 0) {
    queue_complete();
  }
}

// Accumulates one sub-request's result; the last arrival finalizes and fires
// the completion.  Positive results sum, the first negative (other than
// -EEXIST) wins.
void AioCompletion::complete_request(ssize_t r)
{
  ceph_assert(ictx != nullptr);
  CephContext *cct = ictx->cct;

  if (r > 0) {
    rval += r;
  } else if (r < 0 && r != -EEXIST) {
    // might race w/ another thread setting an error code but
    // first one wins
    int zero = 0;
    error_rval.compare_exchange_strong(zero, r);
  }

  uint32_t previous_pending_count = pending_count--;
  ceph_assert(previous_pending_count > 0);
  auto pending_count = previous_pending_count - 1;

  ldout(cct, 20) << "cb=" << complete_cb << ", "
                 << "pending=" << pending_count << dendl;
  if (pending_count == 0) {
    finalize();
    complete();
  }
  put();
}
bool AioCompletion::is_complete() {
tracepoint(librbd, aio_is_complete_enter, this);
bool done = (this->state != AIO_STATE_PENDING);
tracepoint(librbd, aio_is_complete_exit, done);
return done;
}
ssize_t AioCompletion::get_return_value() {
tracepoint(librbd, aio_get_return_value_enter, this);
ssize_t r = rval;
tracepoint(librbd, aio_get_return_value_exit, r);
return r;
}
// Runs the user callback on the API strand so external consumers never see
// concurrent callbacks from multiple internal threads.
void AioCompletion::complete_external_callback() {
  get();

  // ensure librbd external users never experience concurrent callbacks
  // from multiple librbd-internal threads.
  boost::asio::dispatch(ictx->asio_engine->get_api_strand(), [this]() {
      complete_cb(rbd_comp, complete_arg);
      complete_event_socket();
      notify_callbacks_complete();
      put();
    });
}

// Pushes this completion onto the image's event socket, if one is registered.
void AioCompletion::complete_event_socket() {
  if (ictx != nullptr && event_notify && ictx->event_socket.is_valid()) {
    ictx->event_socket_completions.push(this);
    ictx->event_socket.notify();
  }
}

// Final bookkeeping: publish AIO_STATE_COMPLETE, wake blocked waiters,
// complete the dispatcher context, and close out the tracked async op.
void AioCompletion::notify_callbacks_complete() {
  state = AIO_STATE_COMPLETE;

  {
    std::unique_lock<std::mutex> locker(lock);
    cond.notify_all();
  }

  if (image_dispatcher_ctx != nullptr) {
    image_dispatcher_ctx->complete(rval);
  }

  // note: possible for image to be closed after op marked finished
  if (async_op.started()) {
    async_op.finish_op();
  }
}
} // namespace io
} // namespace librbd
| 7,328 | 23.844068 | 77 | cc |
null | ceph-main/src/librbd/io/AioCompletion.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_AIO_COMPLETION_H
#define CEPH_LIBRBD_IO_AIO_COMPLETION_H
#include "common/ceph_time.h"
#include "include/common_fwd.h"
#include "include/Context.h"
#include "include/utime.h"
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include <atomic>
#include <condition_variable>
#include <mutex>
struct Context;
namespace librbd {
namespace io {
/**
* AioCompletion is the overall completion for a single
* rbd I/O request. It may be composed of many AioObjectRequests,
* which each go to a single object.
*
* The retrying of individual requests is handled at a lower level,
* so all AioCompletion cares about is the count of outstanding
* requests. The number of expected individual requests should be
* set initially using set_request_count() prior to issuing the
* requests. This ensures that the completion will not be completed
* within the caller's thread of execution (instead via a librados
* context or via a thread pool context for cache read hits).
*/
struct AioCompletion {
  typedef enum {
    AIO_STATE_PENDING = 0,
    AIO_STATE_CALLBACK,
    AIO_STATE_COMPLETE,
  } aio_state_t;
  // protects cond; most fields rely on atomics instead of this lock
  mutable std::mutex lock;
  std::condition_variable cond;
  // user-registered callback plus its opaque argument
  callback_t complete_cb = nullptr;
  void *complete_arg = nullptr;
  rbd_completion_t rbd_comp = nullptr;
  /// note: only using atomic for built-in memory barrier
  std::atomic<aio_state_t> state{AIO_STATE_PENDING};
  std::atomic<ssize_t> rval{0};            // aggregated return value
  std::atomic<int> error_rval{0};          // first error observed (if any)
  std::atomic<uint32_t> ref{1};            // refcount; delete at zero (see put())
  std::atomic<uint32_t> pending_count{0}; ///< number of requests/blocks
  std::atomic<bool> released{false};       // guards against double release()
  ImageCtx *ictx = nullptr;
  coarse_mono_time start_time;
  aio_type_t aio_type = AIO_TYPE_NONE;
  ReadResult read_result;
  AsyncOperation async_op;
  bool event_notify = false;               // arm event-socket notification
  bool was_armed = false;
  bool external_callback = false;
  Context* image_dispatcher_ctx = nullptr;
  // Adapts a member-function pointer into the C-style callback_t signature;
  // releases the completion after invoking the member function.
  template <typename T, void (T::*MF)(int)>
  static void callback_adapter(completion_t cb, void *arg) {
    AioCompletion *comp = reinterpret_cast<AioCompletion *>(cb);
    T *t = reinterpret_cast<T *>(arg);
    (t->*MF)(comp->get_return_value());
    comp->release();
  }
  static AioCompletion *create(void *cb_arg, callback_t cb_complete,
                               rbd_completion_t rbd_comp) {
    AioCompletion *comp = new AioCompletion();
    comp->set_complete_cb(cb_arg, cb_complete);
    comp->rbd_comp = (rbd_comp != nullptr ? rbd_comp : comp);
    return comp;
  }
  template <typename T, void (T::*MF)(int) = &T::complete>
  static AioCompletion *create(T *obj) {
    AioCompletion *comp = new AioCompletion();
    comp->set_complete_cb(obj, &callback_adapter<T, MF>);
    comp->rbd_comp = comp;
    return comp;
  }
  // Convenience factory that also initializes timing/type and starts the op.
  template <typename T, void (T::*MF)(int) = &T::complete>
  static AioCompletion *create_and_start(T *obj, ImageCtx *image_ctx,
                                         aio_type_t type) {
    AioCompletion *comp = create<T, MF>(obj);
    comp->init_time(image_ctx, type);
    comp->start_op();
    return comp;
  }
  AioCompletion() {
  }
  ~AioCompletion() {
  }
  int wait_for_complete();
  void finalize();
  inline bool is_initialized(aio_type_t type) const {
    std::unique_lock<std::mutex> locker(lock);
    return ((ictx != nullptr) && (aio_type == type));
  }
  inline bool is_started() const {
    std::unique_lock<std::mutex> locker(lock);
    return async_op.started();
  }
  void block(CephContext* cct);
  void unblock(CephContext* cct);
  void init_time(ImageCtx *i, aio_type_t t);
  void start_op();
  void fail(int r);
  void complete();
  void set_complete_cb(void *cb_arg, callback_t cb) {
    complete_cb = cb;
    complete_arg = cb_arg;
  }
  void set_request_count(uint32_t num);
  // Register one more in-flight sub-request; requires set_request_count()
  // to have accounted for it already (pending_count must be non-zero).
  void add_request() {
    ceph_assert(pending_count > 0);
    get();
  }
  void complete_request(ssize_t r);
  bool is_complete();
  ssize_t get_return_value();
  void get() {
    ceph_assert(ref > 0);
    ++ref;
  }
  // Drop the creator's reference; may only be called once per completion.
  void release() {
    bool previous_released = released.exchange(true);
    ceph_assert(!previous_released);
    put();
  }
  void put() {
    uint32_t previous_ref = ref--;
    ceph_assert(previous_ref > 0);
    if (previous_ref == 1) {
      delete this;
    }
  }
  void set_event_notify(bool s) {
    event_notify = s;
  }
  void *get_arg() {
    return complete_arg;
  }
private:
  void queue_complete();
  void complete_external_callback();
  void complete_event_socket();
  void notify_callbacks_complete();
};
// Bridges a child Context onto an AioCompletion: construction registers one
// more in-flight request; finish() reports that request's result back.
class C_AioRequest : public Context {
public:
  C_AioRequest(AioCompletion *completion) : m_completion(completion) {
    m_completion->add_request();
  }
  ~C_AioRequest() override {}
  void finish(int r) override {
    m_completion->complete_request(r);
  }
protected:
  AioCompletion *m_completion;
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_AIO_COMPLETION_H
| 5,107 | 24.039216 | 74 | h |
null | ceph-main/src/librbd/io/AsyncOperation.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/AsyncOperation.h"
#include "include/ceph_assert.h"
#include "common/dout.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::AsyncOperation: "
namespace librbd {
namespace io {
namespace {
struct C_CompleteFlushes : public Context {
ImageCtx *image_ctx;
std::list<Context *> flush_contexts;
explicit C_CompleteFlushes(ImageCtx *image_ctx,
std::list<Context *> &&flush_contexts)
: image_ctx(image_ctx), flush_contexts(std::move(flush_contexts)) {
}
void finish(int r) override {
std::shared_lock owner_locker{image_ctx->owner_lock};
while (!flush_contexts.empty()) {
Context *flush_ctx = flush_contexts.front();
flush_contexts.pop_front();
ldout(image_ctx->cct, 20) << "completed flush: " << flush_ctx << dendl;
flush_ctx->complete(0);
}
}
};
} // anonymous namespace
// Attach this operation to the image and publish it on the image's
// in-flight async-op list (newest entries at the front).
void AsyncOperation::start_op(ImageCtx &image_ctx) {
  ceph_assert(m_image_ctx == nullptr);
  m_image_ctx = &image_ctx;

  ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl;

  std::lock_guard locker{m_image_ctx->async_ops_lock};
  m_image_ctx->async_ops.push_front(&m_xlist_item);
}
// Remove this op from the image's in-flight list. Any flush contexts that
// were queued behind this op are either handed off to the next-older op
// (so they still wait for all preceding I/O) or, if this was the oldest
// op, scheduled for completion.
void AsyncOperation::finish_op() {
  ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl;
  {
    std::lock_guard l{m_image_ctx->async_ops_lock};
    // capture the iterator *before* unlinking so it can step to the
    // next-older entry afterwards
    xlist<AsyncOperation *>::iterator iter(&m_xlist_item);
    ++iter;
    ceph_assert(m_xlist_item.remove_myself());
    // linked list stored newest -> oldest ops
    if (!iter.end() && !m_flush_contexts.empty()) {
      ldout(m_image_ctx->cct, 20) << "moving flush contexts to previous op: "
                                  << *iter << dendl;
      (*iter)->m_flush_contexts.insert((*iter)->m_flush_contexts.end(),
                                       m_flush_contexts.begin(),
                                       m_flush_contexts.end());
      return;
    }
  }
  if (!m_flush_contexts.empty()) {
    // oldest op finished: fire the flush callbacks from a posted context
    C_CompleteFlushes *ctx = new C_CompleteFlushes(m_image_ctx,
                                                   std::move(m_flush_contexts));
    m_image_ctx->asio_engine->post(ctx, 0);
  }
}
// Invoke on_finish only after all ops older than this one have finished:
// queue it on the next-older op if one exists, otherwise complete now.
void AsyncOperation::flush(Context* on_finish) {
  {
    std::lock_guard locker{m_image_ctx->async_ops_lock};
    xlist<AsyncOperation *>::iterator iter(&m_xlist_item);
    ++iter;
    // linked list stored newest -> oldest ops
    if (!iter.end()) {
      (*iter)->m_flush_contexts.push_back(on_finish);
      return;
    }
  }
  m_image_ctx->asio_engine->post(on_finish, 0);
}
} // namespace io
} // namespace librbd
| 2,761 | 28.073684 | 80 | cc |
null | ceph-main/src/librbd/io/AsyncOperation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_IO_ASYNC_OPERATION_H
#define LIBRBD_IO_ASYNC_OPERATION_H
#include "include/ceph_assert.h"
#include "include/xlist.h"
#include <list>
class Context;
namespace librbd {
class ImageCtx;
namespace io {
// Tracks one in-flight asynchronous image operation on an ImageCtx so that
// flush requests can wait for all older operations to complete.
class AsyncOperation {
public:
  AsyncOperation()
    : m_image_ctx(NULL), m_xlist_item(this)
  {
  }
  ~AsyncOperation()
  {
    // must have been finished (or never started) before destruction
    ceph_assert(!m_xlist_item.is_on_list());
  }
  // true while registered on the image's async-op list
  inline bool started() const {
    return m_xlist_item.is_on_list();
  }
  void start_op(ImageCtx &image_ctx);
  void finish_op();
  void flush(Context *on_finish);
private:
  ImageCtx *m_image_ctx;
  // intrusive hook into ImageCtx::async_ops (newest -> oldest)
  xlist<AsyncOperation *>::item m_xlist_item;
  // flush callbacks waiting on this op and everything older than it
  std::list<Context *> m_flush_contexts;
};
} // namespace io
} // namespace librbd
#endif // LIBRBD_IO_ASYNC_OPERATION_H
| 871 | 15.45283 | 70 | h |
null | ceph-main/src/librbd/io/CopyupRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/CopyupRequest.h"
#include "include/neorados/RADOS.hpp"
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsioEngine.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/asio/Utils.h"
#include "librbd/deep_copy/ObjectCopyRequest.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ObjectRequest.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Utils.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::CopyupRequest: " << this \
<< " " << __func__ << ": " \
<< data_object_name(m_image_ctx, m_object_no) << " "
namespace librbd {
namespace io {
using librbd::util::data_object_name;
namespace {
// Throttled sub-operation that updates the object-map state of a single
// object for one snapshot id (or for HEAD when snap id == CEPH_NOSNAP).
template <typename I>
class C_UpdateObjectMap : public C_AsyncObjectThrottle<I> {
public:
  C_UpdateObjectMap(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                    uint64_t object_no, uint8_t head_object_map_state,
                    const std::vector<uint64_t> *snap_ids,
                    bool first_snap_is_clean, const ZTracer::Trace &trace,
                    size_t snap_id_idx)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_object_no(object_no),
      m_head_object_map_state(head_object_map_state), m_snap_ids(*snap_ids),
      m_first_snap_is_clean(first_snap_is_clean), m_trace(trace),
      m_snap_id_idx(snap_id_idx)
  {
  }
  // Returns 0 if an async update was issued, 1 to skip (no lock/object map).
  int send() override {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    if (image_ctx.exclusive_lock == nullptr) {
      return 1;
    }
    ceph_assert(image_ctx.exclusive_lock->is_lock_owner());
    std::shared_lock image_locker{image_ctx.image_lock};
    if (image_ctx.object_map == nullptr) {
      return 1;
    }
    uint64_t snap_id = m_snap_ids[m_snap_id_idx];
    if (snap_id == CEPH_NOSNAP) {
      return update_head();
    } else {
      return update_snapshot(snap_id);
    }
  }
  int update_head() {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
    // aio_update may decide no change is needed and not issue an op
    bool sent = image_ctx.object_map->template aio_update<Context>(
      CEPH_NOSNAP, m_object_no, m_head_object_map_state, {}, m_trace, false,
      this);
    return (sent ? 0 : 1);
  }
  int update_snapshot(uint64_t snap_id) {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
    uint8_t state = OBJECT_EXISTS;
    if (image_ctx.test_features(RBD_FEATURE_FAST_DIFF, image_ctx.image_lock) &&
        (m_snap_id_idx > 0 || m_first_snap_is_clean)) {
      // first snapshot should be exists+dirty since it contains
      // the copyup data -- later snapshots inherit the data.
      state = OBJECT_EXISTS_CLEAN;
    }
    bool sent = image_ctx.object_map->template aio_update<Context>(
      snap_id, m_object_no, state, {}, m_trace, true, this);
    ceph_assert(sent);
    return 0;
  }
private:
  uint64_t m_object_no;
  uint8_t m_head_object_map_state;
  const std::vector<uint64_t> &m_snap_ids;
  bool m_first_snap_is_clean;
  const ZTracer::Trace &m_trace;
  size_t m_snap_id_idx;   // index into m_snap_ids handled by this instance
};
} // anonymous namespace
// Construct a copyup for one child object; registers itself as an async op
// so image shutdown/flush waits for it.
template <typename I>
CopyupRequest<I>::CopyupRequest(I *ictx, uint64_t objectno,
                                Extents &&image_extents, ImageArea area,
                                const ZTracer::Trace &parent_trace)
  : m_image_ctx(ictx), m_object_no(objectno),
    m_image_extents(std::move(image_extents)), m_image_area(area),
    m_trace(librbd::util::create_trace(*m_image_ctx, "copy-up", parent_trace))
{
  ceph_assert(m_image_ctx->data_ctx.is_valid());
  m_async_op.start_op(*librbd::util::get_image_ctx(m_image_ctx));
}
template <typename I>
CopyupRequest<I>::~CopyupRequest() {
  // all queued write-ops must have been completed/restarted by finish()
  ceph_assert(m_pending_requests.empty());
  m_async_op.finish_op();
}
// Attach a write-op to this in-flight copyup. While appends are still
// permitted the request rides along with the copyup; afterwards it is
// parked for a restart once the copyup finishes.
template <typename I>
void CopyupRequest<I>::append_request(AbstractObjectWriteRequest<I> *req,
                                      const Extents& object_extents) {
  std::lock_guard locker{m_lock};

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "object_request=" << req << ", "
                 << "append=" << m_append_request_permitted << dendl;

  if (!m_append_request_permitted) {
    m_restart_requests.push_back(req);
    return;
  }

  m_pending_requests.push_back(req);
  for (const auto& [offset, length] : object_extents) {
    if (length > 0) {
      // remember written ranges so copyup data can exclude them later
      m_write_object_extents.union_insert(offset, length);
    }
  }
}
// Entry point: kicks off the state machine (see diagram in the header).
template <typename I>
void CopyupRequest<I>::send() {
  read_from_parent();
}
// Issue a sparse read of the parent's data backing this object, or detour
// to the deep-copy path when a migration is in progress.
template <typename I>
void CopyupRequest<I>::read_from_parent() {
  auto cct = m_image_ctx->cct;
  std::shared_lock image_locker{m_image_ctx->image_lock};
  if (m_image_ctx->parent == nullptr) {
    ldout(cct, 5) << "parent detached" << dendl;
    // complete asynchronously with -ENOENT as if the parent had no data
    m_image_ctx->asio_engine->post(
      [this]() { handle_read_from_parent(-ENOENT); });
    return;
  } else if (is_deep_copy()) {
    deep_copy();
    return;
  }
  auto comp = AioCompletion::create_and_start<
    CopyupRequest<I>,
    &CopyupRequest<I>::handle_read_from_parent>(
      this, librbd::util::get_image_ctx(m_image_ctx->parent), AIO_TYPE_READ);
  ldout(cct, 20) << "completion=" << comp
                 << " image_extents=" << m_image_extents
                 << " area=" << m_image_area << dendl;
  // sparse result is collected into m_copyup_extent_map / m_copyup_data
  auto req = io::ImageDispatchSpec::create_read(
    *m_image_ctx->parent, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, comp,
    std::move(m_image_extents), m_image_area,
    ReadResult{&m_copyup_extent_map, &m_copyup_data},
    m_image_ctx->parent->get_data_io_context(), 0, 0, m_trace);
  req->send();
}
// Post-process the parent read: freeze the append window, massage the data
// through the dispatch layers, and decide whether a copyup is needed at all.
// Lock order (taken/released manually): image_lock (shared) before m_lock.
template <typename I>
void CopyupRequest<I>::handle_read_from_parent(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    m_lock.lock();
    disable_append_requests();
    m_lock.unlock();
    lderr(cct) << "error reading from parent: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  convert_copyup_extent_map();
  m_image_ctx->image_lock.lock_shared();
  m_lock.lock();
  disable_append_requests();
  r = prepare_copyup_data();
  if (r < 0) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();
    lderr(m_image_ctx->cct) << "failed to prepare copyup data: "
                            << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  m_copyup_is_zero = m_copyup_data.is_zero();
  m_copyup_required = is_copyup_required();
  if (!m_copyup_required) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();
    ldout(cct, 20) << "no-op, skipping" << dendl;
    finish(0);
    return;
  }
  // copyup() will affect snapshots only if parent data is not all
  // zeros.
  if (!m_copyup_is_zero) {
    // newest -> oldest snapshot ids for the object-map update pass
    m_snap_ids.insert(m_snap_ids.end(), m_image_ctx->snaps.rbegin(),
                      m_image_ctx->snaps.rend());
  }
  m_lock.unlock();
  m_image_ctx->image_lock.unlock_shared();
  update_object_maps();
}
// Migration path: deep-copy the object from the migration source instead of
// reading through the parent. Flattens when a copyup is already required or
// the migration itself requested flattening.
template <typename I>
void CopyupRequest<I>::deep_copy() {
  auto cct = m_image_ctx->cct;
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  ceph_assert(m_image_ctx->parent != nullptr);
  m_lock.lock();
  m_deep_copied = true;
  m_flatten = is_copyup_required() ? true : m_image_ctx->migration_info.flatten;
  m_lock.unlock();
  ldout(cct, 20) << "flatten=" << m_flatten << dendl;
  uint32_t flags = deep_copy::OBJECT_COPY_REQUEST_FLAG_MIGRATION;
  if (m_flatten) {
    flags |= deep_copy::OBJECT_COPY_REQUEST_FLAG_FLATTEN;
  }
  auto ctx = librbd::util::create_context_callback<
    CopyupRequest<I>, &CopyupRequest<I>::handle_deep_copy>(this);
  auto req = deep_copy::ObjectCopyRequest<I>::create(
    m_image_ctx->parent, m_image_ctx, 0, 0,
    m_image_ctx->migration_info.snap_map, m_object_no, flags, nullptr, ctx);
  req->send();
}
// Handle deep-copy completion: possibly retry with flattening, bail out on
// error, skip when nothing else is needed, or continue to the object-map
// update pass. Lock order: image_lock (shared) before m_lock.
template <typename I>
void CopyupRequest<I>::handle_deep_copy(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  m_image_ctx->image_lock.lock_shared();
  m_lock.lock();
  m_copyup_required = is_copyup_required();
  if (r == -ENOENT && !m_flatten && m_copyup_required) {
    // source object vanished but write-ops still need a copyup --
    // re-run the whole state machine with flattening enabled
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();
    ldout(cct, 10) << "restart deep-copy with flatten" << dendl;
    send();
    return;
  }
  disable_append_requests();
  if (r < 0 && r != -ENOENT) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();
    lderr(cct) << "error encountered during deep-copy: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }
  if (!m_copyup_required && !is_update_object_map_required(r)) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();
    if (r == -ENOENT) {
      r = 0;
    }
    ldout(cct, 20) << "skipping" << dendl;
    finish(r);
    return;
  }
  // For deep-copy, copyup() will never affect snapshots. However,
  // this state machine is responsible for updating object maps for
  // snapshots that have been created on destination image after
  // migration started.
  if (r != -ENOENT) {
    compute_deep_copy_snap_ids();
  }
  m_lock.unlock();
  m_image_ctx->image_lock.unlock_shared();
  update_object_maps();
}
// Update the object map for every snapshot in m_snap_ids (plus HEAD when its
// recorded state differs), throttled via AsyncObjectThrottle; skips straight
// to copyup() when no object map is loaded.
template <typename I>
void CopyupRequest<I>::update_object_maps() {
  std::shared_lock owner_locker{m_image_ctx->owner_lock};
  std::shared_lock image_locker{m_image_ctx->image_lock};
  if (m_image_ctx->object_map == nullptr) {
    image_locker.unlock();
    owner_locker.unlock();
    copyup();
    return;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  bool copy_on_read = m_pending_requests.empty();
  uint8_t head_object_map_state = OBJECT_EXISTS;
  if (copy_on_read && !m_snap_ids.empty() &&
      m_image_ctx->test_features(RBD_FEATURE_FAST_DIFF,
                                 m_image_ctx->image_lock)) {
    // HEAD is non-dirty since data is tied to first snapshot
    head_object_map_state = OBJECT_EXISTS_CLEAN;
  }
  auto r_it = m_pending_requests.rbegin();
  if (r_it != m_pending_requests.rend()) {
    // last write-op determines the final object map state
    head_object_map_state = (*r_it)->get_pre_write_object_map_state();
  }
  if ((*m_image_ctx->object_map)[m_object_no] != head_object_map_state) {
    // (maybe) need to update the HEAD object map state
    m_snap_ids.push_back(CEPH_NOSNAP);
  }
  image_locker.unlock();
  ceph_assert(m_image_ctx->exclusive_lock->is_lock_owner());
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_UpdateObjectMap<I>>(),
    boost::lambda::_1, m_image_ctx, m_object_no, head_object_map_state,
    &m_snap_ids, m_first_snap_is_clean, m_trace, boost::lambda::_2));
  auto ctx = librbd::util::create_context_callback<
    CopyupRequest<I>, &CopyupRequest<I>::handle_update_object_maps>(this);
  auto throttle = new AsyncObjectThrottle<I>(
    nullptr, *m_image_ctx, context_factory, ctx, nullptr, 0, m_snap_ids.size());
  throttle->start_ops(
    m_image_ctx->config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
// Object-map pass finished: fail the request on error, otherwise proceed
// to issue the actual copyup I/O.
template <typename I>
void CopyupRequest<I>::handle_update_object_maps(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_image_ctx->cct) << "failed to update object map: "
                            << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  copyup();
}
// Assemble and issue the RADOS copyup op(s). A stand-alone copyup (empty
// snapshot context) is used for copy-on-read and deep-copyup cases; any
// queued write-ops are merged into a second op issued with the current
// snapshot context.
template <typename I>
void CopyupRequest<I>::copyup() {
  auto cct = m_image_ctx->cct;
  m_image_ctx->image_lock.lock_shared();
  auto snapc = m_image_ctx->snapc;
  auto io_context = m_image_ctx->get_data_io_context();
  m_image_ctx->image_lock.unlock_shared();
  m_lock.lock();
  if (!m_copyup_required) {
    m_lock.unlock();
    ldout(cct, 20) << "skipping copyup" << dendl;
    finish(0);
    return;
  }
  ldout(cct, 20) << dendl;
  bool copy_on_read = m_pending_requests.empty() && !m_deep_copied;
  bool deep_copyup = !snapc.snaps.empty() && !m_copyup_is_zero;
  if (m_copyup_is_zero) {
    // all-zero parent data: an empty copyup op still records existence
    m_copyup_data.clear();
    m_copyup_extent_map.clear();
  }
  neorados::WriteOp copyup_op;
  neorados::WriteOp write_op;
  neorados::WriteOp* op;
  if (copy_on_read || deep_copyup) {
    // copyup-op will use its own request issued to the initial object revision
    op = &copyup_op;
    ++m_pending_copyups;
  } else {
    // copyup-op can be combined with the write-ops (if any)
    op = &write_op;
  }
  if (m_image_ctx->enable_sparse_copyup) {
    cls_client::sparse_copyup(op, m_copyup_extent_map, m_copyup_data);
  } else {
    // convert the sparse read back into a standard (thick) read
    Striper::StripedReadResult destriper;
    destriper.add_partial_sparse_result(
      cct, std::move(m_copyup_data), m_copyup_extent_map, 0,
      {{0, m_image_ctx->layout.object_size}});
    bufferlist thick_bl;
    destriper.assemble_result(cct, thick_bl, false);
    cls_client::copyup(op, thick_bl);
  }
  ObjectRequest<I>::add_write_hint(*m_image_ctx, op);
  if (!copy_on_read) {
    // merge all pending write ops into this single RADOS op
    for (auto req : m_pending_requests) {
      ldout(cct, 20) << "add_copyup_ops " << req << dendl;
      req->add_copyup_ops(&write_op);
    }
    if (write_op.size() > 0) {
      ++m_pending_copyups;
    }
  }
  m_lock.unlock();
  // issue librados ops at the end to simplify test cases
  auto object = neorados::Object{data_object_name(m_image_ctx, m_object_no)};
  if (copyup_op.size() > 0) {
    // send only the copyup request with a blank snapshot context so that
    // all snapshots are detected from the parent for this object. If
    // this is a CoW request, a second request will be created for the
    // actual modification.
    ldout(cct, 20) << "copyup with empty snapshot context" << dendl;
    auto copyup_io_context = *io_context;
    copyup_io_context.write_snap_context({});
    m_image_ctx->rados_api.execute(
      object, copyup_io_context, std::move(copyup_op),
      librbd::asio::util::get_callback_adapter(
        [this](int r) { handle_copyup(r); }), nullptr,
      (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
  }
  if (write_op.size() > 0) {
    // compare-and-write doesn't add any write ops (copyup+cmpext+write
    // can't be executed in the same RADOS op because, unless the object
    // was already present in the clone, cmpext wouldn't see it)
    ldout(cct, 20) << (!deep_copyup && write_op.size() > 2 ?
                        "copyup + ops" : !deep_copyup ? "copyup" : "ops")
                   << " with current snapshot context" << dendl;
    m_image_ctx->rados_api.execute(
      object, *io_context, std::move(write_op),
      librbd::asio::util::get_callback_adapter(
        [this](int r) { handle_copyup(r); }), nullptr,
      (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
  }
}
// Completion for each issued RADOS op (one or two may be in flight). The
// first error observed wins; the request only finalizes once all ops are in.
template <typename I>
void CopyupRequest<I>::handle_copyup(int r) {
  auto cct = m_image_ctx->cct;
  unsigned pending_copyups;
  int copyup_ret_val = r;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_pending_copyups > 0);
    pending_copyups = --m_pending_copyups;
    if (m_copyup_ret_val < 0) {
      // an earlier op already failed -- propagate its error instead
      copyup_ret_val = m_copyup_ret_val;
    } else if (r < 0) {
      m_copyup_ret_val = r;
    }
  }
  ldout(cct, 20) << "r=" << r << ", "
                 << "pending=" << pending_copyups << dendl;
  if (pending_copyups == 0) {
    if (copyup_ret_val < 0 && copyup_ret_val != -ENOENT) {
      lderr(cct) << "failed to copyup object: " << cpp_strerror(copyup_ret_val)
                 << dendl;
      // fail the queued write-ops now; finish(0) then only restarts the rest
      complete_requests(false, copyup_ret_val);
    }
    finish(0);
  }
}
// Terminal state: complete/restart all still-queued write-ops and
// self-destruct.
template <typename I>
void CopyupRequest<I>::finish(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;

  complete_requests(true, r);
  delete this;
}
// Complete every appended write-op with r and hand restart-queued ops back
// with -ERESTART (unless overridden) so they retry against a fresh copyup.
// The pop-one-at-a-time loops keep iteration safe against re-entrancy.
template <typename I>
void CopyupRequest<I>::complete_requests(bool override_restart_retval, int r) {
  auto cct = m_image_ctx->cct;
  // remove from the image-wide table first so restarted ops don't re-attach
  remove_from_list();
  while (!m_pending_requests.empty()) {
    auto it = m_pending_requests.begin();
    auto req = *it;
    ldout(cct, 20) << "completing request " << req << dendl;
    req->handle_copyup(r);
    m_pending_requests.erase(it);
  }
  if (override_restart_retval) {
    r = -ERESTART;
  }
  while (!m_restart_requests.empty()) {
    auto it = m_restart_requests.begin();
    auto req = *it;
    ldout(cct, 20) << "restarting request " << req << dendl;
    req->handle_copyup(r);
    m_restart_requests.erase(it);
  }
}
// Freeze the append window: later write-ops are parked in
// m_restart_requests instead of joining this copyup. Caller holds m_lock.
template <typename I>
void CopyupRequest<I>::disable_append_requests() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  m_append_request_permitted = false;
}
// De-register this request from the image-wide in-flight copyup table;
// erase-by-key is a no-op when the entry was already removed.
template <typename I>
void CopyupRequest<I>::remove_from_list() {
  std::lock_guard copyup_list_locker{m_image_ctx->copyup_list_lock};

  m_image_ctx->copyup_list.erase(m_object_no);
}
// Decide whether a copyup op must be issued at all. Caller holds m_lock.
template <typename I>
bool CopyupRequest<I>::is_copyup_required() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (m_pending_requests.empty()) {
    // always force a copyup if CoR enabled
    return true;
  }

  if (!m_copyup_is_zero) {
    // parent data is non-zero -- it must be preserved
    return true;
  }

  // zero parent data: only required when some queued op actually writes
  for (auto* write_req : m_pending_requests) {
    if (!write_req->is_empty_write_op()) {
      return true;
    }
  }
  return false;
}
// A migration in progress selects the deep-copy path over a parent read.
template <typename I>
bool CopyupRequest<I>::is_deep_copy() const {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  return !m_image_ctx->migration_info.empty();
}
// After a deep-copy, determine whether an object-map update pass is still
// needed given the deep-copy result code.
template <typename I>
bool CopyupRequest<I>::is_update_object_map_required(int r) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  if (r < 0) {
    return false;
  }
  if (m_image_ctx->object_map == nullptr) {
    return false;
  }
  if (m_image_ctx->migration_info.empty()) {
    // migration might have completed while IO was in-flight,
    // assume worst-case and perform an object map update
    return true;
  }
  // HEAD handled by the deep-copy itself only when it maps to CEPH_NOSNAP
  auto it = m_image_ctx->migration_info.snap_map.find(CEPH_NOSNAP);
  ceph_assert(it != m_image_ctx->migration_info.snap_map.end());
  return it->second[0] != CEPH_NOSNAP;
}
// Build m_snap_ids (newest -> oldest) for the object-map pass after a
// deep-copy, excluding snapshots already handled by the deep-copy itself
// and snapshots with no parent overlap for this object.
template <typename I>
void CopyupRequest<I>::compute_deep_copy_snap_ids() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  // don't copy ids for the snaps updated by object deep copy or
  // that don't overlap
  std::set<uint64_t> deep_copied;
  for (auto &it : m_image_ctx->migration_info.snap_map) {
    if (it.first != CEPH_NOSNAP) {
      deep_copied.insert(it.second.front());
    }
  }
  ldout(m_image_ctx->cct, 15) << "deep_copied=" << deep_copied << dendl;
  std::copy_if(m_image_ctx->snaps.rbegin(), m_image_ctx->snaps.rend(),
               std::back_inserter(m_snap_ids),
               [this, cct=m_image_ctx->cct, &deep_copied](uint64_t snap_id) {
      if (deep_copied.count(snap_id)) {
        // deep-copy already wrote this snap's object-map state
        m_first_snap_is_clean = true;
        return false;
      }
      uint64_t raw_overlap = 0;
      uint64_t object_overlap = 0;
      int r = m_image_ctx->get_parent_overlap(snap_id, &raw_overlap);
      if (r < 0) {
        ldout(cct, 5) << "failed getting parent overlap for snap_id: "
                      << snap_id << ": " << cpp_strerror(r) << dendl;
      } else if (raw_overlap > 0) {
        auto [parent_extents, area] = util::object_to_area_extents(
          m_image_ctx, m_object_no, {{0, m_image_ctx->layout.object_size}});
        object_overlap = m_image_ctx->prune_parent_extents(parent_extents, area,
                                                           raw_overlap, false);
      }
      return object_overlap > 0;
    });
}
// Translate the parent read's image-extent map (in place) into extents
// local to this object, ready for the copyup op.
template <typename I>
void CopyupRequest<I>::convert_copyup_extent_map() {
  auto cct = m_image_ctx->cct;
  Extents image_extent_map;
  image_extent_map.swap(m_copyup_extent_map);
  m_copyup_extent_map.reserve(image_extent_map.size());
  // convert the image-extent extent map to object-extents
  for (auto [image_offset, image_length] : image_extent_map) {
    striper::LightweightObjectExtents object_extents;
    util::area_to_object_extents(m_image_ctx, image_offset, image_length,
                                 m_image_area, 0, &object_extents);
    for (auto& object_extent : object_extents) {
      m_copyup_extent_map.emplace_back(
        object_extent.offset, object_extent.length);
    }
  }
  ldout(cct, 20) << "image_extents=" << image_extent_map << ", "
                 << "object_extents=" << m_copyup_extent_map << dendl;
}
// Build a sparse bufferlist from the parent data (full, or minus ranges the
// queued write-ops will overwrite), let object dispatch layers transform it,
// then rebuild m_copyup_extent_map / m_copyup_data. Returns 0 or -errno.
// Caller holds image_lock (shared) and m_lock.
template <typename I>
int CopyupRequest<I>::prepare_copyup_data() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  auto cct = m_image_ctx->cct;
  SnapshotSparseBufferlist snapshot_sparse_bufferlist;
  auto& sparse_bufferlist = snapshot_sparse_bufferlist[0];
  bool copy_on_read = m_pending_requests.empty();
  bool maybe_deep_copyup = !m_image_ctx->snapc.snaps.empty();
  if (copy_on_read || maybe_deep_copyup) {
    // stand-alone copyup that will not be overwritten until HEAD revision
    ldout(cct, 20) << "processing full copy-up" << dendl;
    uint64_t buffer_offset = 0;
    for (auto [object_offset, object_length] : m_copyup_extent_map) {
      bufferlist sub_bl;
      sub_bl.substr_of(m_copyup_data, buffer_offset, object_length);
      buffer_offset += object_length;
      sparse_bufferlist.insert(
        object_offset, object_length,
        {SPARSE_EXTENT_STATE_DATA, object_length, std::move(sub_bl)});
    }
  } else {
    // copyup that will be concurrently written to the HEAD revision with
    // the associated write-ops so only process partial extents
    uint64_t buffer_offset = 0;
    for (auto [object_offset, object_length] : m_copyup_extent_map) {
      interval_set<uint64_t> copyup_object_extents;
      copyup_object_extents.insert(object_offset, object_length);
      interval_set<uint64_t> intersection;
      intersection.intersection_of(copyup_object_extents,
                                   m_write_object_extents);
      // extract only portions of the parent copyup data that have not
      // been overwritten by write-ops
      copyup_object_extents.subtract(intersection);
      for (auto [copyup_offset, copyup_length] : copyup_object_extents) {
        bufferlist sub_bl;
        sub_bl.substr_of(
          m_copyup_data, buffer_offset + (copyup_offset - object_offset),
          copyup_length);
        ceph_assert(sub_bl.length() == copyup_length);
        sparse_bufferlist.insert(
          copyup_offset, copyup_length,
          {SPARSE_EXTENT_STATE_DATA, copyup_length, std::move(sub_bl)});
      }
      buffer_offset += object_length;
    }
    ldout(cct, 20) << "processing partial copy-up: " << sparse_bufferlist
                   << dendl;
  }
  // Let dispatch layers have a chance to process the data
  auto r = m_image_ctx->io_object_dispatcher->prepare_copyup(
    m_object_no, &snapshot_sparse_bufferlist);
  if (r < 0) {
    return r;
  }
  // Convert sparse extents back to extent map
  m_copyup_data.clear();
  m_copyup_extent_map.clear();
  m_copyup_extent_map.reserve(sparse_bufferlist.ext_count());
  for (auto& extent : sparse_bufferlist) {
    auto& sbe = extent.get_val();
    if (sbe.state == SPARSE_EXTENT_STATE_DATA) {
      m_copyup_extent_map.emplace_back(extent.get_off(), extent.get_len());
      m_copyup_data.append(sbe.bl);
    }
  }
  return 0;
}
} // namespace io
} // namespace librbd
template class librbd::io::CopyupRequest<librbd::ImageCtx>;
| 23,927 | 29.914729 | 85 | cc |
null | ceph-main/src/librbd/io/CopyupRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_COPYUP_REQUEST_H
#define CEPH_LIBRBD_IO_COPYUP_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/interval_set.h"
#include "common/ceph_mutex.h"
#include "common/zipkin_trace.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/Types.h"
#include <map>
#include <string>
#include <vector>
namespace ZTracer { struct Trace; }
namespace librbd {
struct ImageCtx;
namespace io {
template <typename I> class AbstractObjectWriteRequest;
// State machine that copies parent/migration-source data into a clone's
// child object before (or instead of) applying pending write-ops.
// Self-deleting: finish() destroys the request.
template <typename ImageCtxT = librbd::ImageCtx>
class CopyupRequest {
public:
  static CopyupRequest* create(ImageCtxT *ictx, uint64_t objectno,
                               Extents &&image_extents, ImageArea area,
                               const ZTracer::Trace &parent_trace) {
    return new CopyupRequest(ictx, objectno, std::move(image_extents), area,
                             parent_trace);
  }
  CopyupRequest(ImageCtxT *ictx, uint64_t objectno,
                Extents &&image_extents, ImageArea area,
                const ZTracer::Trace &parent_trace);
  ~CopyupRequest();
  // Attach a write-op (covering object_extents) to this in-flight copyup.
  void append_request(AbstractObjectWriteRequest<ImageCtxT> *req,
                      const Extents& object_extents);
  void send();
private:
  /**
   * Copyup requests go through the following state machine to read from the
   * parent image, update the object map, and copyup the object:
   *
   *
   * @verbatim
   *
   * <start>
   * |
   * /---------/ \---------\
   * | |
   * v v
   * READ_FROM_PARENT DEEP_COPY
   * | |
   * \---------\ /---------/
   * |
   * v (skip if not needed)
   * UPDATE_OBJECT_MAPS
   * |
   * v (skip if not needed)
   * COPYUP
   * |
   * v
   * <finish>
   *
   * @endverbatim
   *
   * The OBJECT_MAP state is skipped if the object map isn't enabled or if
   * an object map update isn't required. The COPYUP state is skipped if
   * no data was read from the parent *and* there are no additional ops.
   */
  typedef std::vector<AbstractObjectWriteRequest<ImageCtxT> *> WriteRequests;
  ImageCtxT *m_image_ctx;
  uint64_t m_object_no;             // child object being copied up
  Extents m_image_extents;          // parent extents to read
  ImageArea m_image_area;
  ZTracer::Trace m_trace;
  bool m_flatten = false;           // deep-copy with flattening requested
  bool m_copyup_required = true;
  bool m_copyup_is_zero = true;     // parent data read back as all zeros
  bool m_deep_copied = false;
  Extents m_copyup_extent_map;      // sparse extents of m_copyup_data
  ceph::bufferlist m_copyup_data;
  AsyncOperation m_async_op;        // keeps image open while in flight
  // snapshot ids (newest -> oldest) needing object-map updates
  std::vector<uint64_t> m_snap_ids;
  bool m_first_snap_is_clean = false;
  // protects the request queues and copyup bookkeeping below
  ceph::mutex m_lock = ceph::make_mutex("CopyupRequest", false);
  WriteRequests m_pending_requests;
  unsigned m_pending_copyups = 0;   // RADOS ops still in flight
  int m_copyup_ret_val = 0;         // first error observed
  // write-ops that arrived too late to append; restarted on finish
  WriteRequests m_restart_requests;
  bool m_append_request_permitted = true;
  // object ranges the pending write-ops will overwrite
  interval_set<uint64_t> m_write_object_extents;
  void read_from_parent();
  void handle_read_from_parent(int r);
  void deep_copy();
  void handle_deep_copy(int r);
  void update_object_maps();
  void handle_update_object_maps(int r);
  void copyup();
  void handle_copyup(int r);
  void finish(int r);
  void complete_requests(bool override_restart_retval, int r);
  void disable_append_requests();
  void remove_from_list();
  bool is_copyup_required();
  bool is_update_object_map_required(int r);
  bool is_deep_copy() const;
  void compute_deep_copy_snap_ids();
  void convert_copyup_extent_map();
  int prepare_copyup_data();
};
} // namespace io
} // namespace librbd
extern template class librbd::io::CopyupRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_COPYUP_REQUEST_H
| 3,844 | 25.335616 | 77 | h |
null | ceph-main/src/librbd/io/Dispatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_DISPATCHER_H
#define CEPH_LIBRBD_IO_DISPATCHER_H
#include "include/int_types.h"
#include "include/Context.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"
#include "common/AsyncOpTracker.h"
#include "librbd/Utils.h"
#include "librbd/io/DispatcherInterface.h"
#include "librbd/io/Types.h"
#include <map>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::Dispatcher: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
template <typename ImageCtxT, typename DispatchInterfaceT>
class Dispatcher : public DispatchInterfaceT {
public:
typedef typename DispatchInterfaceT::Dispatch Dispatch;
typedef typename DispatchInterfaceT::DispatchLayer DispatchLayer;
typedef typename DispatchInterfaceT::DispatchSpec DispatchSpec;
Dispatcher(ImageCtxT* image_ctx)
: m_image_ctx(image_ctx),
m_lock(ceph::make_shared_mutex(
librbd::util::unique_lock_name("librbd::io::Dispatcher::lock",
this))) {
}
virtual ~Dispatcher() {
ceph_assert(m_dispatches.empty());
}
void shut_down(Context* on_finish) override {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
std::map<DispatchLayer, DispatchMeta> dispatches;
{
std::unique_lock locker{m_lock};
std::swap(dispatches, m_dispatches);
}
for (auto it : dispatches) {
shut_down_dispatch(it.second, &on_finish);
}
on_finish->complete(0);
}
void register_dispatch(Dispatch* dispatch) override {
auto cct = m_image_ctx->cct;
auto type = dispatch->get_dispatch_layer();
ldout(cct, 5) << "dispatch_layer=" << type << dendl;
std::unique_lock locker{m_lock};
auto result = m_dispatches.insert(
{type, {dispatch, new AsyncOpTracker()}});
ceph_assert(result.second);
}
bool exists(DispatchLayer dispatch_layer) override {
std::unique_lock locker{m_lock};
return m_dispatches.find(dispatch_layer) != m_dispatches.end();
}
void shut_down_dispatch(DispatchLayer dispatch_layer,
Context* on_finish) override {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << "dispatch_layer=" << dispatch_layer << dendl;
DispatchMeta dispatch_meta;
{
std::unique_lock locker{m_lock};
auto it = m_dispatches.find(dispatch_layer);
if (it == m_dispatches.end()) {
on_finish->complete(0);
return;
}
dispatch_meta = it->second;
m_dispatches.erase(it);
}
shut_down_dispatch(dispatch_meta, &on_finish);
on_finish->complete(0);
}
void send(DispatchSpec* dispatch_spec) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "dispatch_spec=" << dispatch_spec << dendl;
auto dispatch_layer = dispatch_spec->dispatch_layer;
// apply the IO request to all layers -- this method will be re-invoked
// by the dispatch layer if continuing / restarting the IO
while (true) {
m_lock.lock_shared();
dispatch_layer = dispatch_spec->dispatch_layer;
auto it = m_dispatches.upper_bound(dispatch_layer);
if (it == m_dispatches.end()) {
// the request is complete if handled by all layers
dispatch_spec->dispatch_result = DISPATCH_RESULT_COMPLETE;
m_lock.unlock_shared();
break;
}
auto& dispatch_meta = it->second;
auto dispatch = dispatch_meta.dispatch;
auto async_op_tracker = dispatch_meta.async_op_tracker;
dispatch_spec->dispatch_result = DISPATCH_RESULT_INVALID;
// prevent recursive locking back into the dispatcher while handling IO
async_op_tracker->start_op();
m_lock.unlock_shared();
// advance to next layer in case we skip or continue
dispatch_spec->dispatch_layer = dispatch->get_dispatch_layer();
bool handled = send_dispatch(dispatch, dispatch_spec);
async_op_tracker->finish_op();
// handled ops will resume when the dispatch ctx is invoked
if (handled) {
return;
}
}
// skipped through to the last layer
dispatch_spec->dispatcher_ctx.complete(0);
}
protected:
struct DispatchMeta {
Dispatch* dispatch = nullptr;
AsyncOpTracker* async_op_tracker = nullptr;
DispatchMeta() {
}
DispatchMeta(Dispatch* dispatch, AsyncOpTracker* async_op_tracker)
: dispatch(dispatch), async_op_tracker(async_op_tracker) {
}
};
ImageCtxT* m_image_ctx;
ceph::shared_mutex m_lock;
std::map<DispatchLayer, DispatchMeta> m_dispatches;
virtual bool send_dispatch(Dispatch* dispatch,
DispatchSpec* dispatch_spec) = 0;
protected:
struct C_LayerIterator : public Context {
Dispatcher* dispatcher;
Context* on_finish;
DispatchLayer dispatch_layer;
C_LayerIterator(Dispatcher* dispatcher,
DispatchLayer start_layer,
Context* on_finish)
: dispatcher(dispatcher), on_finish(on_finish), dispatch_layer(start_layer) {
}
void complete(int r) override {
while (true) {
dispatcher->m_lock.lock_shared();
auto it = dispatcher->m_dispatches.upper_bound(dispatch_layer);
if (it == dispatcher->m_dispatches.end()) {
dispatcher->m_lock.unlock_shared();
Context::complete(r);
return;
}
auto& dispatch_meta = it->second;
auto dispatch = dispatch_meta.dispatch;
// prevent recursive locking back into the dispatcher while handling IO
dispatch_meta.async_op_tracker->start_op();
dispatcher->m_lock.unlock_shared();
// next loop should start after current layer
dispatch_layer = dispatch->get_dispatch_layer();
auto handled = execute(dispatch, this);
dispatch_meta.async_op_tracker->finish_op();
if (handled) {
break;
}
}
}
void finish(int r) override {
on_finish->complete(0);
}
virtual bool execute(Dispatch* dispatch,
Context* on_finish) = 0;
};
struct C_InvalidateCache : public C_LayerIterator {
C_InvalidateCache(Dispatcher* dispatcher, DispatchLayer start_layer, Context* on_finish)
: C_LayerIterator(dispatcher, start_layer, on_finish) {
}
bool execute(Dispatch* dispatch,
Context* on_finish) override {
return dispatch->invalidate_cache(on_finish);
}
};
private:
void shut_down_dispatch(DispatchMeta& dispatch_meta,
Context** on_finish) {
auto dispatch = dispatch_meta.dispatch;
auto async_op_tracker = dispatch_meta.async_op_tracker;
auto ctx = *on_finish;
ctx = new LambdaContext(
[dispatch, async_op_tracker, ctx](int r) {
delete dispatch;
delete async_op_tracker;
ctx->complete(r);
});
ctx = new LambdaContext([dispatch, ctx](int r) {
dispatch->shut_down(ctx);
});
*on_finish = new LambdaContext([async_op_tracker, ctx](int r) {
async_op_tracker->wait_for_ops(ctx);
});
}
};
} // namespace io
} // namespace librbd
#undef dout_subsys
#undef dout_prefix
#define dout_prefix *_dout
#endif // CEPH_LIBRBD_IO_DISPATCHER_H
| 7,351 | 28.059289 | 92 | h |
null | ceph-main/src/librbd/io/DispatcherInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_DISPATCHER_INTERFACE_H
#define CEPH_LIBRBD_IO_DISPATCHER_INTERFACE_H
#include "include/int_types.h"
struct Context;
namespace librbd {
namespace io {
template <typename DispatchT>
// Abstract contract for a dispatcher that routes IO specs through a stack of
// dispatch layers (see Dispatcher<> for the concrete implementation).
struct DispatcherInterface {
public:
  typedef DispatchT Dispatch;
  typedef typename DispatchT::DispatchLayer DispatchLayer;
  typedef typename DispatchT::DispatchSpec DispatchSpec;

  virtual ~DispatcherInterface() {
  }

  // Shut down all registered layers, then complete 'on_finish'.
  virtual void shut_down(Context* on_finish) = 0;

  // Register a new dispatch layer.
  virtual void register_dispatch(Dispatch* dispatch) = 0;

  // True if a layer is registered for 'dispatch_layer'.
  virtual bool exists(DispatchLayer dispatch_layer) = 0;

  // Remove and shut down a single layer; completes 'on_finish' when done.
  virtual void shut_down_dispatch(DispatchLayer dispatch_layer,
                                  Context* on_finish) = 0;

  // Route 'dispatch_spec' through the registered layers.
  virtual void send(DispatchSpec* dispatch_spec) = 0;
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_DISPATCHER_INTERFACE_H
| 967 | 24.473684 | 70 | h |
null | ceph-main/src/librbd/io/FlushTracker.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/FlushTracker.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::FlushTracker: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
// Construct a tracker bound to 'image_ctx'; m_lock guards all internal
// tid bookkeeping.
template <typename I>
FlushTracker<I>::FlushTracker(I* image_ctx)
  : m_image_ctx(image_ctx),
    m_lock(ceph::make_shared_mutex(
      util::unique_lock_name("librbd::io::FlushTracker::m_lock", this))) {
}
// All pending flush contexts must have been drained (via shut_down() or
// finish_io()) before destruction.
template <typename I>
FlushTracker<I>::~FlushTracker() {
  std::unique_lock locker{m_lock};
  ceph_assert(m_flush_contexts.empty());
}
// Drain the tracker: complete every pending flush context (with success)
// regardless of which IOs remain outstanding.
template <typename I>
void FlushTracker<I>::shut_down() {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;

  // steal the entire pending-flush map while holding the lock...
  FlushContexts flush_contexts;
  {
    std::unique_lock locker{m_lock};
    std::swap(flush_contexts, m_flush_contexts);
  }

  // ...and fire the callbacks outside of it, in flush-tid order
  for (auto& [flush_tid, ctxs] : flush_contexts) {
    for (auto ctx : ctxs) {
      ctx->complete(0);
    }
  }
}
// Assign (or re-use) a monotonically increasing flush tid for the given IO
// 'tid', record it as in-flight, and return the flush tid.
template <typename I>
uint64_t FlushTracker<I>::start_io(uint64_t tid) {
  auto cct = m_image_ctx->cct;

  std::unique_lock locker{m_lock};
  // if 'tid' is already registered, insert() is a no-op and the existing
  // flush tid is re-used (the freshly incremented counter value is simply
  // discarded, leaving a gap in the sequence)
  auto [it, inserted] = m_tid_to_flush_tid.insert({tid, ++m_next_flush_tid});
  auto flush_tid = it->second;
  m_in_flight_flush_tids.insert(flush_tid);
  locker.unlock();

  ldout(cct, 20) << "tid=" << tid << ", flush_tid=" << flush_tid << dendl;
  return flush_tid;
}
// Mark the IO identified by 'tid' as finished and complete every pending
// flush context whose flush tid precedes the oldest still-in-flight flush
// tid.
template <typename I>
void FlushTracker<I>::finish_io(uint64_t tid) {
  auto cct = m_image_ctx->cct;

  std::unique_lock locker{m_lock};
  auto tid_to_flush_tid_it = m_tid_to_flush_tid.find(tid);
  if (tid_to_flush_tid_it == m_tid_to_flush_tid.end()) {
    // unknown tid (e.g. start_io() was never invoked for it) -- nothing to do
    return;
  }

  auto flush_tid = tid_to_flush_tid_it->second;
  m_tid_to_flush_tid.erase(tid_to_flush_tid_it);
  m_in_flight_flush_tids.erase(flush_tid);

  ldout(cct, 20) << "tid=" << tid << ", flush_tid=" << flush_tid << dendl;

  // if nothing remains in flight, every pending flush can fire
  auto oldest_flush_tid = std::numeric_limits<uint64_t>::max();
  if (!m_in_flight_flush_tids.empty()) {
    oldest_flush_tid = *m_in_flight_flush_tids.begin();
  }

  // all flushes tagged before the oldest tid should be completed
  Contexts flush_ctxs;
  auto flush_contexts_it = m_flush_contexts.begin();
  while (flush_contexts_it != m_flush_contexts.end()) {
    if (flush_contexts_it->first >= oldest_flush_tid) {
      ldout(cct, 20) << "pending IOs: [" << m_in_flight_flush_tids << "], "
                     << "pending flushes=" << m_flush_contexts << dendl;
      break;
    }

    auto& ctxs = flush_contexts_it->second;
    flush_ctxs.insert(flush_ctxs.end(), ctxs.begin(), ctxs.end());
    flush_contexts_it = m_flush_contexts.erase(flush_contexts_it);
  }
  locker.unlock();

  // callbacks fire only after the lock has been dropped
  if (!flush_ctxs.empty()) {
    ldout(cct, 20) << "completing flushes: " << flush_ctxs << dendl;
    for (auto ctx : flush_ctxs) {
      ctx->complete(0);
    }
  }
}
// Register 'on_finish' to fire once every currently in-flight IO has
// finished; fires immediately if nothing is in flight.
template <typename I>
void FlushTracker<I>::flush(Context* on_finish) {
  auto cct = m_image_ctx->cct;

  std::unique_lock locker{m_lock};
  if (m_in_flight_flush_tids.empty()) {
    locker.unlock();
    on_finish->complete(0);
    return;
  }

  // attach the context to the most recent in-flight flush tid; finish_io()
  // completes it once all tids up to (and including) that one are done
  auto flush_tid = *m_in_flight_flush_tids.rbegin();
  m_flush_contexts[flush_tid].push_back(on_finish);
  ldout(cct, 20) << "flush_tid=" << flush_tid << ", ctx=" << on_finish << ", "
                 << "flush_contexts=" << m_flush_contexts << dendl;
}
} // namespace io
} // namespace librbd
template class librbd::io::FlushTracker<librbd::ImageCtx>;
| 3,639 | 27.661417 | 78 | cc |
null | ceph-main/src/librbd/io/FlushTracker.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_FLUSH_TRACKER_H
#define CEPH_LIBRBD_IO_FLUSH_TRACKER_H
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include <atomic>
#include <list>
#include <map>
#include <set>
#include <unordered_map>
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
// Tracks in-flight IOs so that flush requests can be completed once every
// IO started before the flush has finished (see FlushTracker.cc).
template <typename ImageCtxT>
class FlushTracker {
public:
  FlushTracker(ImageCtxT* image_ctx);
  ~FlushTracker();

  // Completes all pending flush contexts unconditionally.
  void shut_down();

  // Tags an IO with a monotonically increasing flush tid; returns that tid.
  uint64_t start_io(uint64_t tid);
  // Marks the IO finished and fires any flushes that are now satisfied.
  void finish_io(uint64_t tid);

  // Invokes 'on_finish' once all currently in-flight IOs have finished.
  void flush(Context* on_finish);

private:
  typedef std::list<Context*> Contexts;
  typedef std::map<uint64_t, Contexts> FlushContexts;
  typedef std::set<uint64_t> Tids;
  typedef std::unordered_map<uint64_t, uint64_t> TidToFlushTid;

  ImageCtxT* m_image_ctx;

  // 64-bit to match the flush tids stored in the maps below -- a 32-bit
  // counter would silently wrap after 2^32 IOs and break the monotonic
  // ordering that finish_io()/flush() rely upon
  std::atomic<uint64_t> m_next_flush_tid{0};

  mutable ceph::shared_mutex m_lock;
  TidToFlushTid m_tid_to_flush_tid;       // client tid -> flush tid

  Tids m_in_flight_flush_tids;            // ordered set of active flush tids
  FlushContexts m_flush_contexts;         // flush tid -> waiting contexts
};
} // namespace io
} // namespace librbd
extern template class librbd::io::FlushTracker<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_FLUSH_TRACKER_H
| 1,234 | 18.919355 | 70 | h |
null | ceph-main/src/librbd/io/ImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ImageDispatch.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageRequest.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ImageDispatch: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace io {
namespace {
// Ensure the completion's async operation has been started exactly once.
void start_in_flight_io(AioCompletion* aio_comp) {
  // TODO remove AsyncOperation from AioCompletion
  if (aio_comp->async_op.started()) {
    return;
  }
  aio_comp->start_op();
}
// Derive the target image area from the per-request dispatch flags.
ImageArea get_area(const std::atomic<uint32_t>* image_dispatch_flags) {
  if ((*image_dispatch_flags & IMAGE_DISPATCH_FLAG_CRYPTO_HEADER) != 0) {
    return ImageArea::CRYPTO_HEADER;
  }
  return ImageArea::DATA;
}
} // anonymous namespace
// The core layer owns no resources of its own -- nothing to tear down.
template <typename I>
void ImageDispatch<I>::shut_down(Context* on_finish) {
  on_finish->complete(0);
}
// Terminal read handler: forwards the request to ImageRequest and records
// DISPATCH_RESULT_COMPLETE so no further layers are consulted.
template <typename I>
bool ImageDispatch<I>::read(
    AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result,
    IOContext io_context, int op_flags, int read_flags,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_read(m_image_ctx, aio_comp, std::move(image_extents),
                            area, std::move(read_result), io_context, op_flags,
                            read_flags, parent_trace);
  return true;
}
// Terminal write handler: forwards the request to ImageRequest and records
// DISPATCH_RESULT_COMPLETE.
template <typename I>
bool ImageDispatch<I>::write(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_write(m_image_ctx, aio_comp, std::move(image_extents),
                             area, std::move(bl), op_flags, parent_trace);
  return true;
}
// Terminal discard handler: forwards the request to ImageRequest and records
// DISPATCH_RESULT_COMPLETE.
template <typename I>
bool ImageDispatch<I>::discard(
    AioCompletion* aio_comp, Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_discard(m_image_ctx, aio_comp, std::move(image_extents),
                               area, discard_granularity_bytes, parent_trace);
  return true;
}
// Terminal write-same handler: forwards the request to ImageRequest and
// records DISPATCH_RESULT_COMPLETE.
template <typename I>
bool ImageDispatch<I>::write_same(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_writesame(m_image_ctx, aio_comp,
                                 std::move(image_extents), area, std::move(bl),
                                 op_flags, parent_trace);
  return true;
}
// Terminal compare-and-write handler: forwards the request to ImageRequest
// and records DISPATCH_RESULT_COMPLETE.
template <typename I>
bool ImageDispatch<I>::compare_and_write(
    AioCompletion* aio_comp, Extents &&image_extents,
    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_compare_and_write(m_image_ctx, aio_comp,
                                         std::move(image_extents), area,
                                         std::move(cmp_bl), std::move(bl),
                                         mismatch_offset, op_flags,
                                         parent_trace);
  return true;
}
// Terminal flush handler: forwards the request to ImageRequest and records
// DISPATCH_RESULT_COMPLETE.
template <typename I>
bool ImageDispatch<I>::flush(
    AioCompletion* aio_comp, FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageRequest<I>::aio_flush(m_image_ctx, aio_comp, flush_source, parent_trace);
  return true;
}
// Terminal snapshot-listing handler: runs an ImageListSnapsRequest and
// records DISPATCH_RESULT_COMPLETE.
template <typename I>
bool ImageDispatch<I>::list_snaps(
    AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
    int list_snaps_flags, SnapshotDelta* snapshot_delta,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto area = get_area(image_dispatch_flags);
  ldout(cct, 20) << "image_extents=" << image_extents
                 << " area=" << area << dendl;

  start_in_flight_io(aio_comp);

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  ImageListSnapsRequest<I> req(*m_image_ctx, aio_comp, std::move(image_extents),
                               area, std::move(snap_ids), list_snaps_flags,
                               snapshot_delta, parent_trace);
  req.send();
  return true;
}
// Delegates cache invalidation to the object dispatcher (under owner_lock).
template <typename I>
bool ImageDispatch<I>::invalidate_cache(Context* on_finish) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;

  std::shared_lock owner_lock{m_image_ctx->owner_lock};
  m_image_ctx->io_object_dispatcher->invalidate_cache(on_finish);
  return true;
}
} // namespace io
} // namespace librbd
template class librbd::io::ImageDispatch<librbd::ImageCtx>;
| 7,057 | 34.114428 | 80 | cc |
null | ceph-main/src/librbd/io/ImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCH_H
#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
template <typename ImageCtxT>
// Core (terminal) image dispatch layer: converts dispatched image IO into
// ImageRequest operations (see ImageDispatch.cc).
class ImageDispatch : public ImageDispatchInterface {
public:
  ImageDispatch(ImageCtxT* image_ctx) : m_image_ctx(image_ctx) {
  }

  ImageDispatchLayer get_dispatch_layer() const override {
    return IMAGE_DISPATCH_LAYER_CORE;
  }

  void shut_down(Context* on_finish) override;

  bool read(
      AioCompletion* aio_comp, Extents &&image_extents,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      AioCompletion* aio_comp, Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      AioCompletion* aio_comp, Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      AioCompletion* aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool list_snaps(
      AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool invalidate_cache(Context* on_finish) override;

private:
  ImageCtxT* m_image_ctx;
};
} // namespace io
} // namespace librbd
extern template class librbd::io::ImageDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCH_H
| 3,345 | 33.854167 | 77 | h |
null | ceph-main/src/librbd/io/ImageDispatchInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCH_INTERFACE_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCH_INTERFACE_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include <atomic>
struct Context;
namespace librbd {
namespace io {
struct AioCompletion;
struct ImageDispatchSpec;
// Interface implemented by every image-level dispatch layer.  Each handler
// returns true if the layer claimed the request (the dispatcher stops
// iterating) and reports how to proceed via '*dispatch_result'.
struct ImageDispatchInterface {
  typedef ImageDispatchLayer DispatchLayer;
  typedef ImageDispatchSpec DispatchSpec;

  virtual ~ImageDispatchInterface() {
  }

  // Position of this layer within the dispatch stack.
  virtual ImageDispatchLayer get_dispatch_layer() const = 0;

  virtual void shut_down(Context* on_finish) = 0;

  virtual bool read(
      AioCompletion* aio_comp, Extents &&image_extents,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool write(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool discard(
      AioCompletion* aio_comp, Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool write_same(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool compare_and_write(
      AioCompletion* aio_comp, Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool flush(
      AioCompletion* aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool list_snaps(
      AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;

  virtual bool invalidate_cache(Context* on_finish) = 0;
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCH_INTERFACE_H
| 3,262 | 36.079545 | 77 | h |
null | ceph-main/src/librbd/io/ImageDispatchSpec.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/ImageCtx.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include <boost/variant.hpp>
namespace librbd {
namespace io {
// Invoked when a dispatch layer finishes with this request: depending on
// the recorded dispatch_result, restart the pipeline one layer earlier,
// continue to the next layer, or finalize the request.
void ImageDispatchSpec::C_Dispatcher::complete(int r) {
  switch (image_dispatch_spec->dispatch_result) {
  case DISPATCH_RESULT_RESTART:
    // step back one layer, then share the continue logic below
    ceph_assert(image_dispatch_spec->dispatch_layer != 0);
    image_dispatch_spec->dispatch_layer = static_cast<ImageDispatchLayer>(
      image_dispatch_spec->dispatch_layer - 1);
    [[fallthrough]];
  case DISPATCH_RESULT_CONTINUE:
    if (r < 0) {
      // bubble dispatch failure through AioCompletion
      image_dispatch_spec->dispatch_result = DISPATCH_RESULT_COMPLETE;
      image_dispatch_spec->fail(r);
      return;
    }

    image_dispatch_spec->send();
    break;
  case DISPATCH_RESULT_COMPLETE:
    finish(r);
    break;
  case DISPATCH_RESULT_INVALID:
    ceph_abort();
    break;
  }
}
// Dispatching is done: release the spec (its destructor drops the
// AioCompletion reference taken at construction).
void ImageDispatchSpec::C_Dispatcher::finish(int r) {
  delete image_dispatch_spec;
}
// Hand this spec to the image dispatcher pipeline.
void ImageDispatchSpec::send() {
  image_dispatcher->send(this);
}
// Terminate the request with error 'r', marking the dispatch chain complete
// so no further layers are consulted.
void ImageDispatchSpec::fail(int r) {
  dispatch_result = DISPATCH_RESULT_COMPLETE;
  aio_comp->fail(r);
}
} // namespace io
} // namespace librbd
| 1,403 | 24.527273 | 74 | cc |
null | ceph-main/src/librbd/io/ImageDispatchSpec.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCH_SPEC_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCH_SPEC_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/Context.h"
#include "common/zipkin_trace.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/Types.h"
#include "librbd/io/ReadResult.h"
#include <boost/variant/variant.hpp>
#include <atomic>
namespace librbd {
class ImageCtx;
namespace io {
struct ImageDispatcherInterface;
class ImageDispatchSpec {
private:
// helper to avoid extra heap allocation per object IO
struct C_Dispatcher : public Context {
ImageDispatchSpec* image_dispatch_spec;
C_Dispatcher(ImageDispatchSpec* image_dispatch_spec)
: image_dispatch_spec(image_dispatch_spec) {
}
void complete(int r) override;
void finish(int r) override;
};
public:
struct Read {
ReadResult read_result;
int read_flags;
Read(ReadResult &&read_result, int read_flags)
: read_result(std::move(read_result)), read_flags(read_flags) {
}
};
struct Discard {
uint32_t discard_granularity_bytes;
Discard(uint32_t discard_granularity_bytes)
: discard_granularity_bytes(discard_granularity_bytes) {
}
};
struct Write {
bufferlist bl;
Write(bufferlist&& bl) : bl(std::move(bl)) {
}
};
struct WriteSame {
bufferlist bl;
WriteSame(bufferlist&& bl) : bl(std::move(bl)) {
}
};
struct CompareAndWrite {
bufferlist cmp_bl;
bufferlist bl;
uint64_t *mismatch_offset;
CompareAndWrite(bufferlist&& cmp_bl, bufferlist&& bl,
uint64_t *mismatch_offset)
: cmp_bl(std::move(cmp_bl)), bl(std::move(bl)),
mismatch_offset(mismatch_offset) {
}
};
struct Flush {
FlushSource flush_source;
Flush(FlushSource flush_source) : flush_source(flush_source) {
}
};
struct ListSnaps {
SnapIds snap_ids;
int list_snaps_flags;
SnapshotDelta* snapshot_delta;
ListSnaps(SnapIds&& snap_ids, int list_snaps_flags,
SnapshotDelta* snapshot_delta)
: snap_ids(std::move(snap_ids)), list_snaps_flags(list_snaps_flags),
snapshot_delta(snapshot_delta) {
}
};
typedef boost::variant<Read,
Discard,
Write,
WriteSame,
CompareAndWrite,
Flush,
ListSnaps> Request;
C_Dispatcher dispatcher_ctx;
ImageDispatcherInterface* image_dispatcher;
ImageDispatchLayer dispatch_layer;
std::atomic<uint32_t> image_dispatch_flags = 0;
DispatchResult dispatch_result = DISPATCH_RESULT_INVALID;
AioCompletion* aio_comp;
Extents image_extents;
Request request;
IOContext io_context;
int op_flags;
ZTracer::Trace parent_trace;
uint64_t tid = 0;
template <typename ImageCtxT = ImageCtx>
static ImageDispatchSpec* create_read(
ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
ReadResult &&read_result, IOContext io_context, int op_flags,
int read_flags, const ZTracer::Trace &parent_trace) {
return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
image_dispatch_layer, aio_comp,
std::move(image_extents), area,
Read{std::move(read_result), read_flags},
io_context, op_flags, parent_trace);
}
template <typename ImageCtxT = ImageCtx>
static ImageDispatchSpec* create_discard(
ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace) {
return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
image_dispatch_layer, aio_comp,
std::move(image_extents), area,
Discard{discard_granularity_bytes},
{}, 0, parent_trace);
}
template <typename ImageCtxT = ImageCtx>
static ImageDispatchSpec* create_write(
ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace) {
return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
image_dispatch_layer, aio_comp,
std::move(image_extents), area,
Write{std::move(bl)},
{}, op_flags, parent_trace);
}
template <typename ImageCtxT = ImageCtx>
static ImageDispatchSpec* create_write_same(
ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace) {
return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
image_dispatch_layer, aio_comp,
std::move(image_extents), area,
WriteSame{std::move(bl)},
{}, op_flags, parent_trace);
}
template <typename ImageCtxT = ImageCtx>
static ImageDispatchSpec* create_compare_and_write(
ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
int op_flags, const ZTracer::Trace &parent_trace) {
return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
image_dispatch_layer, aio_comp,
std::move(image_extents), area,
CompareAndWrite{std::move(cmp_bl),
std::move(bl),
mismatch_offset},
{}, op_flags, parent_trace);
}
template <typename ImageCtxT = ImageCtx>
static ImageDispatchSpec* create_flush(
ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
AioCompletion *aio_comp, FlushSource flush_source,
const ZTracer::Trace &parent_trace) {
return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
image_dispatch_layer, aio_comp, {},
ImageArea::DATA /* dummy for {} */,
Flush{flush_source}, {}, 0, parent_trace);
}
template <typename ImageCtxT = ImageCtx>
static ImageDispatchSpec* create_list_snaps(
ImageCtxT &image_ctx, ImageDispatchLayer image_dispatch_layer,
AioCompletion *aio_comp, Extents &&image_extents, ImageArea area,
SnapIds&& snap_ids, int list_snaps_flags, SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace) {
return new ImageDispatchSpec(image_ctx.io_image_dispatcher,
image_dispatch_layer, aio_comp,
std::move(image_extents), area,
ListSnaps{std::move(snap_ids),
list_snaps_flags, snapshot_delta},
{}, 0, parent_trace);
}
  ~ImageDispatchSpec() {
    // drop the completion reference taken in the constructor
    aio_comp->put();
  }
void send();
void fail(int r);
private:
struct SendVisitor;
struct IsWriteOpVisitor;
struct TokenRequestedVisitor;
  // Private constructor: links this spec to its AioCompletion and records
  // whether the I/O targets the crypto-header area via the dispatch flags.
  ImageDispatchSpec(ImageDispatcherInterface* image_dispatcher,
                    ImageDispatchLayer image_dispatch_layer,
                    AioCompletion* aio_comp, Extents&& image_extents,
                    ImageArea area, Request&& request, IOContext io_context,
                    int op_flags, const ZTracer::Trace& parent_trace)
    : dispatcher_ctx(this), image_dispatcher(image_dispatcher),
      dispatch_layer(image_dispatch_layer), aio_comp(aio_comp),
      image_extents(std::move(image_extents)), request(std::move(request)),
      io_context(io_context), op_flags(op_flags), parent_trace(parent_trace) {
    // only one dispatch spec may be attached to a completion at a time
    ceph_assert(aio_comp->image_dispatcher_ctx == nullptr);
    aio_comp->image_dispatcher_ctx = &dispatcher_ctx;
    // balanced by the put() in the destructor
    aio_comp->get();
    switch (area) {
    case ImageArea::DATA:
      break;
    case ImageArea::CRYPTO_HEADER:
      image_dispatch_flags |= IMAGE_DISPATCH_FLAG_CRYPTO_HEADER;
      break;
    default:
      ceph_abort();
    }
  }
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCH_SPEC_H
| 8,864 | 33.764706 | 79 | h |
null | ceph-main/src/librbd/io/ImageDispatcher.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ImageDispatcher.h"
#include "include/Context.h"
#include "common/AsyncOpTracker.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/CryptoImageDispatch.h"
#include "librbd/io/ImageDispatch.h"
#include "librbd/io/ImageDispatchInterface.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/QueueImageDispatch.h"
#include "librbd/io/QosImageDispatch.h"
#include "librbd/io/RefreshImageDispatch.h"
#include "librbd/io/Utils.h"
#include "librbd/io/WriteBlockImageDispatch.h"
#include <boost/variant.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ImageDispatcher: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
// Translates the ImageDispatchSpec request variant into the matching call
// on a single dispatch layer and returns that layer's result. Note that the
// spec's extents and buffers are std::move()d into the layer call, so the
// visitor must be applied at most once per dispatch attempt.
template <typename I>
struct ImageDispatcher<I>::SendVisitor : public boost::static_visitor<bool> {
  ImageDispatchInterface* image_dispatch;
  ImageDispatchSpec* image_dispatch_spec;
  SendVisitor(ImageDispatchInterface* image_dispatch,
              ImageDispatchSpec* image_dispatch_spec)
    : image_dispatch(image_dispatch),
      image_dispatch_spec(image_dispatch_spec) {
  }
  bool operator()(ImageDispatchSpec::Read& read) const {
    return image_dispatch->read(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      std::move(read.read_result), image_dispatch_spec->io_context,
      image_dispatch_spec->op_flags, read.read_flags,
      image_dispatch_spec->parent_trace, image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }
  bool operator()(ImageDispatchSpec::Discard& discard) const {
    return image_dispatch->discard(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      discard.discard_granularity_bytes, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid, &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }
  bool operator()(ImageDispatchSpec::Write& write) const {
    return image_dispatch->write(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents), std::move(write.bl),
      image_dispatch_spec->op_flags, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid, &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }
  bool operator()(ImageDispatchSpec::WriteSame& write_same) const {
    return image_dispatch->write_same(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents), std::move(write_same.bl),
      image_dispatch_spec->op_flags, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid, &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }
  bool operator()(
      ImageDispatchSpec::CompareAndWrite& compare_and_write) const {
    return image_dispatch->compare_and_write(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      std::move(compare_and_write.cmp_bl), std::move(compare_and_write.bl),
      compare_and_write.mismatch_offset,
      image_dispatch_spec->op_flags, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid, &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }
  bool operator()(ImageDispatchSpec::Flush& flush) const {
    return image_dispatch->flush(
      image_dispatch_spec->aio_comp, flush.flush_source,
      image_dispatch_spec->parent_trace, image_dispatch_spec->tid,
      &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }
  bool operator()(ImageDispatchSpec::ListSnaps& list_snaps) const {
    return image_dispatch->list_snaps(
      image_dispatch_spec->aio_comp,
      std::move(image_dispatch_spec->image_extents),
      std::move(list_snaps.snap_ids), list_snaps.list_snaps_flags,
      list_snaps.snapshot_delta, image_dispatch_spec->parent_trace,
      image_dispatch_spec->tid, &image_dispatch_spec->image_dispatch_flags,
      &image_dispatch_spec->dispatch_result,
      &image_dispatch_spec->aio_comp->image_dispatcher_ctx,
      &image_dispatch_spec->dispatcher_ctx);
  }
};
// One-time, request-type-specific preprocessing applied before the first
// dispatch of a spec. Each operator() returns true when the spec has been
// completed (failed) here and must not be dispatched any further.
template <typename I>
struct ImageDispatcher<I>::PreprocessVisitor
  : public boost::static_visitor<bool> {
  ImageDispatcher<I>* image_dispatcher;
  ImageDispatchSpec* image_dispatch_spec;
  PreprocessVisitor(ImageDispatcher<I>* image_dispatcher,
                    ImageDispatchSpec* image_dispatch_spec)
    : image_dispatcher(image_dispatcher),
      image_dispatch_spec(image_dispatch_spec) {
  }
  // Clip the spec's extents to the bounds of the targeted image area;
  // fails the spec (and returns true) if the range is invalid.
  bool clip_request() const {
    auto area = (image_dispatch_spec->image_dispatch_flags &
                  IMAGE_DISPATCH_FLAG_CRYPTO_HEADER ? ImageArea::CRYPTO_HEADER :
                                                      ImageArea::DATA);
    int r = util::clip_request(image_dispatcher->m_image_ctx,
                               &image_dispatch_spec->image_extents, area);
    if (r < 0) {
      image_dispatch_spec->fail(r);
      return true;
    }
    return false;
  }
  bool operator()(ImageDispatchSpec::Read& read) const {
    // reads may explicitly opt out of extent clipping
    if ((read.read_flags & READ_FLAG_DISABLE_CLIPPING) != 0) {
      return false;
    }
    return clip_request();
  }
  bool operator()(ImageDispatchSpec::Flush&) const {
    return clip_request();
  }
  bool operator()(ImageDispatchSpec::ListSnaps&) const {
    return false;
  }
  // all remaining (write-type) request variants
  template <typename T>
  bool operator()(T&) const {
    if (clip_request()) {
      return true;
    }
    // modifications are refused on snapshots and read-only images
    std::shared_lock image_locker{image_dispatcher->m_image_ctx->image_lock};
    if (image_dispatcher->m_image_ctx->snap_id != CEPH_NOSNAP ||
        image_dispatcher->m_image_ctx->read_only) {
      image_dispatch_spec->fail(-EROFS);
      return true;
    }
    return false;
  }
};
// Registers the built-in dispatch layers. The QoS and write-block layers
// are additionally retained in members so their control APIs can be driven
// directly by the pass-through methods below.
template <typename I>
ImageDispatcher<I>::ImageDispatcher(I* image_ctx)
  : Dispatcher<I, ImageDispatcherInterface>(image_ctx) {
  // configure the core image dispatch handler on startup
  auto image_dispatch = new ImageDispatch(image_ctx);
  this->register_dispatch(image_dispatch);
  auto queue_image_dispatch = new QueueImageDispatch(image_ctx);
  this->register_dispatch(queue_image_dispatch);
  m_qos_image_dispatch = new QosImageDispatch<I>(image_ctx);
  this->register_dispatch(m_qos_image_dispatch);
  auto refresh_image_dispatch = new RefreshImageDispatch(image_ctx);
  this->register_dispatch(refresh_image_dispatch);
  m_write_block_dispatch = new WriteBlockImageDispatch<I>(image_ctx);
  this->register_dispatch(m_write_block_dispatch);
}
template <typename I>
void ImageDispatcher<I>::invalidate_cache(Context* on_finish) {
  // Start the Dispatcher-provided per-layer cache invalidation sequence
  // from the lowest layer; on_finish is completed when it finishes.
  ldout(this->m_image_ctx->cct, 5) << dendl;
  Context* invalidate_ctx = new C_InvalidateCache(
    this, IMAGE_DISPATCH_LAYER_NONE, on_finish);
  invalidate_ctx->complete(0);
}
template <typename I>
void ImageDispatcher<I>::shut_down(Context* on_finish) {
  // TODO ensure all IOs are executed via a dispatcher
  // ensure read-ahead / copy-on-read ops are finished since they are
  // currently outside dispatcher tracking
  auto async_op = new AsyncOperation();
  // innermost callback -- runs last: release the tracking op and notify the
  // original caller (any flush error is intentionally ignored)
  on_finish = new LambdaContext([async_op, on_finish](int r) {
      async_op->finish_op();
      delete async_op;
      on_finish->complete(0);
    });
  // once all in-flight async ops have drained, shut down the layers and
  // complete the callback chained above
  on_finish = new LambdaContext([this, on_finish](int r) {
      Dispatcher<I, ImageDispatcherInterface>::shut_down(on_finish);
    });
  async_op->start_op(*this->m_image_ctx);
  async_op->flush(on_finish);
}
// Delegated to the QoS dispatch layer.
template <typename I>
void ImageDispatcher<I>::apply_qos_schedule_tick_min(uint64_t tick) {
  auto* qos_dispatch = m_qos_image_dispatch;
  qos_dispatch->apply_qos_schedule_tick_min(tick);
}
// Delegated to the QoS dispatch layer.
template <typename I>
void ImageDispatcher<I>::apply_qos_limit(uint64_t flag, uint64_t limit,
                                         uint64_t burst, uint64_t burst_seconds) {
  auto* qos_dispatch = m_qos_image_dispatch;
  qos_dispatch->apply_qos_limit(flag, limit, burst, burst_seconds);
}
// Delegated to the QoS dispatch layer.
template <typename I>
void ImageDispatcher<I>::apply_qos_exclude_ops(uint64_t exclude_ops) {
  auto* qos_dispatch = m_qos_image_dispatch;
  qos_dispatch->apply_qos_exclude_ops(exclude_ops);
}
// Delegated to the write-block dispatch layer, which owns the state.
template <typename I>
bool ImageDispatcher<I>::writes_blocked() const {
  auto* write_blocker = m_write_block_dispatch;
  return write_blocker->writes_blocked();
}
// Delegated to the write-block dispatch layer.
template <typename I>
int ImageDispatcher<I>::block_writes() {
  auto* write_blocker = m_write_block_dispatch;
  return write_blocker->block_writes();
}
// Asynchronous variant: delegated to the write-block dispatch layer.
template <typename I>
void ImageDispatcher<I>::block_writes(Context *on_blocked) {
  auto* write_blocker = m_write_block_dispatch;
  write_blocker->block_writes(on_blocked);
}
// Delegated to the write-block dispatch layer.
template <typename I>
void ImageDispatcher<I>::unblock_writes() {
  auto* write_blocker = m_write_block_dispatch;
  write_blocker->unblock_writes();
}
// Delegated to the write-block dispatch layer.
template <typename I>
void ImageDispatcher<I>::wait_on_writes_unblocked(Context *on_unblocked) {
  auto* write_blocker = m_write_block_dispatch;
  write_blocker->wait_on_writes_unblocked(on_unblocked);
}
template <typename I>
void ImageDispatcher<I>::remap_to_physical(Extents& image_extents,
                                           ImageArea area) {
  // With no crypto layer registered there is nothing to remap; in that
  // case only the flat data area is a valid target.
  std::shared_lock locker{this->m_lock};
  auto pos = this->m_dispatches.find(IMAGE_DISPATCH_LAYER_CRYPTO);
  if (pos == this->m_dispatches.end()) {
    ceph_assert(area == ImageArea::DATA);
    return;
  }
  static_cast<crypto::CryptoImageDispatch*>(pos->second.dispatch)
      ->remap_to_physical(image_extents, area);
}
template <typename I>
ImageArea ImageDispatcher<I>::remap_to_logical(Extents& image_extents) {
  // Without a crypto layer every physical extent already belongs to the
  // logical data area.
  std::shared_lock locker{this->m_lock};
  auto pos = this->m_dispatches.find(IMAGE_DISPATCH_LAYER_CRYPTO);
  if (pos == this->m_dispatches.end()) {
    return ImageArea::DATA;
  }
  return static_cast<crypto::CryptoImageDispatch*>(pos->second.dispatch)
      ->remap_to_logical(image_extents);
}
template <typename I>
bool ImageDispatcher<I>::send_dispatch(
    ImageDispatchInterface* image_dispatch,
    ImageDispatchSpec* image_dispatch_spec) {
  // tid == 0 marks a spec not seen before: assign a unique id and run the
  // one-time preprocessing (extent clipping, read-only checks)
  if (image_dispatch_spec->tid == 0) {
    image_dispatch_spec->tid = ++m_next_tid;
    bool finished = preprocess(image_dispatch_spec);
    if (finished) {
      // preprocessing already completed (failed) the spec
      return true;
    }
  }
  // forward the request to the layer via the type-specific visitor
  return boost::apply_visitor(
    SendVisitor{image_dispatch, image_dispatch_spec},
    image_dispatch_spec->request);
}
template <typename I>
bool ImageDispatcher<I>::preprocess(
    ImageDispatchSpec* image_dispatch_spec) {
  // run the request-type-specific preprocessing; returns true if the spec
  // was completed (failed) and must not be dispatched further
  PreprocessVisitor preprocess_visitor{this, image_dispatch_spec};
  return boost::apply_visitor(preprocess_visitor,
                              image_dispatch_spec->request);
}
} // namespace io
} // namespace librbd
template class librbd::io::ImageDispatcher<librbd::ImageCtx>;
| 11,280 | 33.710769 | 82 | cc |
null | ceph-main/src/librbd/io/ImageDispatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCHER_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCHER_H
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include "librbd/io/Dispatcher.h"
#include "librbd/io/ImageDispatchInterface.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/Types.h"
#include <atomic>
#include <map>
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
template <typename> struct QosImageDispatch;
template <typename> struct WriteBlockImageDispatch;
// Image-level I/O dispatcher: owns the chain of per-image dispatch layers
// (core, queueing, QoS, refresh, write-blocking, optional crypto) and
// routes each ImageDispatchSpec through them.
template <typename ImageCtxT = ImageCtx>
class ImageDispatcher : public Dispatcher<ImageCtxT, ImageDispatcherInterface> {
public:
  ImageDispatcher(ImageCtxT* image_ctx);
  void invalidate_cache(Context* on_finish) override;
  void shut_down(Context* on_finish) override;
  // QoS controls -- forwarded to the QoS dispatch layer
  void apply_qos_schedule_tick_min(uint64_t tick) override;
  void apply_qos_limit(uint64_t flag, uint64_t limit, uint64_t burst,
                       uint64_t burst_seconds) override;
  void apply_qos_exclude_ops(uint64_t exclude_ops) override;
  // write-blocking controls -- forwarded to the write-block dispatch layer
  bool writes_blocked() const override;
  int block_writes() override;
  void block_writes(Context *on_blocked) override;
  void unblock_writes() override;
  void wait_on_writes_unblocked(Context *on_unblocked) override;
  // crypto-aware extent remapping (pass-through when no crypto layer)
  void remap_to_physical(Extents& image_extents, ImageArea area) override;
  ImageArea remap_to_logical(Extents& image_extents) override;
protected:
  bool send_dispatch(
    ImageDispatchInterface* image_dispatch,
    ImageDispatchSpec* image_dispatch_spec) override;
private:
  struct SendVisitor;
  struct PreprocessVisitor;
  using typename Dispatcher<ImageCtxT, ImageDispatcherInterface>::C_InvalidateCache;
  // monotonically increasing id handed to each spec on first dispatch
  std::atomic<uint64_t> m_next_tid{0};
  // retained layer pointers backing the control pass-throughs above
  QosImageDispatch<ImageCtxT>* m_qos_image_dispatch = nullptr;
  WriteBlockImageDispatch<ImageCtxT>* m_write_block_dispatch = nullptr;
  bool preprocess(ImageDispatchSpec* image_dispatch_spec);
};
} // namespace io
} // namespace librbd
extern template class librbd::io::ImageDispatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCHER_H
| 2,206 | 27.294872 | 84 | h |
null | ceph-main/src/librbd/io/ImageDispatcherInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_IMAGE_DISPATCHER_INTERFACE_H
#define CEPH_LIBRBD_IO_IMAGE_DISPATCHER_INTERFACE_H
#include "include/int_types.h"
#include "librbd/io/DispatcherInterface.h"
#include "librbd/io/ImageDispatchInterface.h"
#include "librbd/io/Types.h"
struct Context;
namespace librbd {
namespace io {
// Pure-virtual extension of the generic dispatcher interface adding
// image-level controls: QoS tuning, write blocking, cache invalidation
// and crypto-aware extent remapping.
struct ImageDispatcherInterface
  : public DispatcherInterface<ImageDispatchInterface> {
public:
  virtual void apply_qos_schedule_tick_min(uint64_t tick) = 0;
  virtual void apply_qos_limit(uint64_t flag, uint64_t limit,
                               uint64_t burst, uint64_t burst_seconds) = 0;
  virtual void apply_qos_exclude_ops(uint64_t exclude_ops) = 0;
  virtual bool writes_blocked() const = 0;
  virtual int block_writes() = 0;
  virtual void block_writes(Context *on_blocked) = 0;
  virtual void unblock_writes() = 0;
  virtual void wait_on_writes_unblocked(Context *on_unblocked) = 0;
  virtual void invalidate_cache(Context* on_finish) = 0;
  virtual void remap_to_physical(Extents& image_extents, ImageArea area) = 0;
  virtual ImageArea remap_to_logical(Extents& image_extents) = 0;
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_IMAGE_DISPATCHER_INTERFACE_H
| 1,308 | 30.166667 | 77 | h |
null | ceph-main/src/librbd/io/ImageRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ImageRequest.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/Journal.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/ObjectDispatchInterface.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/Utils.h"
#include "librbd/journal/Types.h"
#include "include/rados/librados.hpp"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "osdc/Striper.h"
#include <algorithm>
#include <functional>
#include <map>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ImageRequest: " << __func__ << ": "
namespace librbd {
namespace io {
using librbd::util::data_object_name;
using librbd::util::get_image_ctx;
namespace {
// Aggregates per-object SnapshotDeltas (filled in by concurrent per-object
// list-snaps callbacks via get_snapshot_delta()) into a single image-level
// SnapshotDelta once all object requests have completed.
template <typename I>
struct C_AssembleSnapshotDeltas : public C_AioRequest {
  I* image_ctx;
  SnapshotDelta* snapshot_delta;
  // guards object_snapshot_delta, which is written from per-object callbacks
  ceph::mutex lock = ceph::make_mutex(
    "librbd::io::C_AssembleSnapshotDeltas::lock", false);
  std::map<uint64_t, SnapshotDelta> object_snapshot_delta;
  C_AssembleSnapshotDeltas(I* image_ctx, AioCompletion* aio_comp,
                           SnapshotDelta* snapshot_delta)
    : C_AioRequest(aio_comp),
      image_ctx(image_ctx), snapshot_delta(snapshot_delta) {
  }
  // hand out the per-object slot the object request should populate
  SnapshotDelta* get_snapshot_delta(uint64_t object_no) {
    std::unique_lock locker{lock};
    return &object_snapshot_delta[object_no];
  }
  void finish(int r) override {
    auto cct = image_ctx->cct;
    if (r < 0) {
      lderr(cct) << "C_AssembleSnapshotDeltas: list snaps failed: "
                 << cpp_strerror(r) << dendl;
      C_AioRequest::finish(r);
      return;
    }
    // all object callbacks are done; merge their deltas into the caller's
    // image-level result
    std::unique_lock locker{lock};
    *snapshot_delta = {};
    for (auto& [object_no, object_snapshot_delta] : object_snapshot_delta) {
      SnapshotDelta image_snapshot_delta;
      object_to_image_intervals(object_no, object_snapshot_delta,
                                &image_snapshot_delta, snapshot_delta);
      ldout(cct, 20) << "object_no=" << object_no << ", "
                     << "object_snapshot_delta="
                     << object_snapshot_delta << ", "
                     << "image_snapshot_delta=" << image_snapshot_delta
                     << dendl;
    }
    ldout(cct, 20) << "snapshot_delta=" << *snapshot_delta << dendl;
    C_AioRequest::finish(0);
  }
  // Convert one object's delta extents back into image-area extents,
  // recording them both in the per-object image delta (for logging) and in
  // the caller's assembled delta.
  void object_to_image_intervals(
      uint64_t object_no, const SnapshotDelta& object_snapshot_delta,
      SnapshotDelta* image_snapshot_delta,
      SnapshotDelta* assembled_image_snapshot_delta) {
    for (auto& [key, object_extents] : object_snapshot_delta) {
      for (auto& object_extent : object_extents) {
        auto [image_extents, _] = io::util::object_to_area_extents(
          image_ctx, object_no,
          {{object_extent.get_off(), object_extent.get_len()}});
        auto& intervals = (*image_snapshot_delta)[key];
        auto& assembled_intervals = (*assembled_image_snapshot_delta)[key];
        for (auto [image_offset, image_length] : image_extents) {
          SparseExtent sparse_extent{object_extent.get_val().state,
                                     image_length};
          intervals.insert(image_offset, image_length, sparse_extent);
          assembled_intervals.insert(image_offset, image_length,
                                     sparse_extent);
        }
      }
    }
  }
};
// Completion for a single read-ahead object read. The read data itself is
// intentionally discarded; the context only maintains the image's pending
// read-ahead counter (incremented on construction, decremented on finish).
template <typename I>
struct C_RBD_Readahead : public Context {
  I *ictx;
  uint64_t object_no;
  io::ReadExtents extents;
  C_RBD_Readahead(I *ictx, uint64_t object_no, uint64_t offset, uint64_t length)
    : ictx(ictx), object_no(object_no), extents({{offset, length}}) {
    ictx->readahead.inc_pending();
  }
  void finish(int r) override {
    ceph_assert(extents.size() == 1);
    auto& extent = extents.front();
    ldout(ictx->cct, 20) << "C_RBD_Readahead on "
                         << data_object_name(ictx, object_no) << ": "
                         << extent.offset << "~" << extent.length << dendl;
    ictx->readahead.dec_pending();
  }
};
// Issue background read-ahead for the given read extents. Read-ahead stops
// entirely once the cumulative bytes read exceed
// readahead_disable_after_bytes (when that knob is non-zero).
template <typename I>
void readahead(I *ictx, const Extents& image_extents, IOContext io_context) {
  uint64_t total_bytes = 0;
  for (auto& image_extent : image_extents) {
    total_bytes += image_extent.second;
  }
  ictx->image_lock.lock_shared();
  auto total_bytes_read = ictx->total_bytes_read.fetch_add(total_bytes);
  bool abort = (
    ictx->readahead_disable_after_bytes != 0 &&
    total_bytes_read > ictx->readahead_disable_after_bytes);
  if (abort) {
    ictx->image_lock.unlock_shared();
    return;
  }
  uint64_t data_size = ictx->get_area_size(ImageArea::DATA);
  ictx->image_lock.unlock_shared();
  // the Readahead state machine decides how far ahead (if at all) to read
  auto readahead_extent = ictx->readahead.update(image_extents, data_size);
  uint64_t readahead_offset = readahead_extent.first;
  uint64_t readahead_length = readahead_extent.second;
  if (readahead_length > 0) {
    ldout(ictx->cct, 20) << "(readahead logical) " << readahead_offset << "~"
                         << readahead_length << dendl;
    // issue one lowest-layer object read per backing object; results are
    // discarded by C_RBD_Readahead
    LightweightObjectExtents readahead_object_extents;
    io::util::area_to_object_extents(ictx, readahead_offset, readahead_length,
                                     ImageArea::DATA, 0,
                                     &readahead_object_extents);
    for (auto& object_extent : readahead_object_extents) {
      ldout(ictx->cct, 20) << "(readahead) "
                           << data_object_name(ictx,
                                               object_extent.object_no) << " "
                           << object_extent.offset << "~"
                           << object_extent.length << dendl;
      auto req_comp = new C_RBD_Readahead<I>(ictx, object_extent.object_no,
                                             object_extent.offset,
                                             object_extent.length);
      auto req = io::ObjectDispatchSpec::create_read(
        ictx, io::OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no,
        &req_comp->extents, io_context, 0, 0, {}, nullptr, req_comp);
      req->send();
    }
    ictx->perfcounter->inc(l_librbd_readahead);
    ictx->perfcounter->inc(l_librbd_readahead_bytes, readahead_length);
  }
}
// Fire-and-forget RADOS op that refreshes the access or modify timestamp
// stored in the image header object. The embedded AsyncOperation keeps the
// op tracked against the image until the callback fires.
template <typename I>
struct C_UpdateTimestamp : public Context {
public:
  I& m_image_ctx;
  bool m_modify; // if modify set to 'true', modify timestamp is updated,
                 // access timestamp otherwise
  AsyncOperation m_async_op;
  C_UpdateTimestamp(I& ictx, bool m) : m_image_ctx(ictx), m_modify(m) {
    m_async_op.start_op(*get_image_ctx(&m_image_ctx));
  }
  ~C_UpdateTimestamp() override {
    m_async_op.finish_op();
  }
  void send() {
    librados::ObjectWriteOperation op;
    if (m_modify) {
      cls_client::set_modify_timestamp(&op);
    } else {
      cls_client::set_access_timestamp(&op);
    }
    // self-deleting: this context is the rados callback
    auto comp = librbd::util::create_rados_callback(this);
    int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op);
    ceph_assert(r == 0);
    comp->release();
  }
  void finish(int r) override {
    // ignore errors updating timestamp
  }
};
// Returns true when the refresh interval is enabled (non-zero) and at
// least `interval` seconds have elapsed since `timestamp`.
bool should_update_timestamp(const utime_t& now, const utime_t& timestamp,
                             uint64_t interval) {
  if (interval == 0) {
    return false;
  }
  return static_cast<uint64_t>(now.sec()) >= interval + timestamp;
}
} // anonymous namespace
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ImageRequest: " << this \
<< " " << __func__ << ": "
// Construct and issue an asynchronous image read; the result is delivered
// through the supplied AioCompletion.
template <typename I>
void ImageRequest<I>::aio_read(I *ictx, AioCompletion *c,
                               Extents &&image_extents, ImageArea area,
                               ReadResult &&read_result, IOContext io_context,
                               int op_flags, int read_flags,
                               const ZTracer::Trace &parent_trace) {
  ImageReadRequest<I> read_request(*ictx, c, std::move(image_extents), area,
                                   std::move(read_result), io_context,
                                   op_flags, read_flags, parent_trace);
  read_request.send();
}
// Construct and issue an asynchronous image write; the result is delivered
// through the supplied AioCompletion.
template <typename I>
void ImageRequest<I>::aio_write(I *ictx, AioCompletion *c,
                                Extents &&image_extents, ImageArea area,
                                bufferlist &&bl, int op_flags,
                                const ZTracer::Trace &parent_trace) {
  ImageWriteRequest<I> write_request(*ictx, c, std::move(image_extents), area,
                                     std::move(bl), op_flags, parent_trace);
  write_request.send();
}
// Construct and issue an asynchronous image discard; the result is
// delivered through the supplied AioCompletion.
template <typename I>
void ImageRequest<I>::aio_discard(I *ictx, AioCompletion *c,
                                  Extents &&image_extents, ImageArea area,
                                  uint32_t discard_granularity_bytes,
                                  const ZTracer::Trace &parent_trace) {
  ImageDiscardRequest<I> discard_request(*ictx, c, std::move(image_extents),
                                         area, discard_granularity_bytes,
                                         parent_trace);
  discard_request.send();
}
// Construct and issue an asynchronous image flush; the result is delivered
// through the supplied AioCompletion.
template <typename I>
void ImageRequest<I>::aio_flush(I *ictx, AioCompletion *c,
                                FlushSource flush_source,
                                const ZTracer::Trace &parent_trace) {
  ImageFlushRequest<I> flush_request(*ictx, c, flush_source, parent_trace);
  flush_request.send();
}
// Construct and issue an asynchronous write-same; the result is delivered
// through the supplied AioCompletion.
template <typename I>
void ImageRequest<I>::aio_writesame(I *ictx, AioCompletion *c,
                                    Extents &&image_extents, ImageArea area,
                                    bufferlist &&bl, int op_flags,
                                    const ZTracer::Trace &parent_trace) {
  ImageWriteSameRequest<I> write_same_request(*ictx, c,
                                              std::move(image_extents), area,
                                              std::move(bl), op_flags,
                                              parent_trace);
  write_same_request.send();
}
// Construct and issue an asynchronous compare-and-write; the result is
// delivered through the supplied AioCompletion.
template <typename I>
void ImageRequest<I>::aio_compare_and_write(I *ictx, AioCompletion *c,
                                            Extents &&image_extents,
                                            ImageArea area,
                                            bufferlist &&cmp_bl,
                                            bufferlist &&bl,
                                            uint64_t *mismatch_offset,
                                            int op_flags,
                                            const ZTracer::Trace &parent_trace) {
  ImageCompareAndWriteRequest<I> caw_request(*ictx, c,
                                             std::move(image_extents), area,
                                             std::move(cmp_bl), std::move(bl),
                                             mismatch_offset, op_flags,
                                             parent_trace);
  caw_request.send();
}
template <typename I>
void ImageRequest<I>::send() {
  I &image_ctx = this->m_image_ctx;
  // the completion must already be set up for this request type
  ceph_assert(m_aio_comp->is_initialized(get_aio_type()));
  ceph_assert(m_aio_comp->is_started());
  CephContext *cct = image_ctx.cct;
  AioCompletion *aio_comp = this->m_aio_comp;
  ldout(cct, 20) << get_request_type() << ": ictx=" << &image_ctx << ", "
                 << "completion=" << aio_comp << dendl;
  // opportunistically refresh atime/mtime, then run the type-specific path
  update_timestamp();
  send_request();
}
// Refresh the in-memory access (reads) or modify (non-reads) timestamp if
// the configured interval has elapsed, then persist it via a
// fire-and-forget C_UpdateTimestamp op.
template <typename I>
void ImageRequest<I>::update_timestamp() {
  bool modify = (get_aio_type() != AIO_TYPE_READ);
  uint64_t update_interval;
  if (modify) {
    update_interval = m_image_ctx.mtime_update_interval;
  } else {
    update_interval = m_image_ctx.atime_update_interval;
  }
  if (update_interval == 0) {
    // interval of 0 disables timestamp updates entirely
    return;
  }
  // select the matching getter/setter pair via member-function pointers
  utime_t (I::*get_timestamp_fn)() const;
  void (I::*set_timestamp_fn)(utime_t);
  if (modify) {
    get_timestamp_fn = &I::get_modify_timestamp;
    set_timestamp_fn = &I::set_modify_timestamp;
  } else {
    get_timestamp_fn = &I::get_access_timestamp;
    set_timestamp_fn = &I::set_access_timestamp;
  }
  utime_t ts = ceph_clock_now();
  {
    // fast path: cheap check under the shared lock
    std::shared_lock timestamp_locker{m_image_ctx.timestamp_lock};
    if(!should_update_timestamp(ts, std::invoke(get_timestamp_fn, m_image_ctx),
                                update_interval)) {
      return;
    }
  }
  {
    // re-check under the exclusive lock so only one racer performs the update
    std::unique_lock timestamp_locker{m_image_ctx.timestamp_lock};
    bool update = should_update_timestamp(
      ts, std::invoke(get_timestamp_fn, m_image_ctx), update_interval);
    if (!update) {
      return;
    }
    std::invoke(set_timestamp_fn, m_image_ctx, ts);
  }
  // TODO we fire and forget this outside the IO path to prevent
  // potential race conditions with librbd client IO callbacks
  // between different threads (e.g. librados and object cacher)
  ldout(m_image_ctx.cct, 10) << get_request_type() << dendl;
  auto req = new C_UpdateTimestamp<I>(m_image_ctx, modify);
  req->send();
}
template <typename I>
ImageReadRequest<I>::ImageReadRequest(I &image_ctx, AioCompletion *aio_comp,
                                      Extents &&image_extents, ImageArea area,
                                      ReadResult &&read_result,
                                      IOContext io_context, int op_flags,
                                      int read_flags,
                                      const ZTracer::Trace &parent_trace)
  : ImageRequest<I>(image_ctx, aio_comp, std::move(image_extents), area,
                    "read", parent_trace),
    m_io_context(io_context), m_op_flags(op_flags), m_read_flags(read_flags) {
  // the destination ReadResult lives on the completion for the I/O's lifetime
  aio_comp->read_result = std::move(read_result);
}
// Split the image-level read into per-object reads and issue them at the
// lowest object-dispatch layer; the AioCompletion finishes once all object
// reads complete.
template <typename I>
void ImageReadRequest<I>::send_request() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  auto &image_extents = this->m_image_extents;
  // trigger read-ahead only for cached, non-random reads of the data area
  if (this->m_image_area == ImageArea::DATA &&
      image_ctx.cache && image_ctx.readahead_max_bytes > 0 &&
      !(m_op_flags & LIBRADOS_OP_FLAG_FADVISE_RANDOM)) {
    readahead(get_image_ctx(&image_ctx), image_extents, m_io_context);
  }
  // map image extents to object extents
  LightweightObjectExtents object_extents;
  uint64_t buffer_ofs = 0;
  for (auto &extent : image_extents) {
    if (extent.second == 0) {
      continue;
    }
    util::area_to_object_extents(&image_ctx, extent.first, extent.second,
                                 this->m_image_area, buffer_ofs,
                                 &object_extents);
    buffer_ofs += extent.second;
  }
  AioCompletion *aio_comp = this->m_aio_comp;
  aio_comp->read_result.set_image_extents(image_extents);
  // issue the requests
  aio_comp->set_request_count(object_extents.size());
  for (auto &oe : object_extents) {
    ldout(cct, 20) << data_object_name(&image_ctx, oe.object_no) << " "
                   << oe.offset << "~" << oe.length << " from "
                   << oe.buffer_extents << dendl;
    auto req_comp = new io::ReadResult::C_ObjectReadRequest(
      aio_comp, {{oe.offset, oe.length, std::move(oe.buffer_extents)}});
    auto req = ObjectDispatchSpec::create_read(
      &image_ctx, OBJECT_DISPATCH_LAYER_NONE, oe.object_no,
      &req_comp->extents, m_io_context, m_op_flags, m_read_flags,
      this->m_trace, nullptr, req_comp);
    req->send();
  }
  // buffer_ofs now equals the total (non-empty) bytes requested
  image_ctx.perfcounter->inc(l_librbd_rd);
  image_ctx.perfcounter->inc(l_librbd_rd_bytes, buffer_ofs);
}
// Common write-path driver: maps the image extents to object extents,
// lets the concrete request prune them, optionally journals the event and
// then issues the per-object requests.
template <typename I>
void AbstractImageWriteRequest<I>::send_request() {
  I &image_ctx = this->m_image_ctx;
  bool journaling = false;
  AioCompletion *aio_comp = this->m_aio_comp;
  {
    // prevent image size from changing between computing clip and recording
    // pending async operation
    std::shared_lock image_locker{image_ctx.image_lock};
    journaling = (image_ctx.journal != nullptr &&
                  image_ctx.journal->is_journal_appending());
  }
  uint64_t clip_len = 0;
  LightweightObjectExtents object_extents;
  for (auto &extent : this->m_image_extents) {
    if (extent.second == 0) {
      continue;
    }
    // map to object extents
    io::util::area_to_object_extents(&image_ctx, extent.first, extent.second,
                                     this->m_image_area, clip_len,
                                     &object_extents);
    clip_len += extent.second;
  }
  // concrete request may drop/shrink extents (e.g. discard granularity)
  int ret = prune_object_extents(&object_extents);
  if (ret < 0) {
    aio_comp->fail(ret);
    return;
  }
  // reflect changes in object_extents back to m_image_extents
  if (ret == 1) {
    this->m_image_extents.clear();
    for (auto& object_extent : object_extents) {
      auto [image_extents, _] = io::util::object_to_area_extents(
        &image_ctx, object_extent.object_no,
        {{object_extent.offset, object_extent.length}});
      this->m_image_extents.insert(this->m_image_extents.end(),
                                   image_extents.begin(), image_extents.end());
    }
  }
  aio_comp->set_request_count(object_extents.size());
  if (!object_extents.empty()) {
    uint64_t journal_tid = 0;
    if (journaling) {
      // in-flight ops are flushed prior to closing the journal
      ceph_assert(image_ctx.journal != NULL);
      journal_tid = append_journal_event(m_synchronous);
    }
    // it's very important that IOContext is captured here instead of
    // e.g. at the API layer so that an up-to-date snap context is used
    // when owning the exclusive lock
    send_object_requests(object_extents, image_ctx.get_data_io_context(),
                         journal_tid);
  }
  update_stats(clip_len);
}
// Issue one object-level request per mapped extent; each request reports
// back to the shared AioCompletion through its own C_AioRequest context.
template <typename I>
void AbstractImageWriteRequest<I>::send_object_requests(
    const LightweightObjectExtents &object_extents, IOContext io_context,
    uint64_t journal_tid) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  AioCompletion *aio_comp = this->m_aio_comp;
  // a single extent enables a zero-copy fast path in create_object_request()
  const bool single_extent = (object_extents.size() == 1);
  for (auto& object_extent : object_extents) {
    ldout(cct, 20) << data_object_name(&image_ctx, object_extent.object_no)
                   << " " << object_extent.offset << "~"
                   << object_extent.length << " from "
                   << object_extent.buffer_extents << dendl;
    auto object_comp = new C_AioRequest(aio_comp);
    create_object_request(object_extent, io_context, journal_tid,
                          single_extent, object_comp)->send();
  }
}
// Stitch the object-level payload together from the (possibly
// discontiguous) pieces of the user-supplied buffer.
template <typename I>
void ImageWriteRequest<I>::assemble_extent(
    const LightweightObjectExtent &object_extent, bufferlist *bl) {
  for (const auto& [buffer_offset, buffer_length] :
           object_extent.buffer_extents) {
    bufferlist piece;
    piece.substr_of(m_bl, buffer_offset, buffer_length);
    bl->claim_append(piece);
  }
}
// Append one journal write event per image extent; returns the tid of the
// last appended event.
template <typename I>
uint64_t ImageWriteRequest<I>::append_journal_event(bool synchronous) {
  I &image_ctx = this->m_image_ctx;
  uint64_t tid = 0;
  uint64_t buffer_offset = 0;
  ceph_assert(!this->m_image_extents.empty());
  for (auto &extent : this->m_image_extents) {
    // slice the payload that corresponds to this extent
    bufferlist sub_bl;
    sub_bl.substr_of(m_bl, buffer_offset, extent.second);
    buffer_offset += extent.second;
    tid = image_ctx.journal->append_write_event(extent.first, extent.second,
                                                sub_bl, synchronous);
  }
  return tid;
}
// Build the object-level write for one mapped extent. Note: in the
// single-extent fast path the request payload is moved out of m_bl, so
// this optimization can fire at most once per image request.
template <typename I>
ObjectDispatchSpec *ImageWriteRequest<I>::create_object_request(
    const LightweightObjectExtent &object_extent, IOContext io_context,
    uint64_t journal_tid, bool single_extent, Context *on_finish) {
  I &image_ctx = this->m_image_ctx;
  bufferlist bl;
  if (single_extent && object_extent.buffer_extents.size() == 1 &&
      m_bl.length() == object_extent.length) {
    // optimization for single object/buffer extent writes
    bl = std::move(m_bl);
  } else {
    assemble_extent(object_extent, &bl);
  }
  auto req = ObjectDispatchSpec::create_write(
    &image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no,
    object_extent.offset, std::move(bl), io_context, m_op_flags, 0,
    std::nullopt, journal_tid, this->m_trace, on_finish);
  return req;
}
// Account one write op plus the number of bytes written.
template <typename I>
void ImageWriteRequest<I>::update_stats(size_t length) {
  auto perf = this->m_image_ctx.perfcounter;
  perf->inc(l_librbd_wr);
  perf->inc(l_librbd_wr_bytes, length);
}
template <typename I>
uint64_t ImageDiscardRequest<I>::append_journal_event(bool synchronous) {
  I &image_ctx = this->m_image_ctx;
  // Append one AioDiscardEvent per image extent; the configured discard
  // granularity travels with the event.  Returns the tid of the last
  // appended event.
  uint64_t tid = 0;
  ceph_assert(!this->m_image_extents.empty());
  for (auto &extent : this->m_image_extents) {
    journal::EventEntry event_entry(
      journal::AioDiscardEvent(extent.first,
                               extent.second,
                               this->m_discard_granularity_bytes));
    tid = image_ctx.journal->append_io_event(std::move(event_entry),
                                             extent.first, extent.second,
                                             synchronous, 0);
  }
  return tid;
}
template <typename I>
ObjectDispatchSpec *ImageDiscardRequest<I>::create_object_request(
    const LightweightObjectExtent &object_extent, IOContext io_context,
    uint64_t journal_tid, bool single_extent, Context *on_finish) {
  // Build a per-object discard; OBJECT_DISCARD_FLAG_DISABLE_CLONE_REMOVE is
  // always passed through to the object layer.
  return ObjectDispatchSpec::create_discard(
    &this->m_image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no,
    object_extent.offset, object_extent.length, io_context,
    OBJECT_DISCARD_FLAG_DISABLE_CLONE_REMOVE, journal_tid, this->m_trace,
    on_finish);
}
template <typename I>
void ImageDiscardRequest<I>::update_stats(size_t length) {
  // Record one discard op plus the number of bytes discarded.
  auto* counters = this->m_image_ctx.perfcounter;
  counters->inc(l_librbd_discard);
  counters->inc(l_librbd_discard_bytes, length);
}
// Align each per-object discard to the effective discard granularity and
// drop extents that become empty after alignment.  Returns 1 when any
// extent was shrunk or removed (so the caller must rebuild the image
// extents), 0 when nothing changed or granularity is disabled.
template <typename I>
int ImageDiscardRequest<I>::prune_object_extents(
    LightweightObjectExtents* object_extents) const {
  if (m_discard_granularity_bytes == 0) {
    return 0;
  }
  // Align the range to discard_granularity_bytes boundary and skip
  // any discards that are too small to free up any space.
  //
  // discard_granularity_bytes >= object_size && tail truncation
  // is a special case for filestore
  bool prune_required = false;
  bool length_modified = false;
  auto object_size = this->m_image_ctx.layout.object_size;
  // granularity is capped at the object size
  auto discard_granularity_bytes = std::min(m_discard_granularity_bytes,
                                            object_size);
  auto xform_lambda =
    [discard_granularity_bytes, object_size, &prune_required, &length_modified]
    (LightweightObjectExtent& object_extent) {
      auto& offset = object_extent.offset;
      auto& length = object_extent.length;
      auto next_offset = offset + length;
      if ((discard_granularity_bytes < object_size) ||
          (next_offset < object_size)) {
        // shrink the extent inward to the nearest granularity boundaries
        offset = p2roundup<uint64_t>(offset, discard_granularity_bytes);
        next_offset = p2align<uint64_t>(next_offset, discard_granularity_bytes);
        if (offset >= next_offset) {
          // nothing left after alignment -- mark for removal below
          prune_required = true;
          length = 0;
        } else {
          auto new_length = next_offset - offset;
          if (length != new_length) {
            length_modified = true;
            length = new_length;
          }
        }
      }
    };
  std::for_each(object_extents->begin(), object_extents->end(),
                xform_lambda);
  if (prune_required) {
    // one or more object extents were skipped
    auto remove_lambda =
      [](const LightweightObjectExtent& object_extent) {
        return (object_extent.length == 0);
      };
    object_extents->erase(
      std::remove_if(object_extents->begin(), object_extents->end(),
                     remove_lambda),
      object_extents->end());
  }
  // object extents were modified, image extents needs updating
  if (length_modified || prune_required) {
    return 1;
  }
  return 0;
}
template <typename I>
void ImageFlushRequest<I>::send_request() {
  I &image_ctx = this->m_image_ctx;
  // Journaling applies only to user-initiated flushes while the journal is
  // actively appending; the decision is made under image_lock.
  bool journaling = false;
  {
    std::shared_lock image_locker{image_ctx.image_lock};
    journaling = (m_flush_source == FLUSH_SOURCE_USER &&
                  image_ctx.journal != nullptr &&
                  image_ctx.journal->is_journal_appending());
  }
  AioCompletion *aio_comp = this->m_aio_comp;
  aio_comp->set_request_count(1);
  Context *ctx = new C_AioRequest(aio_comp);
  // ensure no locks are held when flush is complete
  ctx = librbd::util::create_async_context_callback(image_ctx, ctx);
  uint64_t journal_tid = 0;
  if (journaling) {
    // in-flight ops are flushed prior to closing the journal
    ceph_assert(image_ctx.journal != NULL);
    journal_tid = image_ctx.journal->append_io_event(
      journal::EventEntry(journal::AioFlushEvent()), 0, 0, false, 0);
    image_ctx.journal->user_flushed();
  }
  auto object_dispatch_spec = ObjectDispatchSpec::create_flush(
    &image_ctx, OBJECT_DISPATCH_LAYER_NONE, m_flush_source, journal_tid,
    this->m_trace, ctx);
  // defer the actual dispatch until the chosen completion path fires
  ctx = new LambdaContext([object_dispatch_spec](int r) {
      object_dispatch_spec->send();
    });
  // ensure all in-flight IOs are settled if non-user flush request;
  // writeback-sourced flushes dispatch immediately
  if (m_flush_source == FLUSH_SOURCE_WRITEBACK) {
    ctx->complete(0);
  } else {
    aio_comp->async_op.flush(ctx);
  }
  // might be flushing during image shutdown
  if (image_ctx.perfcounter != nullptr) {
    image_ctx.perfcounter->inc(l_librbd_flush);
  }
}
template <typename I>
uint64_t ImageWriteSameRequest<I>::append_journal_event(bool synchronous) {
  I &image_ctx = this->m_image_ctx;
  // Append one AioWriteSameEvent per image extent; each event carries the
  // full repeated-data buffer.  Returns the tid of the last appended event.
  uint64_t tid = 0;
  ceph_assert(!this->m_image_extents.empty());
  for (auto &extent : this->m_image_extents) {
    journal::EventEntry event_entry(journal::AioWriteSameEvent(extent.first,
                                                               extent.second,
                                                               m_data_bl));
    tid = image_ctx.journal->append_io_event(std::move(event_entry),
                                             extent.first, extent.second,
                                             synchronous, 0);
  }
  return tid;
}
template <typename I>
ObjectDispatchSpec *ImageWriteSameRequest<I>::create_object_request(
    const LightweightObjectExtent &object_extent, IOContext io_context,
    uint64_t journal_tid, bool single_extent, Context *on_finish) {
  I &image_ctx = this->m_image_ctx;
  bufferlist bl;
  ObjectDispatchSpec *req;
  // When the helper can express this extent as a repeated-pattern payload
  // it returns true and a write-same op is issued; otherwise fall back to
  // a plain write using the buffer the helper assembled.
  if (util::assemble_write_same_extent(object_extent, m_data_bl, &bl, false)) {
    auto buffer_extents{object_extent.buffer_extents};
    req = ObjectDispatchSpec::create_write_same(
      &image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no,
      object_extent.offset, object_extent.length, std::move(buffer_extents),
      std::move(bl), io_context, m_op_flags, journal_tid,
      this->m_trace, on_finish);
    return req;
  }
  req = ObjectDispatchSpec::create_write(
    &image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no,
    object_extent.offset, std::move(bl), io_context, m_op_flags, 0,
    std::nullopt, journal_tid, this->m_trace, on_finish);
  return req;
}
template <typename I>
void ImageWriteSameRequest<I>::update_stats(size_t length) {
  // Record one write-same op plus the number of bytes affected.
  auto* counters = this->m_image_ctx.perfcounter;
  counters->inc(l_librbd_ws);
  counters->inc(l_librbd_ws_bytes, length);
}
template <typename I>
uint64_t ImageCompareAndWriteRequest<I>::append_journal_event(
    bool synchronous) {
  I &image_ctx = this->m_image_ctx;
  // Compare-and-write is restricted to exactly one image extent, so a
  // single journal event covers the whole request.
  uint64_t tid = 0;
  ceph_assert(this->m_image_extents.size() == 1);
  auto &extent = this->m_image_extents.front();
  tid = image_ctx.journal->append_compare_and_write_event(extent.first,
                                                          extent.second,
                                                          m_cmp_bl,
                                                          m_bl,
                                                          synchronous);
  return tid;
}
template <typename I>
void ImageCompareAndWriteRequest<I>::assemble_extent(
    const LightweightObjectExtent &object_extent, bufferlist *bl,
    bufferlist *cmp_bl) {
  // Slice both the write payload and the compare payload with the same
  // buffer-extent map so the two remain position-aligned.
  for (const auto& [buf_off, buf_len] : object_extent.buffer_extents) {
    bufferlist data_chunk;
    data_chunk.substr_of(m_bl, buf_off, buf_len);
    bl->claim_append(data_chunk);
    bufferlist cmp_chunk;
    cmp_chunk.substr_of(m_cmp_bl, buf_off, buf_len);
    cmp_bl->claim_append(cmp_chunk);
  }
}
template <typename I>
ObjectDispatchSpec *ImageCompareAndWriteRequest<I>::create_object_request(
    const LightweightObjectExtent &object_extent, IOContext io_context,
    uint64_t journal_tid, bool single_extent, Context *on_finish) {
  I &image_ctx = this->m_image_ctx;
  // Assemble matching compare/write buffers for the (single) object extent
  // and hand both to the object layer along with the mismatch-offset sink.
  bufferlist bl;
  bufferlist cmp_bl;
  assemble_extent(object_extent, &bl, &cmp_bl);
  auto req = ObjectDispatchSpec::create_compare_and_write(
    &image_ctx, OBJECT_DISPATCH_LAYER_NONE, object_extent.object_no,
    object_extent.offset, std::move(cmp_bl), std::move(bl), io_context,
    m_mismatch_offset, m_op_flags, journal_tid, this->m_trace, on_finish);
  return req;
}
template <typename I>
void ImageCompareAndWriteRequest<I>::update_stats(size_t length) {
  // Record one compare-and-write op plus the number of bytes involved.
  auto* counters = this->m_image_ctx.perfcounter;
  counters->inc(l_librbd_cmp);
  counters->inc(l_librbd_cmp_bytes, length);
}
// Validate rather than prune: a compare-and-write must map to a single
// object extent and must not cross a stripe-unit boundary; otherwise the
// request is rejected with -EINVAL.
template <typename I>
int ImageCompareAndWriteRequest<I>::prune_object_extents(
    LightweightObjectExtents* object_extents) const {
  if (object_extents->size() > 1)
    return -EINVAL;
  I &image_ctx = this->m_image_ctx;
  uint64_t su = image_ctx.layout.stripe_unit;
  auto& object_extent = object_extents->front();
  // reject when the stripe unit is unset or the extent spans a boundary
  if (su == 0 || (object_extent.offset % su + object_extent.length > su))
    return -EINVAL;
  return 0;
}
// Construct a list-snaps request over the given image extents; results are
// written to *snapshot_delta on completion.
template <typename I>
ImageListSnapsRequest<I>::ImageListSnapsRequest(
    I& image_ctx, AioCompletion* aio_comp, Extents&& image_extents,
    ImageArea area, SnapIds&& snap_ids, int list_snaps_flags,
    SnapshotDelta* snapshot_delta, const ZTracer::Trace& parent_trace)
  : ImageRequest<I>(image_ctx, aio_comp, std::move(image_extents), area,
                    "list-snaps", parent_trace),
    m_snap_ids(std::move(snap_ids)), m_list_snaps_flags(list_snaps_flags),
    m_snapshot_delta(snapshot_delta) {
}
template <typename I>
void ImageListSnapsRequest<I>::send_request() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  // map image extents to object extents, grouping extents by object number
  auto &image_extents = this->m_image_extents;
  std::map<uint64_t, Extents> object_number_extents;
  for (auto& image_extent : image_extents) {
    if (image_extent.second == 0) {
      continue;  // zero-length extents contribute nothing
    }
    striper::LightweightObjectExtents object_extents;
    io::util::area_to_object_extents(&image_ctx, image_extent.first,
                                     image_extent.second, this->m_image_area, 0,
                                     &object_extents);
    for (auto& object_extent : object_extents) {
      object_number_extents[object_extent.object_no].emplace_back(
        object_extent.offset, object_extent.length);
    }
  }
  // reassemble the deltas back into image-extents when complete
  auto aio_comp = this->m_aio_comp;
  aio_comp->set_request_count(1);
  auto assemble_ctx = new C_AssembleSnapshotDeltas<I>(
    &image_ctx, aio_comp, m_snapshot_delta);
  // sub-completion fires once per object request; its completion triggers
  // the assembly context above
  auto sub_aio_comp = AioCompletion::create_and_start<
    Context, &Context::complete>(assemble_ctx, get_image_ctx(&image_ctx),
                                 AIO_TYPE_GENERIC);
  // issue the requests, one per object touched
  sub_aio_comp->set_request_count(object_number_extents.size());
  for (auto& oe : object_number_extents) {
    ldout(cct, 20) << data_object_name(&image_ctx, oe.first) << " "
                   << oe.second << dendl;
    auto ctx = new C_AioRequest(sub_aio_comp);
    auto req = ObjectDispatchSpec::create_list_snaps(
      &image_ctx, OBJECT_DISPATCH_LAYER_NONE, oe.first, std::move(oe.second),
      SnapIds{m_snap_ids}, m_list_snaps_flags, this->m_trace,
      assemble_ctx->get_snapshot_delta(oe.first), ctx);
    req->send();
  }
}
} // namespace io
} // namespace librbd
template class librbd::io::ImageRequest<librbd::ImageCtx>;
template class librbd::io::ImageReadRequest<librbd::ImageCtx>;
template class librbd::io::AbstractImageWriteRequest<librbd::ImageCtx>;
template class librbd::io::ImageWriteRequest<librbd::ImageCtx>;
template class librbd::io::ImageDiscardRequest<librbd::ImageCtx>;
template class librbd::io::ImageFlushRequest<librbd::ImageCtx>;
template class librbd::io::ImageWriteSameRequest<librbd::ImageCtx>;
template class librbd::io::ImageCompareAndWriteRequest<librbd::ImageCtx>;
template class librbd::io::ImageListSnapsRequest<librbd::ImageCtx>;
| 32,378 | 34.581319 | 81 | cc |
null | ceph-main/src/librbd/io/ImageRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_IMAGE_REQUEST_H
#define CEPH_LIBRBD_IO_IMAGE_REQUEST_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "common/zipkin_trace.h"
#include "osd/osd_types.h"
#include "librbd/Utils.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
#include <list>
#include <utility>
#include <vector>
namespace librbd {
class ImageCtx;
namespace io {
class AioCompletion;
class ObjectDispatchSpec;
class ReadResult;
// Abstract base for all image-level I/O requests.  The static aio_* helpers
// construct and dispatch the matching concrete request type; subclasses
// implement send_request() and identify themselves via get_aio_type() /
// get_request_type().
template <typename ImageCtxT = ImageCtx>
class ImageRequest {
public:
  virtual ~ImageRequest() {
    m_trace.event("finish");
  }
  static void aio_read(ImageCtxT *ictx, AioCompletion *c,
                       Extents &&image_extents, ImageArea area,
                       ReadResult &&read_result, IOContext io_context,
                       int op_flags, int read_flags,
                       const ZTracer::Trace &parent_trace);
  static void aio_write(ImageCtxT *ictx, AioCompletion *c,
                        Extents &&image_extents, ImageArea area,
                        bufferlist &&bl, int op_flags,
                        const ZTracer::Trace &parent_trace);
  static void aio_discard(ImageCtxT *ictx, AioCompletion *c,
                          Extents &&image_extents, ImageArea area,
                          uint32_t discard_granularity_bytes,
                          const ZTracer::Trace &parent_trace);
  static void aio_flush(ImageCtxT *ictx, AioCompletion *c,
                        FlushSource flush_source,
                        const ZTracer::Trace &parent_trace);
  static void aio_writesame(ImageCtxT *ictx, AioCompletion *c,
                            Extents &&image_extents, ImageArea area,
                            bufferlist &&bl, int op_flags,
                            const ZTracer::Trace &parent_trace);
  static void aio_compare_and_write(ImageCtxT *ictx, AioCompletion *c,
                                    Extents &&image_extents, ImageArea area,
                                    bufferlist &&cmp_bl, bufferlist &&bl,
                                    uint64_t *mismatch_offset, int op_flags,
                                    const ZTracer::Trace &parent_trace);
  void send();
  inline const ZTracer::Trace &get_trace() const {
    return m_trace;
  }
protected:
  typedef std::list<ObjectDispatchSpec*> ObjectRequests;
  ImageCtxT &m_image_ctx;
  AioCompletion *m_aio_comp;
  Extents m_image_extents;
  ImageArea m_image_area;
  ZTracer::Trace m_trace;
  // Protected: instances are created via the static aio_* helpers or by
  // subclasses.  Emits the "start" trace event on construction.
  ImageRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
               Extents &&image_extents, ImageArea area, const char *trace_name,
               const ZTracer::Trace &parent_trace)
    : m_image_ctx(image_ctx), m_aio_comp(aio_comp),
      m_image_extents(std::move(image_extents)), m_image_area(area),
      m_trace(librbd::util::create_trace(image_ctx, trace_name, parent_trace)) {
    m_trace.event("start");
  }
  virtual void update_timestamp();
  virtual void send_request() = 0;
  virtual aio_type_t get_aio_type() const = 0;
  virtual const char *get_request_type() const = 0;
};
// Image-level read request: carries the read destination, I/O context and
// per-op flags.
template <typename ImageCtxT = ImageCtx>
class ImageReadRequest : public ImageRequest<ImageCtxT> {
public:
  ImageReadRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
                   Extents &&image_extents, ImageArea area,
                   ReadResult &&read_result, IOContext io_context, int op_flags,
                   int read_flags, const ZTracer::Trace &parent_trace);
protected:
  void send_request() override;
  aio_type_t get_aio_type() const override {
    return AIO_TYPE_READ;
  }
  const char *get_request_type() const override {
    return "aio_read";
  }
private:
  IOContext m_io_context;
  int m_op_flags;
  int m_read_flags;
};
// Shared implementation for all modifying requests (write, discard,
// write-same, compare-and-write): maps image extents to per-object
// requests, optionally journals the operation, and updates perf counters.
template <typename ImageCtxT = ImageCtx>
class AbstractImageWriteRequest : public ImageRequest<ImageCtxT> {
public:
  // Request that journal events be appended synchronously.
  inline void flag_synchronous() {
    m_synchronous = true;
  }
protected:
  using typename ImageRequest<ImageCtxT>::ObjectRequests;
  AbstractImageWriteRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
                            Extents &&image_extents, ImageArea area,
                            const char *trace_name,
                            const ZTracer::Trace &parent_trace)
    : ImageRequest<ImageCtxT>(image_ctx, aio_comp, std::move(image_extents),
                              area, trace_name, parent_trace),
      m_synchronous(false) {
  }
  void send_request() override;
  // Hook for subclasses to trim/validate object extents before dispatch;
  // default keeps everything and reports no change.
  virtual int prune_object_extents(
      LightweightObjectExtents* object_extents) const {
    return 0;
  }
  void send_object_requests(const LightweightObjectExtents &object_extents,
                            IOContext io_context, uint64_t journal_tid);
  virtual ObjectDispatchSpec *create_object_request(
      const LightweightObjectExtent &object_extent, IOContext io_context,
      uint64_t journal_tid, bool single_extent, Context *on_finish) = 0;
  virtual uint64_t append_journal_event(bool synchronous) = 0;
  virtual void update_stats(size_t length) = 0;
private:
  bool m_synchronous;
};
// Image-level write request: owns the payload buffer and slices it per
// object extent.
template <typename ImageCtxT = ImageCtx>
class ImageWriteRequest : public AbstractImageWriteRequest<ImageCtxT> {
public:
  ImageWriteRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
                    Extents &&image_extents, ImageArea area, bufferlist &&bl,
                    int op_flags, const ZTracer::Trace &parent_trace)
    : AbstractImageWriteRequest<ImageCtxT>(
        image_ctx, aio_comp, std::move(image_extents), area,
        "write", parent_trace),
      m_bl(std::move(bl)), m_op_flags(op_flags) {
  }
protected:
  using typename ImageRequest<ImageCtxT>::ObjectRequests;
  aio_type_t get_aio_type() const override {
    return AIO_TYPE_WRITE;
  }
  const char *get_request_type() const override {
    return "aio_write";
  }
  // Collect the slices of m_bl that map onto the given object extent.
  void assemble_extent(const LightweightObjectExtent &object_extent,
                       bufferlist *bl);
  ObjectDispatchSpec *create_object_request(
      const LightweightObjectExtent &object_extent, IOContext io_context,
      uint64_t journal_tid, bool single_extent, Context *on_finish) override;
  uint64_t append_journal_event(bool synchronous) override;
  void update_stats(size_t length) override;
private:
  bufferlist m_bl;
  int m_op_flags;
};
// Image-level discard request; extents are aligned to the configured
// discard granularity before dispatch (see prune_object_extents).
template <typename ImageCtxT = ImageCtx>
class ImageDiscardRequest : public AbstractImageWriteRequest<ImageCtxT> {
public:
  ImageDiscardRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
                      Extents&& image_extents, ImageArea area,
                      uint32_t discard_granularity_bytes,
                      const ZTracer::Trace &parent_trace)
    : AbstractImageWriteRequest<ImageCtxT>(
        image_ctx, aio_comp, std::move(image_extents), area,
        "discard", parent_trace),
      m_discard_granularity_bytes(discard_granularity_bytes) {
  }
protected:
  using typename ImageRequest<ImageCtxT>::ObjectRequests;
  aio_type_t get_aio_type() const override {
    return AIO_TYPE_DISCARD;
  }
  const char *get_request_type() const override {
    return "aio_discard";
  }
  ObjectDispatchSpec *create_object_request(
      const LightweightObjectExtent &object_extent, IOContext io_context,
      uint64_t journal_tid, bool single_extent, Context *on_finish) override;
  uint64_t append_journal_event(bool synchronous) override;
  void update_stats(size_t length) override;
  int prune_object_extents(
      LightweightObjectExtents* object_extents) const override;
private:
  uint32_t m_discard_granularity_bytes;
};
// Image-level flush request; carries no extents and does not update the
// image modification timestamp.
template <typename ImageCtxT = ImageCtx>
class ImageFlushRequest : public ImageRequest<ImageCtxT> {
public:
  ImageFlushRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
                    FlushSource flush_source,
                    const ZTracer::Trace &parent_trace)
    : ImageRequest<ImageCtxT>(image_ctx, aio_comp, {},
                              ImageArea::DATA /* dummy for {} */,
                              "flush", parent_trace),
      m_flush_source(flush_source) {
  }
protected:
  using typename ImageRequest<ImageCtxT>::ObjectRequests;
  // flushes never touch the modification timestamp
  void update_timestamp() override {
  }
  void send_request() override;
  aio_type_t get_aio_type() const override {
    return AIO_TYPE_FLUSH;
  }
  const char *get_request_type() const override {
    return "aio_flush";
  }
private:
  FlushSource m_flush_source;
};
// Image-level write-same request: repeats m_data_bl across the target
// extents.
template <typename ImageCtxT = ImageCtx>
class ImageWriteSameRequest : public AbstractImageWriteRequest<ImageCtxT> {
public:
  ImageWriteSameRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
                        Extents&& image_extents, ImageArea area,
                        bufferlist &&bl, int op_flags,
                        const ZTracer::Trace &parent_trace)
    : AbstractImageWriteRequest<ImageCtxT>(
        image_ctx, aio_comp, std::move(image_extents), area,
        "writesame", parent_trace),
      m_data_bl(std::move(bl)), m_op_flags(op_flags) {
  }
protected:
  using typename ImageRequest<ImageCtxT>::ObjectRequests;
  aio_type_t get_aio_type() const override {
    return AIO_TYPE_WRITESAME;
  }
  const char *get_request_type() const override {
    return "aio_writesame";
  }
  ObjectDispatchSpec *create_object_request(
      const LightweightObjectExtent &object_extent, IOContext io_context,
      uint64_t journal_tid, bool single_extent, Context *on_finish) override;
  uint64_t append_journal_event(bool synchronous) override;
  void update_stats(size_t length) override;
private:
  bufferlist m_data_bl;
  int m_op_flags;
};
// Image-level compare-and-write request: writes m_bl only if the existing
// data matches m_cmp_bl; on mismatch the offset is reported through
// m_mismatch_offset.
template <typename ImageCtxT = ImageCtx>
class ImageCompareAndWriteRequest : public AbstractImageWriteRequest<ImageCtxT> {
public:
  using typename ImageRequest<ImageCtxT>::ObjectRequests;
  ImageCompareAndWriteRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
                              Extents &&image_extents, ImageArea area,
                              bufferlist &&cmp_bl, bufferlist &&bl,
                              uint64_t *mismatch_offset, int op_flags,
                              const ZTracer::Trace &parent_trace)
    : AbstractImageWriteRequest<ImageCtxT>(
        image_ctx, aio_comp, std::move(image_extents), area,
        "compare_and_write", parent_trace),
      m_cmp_bl(std::move(cmp_bl)), m_bl(std::move(bl)),
      m_mismatch_offset(mismatch_offset), m_op_flags(op_flags) {
  }
protected:
  // Slice both the write and compare buffers for one object extent.
  void assemble_extent(const LightweightObjectExtent &object_extent,
                       bufferlist *bl, bufferlist *cmp_bl);
  ObjectDispatchSpec *create_object_request(
      const LightweightObjectExtent &object_extent, IOContext io_context,
      uint64_t journal_tid, bool single_extent, Context *on_finish) override;
  uint64_t append_journal_event(bool synchronous) override;
  void update_stats(size_t length) override;
  aio_type_t get_aio_type() const override {
    return AIO_TYPE_COMPARE_AND_WRITE;
  }
  const char *get_request_type() const override {
    return "aio_compare_and_write";
  }
  // Validates single-object, single-stripe-unit constraints.
  int prune_object_extents(
      LightweightObjectExtents* object_extents) const override;
private:
  bufferlist m_cmp_bl;
  bufferlist m_bl;
  uint64_t *m_mismatch_offset;
  int m_op_flags;
};
// Image-level list-snaps request: computes per-snapshot deltas over the
// given extents and stores them in *m_snapshot_delta.
template <typename ImageCtxT = ImageCtx>
class ImageListSnapsRequest : public ImageRequest<ImageCtxT> {
public:
  ImageListSnapsRequest(
      ImageCtxT& image_ctx, AioCompletion* aio_comp,
      Extents&& image_extents, ImageArea area, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace& parent_trace);
protected:
  // read-only operation -- never touches the modification timestamp
  void update_timestamp() override {}
  void send_request() override;
  aio_type_t get_aio_type() const override {
    return AIO_TYPE_GENERIC;
  }
  const char *get_request_type() const override {
    return "list-snaps";
  }
private:
  SnapIds m_snap_ids;
  int m_list_snaps_flags;
  SnapshotDelta* m_snapshot_delta;
};
} // namespace io
} // namespace librbd
extern template class librbd::io::ImageRequest<librbd::ImageCtx>;
extern template class librbd::io::ImageReadRequest<librbd::ImageCtx>;
extern template class librbd::io::AbstractImageWriteRequest<librbd::ImageCtx>;
extern template class librbd::io::ImageWriteRequest<librbd::ImageCtx>;
extern template class librbd::io::ImageDiscardRequest<librbd::ImageCtx>;
extern template class librbd::io::ImageFlushRequest<librbd::ImageCtx>;
extern template class librbd::io::ImageWriteSameRequest<librbd::ImageCtx>;
extern template class librbd::io::ImageCompareAndWriteRequest<librbd::ImageCtx>;
extern template class librbd::io::ImageListSnapsRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_IMAGE_REQUEST_H
| 12,696 | 32.589947 | 81 | h |
null | ceph-main/src/librbd/io/IoOperations.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string.hpp>
#include "librbd/io/Types.h"
#include "librbd/io/IoOperations.h"
#include <map>
#include <vector>
namespace librbd {
namespace io {
// Canonical user-facing names for each io-operation flag bit.
#define RBD_IO_OPERATION_NAME_READ "read"
#define RBD_IO_OPERATION_NAME_WRITE "write"
#define RBD_IO_OPERATION_NAME_DISCARD "discard"
#define RBD_IO_OPERATION_NAME_WRITE_SAME "write_same"
#define RBD_IO_OPERATION_NAME_COMPARE_AND_WRITE "compare_and_write"
// Name -> flag-bit lookup used by both string conversion directions below.
static const std::map<std::string, uint64_t> RBD_IO_OPERATION_MAP = {
  {RBD_IO_OPERATION_NAME_READ, RBD_IO_OPERATION_READ},
  {RBD_IO_OPERATION_NAME_WRITE, RBD_IO_OPERATION_WRITE},
  {RBD_IO_OPERATION_NAME_DISCARD, RBD_IO_OPERATION_DISCARD},
  {RBD_IO_OPERATION_NAME_WRITE_SAME, RBD_IO_OPERATION_WRITE_SAME},
  {RBD_IO_OPERATION_NAME_COMPARE_AND_WRITE, RBD_IO_OPERATION_COMPARE_AND_WRITE},
};
// Compile-time guard: fail loudly when a new flag bit is added without
// updating the name map above.
static_assert((RBD_IO_OPERATION_COMPARE_AND_WRITE << 1) > RBD_IO_OPERATIONS_ALL,
              "new RBD io operation added");
// Render the known bits of `operations` as a comma-separated name list;
// any leftover unknown bits are reported to *err (if provided) but do not
// fail the conversion.
std::string rbd_io_operations_to_string(uint64_t operations,
                                        std::ostream *err)
{
  std::string result;
  for (const auto& [name, bit] : RBD_IO_OPERATION_MAP) {
    if ((operations & bit) == 0) {
      continue;
    }
    if (!result.empty()) {
      result += ",";
    }
    result += name;
    operations &= ~bit;
  }
  if (err && operations) {
    *err << "ignoring unknown io operation mask 0x"
         << std::hex << operations << std::dec;
  }
  return result;
}
// Parse an io-operations mask from either a numeric string or a
// comma-separated list of operation names.  An empty string yields the
// default mask; unknown bits/names are reported to *err (if provided) and
// otherwise ignored.
uint64_t rbd_io_operations_from_string(const std::string& orig_value,
                                       std::ostream *err)
{
  uint64_t operations = 0;
  std::string value = orig_value;
  boost::trim(value);

  // empty string means default operations
  if (!value.size()) {
    return RBD_IO_OPERATIONS_DEFAULT;
  }

  try {
    // numeric?
    operations = boost::lexical_cast<uint64_t>(value);

    // drop unrecognized bits
    uint64_t unsupported_operations = (operations & ~RBD_IO_OPERATIONS_ALL);
    if (unsupported_operations != 0ull) {
      operations &= RBD_IO_OPERATIONS_ALL;
      if (err) {
        *err << "ignoring unknown operation mask 0x"
             << std::hex << unsupported_operations << std::dec;
      }
    }
  } catch (boost::bad_lexical_cast&) {
    // operation name list?
    bool errors = false;
    std::vector<std::string> operation_names;
    boost::split(operation_names, value, boost::is_any_of(","));
    for (auto operation_name: operation_names) {
      boost::trim(operation_name);
      auto operation_it = RBD_IO_OPERATION_MAP.find(operation_name);
      if (operation_it != RBD_IO_OPERATION_MAP.end()) {
        // OR the flag bit in (was '+='): a duplicated name in the list must
        // not carry into an unrelated flag bit
        operations |= operation_it->second;
      } else if (err) {
        if (errors) {
          *err << ", ";
        } else {
          errors = true;
        }
        *err << "ignoring unknown operation " << operation_name;
      }
    }
  }

  return operations;
}
} // namespace io
} // namespace librbd
| 2,999 | 28.411765 | 80 | cc |
null | ceph-main/src/librbd/io/IoOperations.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <ostream>
namespace librbd {
namespace io {
// Render an io-operations bit mask as a comma-separated name list;
// unknown bits are reported to *err (if non-null).
std::string rbd_io_operations_to_string(uint64_t ops,
                                        std::ostream *err);
// Parse a numeric string or comma-separated name list into a bit mask;
// unknown bits/names are reported to *err (if non-null).
uint64_t rbd_io_operations_from_string(const std::string& value,
                                       std::ostream *err);
} // namespace io
} // namespace librbd
| 473 | 23.947368 | 70 | h |
null | ceph-main/src/librbd/io/ObjectDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ObjectDispatch.h"
#include "common/dout.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/io/ObjectRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ObjectDispatch: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
using librbd::util::data_object_name;
// Core dispatch layer: holds only a (non-owning) pointer to the image
// context.
template <typename I>
ObjectDispatch<I>::ObjectDispatch(I* image_ctx)
  : m_image_ctx(image_ctx) {
}
template <typename I>
void ObjectDispatch<I>::shut_down(Context* on_finish) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  // no state to tear down -- just complete asynchronously with success
  m_image_ctx->asio_engine->post(on_finish, 0);
}
// Dispatch an object read by creating and sending an ObjectReadRequest;
// always handles the request (returns true) and marks it complete.
template <typename I>
bool ObjectDispatch<I>::read(
    uint64_t object_no, ReadExtents* extents, IOContext io_context,
    int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
    uint64_t* version, int* object_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  // log the object name (not the raw object number) for consistency with
  // the other dispatch methods
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << *extents << dendl;

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  auto req = new ObjectReadRequest<I>(m_image_ctx, object_no, extents,
                                      io_context, op_flags, read_flags,
                                      parent_trace, version, on_dispatched);
  req->send();
  return true;
}
// Dispatch an object discard via ObjectDiscardRequest; always handles the
// request and marks it complete.
template <typename I>
bool ObjectDispatch<I>::discard(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    IOContext io_context, int discard_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  auto req = new ObjectDiscardRequest<I>(m_image_ctx, object_no, object_off,
                                         object_len, io_context, discard_flags,
                                         parent_trace, on_dispatched);
  req->send();
  return true;
}
// Dispatch an object write via ObjectWriteRequest; always handles the
// request and marks it complete.
template <typename I>
bool ObjectDispatch<I>::write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
    IOContext io_context, int op_flags, int write_flags,
    std::optional<uint64_t> assert_version,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << data.length() << dendl;

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  auto req = new ObjectWriteRequest<I>(m_image_ctx, object_no, object_off,
                                       std::move(data), io_context, op_flags,
                                       write_flags, assert_version,
                                       parent_trace, on_dispatched);
  req->send();
  return true;
}
// Dispatch an object write-same via ObjectWriteSameRequest; always handles
// the request and marks it complete.
template <typename I>
bool ObjectDispatch<I>::write_same(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
    IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  auto req = new ObjectWriteSameRequest<I>(m_image_ctx, object_no,
                                           object_off, object_len,
                                           std::move(data), io_context,
                                           op_flags, parent_trace,
                                           on_dispatched);
  req->send();
  return true;
}
// Dispatch an object compare-and-write via ObjectCompareAndWriteRequest;
// always handles the request and marks it complete.
template <typename I>
bool ObjectDispatch<I>::compare_and_write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
    ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
    int* object_dispatch_flags, uint64_t* journal_tid,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << write_data.length() << dendl;

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  auto req = new ObjectCompareAndWriteRequest<I>(m_image_ctx, object_no,
                                                 object_off,
                                                 std::move(cmp_data),
                                                 std::move(write_data),
                                                 io_context, mismatch_offset,
                                                 op_flags, parent_trace,
                                                 on_dispatched);
  req->send();
  return true;
}
// Dispatch an object list-snaps via ObjectListSnapsRequest; always handles
// the request and marks it complete.
template <typename I>
bool ObjectDispatch<I>::list_snaps(
    uint64_t object_no, io::Extents&& extents, SnapIds&& snap_ids,
    int list_snap_flags, const ZTracer::Trace &parent_trace,
    SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << "extents=" << extents << ", "
                 << "snap_ids=" << snap_ids << dendl;

  *dispatch_result = DISPATCH_RESULT_COMPLETE;
  auto req = ObjectListSnapsRequest<I>::create(
    m_image_ctx, object_no, std::move(extents), std::move(snap_ids),
    list_snap_flags, parent_trace, snapshot_delta, on_dispatched);
  req->send();
  return true;
}
} // namespace io
} // namespace librbd
template class librbd::io::ObjectDispatch<librbd::ImageCtx>;
| 6,378 | 38.376543 | 79 | cc |
null | ceph-main/src/librbd/io/ObjectDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_IO_OBJECT_DISPATCH_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "common/zipkin_trace.h"
#include "librbd/io/Types.h"
#include "librbd/io/ObjectDispatchInterface.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
// Core object-dispatch layer (OBJECT_DISPATCH_LAYER_CORE): each I/O hook
// constructs the corresponding ObjectRequest and sends it.  Cache-related
// hooks (flush/invalidate/reset) are no-ops at this layer.
template <typename ImageCtxT = librbd::ImageCtx>
class ObjectDispatch : public ObjectDispatchInterface {
public:
  ObjectDispatch(ImageCtxT* image_ctx);
  ObjectDispatchLayer get_dispatch_layer() const override {
    return OBJECT_DISPATCH_LAYER_CORE;
  }
  void shut_down(Context* on_finish) override;
  bool read(
      uint64_t object_no, ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, int* object_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;
  bool write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;
  bool write_same(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;
  bool compare_and_write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
      int* object_dispatch_flags, uint64_t* journal_tid,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  // nothing buffered at this layer -- flush is a pass-through
  bool flush(
      FlushSource flush_source, const ZTracer::Trace &parent_trace,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override {
    return false;
  }
  bool list_snaps(
      uint64_t object_no, io::Extents&& extents, SnapIds&& snap_ids,
      int list_snap_flags, const ZTracer::Trace &parent_trace,
      SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  // no cache at this layer -- nothing to invalidate or reset
  bool invalidate_cache(Context* on_finish) override {
    return false;
  }
  bool reset_existence_cache(Context* on_finish) override {
    return false;
  }
  void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) override {
  }
  int prepare_copyup(
      uint64_t object_no,
      SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
    return 0;
  }
private:
  ImageCtxT* m_image_ctx;  // non-owning
};
} // namespace io
} // namespace librbd
extern template class librbd::io::ObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_OBJECT_DISPATCH_H
| 3,809 | 31.844828 | 75 | h |
null | ceph-main/src/librbd/io/ObjectDispatchInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCH_INTERFACE_H
#define CEPH_LIBRBD_IO_OBJECT_DISPATCH_INTERFACE_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
struct Context;
struct RWLock;
namespace librbd {
namespace io {
struct AioCompletion;
struct ObjectDispatchInterface;
struct ObjectDispatchSpec;
// Abstract interface implemented by every object dispatch layer (core,
// cache, journal, etc.). Each hook returns true when the layer handled
// the request (the layer then drives *dispatch_result / on_dispatched),
// or false to let the dispatcher pass it to the next layer.
struct ObjectDispatchInterface {
  typedef ObjectDispatchInterface Dispatch;
  typedef ObjectDispatchLayer DispatchLayer;
  typedef ObjectDispatchSpec DispatchSpec;
  virtual ~ObjectDispatchInterface() {
  }
  // position of this layer within the dispatcher stack
  virtual ObjectDispatchLayer get_dispatch_layer() const = 0;
  virtual void shut_down(Context* on_finish) = 0;
  virtual bool read(
      uint64_t object_no, ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, int* object_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool discard(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context**on_finish, Context* on_dispatched) = 0;
  virtual bool write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context**on_finish, Context* on_dispatched) = 0;
  virtual bool write_same(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, const ZTracer::Trace &parent_trace,
      int* object_dispatch_flags, uint64_t* journal_tid,
      DispatchResult* dispatch_result, Context**on_finish,
      Context* on_dispatched) = 0;
  virtual bool compare_and_write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
      int* object_dispatch_flags, uint64_t* journal_tid,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool flush(
      FlushSource flush_source, const ZTracer::Trace &parent_trace,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) = 0;
  virtual bool list_snaps(
      uint64_t object_no, Extents&& extents, SnapIds&& snap_ids,
      int list_snap_flags, const ZTracer::Trace &parent_trace,
      SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) = 0;
  virtual bool invalidate_cache(Context* on_finish) = 0;
  virtual bool reset_existence_cache(Context* on_finish) = 0;
  // notification that an extent was overwritten (journal tid bookkeeping)
  virtual void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) = 0;
  virtual int prepare_copyup(
      uint64_t object_no,
      SnapshotSparseBufferlist* snapshot_sparse_bufferlist) = 0;
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_OBJECT_DISPATCH_INTERFACE_H
| 3,719 | 35.116505 | 77 | h |
null | ceph-main/src/librbd/io/ObjectDispatchSpec.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ObjectDispatchSpec.h"
#include "include/Context.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include <boost/variant.hpp>
namespace librbd {
namespace io {
// Completion hook invoked after each dispatch layer finishes with the
// request: an error (or a COMPLETE result) finishes the whole request,
// while CONTINUE re-sends it so the next layer can process it.
void ObjectDispatchSpec::C_Dispatcher::complete(int r) {
  // failures short-circuit the remaining layers
  if (r < 0) {
    finish(r);
    return;
  }
  auto result = object_dispatch_spec->dispatch_result;
  if (result == DISPATCH_RESULT_CONTINUE) {
    // restart dispatching at the next layer in the stack
    object_dispatch_spec->send();
  } else if (result == DISPATCH_RESULT_COMPLETE) {
    finish(r);
  } else {
    // DISPATCH_RESULT_INVALID / DISPATCH_RESULT_RESTART must never
    // reach this completion path
    ceph_abort();
  }
}
// Final completion: notify the caller, then release the owning spec.
// NOTE: 'this' is embedded in object_dispatch_spec, so the delete must
// be the last statement -- it also destroys this context object.
void ObjectDispatchSpec::C_Dispatcher::finish(int r) {
  on_finish->complete(r);
  delete object_dispatch_spec;
}
// Hand the request to the dispatcher, which routes it through the
// registered dispatch layers starting after 'dispatch_layer'.
void ObjectDispatchSpec::send() {
  object_dispatcher->send(this);
}
// Fail the request without dispatching it; completes the caller's context
// with the supplied (negative) error code and destroys the spec.
void ObjectDispatchSpec::fail(int r) {
  ceph_assert(r < 0);
  dispatcher_ctx.complete(r);
}
} // namespace io
} // namespace librbd
| 995 | 19.75 | 70 | cc |
null | ceph-main/src/librbd/io/ObjectDispatchSpec.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCH_SPEC_H
#define CEPH_LIBRBD_IO_OBJECT_DISPATCH_SPEC_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
#include <boost/variant/variant.hpp>
namespace librbd {
namespace io {
struct ObjectDispatcherInterface;
// Self-contained description of a single object I/O request as it travels
// through the object dispatch layers. Holds the request payload (as a
// variant), the current dispatch state, and the caller's completion.
// Instances are heap-allocated via the create_* factories and delete
// themselves when the request finishes (see C_Dispatcher::finish).
struct ObjectDispatchSpec {
private:
  // helper to avoid extra heap allocation per object IO
  struct C_Dispatcher : public Context {
    ObjectDispatchSpec* object_dispatch_spec;
    Context* on_finish;
    C_Dispatcher(ObjectDispatchSpec* object_dispatch_spec, Context* on_finish)
      : object_dispatch_spec(object_dispatch_spec), on_finish(on_finish) {
    }
    void complete(int r) override;
    void finish(int r) override;
  };
public:
  // common base for per-op payloads: identifies the target object
  struct RequestBase {
    uint64_t object_no;
    RequestBase(uint64_t object_no)
      : object_no(object_no) {
    }
  };
  // read payload: extents to read plus optional returned object version
  struct ReadRequest : public RequestBase {
    ReadExtents* extents;
    int read_flags;
    uint64_t* version;
    ReadRequest(uint64_t object_no, ReadExtents* extents, int read_flags,
                uint64_t* version)
      : RequestBase(object_no), extents(extents), read_flags(read_flags),
        version(version) {
    }
  };
  // common base for mutating ops: offset plus journal transaction id
  struct WriteRequestBase : public RequestBase {
    uint64_t object_off;
    uint64_t journal_tid;
    WriteRequestBase(uint64_t object_no, uint64_t object_off,
                     uint64_t journal_tid)
      : RequestBase(object_no), object_off(object_off),
        journal_tid(journal_tid) {
    }
  };
  struct DiscardRequest : public WriteRequestBase {
    uint64_t object_len;
    int discard_flags;
    DiscardRequest(uint64_t object_no, uint64_t object_off, uint64_t object_len,
                   int discard_flags, uint64_t journal_tid)
      : WriteRequestBase(object_no, object_off, journal_tid),
        object_len(object_len), discard_flags(discard_flags) {
    }
  };
  // write payload; assert_version optionally enforces an expected object
  // version before the write is applied
  struct WriteRequest : public WriteRequestBase {
    ceph::bufferlist data;
    int write_flags;
    std::optional<uint64_t> assert_version;
    WriteRequest(uint64_t object_no, uint64_t object_off,
                 ceph::bufferlist&& data, int write_flags,
                 std::optional<uint64_t> assert_version, uint64_t journal_tid)
      : WriteRequestBase(object_no, object_off, journal_tid),
        data(std::move(data)), write_flags(write_flags),
        assert_version(assert_version) {
    }
  };
  // repeat 'data' across 'object_len' bytes of the object
  struct WriteSameRequest : public WriteRequestBase {
    uint64_t object_len;
    LightweightBufferExtents buffer_extents;
    ceph::bufferlist data;
    WriteSameRequest(uint64_t object_no, uint64_t object_off,
                     uint64_t object_len,
                     LightweightBufferExtents&& buffer_extents,
                     ceph::bufferlist&& data, uint64_t journal_tid)
      : WriteRequestBase(object_no, object_off, journal_tid),
        object_len(object_len), buffer_extents(std::move(buffer_extents)),
        data(std::move(data)) {
    }
  };
  // atomic compare-and-write; on mismatch the failing offset is reported
  // via *mismatch_offset
  struct CompareAndWriteRequest : public WriteRequestBase {
    ceph::bufferlist cmp_data;
    ceph::bufferlist data;
    uint64_t* mismatch_offset;
    CompareAndWriteRequest(uint64_t object_no, uint64_t object_off,
                           ceph::bufferlist&& cmp_data, ceph::bufferlist&& data,
                           uint64_t* mismatch_offset,
                           uint64_t journal_tid)
      : WriteRequestBase(object_no, object_off, journal_tid),
        cmp_data(std::move(cmp_data)), data(std::move(data)),
        mismatch_offset(mismatch_offset) {
    }
  };
  struct FlushRequest {
    FlushSource flush_source;
    uint64_t journal_tid;
    FlushRequest(FlushSource flush_source, uint64_t journal_tid)
      : flush_source(flush_source), journal_tid(journal_tid) {
    }
  };
  // enumerate snapshot deltas for the given extents / snap id range
  struct ListSnapsRequest : public RequestBase {
    Extents extents;
    SnapIds snap_ids;
    int list_snaps_flags;
    SnapshotDelta* snapshot_delta;
    ListSnapsRequest(uint64_t object_no, Extents&& extents,
                     SnapIds&& snap_ids, int list_snaps_flags,
                     SnapshotDelta* snapshot_delta)
      : RequestBase(object_no), extents(std::move(extents)),
        snap_ids(std::move(snap_ids)),list_snaps_flags(list_snaps_flags),
        snapshot_delta(snapshot_delta) {
    }
  };
  // tagged union of all supported per-op payloads
  typedef boost::variant<ReadRequest,
                         DiscardRequest,
                         WriteRequest,
                         WriteSameRequest,
                         CompareAndWriteRequest,
                         FlushRequest,
                         ListSnapsRequest> Request;
  C_Dispatcher dispatcher_ctx;
  ObjectDispatcherInterface* object_dispatcher;
  // layer that most recently handled (or created) this request
  ObjectDispatchLayer dispatch_layer;
  int object_dispatch_flags = 0;
  DispatchResult dispatch_result = DISPATCH_RESULT_INVALID;
  Request request;
  IOContext io_context;
  int op_flags;
  ZTracer::Trace parent_trace;
  // factory helpers: allocate a spec wired to the image's object
  // dispatcher; the spec frees itself on completion
  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_read(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, Context* on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  ReadRequest{object_no, extents,
                                              read_flags, version},
                                  io_context, op_flags, parent_trace,
                                  on_finish);
  }
  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_discard(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  DiscardRequest{object_no, object_off,
                                                 object_len, discard_flags,
                                                 journal_tid},
                                  io_context, 0, parent_trace, on_finish);
  }
  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_write(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  WriteRequest{object_no, object_off,
                                               std::move(data), write_flags,
                                               assert_version, journal_tid},
                                  io_context, op_flags, parent_trace,
                                  on_finish);
  }
  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_write_same(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  WriteSameRequest{object_no, object_off,
                                                   object_len,
                                                   std::move(buffer_extents),
                                                   std::move(data),
                                                   journal_tid},
                                  io_context, op_flags, parent_trace,
                                  on_finish);
  }
  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_compare_and_write(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context,
      uint64_t *mismatch_offset, int op_flags, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  CompareAndWriteRequest{object_no,
                                                         object_off,
                                                         std::move(cmp_data),
                                                         std::move(write_data),
                                                         mismatch_offset,
                                                         journal_tid},
                                  io_context, op_flags, parent_trace,
                                  on_finish);
  }
  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_flush(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      FlushSource flush_source, uint64_t journal_tid,
      const ZTracer::Trace &parent_trace, Context *on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  FlushRequest{flush_source, journal_tid},
                                  {}, 0, parent_trace, on_finish);
  }
  template <typename ImageCtxT>
  static ObjectDispatchSpec* create_list_snaps(
      ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
      uint64_t object_no, Extents&& extents, SnapIds&& snap_ids,
      int list_snaps_flags, const ZTracer::Trace &parent_trace,
      SnapshotDelta* snapshot_delta, Context* on_finish) {
    return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
                                  object_dispatch_layer,
                                  ListSnapsRequest{object_no,
                                                   std::move(extents),
                                                   std::move(snap_ids),
                                                   list_snaps_flags,
                                                   snapshot_delta},
                                  {}, 0, parent_trace, on_finish);
  }
  // start (or restart) dispatching through the layer stack
  void send();
  // abort without dispatching; r must be negative
  void fail(int r);
private:
  template <typename> friend class ObjectDispatcher;
  ObjectDispatchSpec(ObjectDispatcherInterface* object_dispatcher,
                     ObjectDispatchLayer object_dispatch_layer,
                     Request&& request, IOContext io_context, int op_flags,
                     const ZTracer::Trace& parent_trace, Context* on_finish)
    : dispatcher_ctx(this, on_finish), object_dispatcher(object_dispatcher),
      dispatch_layer(object_dispatch_layer), request(std::move(request)),
      io_context(io_context), op_flags(op_flags), parent_trace(parent_trace) {
  }
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_OBJECT_DISPATCH_SPEC_H
| 11,715 | 38.581081 | 80 | h |
null | ceph-main/src/librbd/io/ObjectDispatcher.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ObjectDispatcher.h"
#include "include/Context.h"
#include "common/AsyncOpTracker.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/ObjectDispatch.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include <boost/variant.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ObjectDispatcher: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
// Layer iterator that invokes reset_existence_cache() on every registered
// dispatch layer, starting from the bottom of the stack.
template <typename I>
struct ObjectDispatcher<I>::C_ResetExistenceCache : public C_LayerIterator {
  C_ResetExistenceCache(ObjectDispatcher* object_dispatcher, Context* on_finish)
    : C_LayerIterator(object_dispatcher, OBJECT_DISPATCH_LAYER_NONE, on_finish) {
  }
  // returns true if the layer handled the call asynchronously
  bool execute(ObjectDispatchInterface* object_dispatch,
               Context* on_finish) override {
    return object_dispatch->reset_existence_cache(on_finish);
  }
};
// Variant visitor that forwards an ObjectDispatchSpec's payload to the
// matching hook on a dispatch layer. Returns the layer's "handled" flag.
template <typename I>
struct ObjectDispatcher<I>::SendVisitor : public boost::static_visitor<bool> {
  ObjectDispatchInterface* object_dispatch;
  ObjectDispatchSpec* object_dispatch_spec;
  SendVisitor(ObjectDispatchInterface* object_dispatch,
              ObjectDispatchSpec* object_dispatch_spec)
    : object_dispatch(object_dispatch),
      object_dispatch_spec(object_dispatch_spec) {
  }
  // read request -> ObjectDispatchInterface::read
  bool operator()(ObjectDispatchSpec::ReadRequest& read) const {
    return object_dispatch->read(
      read.object_no, read.extents, object_dispatch_spec->io_context,
      object_dispatch_spec->op_flags, read.read_flags,
      object_dispatch_spec->parent_trace, read.version,
      &object_dispatch_spec->object_dispatch_flags,
      &object_dispatch_spec->dispatch_result,
      &object_dispatch_spec->dispatcher_ctx.on_finish,
      &object_dispatch_spec->dispatcher_ctx);
  }
  // discard request -> ObjectDispatchInterface::discard
  bool operator()(ObjectDispatchSpec::DiscardRequest& discard) const {
    return object_dispatch->discard(
      discard.object_no, discard.object_off, discard.object_len,
      object_dispatch_spec->io_context, discard.discard_flags,
      object_dispatch_spec->parent_trace,
      &object_dispatch_spec->object_dispatch_flags, &discard.journal_tid,
      &object_dispatch_spec->dispatch_result,
      &object_dispatch_spec->dispatcher_ctx.on_finish,
      &object_dispatch_spec->dispatcher_ctx);
  }
  // write request -> ObjectDispatchInterface::write
  bool operator()(ObjectDispatchSpec::WriteRequest& write) const {
    return object_dispatch->write(
      write.object_no, write.object_off, std::move(write.data),
      object_dispatch_spec->io_context, object_dispatch_spec->op_flags,
      write.write_flags, write.assert_version,
      object_dispatch_spec->parent_trace,
      &object_dispatch_spec->object_dispatch_flags, &write.journal_tid,
      &object_dispatch_spec->dispatch_result,
      &object_dispatch_spec->dispatcher_ctx.on_finish,
      &object_dispatch_spec->dispatcher_ctx);
  }
  // write-same request -> ObjectDispatchInterface::write_same
  bool operator()(ObjectDispatchSpec::WriteSameRequest& write_same) const {
    return object_dispatch->write_same(
      write_same.object_no, write_same.object_off, write_same.object_len,
      std::move(write_same.buffer_extents), std::move(write_same.data),
      object_dispatch_spec->io_context, object_dispatch_spec->op_flags,
      object_dispatch_spec->parent_trace,
      &object_dispatch_spec->object_dispatch_flags, &write_same.journal_tid,
      &object_dispatch_spec->dispatch_result,
      &object_dispatch_spec->dispatcher_ctx.on_finish,
      &object_dispatch_spec->dispatcher_ctx);
  }
  // compare-and-write request -> ObjectDispatchInterface::compare_and_write
  bool operator()(
      ObjectDispatchSpec::CompareAndWriteRequest& compare_and_write) const {
    return object_dispatch->compare_and_write(
      compare_and_write.object_no, compare_and_write.object_off,
      std::move(compare_and_write.cmp_data), std::move(compare_and_write.data),
      object_dispatch_spec->io_context, object_dispatch_spec->op_flags,
      object_dispatch_spec->parent_trace, compare_and_write.mismatch_offset,
      &object_dispatch_spec->object_dispatch_flags,
      &compare_and_write.journal_tid,
      &object_dispatch_spec->dispatch_result,
      &object_dispatch_spec->dispatcher_ctx.on_finish,
      &object_dispatch_spec->dispatcher_ctx);
  }
  // flush request -> ObjectDispatchInterface::flush
  bool operator()(ObjectDispatchSpec::FlushRequest& flush) const {
    return object_dispatch->flush(
      flush.flush_source, object_dispatch_spec->parent_trace,
      &flush.journal_tid,
      &object_dispatch_spec->dispatch_result,
      &object_dispatch_spec->dispatcher_ctx.on_finish,
      &object_dispatch_spec->dispatcher_ctx);
  }
  // list-snaps request -> ObjectDispatchInterface::list_snaps
  bool operator()(ObjectDispatchSpec::ListSnapsRequest& list_snaps) const {
    return object_dispatch->list_snaps(
      list_snaps.object_no, std::move(list_snaps.extents),
      std::move(list_snaps.snap_ids), list_snaps.list_snaps_flags,
      object_dispatch_spec->parent_trace, list_snaps.snapshot_delta,
      &object_dispatch_spec->object_dispatch_flags,
      &object_dispatch_spec->dispatch_result,
      &object_dispatch_spec->dispatcher_ctx.on_finish,
      &object_dispatch_spec->dispatcher_ctx);
  }
};
// Construct the dispatcher and register the terminal core layer, so every
// request eventually reaches the backing store.
template <typename I>
ObjectDispatcher<I>::ObjectDispatcher(I* image_ctx)
  : Dispatcher<I, ObjectDispatcherInterface>(image_ctx) {
  // configure the core object dispatch handler on startup
  // NOTE(review): the raw pointer appears to be owned by the base
  // Dispatcher after registration -- confirm in Dispatcher::register_dispatch
  auto object_dispatch = new ObjectDispatch(image_ctx);
  this->register_dispatch(object_dispatch);
}
// Walk every registered layer, asking each one to invalidate its cache;
// the caller's context is completed asynchronously once all layers finish.
template <typename I>
void ObjectDispatcher<I>::invalidate_cache(Context* on_finish) {
  auto ictx = this->m_image_ctx;
  ldout(ictx->cct, 5) << dendl;
  // ensure the user callback runs on a thread-pool thread
  auto async_finish = util::create_async_context_callback(*ictx, on_finish);
  auto iter_ctx = new C_InvalidateCache(this, OBJECT_DISPATCH_LAYER_NONE,
                                        async_finish);
  // kick off the layer iteration
  iter_ctx->complete(0);
}
// Walk every registered layer, asking each one to reset its object
// existence cache; completes the caller's context asynchronously.
template <typename I>
void ObjectDispatcher<I>::reset_existence_cache(Context* on_finish) {
  auto ictx = this->m_image_ctx;
  ldout(ictx->cct, 5) << dendl;
  // ensure the user callback runs on a thread-pool thread
  auto async_finish = util::create_async_context_callback(*ictx, on_finish);
  auto iter_ctx = new C_ResetExistenceCache(this, async_finish);
  // kick off the layer iteration
  iter_ctx->complete(0);
}
// Synchronously notify every registered dispatch layer that the given
// object extent was overwritten (used for journal tid bookkeeping).
template <typename I>
void ObjectDispatcher<I>::extent_overwritten(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    uint64_t journal_tid, uint64_t new_journal_tid) {
  auto cct = this->m_image_ctx->cct;
  ldout(cct, 20) << object_no << " " << object_off << "~" << object_len
                 << dendl;
  std::shared_lock locker{this->m_lock};
  // iterate by const reference: iterating by value copied the entire
  // map entry (including ObjectDispatchMeta) on every iteration
  for (const auto& it : this->m_dispatches) {
    auto& object_dispatch_meta = it.second;
    auto object_dispatch = object_dispatch_meta.dispatch;
    object_dispatch->extent_overwritten(object_no, object_off, object_len,
                                        journal_tid, new_journal_tid);
  }
}
// Give every registered dispatch layer a chance to adjust the copyup
// data before it is written. Stops and returns the first error.
template <typename I>
int ObjectDispatcher<I>::prepare_copyup(
    uint64_t object_no,
    SnapshotSparseBufferlist* snapshot_sparse_bufferlist) {
  auto cct = this->m_image_ctx->cct;
  ldout(cct, 20) << "object_no=" << object_no << dendl;
  std::shared_lock locker{this->m_lock};
  // iterate by const reference: iterating by value copied the entire
  // map entry (including ObjectDispatchMeta) on every iteration
  for (const auto& it : this->m_dispatches) {
    auto& object_dispatch_meta = it.second;
    auto object_dispatch = object_dispatch_meta.dispatch;
    auto r = object_dispatch->prepare_copyup(
      object_no, snapshot_sparse_bufferlist);
    if (r < 0) {
      return r;
    }
  }
  return 0;
}
// Dispatch hook invoked by the base Dispatcher for each layer: unpack the
// spec's request variant and invoke the matching method on the layer.
// Returns true if the layer handled the request.
template <typename I>
bool ObjectDispatcher<I>::send_dispatch(
    ObjectDispatchInterface* object_dispatch,
    ObjectDispatchSpec* object_dispatch_spec) {
  return boost::apply_visitor(
    SendVisitor{object_dispatch, object_dispatch_spec},
    object_dispatch_spec->request);
}
} // namespace io
} // namespace librbd
template class librbd::io::ObjectDispatcher<librbd::ImageCtx>;
| 7,792 | 36.287081 | 81 | cc |
null | ceph-main/src/librbd/io/ObjectDispatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H
#define CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include "librbd/io/Dispatcher.h"
#include "librbd/io/ObjectDispatchInterface.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/Types.h"
#include <map>
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
// Routes object-level I/O requests through an ordered stack of
// ObjectDispatchInterface layers (cache, journal, core, ...).
template <typename ImageCtxT = ImageCtx>
class ObjectDispatcher
    : public Dispatcher<ImageCtxT, ObjectDispatcherInterface> {
public:
  ObjectDispatcher(ImageCtxT* image_ctx);
  // broadcast operations applied across all registered layers
  void invalidate_cache(Context* on_finish) override;
  void reset_existence_cache(Context* on_finish) override;
  void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) override;
  int prepare_copyup(
      uint64_t object_no,
      SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override;
  using typename Dispatcher<ImageCtxT, ObjectDispatcherInterface>::C_LayerIterator;
  using typename Dispatcher<ImageCtxT, ObjectDispatcherInterface>::C_InvalidateCache;
protected:
  // per-layer dispatch callback invoked by the base Dispatcher
  bool send_dispatch(ObjectDispatchInterface* object_dispatch,
                     ObjectDispatchSpec* object_dispatch_spec) override;
private:
  struct C_ResetExistenceCache;
  struct SendVisitor;
};
} // namespace io
} // namespace librbd
extern template class librbd::io::ObjectDispatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H
| 1,651 | 26.081967 | 85 | h |
null | ceph-main/src/librbd/io/ObjectDispatcherInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_OBJECT_DISPATCHER_INTERFACE_H
#define CEPH_LIBRBD_IO_OBJECT_DISPATCHER_INTERFACE_H
#include "include/int_types.h"
#include "librbd/io/DispatcherInterface.h"
#include "librbd/io/ObjectDispatchInterface.h"
struct Context;
namespace librbd {
namespace io {
// Dispatcher-level interface: operations that apply to the whole object
// dispatch layer stack rather than to a single layer.
struct ObjectDispatcherInterface
    : public DispatcherInterface<ObjectDispatchInterface> {
public:
  virtual void invalidate_cache(Context* on_finish) = 0;
  virtual void reset_existence_cache(Context* on_finish) = 0;
  virtual void extent_overwritten(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    uint64_t journal_tid, uint64_t new_journal_tid) = 0;
  virtual int prepare_copyup(
    uint64_t object_no,
    SnapshotSparseBufferlist* snapshot_sparse_bufferlist) = 0;
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_OBJECT_DISPATCHER_INTERFACE_H
| 984 | 26.361111 | 70 | h |
null | ceph-main/src/librbd/io/ObjectRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ObjectRequest.h"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/err.h"
#include "include/neorados/RADOS.hpp"
#include "osd/osd_types.h"
#include "librados/snap_set_diff.h"
#include "librbd/AsioEngine.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/asio/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/CopyupRequest.h"
#include "librbd/io/ImageRequest.h"
#include "librbd/io/Utils.h"
#include <boost/optional.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ObjectRequest: " << this \
<< " " << __func__ << ": " \
<< data_object_name(this->m_ictx, \
this->m_object_no) << " "
namespace librbd {
namespace io {
using librbd::util::data_object_name;
using librbd::util::create_context_callback;
using librbd::util::create_trace;
namespace {
// Copy-on-read is only applicable for writable clones whose I/O context
// targets HEAD, and (when the exclusive lock exists) only on the lock
// owner. All checks are performed under the image lock.
template <typename I>
inline bool is_copy_on_read(I *ictx, const IOContext& io_context) {
  std::shared_lock image_locker{ictx->image_lock};
  if (!ictx->clone_copy_on_read || ictx->read_only) {
    return false;
  }
  if (io_context->read_snap().value_or(CEPH_NOSNAP) != CEPH_NOSNAP) {
    // reads from a snapshot never trigger copy-on-read
    return false;
  }
  return (ictx->exclusive_lock == nullptr ||
          ictx->exclusive_lock->is_lock_owner());
}
// Copy a snap-set structure field by field. The source and destination
// types are unrelated templates that merely share member names
// (seq + clones[{cloneid, snaps, overlap, size}]).
template <typename S, typename D>
void convert_snap_set(const S& src_snap_set,
                      D* dst_snap_set) {
  dst_snap_set->seq = src_snap_set.seq;
  dst_snap_set->clones.reserve(src_snap_set.clones.size());
  for (const auto& src_clone : src_snap_set.clones) {
    // C++17: emplace_back returns a reference to the new element
    auto& dst_clone = dst_snap_set->clones.emplace_back();
    dst_clone.cloneid = src_clone.cloneid;
    dst_clone.snaps = src_clone.snaps;
    dst_clone.overlap = src_clone.overlap;
    dst_clone.size = src_clone.size;
  }
}
} // anonymous namespace
// Factory for an object write request. The returned request owns itself
// and is destroyed after it completes.
template <typename I>
ObjectRequest<I>*
ObjectRequest<I>::create_write(
    I *ictx, uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
    IOContext io_context, int op_flags, int write_flags,
    std::optional<uint64_t> assert_version,
    const ZTracer::Trace &parent_trace, Context *completion) {
  auto* req = new ObjectWriteRequest<I>(
    ictx, object_no, object_off, std::move(data), io_context, op_flags,
    write_flags, assert_version, parent_trace, completion);
  return req;
}
// Factory for an object discard request. The returned request owns itself
// and is destroyed after it completes.
template <typename I>
ObjectRequest<I>*
ObjectRequest<I>::create_discard(
    I *ictx, uint64_t object_no, uint64_t object_off, uint64_t object_len,
    IOContext io_context, int discard_flags,
    const ZTracer::Trace &parent_trace, Context *completion) {
  auto* req = new ObjectDiscardRequest<I>(
    ictx, object_no, object_off, object_len, io_context, discard_flags,
    parent_trace, completion);
  return req;
}
// Factory for an object write-same request. The returned request owns
// itself and is destroyed after it completes.
template <typename I>
ObjectRequest<I>*
ObjectRequest<I>::create_write_same(
    I *ictx, uint64_t object_no, uint64_t object_off, uint64_t object_len,
    ceph::bufferlist&& data, IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, Context *completion) {
  auto* req = new ObjectWriteSameRequest<I>(
    ictx, object_no, object_off, object_len, std::move(data), io_context,
    op_flags, parent_trace, completion);
  return req;
}
// Factory for an object compare-and-write request. The returned request
// owns itself and is destroyed after it completes.
template <typename I>
ObjectRequest<I>*
ObjectRequest<I>::create_compare_and_write(
    I *ictx, uint64_t object_no, uint64_t object_off,
    ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data,
    IOContext io_context, uint64_t *mismatch_offset, int op_flags,
    const ZTracer::Trace &parent_trace, Context *completion) {
  auto* req = new ObjectCompareAndWriteRequest<I>(
    ictx, object_no, object_off, std::move(cmp_data), std::move(write_data),
    io_context, mismatch_offset, op_flags, parent_trace, completion);
  return req;
}
// Base constructor: records the target object / IO context and creates a
// child trace span named "<trace_name> <object name>".
template <typename I>
ObjectRequest<I>::ObjectRequest(
    I *ictx, uint64_t objectno, IOContext io_context,
    const char *trace_name, const ZTracer::Trace &trace, Context *completion)
  : m_ictx(ictx), m_object_no(objectno), m_io_context(io_context),
    m_completion(completion),
    m_trace(create_trace(*ictx, "", trace)) {
  // a valid data pool context is a hard prerequisite for any object I/O
  ceph_assert(m_ictx->data_ctx.is_valid());
  if (m_trace.valid()) {
    m_trace.copy_name(trace_name + std::string(" ") +
                      data_object_name(ictx, objectno));
    m_trace.event("start");
  }
}
// Translate the image-level allocation hint settings into a per-op rados
// allocation hint on the supplied write op.
template <typename I>
void ObjectRequest<I>::add_write_hint(I& image_ctx, neorados::WriteOp* wr) {
  auto hint_flags = static_cast<neorados::alloc_hint::alloc_hint_t>(
    image_ctx.alloc_hint_flags);
  if (image_ctx.enable_alloc_hint) {
    // hint both expected object size and expected write size
    wr->set_alloc_hint(image_ctx.get_object_size(),
                       image_ctx.get_object_size(),
                       hint_flags);
    return;
  }
  if (image_ctx.alloc_hint_flags != 0U) {
    // size hints disabled but flags still requested
    wr->set_alloc_hint(0, 0, hint_flags);
  }
}
// Compute which portion of this object overlaps with the parent image.
// On success (returns true) *parent_extents holds the overlapping image
// extents and *area the image area they belong to; also sets m_has_parent.
// Returns false when there is no overlap or the overlap can't be queried.
// Caller must hold m_ictx->image_lock.
template <typename I>
bool ObjectRequest<I>::compute_parent_extents(Extents *parent_extents,
                                              ImageArea *area,
                                              bool read_request) {
  ceph_assert(ceph_mutex_is_locked(m_ictx->image_lock));
  m_has_parent = false;
  parent_extents->clear();
  *area = ImageArea::DATA;
  uint64_t raw_overlap;
  int r = m_ictx->get_parent_overlap(
      m_io_context->read_snap().value_or(CEPH_NOSNAP), &raw_overlap);
  if (r < 0) {
    // NOTE: it's possible for a snapshot to be deleted while we are
    // still reading from it
    lderr(m_ictx->cct) << "failed to retrieve parent overlap: "
                       << cpp_strerror(r) << dendl;
    return false;
  }
  // writes during a live migration use the migration overlap instead of
  // the parent overlap
  bool migration_write = !read_request && !m_ictx->migration_info.empty();
  if (migration_write) {
    raw_overlap = m_ictx->migration_info.overlap;
  }
  if (raw_overlap == 0) {
    return false;
  }
  // map the full object back to image extents, then prune to the overlap
  std::tie(*parent_extents, *area) = io::util::object_to_area_extents(
      m_ictx, m_object_no, {{0, m_ictx->layout.object_size}});
  uint64_t object_overlap = m_ictx->prune_parent_extents(
      *parent_extents, *area, raw_overlap, migration_write);
  if (object_overlap > 0) {
    m_has_parent = true;
    return true;
  }
  return false;
}
// Complete the request on an asio thread instead of the current one
// (avoids re-entrant lock acquisition in the completion path).
template <typename I>
void ObjectRequest<I>::async_finish(int r) {
  ldout(m_ictx->cct, 20) << "r=" << r << dendl;
  m_ictx->asio_engine->post([this, r]() { finish(r); });
}
// Complete the caller's context and destroy this self-owned request;
// no member access is valid after this returns.
template <typename I>
void ObjectRequest<I>::finish(int r) {
  ldout(m_ictx->cct, 20) << "r=" << r << dendl;
  m_completion->complete(r);
  delete this;
}
/** read **/
// Read request constructor: 'extents' receives the read data/extent maps,
// 'version' (optional) receives the object version observed by the read.
template <typename I>
ObjectReadRequest<I>::ObjectReadRequest(
    I *ictx, uint64_t objectno, ReadExtents* extents,
    IOContext io_context, int op_flags, int read_flags,
    const ZTracer::Trace &parent_trace, uint64_t* version,
    Context *completion)
  : ObjectRequest<I>(ictx, objectno, io_context, "read", parent_trace,
                     completion),
    m_extents(extents), m_op_flags(op_flags),m_read_flags(read_flags),
    m_version(version) {
}
// Entry point: start by attempting to read the backing object itself.
template <typename I>
void ObjectReadRequest<I>::send() {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << dendl;
  read_object();
}
// Issue the rados read for all requested extents; continues to
// handle_read_object. If the object map says the object can't exist,
// short-circuits straight to the parent read.
template <typename I>
void ObjectReadRequest<I>::read_object() {
  I *image_ctx = this->m_ictx;
  std::shared_lock image_locker{image_ctx->image_lock};
  auto read_snap_id = this->m_io_context->read_snap().value_or(CEPH_NOSNAP);
  if (read_snap_id == image_ctx->snap_id &&
      image_ctx->object_map != nullptr &&
      !image_ctx->object_map->object_may_exist(this->m_object_no)) {
    // object map guarantees the object is absent -- skip the rados op
    // (posted to avoid invoking the callback with image_lock held)
    image_ctx->asio_engine->post([this]() { read_parent(); });
    return;
  }
  image_locker.unlock();
  ldout(image_ctx->cct, 20) << "snap_id=" << read_snap_id << dendl;
  neorados::ReadOp read_op;
  for (auto& extent: *this->m_extents) {
    // large reads use sparse-read to skip holes; small reads use plain read
    if (extent.length >= image_ctx->sparse_read_threshold_bytes) {
      read_op.sparse_read(extent.offset, extent.length, &extent.bl,
                          &extent.extent_map);
    } else {
      read_op.read(extent.offset, extent.length, &extent.bl);
    }
  }
  util::apply_op_flags(
    m_op_flags, image_ctx->get_read_flags(read_snap_id), &read_op);
  image_ctx->rados_api.execute(
    {data_object_name(this->m_ictx, this->m_object_no)},
    *this->m_io_context, std::move(read_op), nullptr,
    librbd::asio::util::get_callback_adapter(
      [this](int r) { handle_read_object(r); }), m_version,
    (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
}
// Completion handler for the object read: success finishes the request,
// -ENOENT falls back to the parent image, anything else is fatal.
template <typename I>
void ObjectReadRequest<I>::handle_read_object(int r) {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << "r=" << r << dendl;
  if (m_version != nullptr) {
    ldout(image_ctx->cct, 20) << "version=" << *m_version << dendl;
  }

  if (r >= 0) {
    this->finish(0);
    return;
  }

  if (r == -ENOENT) {
    // the child object is unwritten -- try to satisfy the read from the
    // parent image instead
    read_parent();
    return;
  }

  lderr(image_ctx->cct) << "failed to read from object: "
                        << cpp_strerror(r) << dendl;
  this->finish(r);
}
// Issue the read against the parent image (clone fall-through).
template <typename I>
void ObjectReadRequest<I>::read_parent() {
  // callers may explicitly disable parent fall-through
  if ((m_read_flags & READ_FLAG_DISABLE_READ_FROM_PARENT) != 0) {
    this->finish(-ENOENT);
    return;
  }

  auto image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << dendl;

  auto read_snap = this->m_io_context->read_snap().value_or(CEPH_NOSNAP);
  auto ctx = create_context_callback<
    ObjectReadRequest<I>, &ObjectReadRequest<I>::handle_read_parent>(this);
  io::util::read_parent<I>(image_ctx, this->m_object_no, this->m_extents,
                           read_snap, this->m_trace, ctx);
}
// Completion handler for the parent read: on success attempt a
// copy-on-read; -ENOENT and other errors finish the request.
template <typename I>
void ObjectReadRequest<I>::handle_read_parent(int r) {
  auto image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    copyup();
    return;
  }

  if (r != -ENOENT) {
    lderr(image_ctx->cct) << "failed to read parent extents: "
                          << cpp_strerror(r) << dendl;
  }
  this->finish(r);
}
// Copy-on-read: after serving the read from the parent, optionally kick
// off a background copyup so future reads hit the child object directly.
// The read itself is always completed with success regardless of whether
// a copyup was started.
template <typename I>
void ObjectReadRequest<I>::copyup() {
  I *image_ctx = this->m_ictx;
  if (!is_copy_on_read(image_ctx, this->m_io_context)) {
    this->finish(0);
    return;
  }

  // lock order: owner_lock before image_lock
  image_ctx->owner_lock.lock_shared();
  image_ctx->image_lock.lock_shared();
  Extents parent_extents;
  ImageArea area;
  if (!this->compute_parent_extents(&parent_extents, &area, true) ||
      (image_ctx->exclusive_lock != nullptr &&
       !image_ctx->exclusive_lock->is_lock_owner())) {
    // no parent overlap, or this client no longer owns the exclusive
    // lock -- skip the copyup
    image_ctx->image_lock.unlock_shared();
    image_ctx->owner_lock.unlock_shared();
    this->finish(0);
    return;
  }

  ldout(image_ctx->cct, 20) << dendl;

  // at most one in-flight CopyupRequest per object number
  image_ctx->copyup_list_lock.lock();
  auto it = image_ctx->copyup_list.find(this->m_object_no);
  if (it == image_ctx->copyup_list.end()) {
    // create and kick off a CopyupRequest
    auto new_req = CopyupRequest<I>::create(
      image_ctx, this->m_object_no, std::move(parent_extents), area,
      this->m_trace);
    image_ctx->copyup_list[this->m_object_no] = new_req;

    image_ctx->copyup_list_lock.unlock();
    image_ctx->image_lock.unlock_shared();
    new_req->send();
  } else {
    // a copyup for this object is already pending; nothing to add
    image_ctx->copyup_list_lock.unlock();
    image_ctx->image_lock.unlock_shared();
  }

  image_ctx->owner_lock.unlock_shared();
  this->finish(0);
}
/** write **/
// Construct a write-path request; precomputes whether the write covers the
// full object, the parent overlap info, and whether migration writes must
// be guarded.
template <typename I>
AbstractObjectWriteRequest<I>::AbstractObjectWriteRequest(
    I *ictx, uint64_t object_no, uint64_t object_off, uint64_t len,
    IOContext io_context, const char *trace_name,
    const ZTracer::Trace &parent_trace, Context *completion)
  : ObjectRequest<I>(ictx, object_no, io_context, trace_name, parent_trace,
                     completion),
    m_object_off(object_off), m_object_len(len)
{
  // a write spanning the entire object can sometimes skip copyup
  m_full_object = (object_off == 0 && len == ictx->get_object_size());

  compute_parent_info();

  {
    std::shared_lock image_locker{ictx->image_lock};
    m_guarding_migration_write = !ictx->migration_info.empty();
  }
}
// Refresh the cached parent overlap extents and decide whether copyup is
// needed for this write.
template <typename I>
void AbstractObjectWriteRequest<I>::compute_parent_info() {
  auto image_ctx = this->m_ictx;
  std::shared_lock image_locker{image_ctx->image_lock};

  this->compute_parent_extents(&m_parent_extents, &m_image_area, false);

  // copyup is pointless when there is no parent; it's also unnecessary for
  // a full-object overwrite that has no snapshot context and no
  // post-copyup write requirement
  const bool skip_copyup =
    !this->has_parent() ||
    (m_full_object &&
     !this->m_io_context->write_snap_context() &&
     !is_post_copyup_write_required());
  if (skip_copyup) {
    m_copyup_enabled = false;
  }
}
// Attach an allocation hint only when allocation may actually occur: no
// object map, the object may not exist yet, or custom alloc-hint flags are
// configured on the image.
template <typename I>
void AbstractObjectWriteRequest<I>::add_write_hint(
    neorados::WriteOp *wr) {
  auto image_ctx = this->m_ictx;
  std::shared_lock image_locker{image_ctx->image_lock};

  const bool hint_needed = (image_ctx->object_map == nullptr ||
                            !this->m_object_may_exist ||
                            image_ctx->alloc_hint_flags != 0U);
  if (hint_needed) {
    ObjectRequest<I>::add_write_hint(*image_ctx, wr);
  }
}
// Entry point for all write-path requests: consult the object map for
// object existence, short-circuit no-ops on nonexistent objects, then
// proceed to the pre-write object map update.
template <typename I>
void AbstractObjectWriteRequest<I>::send() {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << this->get_op_type() << " "
                            << this->m_object_off << "~" << this->m_object_len
                            << dendl;
  {
    std::shared_lock image_lock{image_ctx->image_lock};
    if (image_ctx->object_map != nullptr) {
      // should have been flushed prior to releasing lock
      ceph_assert(image_ctx->exclusive_lock->is_lock_owner());
      m_object_may_exist =
        image_ctx->object_map->object_may_exist(this->m_object_no);
    } else {
      // without an object map we must assume the object exists
      m_object_may_exist = true;
    }
  }

  if (!m_object_may_exist && is_no_op_for_nonexistent_object()) {
    ldout(image_ctx->cct, 20) << "skipping no-op on nonexistent object"
                              << dendl;
    this->async_finish(0);
    return;
  }

  pre_write_object_map_update();
}
// Mark the object map entry (e.g. EXISTS / PENDING) before touching the
// data object so crash recovery can reconcile the map.
template <typename I>
void AbstractObjectWriteRequest<I>::pre_write_object_map_update() {
  I *image_ctx = this->m_ictx;

  image_ctx->image_lock.lock_shared();
  if (image_ctx->object_map == nullptr || !is_object_map_update_enabled()) {
    // no object map (or updates disabled for this op) -- write directly
    image_ctx->image_lock.unlock_shared();
    write_object();
    return;
  }

  if (!m_object_may_exist && m_copyup_enabled) {
    // optimization: copyup required
    image_ctx->image_lock.unlock_shared();
    copyup();
    return;
  }

  uint8_t new_state = this->get_pre_write_object_map_state();
  ldout(image_ctx->cct, 20) << this->m_object_off << "~" << this->m_object_len
                            << dendl;

  // aio_update() returning true means an async update was queued and
  // handle_pre_write_object_map_update() will resume the state machine;
  // false presumably means no update was needed -- TODO confirm against
  // ObjectMap::aio_update()
  if (image_ctx->object_map->template aio_update<
        AbstractObjectWriteRequest<I>,
        &AbstractObjectWriteRequest<I>::handle_pre_write_object_map_update>(
          CEPH_NOSNAP, this->m_object_no, new_state, {}, this->m_trace, false,
          this)) {
    image_ctx->image_lock.unlock_shared();
    return;
  }

  image_ctx->image_lock.unlock_shared();
  write_object();
}
// Completion handler for the pre-write object map update.
template <typename I>
void AbstractObjectWriteRequest<I>::handle_pre_write_object_map_update(int r) {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    write_object();
    return;
  }

  lderr(image_ctx->cct) << "failed to update object map: "
                        << cpp_strerror(r) << dendl;
  this->finish(r);
}
// Assemble and dispatch the rados write op.  When copyup may still be
// needed the op is guarded so a miss is detected (-ENOENT / -ERANGE in
// handle_write_object()) and the copyup path is taken.
template <typename I>
void AbstractObjectWriteRequest<I>::write_object() {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << dendl;

  neorados::WriteOp write_op;
  if (m_copyup_enabled) {
    if (m_guarding_migration_write) {
      // migration source still attached: assert on the snapc sequence so
      // a racing migration copyup is detected via -ERANGE
      auto snap_seq = (this->m_io_context->write_snap_context() ?
          this->m_io_context->write_snap_context()->first : 0);
      ldout(image_ctx->cct, 20) << "guarding write: snap_seq=" << snap_seq
                                << dendl;

      cls_client::assert_snapc_seq(
        &write_op, snap_seq, cls::rbd::ASSERT_SNAPC_SEQ_LE_SNAPSET_SEQ);
    } else {
      // plain clone: fail with -ENOENT if the object hasn't been copied up
      ldout(image_ctx->cct, 20) << "guarding write" << dendl;
      write_op.assert_exists();
    }
  }

  add_write_hint(&write_op);
  add_write_ops(&write_op);
  ceph_assert(write_op.size() != 0);

  image_ctx->rados_api.execute(
    {data_object_name(this->m_ictx, this->m_object_no)},
    *this->m_io_context, std::move(write_op),
    librbd::asio::util::get_callback_adapter(
      [this](int r) { handle_write_object(r); }), nullptr,
    (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
}
// Completion handler for the guarded write; routes guard failures to the
// copyup / restart paths.
template <typename I>
void AbstractObjectWriteRequest<I>::handle_write_object(int r) {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << "r=" << r << dendl;

  r = filter_write_result(r);
  if (r == -ENOENT) {
    // assert_exists() guard tripped: object not copied up yet
    // NOTE: with copyup disabled, -ENOENT falls through to the post-write
    // object map update
    if (m_copyup_enabled) {
      copyup();
      return;
    }
  } else if (r == -ERANGE && m_guarding_migration_write) {
    // snapc-seq guard tripped: either the migration is still in progress
    // (copyup required) or the migration source is gone (restart the io)
    image_ctx->image_lock.lock_shared();
    m_guarding_migration_write = !image_ctx->migration_info.empty();
    image_ctx->image_lock.unlock_shared();

    if (m_guarding_migration_write) {
      copyup();
    } else {
      ldout(image_ctx->cct, 10) << "migration parent gone, restart io" << dendl;
      compute_parent_info();
      write_object();
    }
    return;
  } else if (r == -EILSEQ) {
    // compare-and-write content mismatch (see filter_write_result())
    ldout(image_ctx->cct, 10) << "failed to write object" << dendl;
    this->finish(r);
    return;
  } else if (r < 0) {
    lderr(image_ctx->cct) << "failed to write object: " << cpp_strerror(r)
                          << dendl;
    this->finish(r);
    return;
  }

  post_write_object_map_update();
}
// Chain this write onto the (single) in-flight CopyupRequest for the
// object, creating one if necessary.  handle_copyup() resumes the state
// machine once the copyup completes.
template <typename I>
void AbstractObjectWriteRequest<I>::copyup() {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << dendl;

  ceph_assert(!m_copyup_in_progress);
  m_copyup_in_progress = true;

  image_ctx->copyup_list_lock.lock();
  auto it = image_ctx->copyup_list.find(this->m_object_no);
  if (it == image_ctx->copyup_list.end()) {
    auto new_req = CopyupRequest<I>::create(
      image_ctx, this->m_object_no, std::move(this->m_parent_extents),
      m_image_area, this->m_trace);
    // parent extents were moved into the CopyupRequest
    this->m_parent_extents.clear();

    // make sure to wait on this CopyupRequest
    new_req->append_request(this, std::move(get_copyup_overwrite_extents()));
    image_ctx->copyup_list[this->m_object_no] = new_req;

    image_ctx->copyup_list_lock.unlock();
    new_req->send();
  } else {
    // piggy-back on the already-pending copyup for this object
    it->second->append_request(this, std::move(get_copyup_overwrite_extents()));
    image_ctx->copyup_list_lock.unlock();
  }
}
// Resume the write after the shared CopyupRequest finishes.
template <typename I>
void AbstractObjectWriteRequest<I>::handle_copyup(int r) {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << "r=" << r << dendl;

  ceph_assert(m_copyup_in_progress);
  m_copyup_in_progress = false;

  if (r == -ERESTART) {
    // the copyup deferred our write -- reissue it ourselves
    write_object();
    return;
  }

  if (r < 0) {
    lderr(image_ctx->cct) << "failed to copyup object: " << cpp_strerror(r)
                          << dendl;
    this->finish(r);
    return;
  }

  if (is_post_copyup_write_required()) {
    write_object();
    return;
  }

  post_write_object_map_update();
}
// Transition the object map entry back to OBJECT_NONEXISTENT after a
// remove-style op (e.g. a whole-object discard); all other ops finish
// immediately.
template <typename I>
void AbstractObjectWriteRequest<I>::post_write_object_map_update() {
  I *image_ctx = this->m_ictx;

  image_ctx->image_lock.lock_shared();
  if (image_ctx->object_map == nullptr || !is_object_map_update_enabled() ||
      !is_non_existent_post_write_object_map_state()) {
    image_ctx->image_lock.unlock_shared();
    this->finish(0);
    return;
  }

  ldout(image_ctx->cct, 20) << dendl;

  // should have been flushed prior to releasing lock
  ceph_assert(image_ctx->exclusive_lock->is_lock_owner());

  // true return: async OBJECT_PENDING -> OBJECT_NONEXISTENT update queued;
  // handle_post_write_object_map_update() completes the request
  if (image_ctx->object_map->template aio_update<
        AbstractObjectWriteRequest<I>,
        &AbstractObjectWriteRequest<I>::handle_post_write_object_map_update>(
          CEPH_NOSNAP, this->m_object_no, OBJECT_NONEXISTENT, OBJECT_PENDING,
          this->m_trace, false, this)) {
    image_ctx->image_lock.unlock_shared();
    return;
  }

  image_ctx->image_lock.unlock_shared();
  this->finish(0);
}
// Completion handler for the post-write object map update; the request
// finishes here either way.
template <typename I>
void AbstractObjectWriteRequest<I>::handle_post_write_object_map_update(int r) {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(image_ctx->cct) << "failed to update object map: "
                          << cpp_strerror(r) << dendl;
  }
  this->finish(r < 0 ? r : 0);
}
// Prepend exclusive-create or version-assertion guards before the base
// class allocation hint.
template <typename I>
void ObjectWriteRequest<I>::add_write_hint(neorados::WriteOp* wr) {
  const bool create_exclusive =
    (m_write_flags & OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE) != 0;
  if (create_exclusive) {
    wr->create(true);
  } else if (m_assert_version) {
    wr->assert_version(*m_assert_version);
  }
  AbstractObjectWriteRequest<I>::add_write_hint(wr);
}
// Append the actual data write: full-object writes use write_full().
template <typename I>
void ObjectWriteRequest<I>::add_write_ops(neorados::WriteOp* wr) {
  bufferlist data{m_write_data};
  if (this->m_full_object) {
    wr->write_full(std::move(data));
  } else {
    wr->write(this->m_object_off, std::move(data));
  }
  util::apply_op_flags(m_op_flags, 0U, wr);
}
// Translate the discard action (chosen in the constructor) into rados ops.
template <typename I>
void ObjectDiscardRequest<I>::add_write_ops(neorados::WriteOp* wr) {
  switch (m_discard_action) {
  case DISCARD_ACTION_REMOVE:
    wr->remove();
    break;
  case DISCARD_ACTION_REMOVE_TRUNCATE:
    // ensure the object exists so the truncate hides parent data
    wr->create(false);
    [[fallthrough]];
  case DISCARD_ACTION_TRUNCATE:
    wr->truncate(this->m_object_off);
    break;
  case DISCARD_ACTION_ZERO:
    wr->zero(this->m_object_off, this->m_object_len);
    break;
  default:
    ceph_abort();
    break;
  }
}
// Replicate the data pattern across the [off, off+len) range.
template <typename I>
void ObjectWriteSameRequest<I>::add_write_ops(neorados::WriteOp* wr) {
  bufferlist pattern{m_write_data};
  wr->writesame(this->m_object_off, this->m_object_len, std::move(pattern));
  util::apply_op_flags(m_op_flags, 0U, wr);
}
// Verify the existing object content (cmpext) before writing new data.
template <typename I>
void ObjectCompareAndWriteRequest<I>::add_write_ops(neorados::WriteOp* wr) {
  wr->cmpext(this->m_object_off, bufferlist{m_cmp_bl}, nullptr);

  bufferlist write_data{m_write_bl};
  if (this->m_full_object) {
    wr->write_full(std::move(write_data));
  } else {
    wr->write(this->m_object_off, std::move(write_data));
  }
  util::apply_op_flags(m_op_flags, 0U, wr);
}
// Decode cmpext's mismatch encoding (-MAX_ERRNO - offset) into -EILSEQ
// plus an image-relative mismatch offset for the caller.
template <typename I>
int ObjectCompareAndWriteRequest<I>::filter_write_result(int r) const {
  if (r > -MAX_ERRNO) {
    return r;
  }

  // object extent compare mismatch
  I *image_ctx = this->m_ictx;
  uint64_t object_offset = -MAX_ERRNO - r;
  auto [image_extents, _] = io::util::object_to_area_extents(
    image_ctx, this->m_object_no, {{object_offset, this->m_object_len}});
  ceph_assert(image_extents.size() == 1);

  if (m_mismatch_offset != nullptr) {
    *m_mismatch_offset = image_extents[0].first;
  }
  return -EILSEQ;
}
// Construct a request that computes per-snapshot deltas for this object.
// `snapshot_delta` is a caller-owned out-param populated on completion.
template <typename I>
ObjectListSnapsRequest<I>::ObjectListSnapsRequest(
    I *ictx, uint64_t objectno, Extents&& object_extents, SnapIds&& snap_ids,
    int list_snaps_flags, const ZTracer::Trace &parent_trace,
    SnapshotDelta* snapshot_delta, Context *completion)
  : ObjectRequest<I>(
      ictx, objectno, ictx->duplicate_data_io_context(), "snap_list",
      parent_trace, completion),
    m_object_extents(std::move(object_extents)),
    m_snap_ids(std::move(snap_ids)), m_list_snaps_flags(list_snaps_flags),
    m_snapshot_delta(snapshot_delta) {
  // list-snaps reads the whole snapshot directory, not a single revision
  this->m_io_context->read_snap(CEPH_SNAPDIR);
}
// Entry point: validate the snap id range, then issue the snap listing.
template <typename I>
void ObjectListSnapsRequest<I>::send() {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << dendl;

  // a delta needs at least a start and an end snapshot id
  if (m_snap_ids.size() >= 2) {
    list_snaps();
    return;
  }

  lderr(image_ctx->cct) << "invalid snap ids: " << m_snap_ids << dendl;
  this->async_finish(-EINVAL);
}
// Issue the snap-set listing against the data object.
template <typename I>
void ObjectListSnapsRequest<I>::list_snaps() {
  I *image_ctx = this->m_ictx;
  ldout(image_ctx->cct, 20) << dendl;

  neorados::ReadOp op;
  op.list_snaps(&m_snap_set, &m_ec);

  image_ctx->rados_api.execute(
    {data_object_name(this->m_ictx, this->m_object_no)},
    *this->m_io_context, std::move(op), nullptr,
    librbd::asio::util::get_callback_adapter(
      [this](int r) { handle_list_snaps(r); }), nullptr,
    (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
}
// Convert the object's raw snap set into per-snapshot-interval DATA /
// ZEROED sparse extents, clipped to the requested object extents.
template <typename I>
void ObjectListSnapsRequest<I>::handle_list_snaps(int r) {
  I *image_ctx = this->m_ictx;
  auto cct = image_ctx->cct;

  // neorados surfaces the op result via the error_code out-param
  if (r >= 0) {
    r = -m_ec.value();
  }

  ldout(cct, 20) << "r=" << r << dendl;

  m_snapshot_delta->clear();
  auto& snapshot_delta = *m_snapshot_delta;

  ceph_assert(!m_snap_ids.empty());
  librados::snap_t start_snap_id = 0;
  librados::snap_t first_snap_id = *m_snap_ids.begin();
  librados::snap_t last_snap_id = *m_snap_ids.rbegin();

  if (r == -ENOENT) {
    // the object does not exist -- mark the missing extents
    zero_extent(first_snap_id, true);
    list_from_parent();
    return;
  } else if (r < 0) {
    lderr(cct) << "failed to retrieve object snapshot list: " << cpp_strerror(r)
               << dendl;
    this->finish(r);
    return;
  }

  // helper function requires the librados legacy data structure
  librados::snap_set_t snap_set;
  convert_snap_set(m_snap_set, &snap_set);

  bool initial_extents_written = false;

  // union of all extents the caller asked about within this object
  interval_set<uint64_t> object_interval;
  for (auto& object_extent : m_object_extents) {
    object_interval.insert(object_extent.first, object_extent.second);
  }
  ldout(cct, 20) << "object_interval=" << object_interval << dendl;

  // loop through all expected snapshots and build interval sets for
  // data and zeroed ranges for each snapshot
  uint64_t prev_end_size = 0;
  interval_set<uint64_t> initial_written_extents;
  for (auto end_snap_id : m_snap_ids) {
    if (start_snap_id == end_snap_id) {
      continue;
    } else if (end_snap_id > last_snap_id) {
      break;
    }

    interval_set<uint64_t> diff;
    uint64_t end_size;
    bool exists;
    librados::snap_t clone_end_snap_id;
    bool read_whole_object;
    calc_snap_set_diff(cct, snap_set, start_snap_id,
                       end_snap_id, &diff, &end_size, &exists,
                       &clone_end_snap_id, &read_whole_object);

    if (read_whole_object ||
        (!diff.empty() &&
         ((m_list_snaps_flags & LIST_SNAPS_FLAG_WHOLE_OBJECT) != 0))) {
      // caller requested whole-object granularity (or the diff cannot be
      // trusted): treat the entire object as changed
      ldout(cct, 1) << "need to read full object" << dendl;
      diff.clear();
      diff.insert(0, image_ctx->layout.object_size);
      end_size = image_ctx->layout.object_size;
      clone_end_snap_id = end_snap_id;
    } else if (!exists) {
      end_size = 0;
    }

    if (exists) {
      // reads should be issued against the newest (existing) snapshot within
      // the associated snapshot object clone. writes should be issued
      // against the oldest snapshot in the snap_map.
      ceph_assert(clone_end_snap_id >= end_snap_id);
      if (clone_end_snap_id > last_snap_id) {
        // do not read past the copy point snapshot
        clone_end_snap_id = last_snap_id;
      }
    }

    // clip diff to current object extent
    interval_set<uint64_t> diff_interval;
    diff_interval.intersection_of(object_interval, diff);

    // clip diff to size of object (in case it was truncated)
    interval_set<uint64_t> zero_interval;
    if (end_size < prev_end_size) {
      zero_interval.insert(end_size, prev_end_size - end_size);
      zero_interval.intersection_of(object_interval);

      interval_set<uint64_t> trunc_interval;
      trunc_interval.intersection_of(zero_interval, diff_interval);
      if (!trunc_interval.empty()) {
        diff_interval.subtract(trunc_interval);
        ldout(cct, 20) << "clearing truncate diff: " << trunc_interval << dendl;
      }
    }

    ldout(cct, 20) << "start_snap_id=" << start_snap_id << ", "
                   << "end_snap_id=" << end_snap_id << ", "
                   << "clone_end_snap_id=" << clone_end_snap_id << ", "
                   << "diff=" << diff << ", "
                   << "diff_interval=" << diff_interval<< ", "
                   << "zero_interval=" << zero_interval<< ", "
                   << "end_size=" << end_size << ", "
                   << "prev_end_size=" << prev_end_size << ", "
                   << "exists=" << exists << ", "
                   << "whole_object=" << read_whole_object << dendl;

    // check if object exists prior to start of incremental snap delta so that
    // we don't DNE the object if no additional deltas exist
    if (exists && start_snap_id == 0 &&
        (!diff_interval.empty() || !zero_interval.empty())) {
      ldout(cct, 20) << "object exists at snap id " << end_snap_id << dendl;
      initial_extents_written = true;
    }

    prev_end_size = end_size;
    start_snap_id = end_snap_id;

    if (end_snap_id <= first_snap_id) {
      // don't include deltas from the starting snapshots, but we iterate over
      // it to track its existence and size
      ldout(cct, 20) << "skipping prior snapshot " << dendl;
      continue;
    }

    if (exists) {
      for (auto& interval : diff_interval) {
        snapshot_delta[{end_snap_id, clone_end_snap_id}].insert(
          interval.first, interval.second,
          SparseExtent(SPARSE_EXTENT_STATE_DATA, interval.second));
      }
    } else {
      // object clone gone: the diffed ranges read back as zeroes
      zero_interval.union_of(diff_interval);
    }

    if ((m_list_snaps_flags & LIST_SNAPS_FLAG_IGNORE_ZEROED_EXTENTS) == 0) {
      for (auto& interval : zero_interval) {
        snapshot_delta[{end_snap_id, end_snap_id}].insert(
          interval.first, interval.second,
          SparseExtent(SPARSE_EXTENT_STATE_ZEROED, interval.second));
      }
    }
  }

  bool snapshot_delta_empty = snapshot_delta.empty();
  if (!initial_extents_written) {
    // object untouched as of the first snapshot: record DNE/zeroed extents
    zero_extent(first_snap_id, first_snap_id > 0);
  }
  ldout(cct, 20) << "snapshot_delta=" << snapshot_delta << dendl;

  if (snapshot_delta_empty) {
    // nothing changed within this object -- defer to the parent image
    list_from_parent();
    return;
  }

  this->finish(0);
}
// For clones: when the delta starts at the beginning of time, recurse into
// the parent image to pick up inherited data within the overlap.
template <typename I>
void ObjectListSnapsRequest<I>::list_from_parent() {
  I *image_ctx = this->m_ictx;
  auto cct = image_ctx->cct;

  ceph_assert(!m_snap_ids.empty());
  librados::snap_t snap_id_start = *m_snap_ids.begin();
  librados::snap_t snap_id_end = *m_snap_ids.rbegin();

  std::unique_lock image_locker{image_ctx->image_lock};
  // skip when the range doesn't start at 0, no parent is attached, or the
  // caller opted out
  if ((snap_id_start > 0) || (image_ctx->parent == nullptr) ||
      ((m_list_snaps_flags & LIST_SNAPS_FLAG_DISABLE_LIST_FROM_PARENT) != 0)) {
    image_locker.unlock();

    this->finish(0);
    return;
  }

  Extents parent_extents;
  uint64_t raw_overlap = 0;
  uint64_t object_overlap = 0;
  image_ctx->get_parent_overlap(snap_id_end, &raw_overlap);
  if (raw_overlap > 0) {
    // calculate reverse mapping onto the parent image
    std::tie(parent_extents, m_image_area) = io::util::object_to_area_extents(
        image_ctx, this->m_object_no, m_object_extents);
    object_overlap = image_ctx->prune_parent_extents(
        parent_extents, m_image_area, raw_overlap, false);
  }
  if (object_overlap == 0) {
    // this object lies entirely outside the parent overlap
    image_locker.unlock();

    this->finish(0);
    return;
  }

  auto ctx = create_context_callback<
    ObjectListSnapsRequest<I>,
    &ObjectListSnapsRequest<I>::handle_list_from_parent>(this);
  auto aio_comp = AioCompletion::create_and_start(
    ctx, librbd::util::get_image_ctx(image_ctx->parent), AIO_TYPE_GENERIC);
  ldout(cct, 20) << "completion=" << aio_comp
                 << " parent_extents=" << parent_extents
                 << " area=" << m_image_area << dendl;

  // zeroed parent extents are irrelevant for the child delta
  auto list_snaps_flags = (
    m_list_snaps_flags | LIST_SNAPS_FLAG_IGNORE_ZEROED_EXTENTS);

  ImageListSnapsRequest<I> req(
    *image_ctx->parent, aio_comp, std::move(parent_extents), m_image_area,
    {0, image_ctx->parent->snap_id}, list_snaps_flags, &m_parent_snapshot_delta,
    this->m_trace);
  req.send();
}
// Fold the parent image's snapshot delta back into this object's delta
// under the INITIAL_WRITE_READ_SNAP_IDS key.
template <typename I>
void ObjectListSnapsRequest<I>::handle_list_from_parent(int r) {
  I *image_ctx = this->m_ictx;
  auto cct = image_ctx->cct;
  ldout(cct, 20) << "r=" << r << ", "
                 << "parent_snapshot_delta=" << m_parent_snapshot_delta
                 << dendl;

  // ignore special-case of fully empty dataset (we ignore zeroes)
  if (m_parent_snapshot_delta.empty()) {
    this->finish(0);
    return;
  }

  // the write/read snapshot id key is not useful for parent images so
  // map the special-case INITIAL_WRITE_READ_SNAP_IDS key
  *m_snapshot_delta = {};
  auto& intervals = (*m_snapshot_delta)[INITIAL_WRITE_READ_SNAP_IDS];
  for (auto& [key, image_extents] : m_parent_snapshot_delta) {
    for (auto image_extent : image_extents) {
      auto state = image_extent.get_val().state;

      // map image-extents back to this object
      striper::LightweightObjectExtents object_extents;
      io::util::area_to_object_extents(image_ctx, image_extent.get_off(),
                                       image_extent.get_len(), m_image_area, 0,
                                       &object_extents);
      for (auto& object_extent : object_extents) {
        // reverse mapping must land back in the same object
        ceph_assert(object_extent.object_no == this->m_object_no);
        intervals.insert(
          object_extent.offset, object_extent.length,
          {state, object_extent.length});
      }
    }
  }

  ldout(cct, 20) << "snapshot_delta=" << *m_snapshot_delta << dendl;
  this->finish(0);
}
// The object does not exist or is (partially) under whiteout: record every
// requested extent as DNE (dne=true) or zeroed for the given snapshot.
template <typename I>
void ObjectListSnapsRequest<I>::zero_extent(uint64_t snap_id, bool dne) {
  I *image_ctx = this->m_ictx;
  auto cct = image_ctx->cct;

  if ((m_list_snaps_flags & LIST_SNAPS_FLAG_IGNORE_ZEROED_EXTENTS) != 0) {
    // caller doesn't care about zeroed/missing ranges
    return;
  }

  // coalesce the requested extents before emitting them
  interval_set<uint64_t> missing;
  for (auto [object_offset, object_length] : m_object_extents) {
    missing.insert(object_offset, object_length);
  }

  auto state = (dne ? SPARSE_EXTENT_STATE_DNE : SPARSE_EXTENT_STATE_ZEROED);
  for (auto [offset, length] : missing) {
    ldout(cct, 20) << "snapshot " << snap_id << ": "
                   << (dne ? "DNE" : "zeroed") << " extent "
                   << offset << "~" << length << dendl;
    (*m_snapshot_delta)[{snap_id, snap_id}].insert(
      offset, length, SparseExtent(state, length));
  }
}
} // namespace io
} // namespace librbd

// explicit instantiations for the canonical ImageCtx type so the template
// definitions in this translation unit are emitted for external linkage
template class librbd::io::ObjectRequest<librbd::ImageCtx>;
template class librbd::io::ObjectReadRequest<librbd::ImageCtx>;
template class librbd::io::AbstractObjectWriteRequest<librbd::ImageCtx>;
template class librbd::io::ObjectWriteRequest<librbd::ImageCtx>;
template class librbd::io::ObjectDiscardRequest<librbd::ImageCtx>;
template class librbd::io::ObjectWriteSameRequest<librbd::ImageCtx>;
template class librbd::io::ObjectCompareAndWriteRequest<librbd::ImageCtx>;
template class librbd::io::ObjectListSnapsRequest<librbd::ImageCtx>;
| 34,895 | 31.49162 | 80 | cc |
null | ceph-main/src/librbd/io/ObjectRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_OBJECT_REQUEST_H
#define CEPH_LIBRBD_IO_OBJECT_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/neorados/RADOS.hpp"
#include "include/rados/librados.hpp"
#include "common/zipkin_trace.h"
#include "librbd/ObjectMap.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
#include <map>
class Context;
class ObjectExtent;
namespace neorados { struct WriteOp; }
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
template <typename> class CopyupRequest;
/**
* This class represents an I/O operation to a single RBD data object.
* Its subclasses encapsulate logic for dealing with special cases
* for I/O due to layering.
*/
template <typename ImageCtxT = ImageCtx>
class ObjectRequest {
public:
  // factory helpers returning the concrete request type for each write-path
  // operation; completion is fired exactly once when the request finishes
  static ObjectRequest* create_write(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off,
      ceph::bufferlist&& data, IOContext io_context, int op_flags,
      int write_flags, std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, Context *completion);
  static ObjectRequest* create_discard(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off,
      uint64_t object_len, IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, Context *completion);
  static ObjectRequest* create_write_same(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off,
      uint64_t object_len, ceph::bufferlist&& data, IOContext io_context,
      int op_flags, const ZTracer::Trace &parent_trace, Context *completion);
  static ObjectRequest* create_compare_and_write(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off,
      ceph::bufferlist&& cmp_data, ceph::bufferlist&& write_data,
      IOContext io_context, uint64_t *mismatch_offset, int op_flags,
      const ZTracer::Trace &parent_trace, Context *completion);

  ObjectRequest(ImageCtxT *ictx, uint64_t objectno, IOContext io_context,
                const char *trace_name, const ZTracer::Trace &parent_trace,
                Context *completion);
  virtual ~ObjectRequest() {
    m_trace.event("finish");
  }

  // add an allocation hint for the object to the given write op
  static void add_write_hint(ImageCtxT& image_ctx,
                             neorados::WriteOp *wr);

  virtual void send() = 0;

  // true once compute_parent_extents() found a non-empty parent overlap
  bool has_parent() const {
    return m_has_parent;
  }

  virtual const char *get_op_type() const = 0;

protected:
  // compute the reverse mapping of this object onto the parent image,
  // pruned to the parent overlap; updates m_has_parent
  bool compute_parent_extents(Extents *parent_extents, ImageArea *area,
                              bool read_request);

  ImageCtxT *m_ictx;
  uint64_t m_object_no;
  IOContext m_io_context;
  Context *m_completion;    // fired exactly once by finish()
  ZTracer::Trace m_trace;

  // complete via the ASIO engine (unwinds the caller's stack first)
  void async_finish(int r);
  // complete inline; deletes this request
  void finish(int r);

private:
  bool m_has_parent = false;
};
// Read request against a single data object, with optional fall-through to
// the parent image and copy-on-read.
template <typename ImageCtxT = ImageCtx>
class ObjectReadRequest : public ObjectRequest<ImageCtxT> {
public:
  static ObjectReadRequest* create(
      ImageCtxT *ictx, uint64_t objectno, ReadExtents* extents,
      IOContext io_context, int op_flags, int read_flags,
      const ZTracer::Trace &parent_trace, uint64_t* version,
      Context *completion) {
    return new ObjectReadRequest(ictx, objectno, extents, io_context, op_flags,
                                 read_flags, parent_trace, version, completion);
  }

  ObjectReadRequest(
      ImageCtxT *ictx, uint64_t objectno, ReadExtents* extents,
      IOContext io_context, int op_flags, int read_flags,
      const ZTracer::Trace &parent_trace, uint64_t* version,
      Context *completion);

  void send() override;

  const char *get_op_type() const override {
    return "read";
  }

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |
   *    v
   * READ_OBJECT
   *    |
   *    v (skip if not needed)
   * READ_PARENT
   *    |
   *    v (skip if not needed)
   * COPYUP
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ReadExtents* m_extents;   // caller-owned; filled with read data
  int m_op_flags;
  int m_read_flags;
  uint64_t* m_version;      // optional out-param for the object version

  void read_object();
  void handle_read_object(int r);

  void read_parent();
  void handle_read_parent(int r);

  void copyup();
};
// Shared state machine for all write-path object requests (write, discard,
// writesame, compare-and-write): object map pre/post updates plus copyup
// handling for clones; subclasses supply the actual rados ops.
template <typename ImageCtxT = ImageCtx>
class AbstractObjectWriteRequest : public ObjectRequest<ImageCtxT> {
public:
  AbstractObjectWriteRequest(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off, uint64_t len,
      IOContext io_context, const char *trace_name,
      const ZTracer::Trace &parent_trace, Context *completion);

  // true when the op carries no payload and can be skipped
  virtual bool is_empty_write_op() const {
    return false;
  }

  // object map state to record before the data write
  virtual uint8_t get_pre_write_object_map_state() const {
    return OBJECT_EXISTS;
  }

  // ops to replay on top of the copied-up parent data
  virtual void add_copyup_ops(neorados::WriteOp *wr) {
    add_write_ops(wr);
  }

  // resumed by CopyupRequest once the copyup completes
  void handle_copyup(int r);

  void send() override;

protected:
  uint64_t m_object_off;
  uint64_t m_object_len;
  bool m_full_object = false;     // write covers the entire object
  bool m_copyup_enabled = true;   // guard writes / copyup on miss

  // subclass hooks customizing the state machine:
  virtual bool is_no_op_for_nonexistent_object() const {
    return false;
  }
  virtual bool is_object_map_update_enabled() const {
    return true;
  }
  virtual bool is_post_copyup_write_required() const {
    return false;
  }
  virtual bool is_non_existent_post_write_object_map_state() const {
    return false;
  }

  virtual void add_write_hint(neorados::WriteOp *wr);
  virtual void add_write_ops(neorados::WriteOp *wr) = 0;

  // map raw op results (e.g. cmpext encodings) to errno values
  virtual int filter_write_result(int r) const {
    return r;
  }

  // extents this op will overwrite (used to trim the copyup data)
  virtual Extents get_copyup_overwrite_extents() const {
    return {{m_object_off, m_object_len}};
  }

private:
  /**
   * @verbatim
   *
   * <start>
   *     |
   *     v           (no-op write request)
   * DETECT_NO_OP . . . . . . . . . . . . . . . . . . .
   *     |                                             .
   *     v (skip if not required/disabled)             .
   * PRE_UPDATE_OBJECT_MAP                             .
   *     |          .                                  .
   *     |          . (child dne)                      .
   *     |          . . . . . . . . .                  .
   *     |                          .                  .
   *     |   (post-copyup write)    .                  .
   *     | . . . . . . . . . . . .  .                  .
   *     | .                     .  .                  .
   *     v v                     .  v                  .
   *   WRITE . . . . . . . . > COPYUP (if required)    .
   *     |                       |                     .
   *     |/----------------------/                     .
   *     |                                             .
   *     v (skip if not required/disabled)             .
   * POST_UPDATE_OBJECT_MAP                            .
   *     |                                             .
   *     v                                             .
   * <finish> < . . . . . . . . . . . . . . . . . . . .
   *
   * @endverbatim
   */

  Extents m_parent_extents;                 // parent overlap for copyup
  ImageArea m_image_area = ImageArea::DATA;
  bool m_object_may_exist = false;          // per the object map (if any)
  bool m_copyup_in_progress = false;
  bool m_guarding_migration_write = false;  // snapc-seq guard active

  void compute_parent_info();

  void pre_write_object_map_update();
  void handle_pre_write_object_map_update(int r);

  void write_object();
  void handle_write_object(int r);

  void copyup();

  void post_write_object_map_update();
  void handle_post_write_object_map_update(int r);
};
// Plain data write to a single object.
template <typename ImageCtxT = ImageCtx>
class ObjectWriteRequest : public AbstractObjectWriteRequest<ImageCtxT> {
public:
  ObjectWriteRequest(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off,
      ceph::bufferlist&& data, IOContext io_context, int op_flags,
      int write_flags, std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, Context *completion)
    : AbstractObjectWriteRequest<ImageCtxT>(ictx, object_no, object_off,
                                            data.length(), io_context, "write",
                                            parent_trace, completion),
      m_write_data(std::move(data)), m_op_flags(op_flags),
      m_write_flags(write_flags), m_assert_version(assert_version) {
  }

  bool is_empty_write_op() const override {
    return (m_write_data.length() == 0);
  }

  const char *get_op_type() const override {
    return "write";
  }

protected:
  void add_write_ops(neorados::WriteOp *wr) override;
  void add_write_hint(neorados::WriteOp *wr) override;

private:
  ceph::bufferlist m_write_data;
  int m_op_flags;
  int m_write_flags;                        // e.g. CREATE_EXCLUSIVE
  std::optional<uint64_t> m_assert_version; // fail unless version matches
};
// Discard of an object range; the constructor chooses between remove,
// truncate, and zero based on the extent and clone/parent constraints.
template <typename ImageCtxT = ImageCtx>
class ObjectDiscardRequest : public AbstractObjectWriteRequest<ImageCtxT> {
public:
  ObjectDiscardRequest(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off,
      uint64_t object_len, IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, Context *completion)
    : AbstractObjectWriteRequest<ImageCtxT>(ictx, object_no, object_off,
                                            object_len, io_context, "discard",
                                            parent_trace, completion),
      m_discard_flags(discard_flags) {
    if (this->m_full_object) {
      // full-object discard: remove the object unless clone-remove is
      // disabled and parent data must stay hidden
      if ((m_discard_flags & OBJECT_DISCARD_FLAG_DISABLE_CLONE_REMOVE) != 0 &&
          this->has_parent()) {
        if (!this->m_copyup_enabled) {
          // need to hide the parent object instead of child object
          m_discard_action = DISCARD_ACTION_REMOVE_TRUNCATE;
        } else {
          m_discard_action = DISCARD_ACTION_TRUNCATE;
        }
      } else {
        m_discard_action = DISCARD_ACTION_REMOVE;
      }
    } else if (object_off + object_len == ictx->layout.object_size) {
      // trailing range: truncate instead of zeroing
      m_discard_action = DISCARD_ACTION_TRUNCATE;
    } else {
      m_discard_action = DISCARD_ACTION_ZERO;
    }
  }

  const char* get_op_type() const override {
    switch (m_discard_action) {
    case DISCARD_ACTION_REMOVE:
      return "remove";
    case DISCARD_ACTION_REMOVE_TRUNCATE:
      return "remove (create+truncate)";
    case DISCARD_ACTION_TRUNCATE:
      return "truncate";
    case DISCARD_ACTION_ZERO:
      return "zero";
    }
    ceph_abort();
    return nullptr;
  }

  uint8_t get_pre_write_object_map_state() const override {
    if (m_discard_action == DISCARD_ACTION_REMOVE) {
      return OBJECT_PENDING;
    }
    return OBJECT_EXISTS;
  }

protected:
  // discarding a nonexistent object is a no-op unless parent data shows
  // through
  bool is_no_op_for_nonexistent_object() const override {
    return (!this->has_parent());
  }
  bool is_object_map_update_enabled() const override {
    return (
      (m_discard_flags & OBJECT_DISCARD_FLAG_DISABLE_OBJECT_MAP_UPDATE) == 0);
  }
  // only a remove transitions the object map entry back to nonexistent
  bool is_non_existent_post_write_object_map_state() const override {
    return (m_discard_action == DISCARD_ACTION_REMOVE);
  }

  void add_write_hint(neorados::WriteOp *wr) override {
    // no hint for discard
  }

  void add_write_ops(neorados::WriteOp *wr) override;

private:
  enum DiscardAction {
    DISCARD_ACTION_REMOVE,
    DISCARD_ACTION_REMOVE_TRUNCATE,
    DISCARD_ACTION_TRUNCATE,
    DISCARD_ACTION_ZERO
  };

  DiscardAction m_discard_action;
  int m_discard_flags;
};
// Object-level write-same: replicates the (short) data buffer across
// object_off~object_len.  Note the base class is constructed with the full
// object_len, not the buffer length.
template <typename ImageCtxT = ImageCtx>
class ObjectWriteSameRequest : public AbstractObjectWriteRequest<ImageCtxT> {
public:
  ObjectWriteSameRequest(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off,
      uint64_t object_len, ceph::bufferlist&& data, IOContext io_context,
      int op_flags, const ZTracer::Trace &parent_trace, Context *completion)
    : AbstractObjectWriteRequest<ImageCtxT>(ictx, object_no, object_off,
                                            object_len, io_context, "writesame",
                                            parent_trace, completion),
      m_write_data(std::move(data)), m_op_flags(op_flags) {
  }
  const char *get_op_type() const override {
    return "writesame";
  }
protected:
  void add_write_ops(neorados::WriteOp *wr) override;
private:
  ceph::bufferlist m_write_data;
  int m_op_flags;
};
// Object-level compare-and-write: writes m_write_bl only if the on-disk data
// at the offset matches m_cmp_bl; on mismatch the offset of the first
// differing byte is reported via m_mismatch_offset (see filter_write_result).
template <typename ImageCtxT = ImageCtx>
class ObjectCompareAndWriteRequest : public AbstractObjectWriteRequest<ImageCtxT> {
public:
  ObjectCompareAndWriteRequest(
      ImageCtxT *ictx, uint64_t object_no, uint64_t object_off,
      ceph::bufferlist&& cmp_bl, ceph::bufferlist&& write_bl,
      IOContext io_context, uint64_t *mismatch_offset, int op_flags,
      const ZTracer::Trace &parent_trace, Context *completion)
    : AbstractObjectWriteRequest<ImageCtxT>(ictx, object_no, object_off,
                                            cmp_bl.length(), io_context,
                                            "compare_and_write", parent_trace,
                                            completion),
      m_cmp_bl(std::move(cmp_bl)), m_write_bl(std::move(write_bl)),
      m_mismatch_offset(mismatch_offset), m_op_flags(op_flags) {
  }
  const char *get_op_type() const override {
    return "compare_and_write";
  }
  void add_copyup_ops(neorados::WriteOp *wr) override {
    // no-op on copyup
  }
protected:
  // The compare must run against the copied-up data, so the actual
  // compare-and-write is re-issued after copyup completes.
  virtual bool is_post_copyup_write_required() const {
    return true;
  }
  void add_write_ops(neorados::WriteOp *wr) override;
  int filter_write_result(int r) const override;
  // Copyup must not pre-apply this write (the compare could still fail).
  Extents get_copyup_overwrite_extents() const override {
    return {};
  }
private:
  ceph::bufferlist m_cmp_bl;
  ceph::bufferlist m_write_bl;
  uint64_t *m_mismatch_offset;
  int m_op_flags;
};
// Computes, for a set of extents within one object, the per-snapshot change
// delta (written/zeroed ranges) across the requested snapshot ids.  Results
// are accumulated into *snapshot_delta; may recurse into the parent image
// (see list_from_parent in the .cc).
template <typename ImageCtxT = ImageCtx>
class ObjectListSnapsRequest : public ObjectRequest<ImageCtxT> {
public:
  static ObjectListSnapsRequest* create(
      ImageCtxT *ictx, uint64_t objectno, Extents&& object_extents,
      SnapIds&& snap_ids, int list_snaps_flags,
      const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta,
      Context *completion) {
    return new ObjectListSnapsRequest(ictx, objectno,
                                      std::move(object_extents),
                                      std::move(snap_ids), list_snaps_flags,
                                      parent_trace, snapshot_delta, completion);
  }
  ObjectListSnapsRequest(
      ImageCtxT *ictx, uint64_t objectno, Extents&& object_extents,
      SnapIds&& snap_ids, int list_snaps_flags,
      const ZTracer::Trace &parent_trace, SnapshotDelta* snapshot_delta,
      Context *completion);
  void send() override;
  const char *get_op_type() const override {
    return "snap_list";
  }
private:
  Extents m_object_extents;
  SnapIds m_snap_ids;
  int m_list_snaps_flags;
  SnapshotDelta* m_snapshot_delta;
  neorados::SnapSet m_snap_set;
  boost::system::error_code m_ec;
  ImageArea m_image_area = ImageArea::DATA;
  SnapshotDelta m_parent_snapshot_delta;
  void list_snaps();
  void handle_list_snaps(int r);
  void list_from_parent();
  void handle_list_from_parent(int r);
  void zero_extent(uint64_t snap_id, bool dne);
};
} // namespace io
} // namespace librbd
extern template class librbd::io::ObjectRequest<librbd::ImageCtx>;
extern template class librbd::io::ObjectReadRequest<librbd::ImageCtx>;
extern template class librbd::io::AbstractObjectWriteRequest<librbd::ImageCtx>;
extern template class librbd::io::ObjectWriteRequest<librbd::ImageCtx>;
extern template class librbd::io::ObjectDiscardRequest<librbd::ImageCtx>;
extern template class librbd::io::ObjectWriteSameRequest<librbd::ImageCtx>;
extern template class librbd::io::ObjectCompareAndWriteRequest<librbd::ImageCtx>;
extern template class librbd::io::ObjectListSnapsRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_OBJECT_REQUEST_H
| 15,444 | 29.523715 | 83 | h |
null | ceph-main/src/librbd/io/QosImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/QosImageDispatch.h"
#include "common/dout.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/io/FlushTracker.h"
#include <utility>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::QosImageDispatch: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace io {
namespace {
// Sum of the lengths of all image extents (bytes touched by the I/O).
uint64_t get_extent_length(const Extents& extents) {
  uint64_t length = 0;
  for (auto& extent : extents) {
    length += extent.second;
  }
  return length;
}
// Number of tokens an I/O consumes from the bucket identified by 'flag':
// zero when the throttle's direction (read/write) doesn't match the op,
// the byte count for bandwidth throttles, and one for IOPS throttles.
uint64_t calculate_tokens(bool read_op, uint64_t extent_length, uint64_t flag) {
  if (read_op && ((flag & IMAGE_DISPATCH_FLAG_QOS_WRITE_MASK) != 0)) {
    return 0;
  } else if (!read_op && ((flag & IMAGE_DISPATCH_FLAG_QOS_READ_MASK) != 0)) {
    return 0;
  }
  return (((flag & IMAGE_DISPATCH_FLAG_QOS_BPS_MASK) != 0) ? extent_length : 1);
}
// Flag -> throttle-name table used to construct the six token buckets.
static const std::pair<uint64_t, const char*> throttle_flags[] = {
  {IMAGE_DISPATCH_FLAG_QOS_IOPS_THROTTLE, "rbd_qos_iops_throttle" },
  {IMAGE_DISPATCH_FLAG_QOS_BPS_THROTTLE, "rbd_qos_bps_throttle" },
  {IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE, "rbd_qos_read_iops_throttle" },
  {IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE, "rbd_qos_write_iops_throttle" },
  {IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE, "rbd_qos_read_bps_throttle" },
  {IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE, "rbd_qos_write_bps_throttle" }
};
} // anonymous namespace
// Constructs one TokenBucketThrottle per entry in throttle_flags, all
// initially disabled (limit 0) and sharing the image's global timer.
template <typename I>
QosImageDispatch<I>::QosImageDispatch(I* image_ctx)
  : m_image_ctx(image_ctx), m_flush_tracker(new FlushTracker<I>(image_ctx)) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "ictx=" << image_ctx << dendl;
  SafeTimer *timer;
  ceph::mutex *timer_lock;
  ImageCtx::get_timer_instance(cct, &timer, &timer_lock);
  for (auto [flag, name] : throttle_flags) {
    m_throttles.emplace_back(
      flag,
      new TokenBucketThrottle(cct, name, 0, 0, timer, timer_lock));
  }
}
// Releases the token buckets allocated by the constructor.
template <typename I>
QosImageDispatch<I>::~QosImageDispatch() {
  for (auto& [flag, throttle] : m_throttles) {
    delete throttle;
  }
}
// Drains the flush tracker before signaling completion; the throttles
// themselves are torn down by the destructor.
template <typename I>
void QosImageDispatch<I>::shut_down(Context* on_finish) {
  m_flush_tracker->shut_down();
  on_finish->complete(0);
}
// Propagates the minimum scheduler tick to every token bucket.
template <typename I>
void QosImageDispatch<I>::apply_qos_schedule_tick_min(uint64_t tick) {
  for (auto& [flag, throttle] : m_throttles) {
    throttle->set_schedule_tick_min(tick);
  }
}
// Applies a (limit, burst, burst_seconds) configuration to the token bucket
// identified by 'flag' and records whether that throttle is now active in
// m_qos_enabled_flag.  A rejected burst configuration falls back to the bare
// limit so the configured cap still takes effect.
template <typename I>
void QosImageDispatch<I>::apply_qos_limit(uint64_t flag, uint64_t limit,
                                          uint64_t burst,
                                          uint64_t burst_seconds) {
  auto cct = m_image_ctx->cct;

  // locate the token bucket registered for this QoS flag
  TokenBucketThrottle* throttle = nullptr;
  for (auto& [qos_flag, qos_throttle] : m_throttles) {
    if (qos_flag == flag) {
      throttle = qos_throttle;
      break;
    }
  }
  ceph_assert(throttle != nullptr);

  if (throttle->set_limit(limit, burst, burst_seconds) < 0) {
    lderr(cct) << throttle->get_name() << ": invalid qos parameter: "
               << "burst(" << burst << ") is less than "
               << "limit(" << limit << ")" << dendl;
    // if apply failed, we should at least make sure the limit works.
    throttle->set_limit(limit, 0, 1);
  }

  if (limit) {
    m_qos_enabled_flag |= flag;
  } else {
    m_qos_enabled_flag &= ~flag;
  }
}
// Records which RBD_IO_OPERATION_* classes bypass QoS throttling entirely.
template <typename I>
void QosImageDispatch<I>::apply_qos_exclude_ops(uint64_t exclude_ops) {
  m_qos_exclude_ops = exclude_ops;
}
// QoS gate for reads: returns true when the I/O was intercepted and will be
// dispatched asynchronously once all applicable token buckets grant tokens.
template <typename I>
bool QosImageDispatch<I>::read(
    AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result,
    IOContext io_context, int op_flags, int read_flags,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  // pass straight through when reads are excluded from the QoS policy
  if ((m_qos_exclude_ops & RBD_IO_OPERATION_READ) != 0) {
    return false;
  }

  return needs_throttle(true, image_extents, tid, image_dispatch_flags,
                        dispatch_result, on_finish, on_dispatched);
}
// QoS gate for writes; see read() for the dispatch contract.
template <typename I>
bool QosImageDispatch<I>::write(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if ((m_qos_exclude_ops & RBD_IO_OPERATION_WRITE) != 0) {
    return false;
  }

  return needs_throttle(false, image_extents, tid, image_dispatch_flags,
                        dispatch_result, on_finish, on_dispatched);
}
// QoS gate for discards; throttled as a write-class operation.
template <typename I>
bool QosImageDispatch<I>::discard(
    AioCompletion* aio_comp, Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if ((m_qos_exclude_ops & RBD_IO_OPERATION_DISCARD) != 0) {
    return false;
  }

  return needs_throttle(false, image_extents, tid, image_dispatch_flags,
                        dispatch_result, on_finish, on_dispatched);
}
// QoS gate for write-same; throttled as a write-class operation.
template <typename I>
bool QosImageDispatch<I>::write_same(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if ((m_qos_exclude_ops & RBD_IO_OPERATION_WRITE_SAME) != 0) {
    return false;
  }

  return needs_throttle(false, image_extents, tid, image_dispatch_flags,
                        dispatch_result, on_finish, on_dispatched);
}
// QoS gate for compare-and-write; throttled as a write-class operation.
template <typename I>
bool QosImageDispatch<I>::compare_and_write(
    AioCompletion* aio_comp, Extents &&image_extents,
    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if ((m_qos_exclude_ops & RBD_IO_OPERATION_COMPARE_AND_WRITE) != 0) {
    return false;
  }

  return needs_throttle(false, image_extents, tid, image_dispatch_flags,
                        dispatch_result, on_finish, on_dispatched);
}
// Flushes are never throttled: they only wait (via the flush tracker) for
// in-flight writes that were started before the flush to finish.
template <typename I>
bool QosImageDispatch<I>::flush(
    AioCompletion* aio_comp, FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  *dispatch_result = DISPATCH_RESULT_CONTINUE;
  m_flush_tracker->flush(on_dispatched);
  return true;
}
// Completion hook chained onto *on_finish by needs_throttle(): retires the
// write from the flush tracker so pending flushes can make progress.
template <typename I>
void QosImageDispatch<I>::handle_finished(int r, uint64_t tid) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  m_flush_tracker->finish_io(tid);
}
// Atomically records that the token bucket identified by 'flag' has released
// (or never held) this I/O.  Returns true only for the caller whose update
// completed the full QOS mask -- exactly one caller observes the completed
// mask, and that caller is responsible for dispatching the I/O.
template <typename I>
bool QosImageDispatch<I>::set_throttle_flag(
    std::atomic<uint32_t>* image_dispatch_flags, uint32_t flag) {
  // fetch_or performs the same atomic read-modify-write the previous
  // hand-rolled compare_exchange_weak loop implemented, without the loop
  uint32_t desired = image_dispatch_flags->fetch_or(flag) | flag;
  return ((desired & IMAGE_DISPATCH_FLAG_QOS_MASK) ==
          IMAGE_DISPATCH_FLAG_QOS_MASK);
}
// Core throttling decision.  Walks every token bucket: disabled buckets and
// buckets that grant tokens immediately mark their flag in
// *image_dispatch_flags; a bucket that blocks queues handle_throttle_ready()
// as its callback.  Returns true (i.e. "throttled") unless the final
// set_throttle_flag() call observed the complete QOS mask, in which case the
// caller dispatches synchronously.  NOTE: ordering with concurrent
// handle_throttle_ready() callbacks is delicate -- exactly one party sets the
// last flag bit and therefore dispatches.
template <typename I>
bool QosImageDispatch<I>::needs_throttle(
    bool read_op, const Extents& image_extents, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  auto extent_length = get_extent_length(image_extents);
  bool all_qos_flags_set = false;
  if (!read_op) {
    // track writes so flushes can wait on them; chain our cleanup onto the
    // caller's on_finish context
    m_flush_tracker->start_io(tid);
    *on_finish = new LambdaContext([this, tid, on_finish=*on_finish](int r) {
      handle_finished(r, tid);
      on_finish->complete(r);
    });
  }
  *dispatch_result = DISPATCH_RESULT_CONTINUE;
  auto qos_enabled_flag = m_qos_enabled_flag;
  for (auto [flag, throttle] : m_throttles) {
    if ((qos_enabled_flag & flag) == 0) {
      // disabled throttle: mark its flag as satisfied immediately
      all_qos_flags_set = set_throttle_flag(image_dispatch_flags, flag);
      continue;
    }
    auto tokens = calculate_tokens(read_op, extent_length, flag);
    if (tokens > 0 &&
        throttle->get(tokens, this, &QosImageDispatch<I>::handle_throttle_ready,
                      Tag{image_dispatch_flags, on_dispatched}, flag)) {
      // bucket is empty: handle_throttle_ready() will set the flag later
      ldout(cct, 15) << "on_dispatched=" << on_dispatched << ", "
                     << "flag=" << flag << dendl;
      all_qos_flags_set = false;
    } else {
      // zero-token op or tokens granted immediately
      all_qos_flags_set = set_throttle_flag(image_dispatch_flags, flag);
    }
  }
  return !all_qos_flags_set;
}
// Token-bucket callback: marks the throttle's flag and, if this was the last
// outstanding flag, dispatches the queued I/O.
template <typename I>
void QosImageDispatch<I>::handle_throttle_ready(Tag&& tag, uint64_t flag) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 15) << "on_dispatched=" << tag.on_dispatched << ", "
                 << "flag=" << flag << dendl;
  if (set_throttle_flag(tag.image_dispatch_flags, flag)) {
    // timer_lock is held -- so dispatch from outside the timer thread
    m_image_ctx->asio_engine->post(tag.on_dispatched, 0);
  }
}
} // namespace io
} // namespace librbd
template class librbd::io::QosImageDispatch<librbd::ImageCtx>;
| 10,317 | 30.361702 | 83 | cc |
null | ceph-main/src/librbd/io/QosImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_QOS_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_IO_QOS_IMAGE_DISPATCH_H
#include <list>
#include <memory>
#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "common/Throttle.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
template <typename> class FlushTracker;
// Image-dispatch layer implementing per-image QoS via token-bucket throttles
// (IOPS / bandwidth, split by read/write).  I/Os that cannot obtain tokens
// are deferred and re-dispatched when every applicable bucket releases them.
template <typename ImageCtxT>
class QosImageDispatch : public ImageDispatchInterface {
public:
  // Per-I/O state handed to the throttle callback: the atomic flag word
  // shared by all buckets plus the deferred dispatch continuation.
  struct Tag {
    std::atomic<uint32_t>* image_dispatch_flags;
    Context* on_dispatched;
    Tag(std::atomic<uint32_t>* image_dispatch_flags, Context* on_dispatched)
      : image_dispatch_flags(image_dispatch_flags),
        on_dispatched(on_dispatched) {
    }
  };
  QosImageDispatch(ImageCtxT* image_ctx);
  ~QosImageDispatch() override;
  ImageDispatchLayer get_dispatch_layer() const override {
    return IMAGE_DISPATCH_LAYER_QOS;
  }
  void shut_down(Context* on_finish) override;
  // Runtime (re)configuration entry points -- see the .cc for semantics.
  void apply_qos_schedule_tick_min(uint64_t tick);
  void apply_qos_limit(uint64_t flag, uint64_t limit, uint64_t burst,
                       uint64_t burst_seconds);
  void apply_qos_exclude_ops(uint64_t exclude_ops);
  bool read(
      AioCompletion* aio_comp, Extents &&image_extents,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      AioCompletion* aio_comp, Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      AioCompletion* aio_comp, Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      AioCompletion* aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  // snapshot listing and cache invalidation are not subject to QoS
  bool list_snaps(
      AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }
  bool invalidate_cache(Context* on_finish) override {
    return false;
  }
private:
  ImageCtxT* m_image_ctx;
  // (flag, throttle) pairs -- one token bucket per QoS dimension
  std::list<std::pair<uint64_t, TokenBucketThrottle*> > m_throttles;
  uint64_t m_qos_enabled_flag = 0;
  uint64_t m_qos_exclude_ops = 0;
  std::unique_ptr<FlushTracker<ImageCtxT>> m_flush_tracker;
  void handle_finished(int r, uint64_t tid);
  bool set_throttle_flag(std::atomic<uint32_t>* image_dispatch_flags,
                         uint32_t flag);
  bool needs_throttle(bool read_op, const Extents& image_extents, uint64_t tid,
                      std::atomic<uint32_t>* image_dispatch_flags,
                      DispatchResult* dispatch_result, Context** on_finish,
                      Context* on_dispatched);
  void handle_throttle_ready(Tag&& tag, uint64_t flag);
};
} // namespace io
} // namespace librbd
extern template class librbd::io::QosImageDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_QOS_IMAGE_DISPATCH_H
| 4,696 | 33.536765 | 79 | h |
null | ceph-main/src/librbd/io/QueueImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/QueueImageDispatch.h"
#include "common/dout.h"
#include "common/Cond.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/FlushTracker.h"
#include "librbd/io/ImageDispatchSpec.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::QueueImageDispatch: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
// Dispatch layer that moves I/O off the caller's thread (when
// non_blocking_aio is enabled) by re-posting it to the ASIO engine.
template <typename I>
QueueImageDispatch<I>::QueueImageDispatch(I* image_ctx)
  : m_image_ctx(image_ctx), m_flush_tracker(new FlushTracker<I>(image_ctx)) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "ictx=" << image_ctx << dendl;
}
// NOTE(review): m_flush_tracker is a raw owning pointer here, while
// QosImageDispatch holds its tracker in a std::unique_ptr -- consider
// aligning the two (requires a matching header change).
template <typename I>
QueueImageDispatch<I>::~QueueImageDispatch() {
  delete m_flush_tracker;
}
// Drains tracked writes before signaling shutdown completion.
template <typename I>
void QueueImageDispatch<I>::shut_down(Context* on_finish) {
  m_flush_tracker->shut_down();
  on_finish->complete(0);
}
// Queues the read onto the ASIO engine (read_op=true: no flush tracking).
template <typename I>
bool QueueImageDispatch<I>::read(
    AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result,
    IOContext io_context, int op_flags, int read_flags,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  return enqueue(true, tid, dispatch_result, on_finish, on_dispatched);
}
// Queues the write onto the ASIO engine; tracked for flush ordering.
template <typename I>
bool QueueImageDispatch<I>::write(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  return enqueue(false, tid, dispatch_result, on_finish, on_dispatched);
}
// Queues the discard onto the ASIO engine; tracked for flush ordering.
template <typename I>
bool QueueImageDispatch<I>::discard(
    AioCompletion* aio_comp, Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  return enqueue(false, tid, dispatch_result, on_finish, on_dispatched);
}
// Queues the write-same onto the ASIO engine; tracked for flush ordering.
template <typename I>
bool QueueImageDispatch<I>::write_same(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  return enqueue(false, tid, dispatch_result, on_finish, on_dispatched);
}
// Queues the compare-and-write onto the ASIO engine; tracked for flush
// ordering.
template <typename I>
bool QueueImageDispatch<I>::compare_and_write(
    AioCompletion* aio_comp, Extents &&image_extents,
    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  return enqueue(false, tid, dispatch_result, on_finish, on_dispatched);
}
// Flushes wait for all tracked writes started before them; always handled
// by this layer.
template <typename I>
bool QueueImageDispatch<I>::flush(
    AioCompletion* aio_comp, FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  *dispatch_result = DISPATCH_RESULT_CONTINUE;
  m_flush_tracker->flush(on_dispatched);
  return true;
}
// Completion hook chained onto *on_finish by enqueue(): retires the write
// from the flush tracker.
template <typename I>
void QueueImageDispatch<I>::handle_finished(int r, uint64_t tid) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  m_flush_tracker->finish_io(tid);
}
// Common queuing path.  Returns false (pass-through) when non-blocking AIO
// is disabled; otherwise registers write-class ops with the flush tracker,
// wraps the caller's on_finish so the tracker is notified on completion, and
// re-posts the dispatch continuation to the ASIO engine.
template <typename I>
bool QueueImageDispatch<I>::enqueue(
    bool read_op, uint64_t tid, DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (!m_image_ctx->non_blocking_aio) {
    return false;
  }
  if (!read_op) {
    m_flush_tracker->start_io(tid);
    *on_finish = new LambdaContext([this, tid, on_finish=*on_finish](int r) {
      handle_finished(r, tid);
      on_finish->complete(r);
    });
  }
  *dispatch_result = DISPATCH_RESULT_CONTINUE;
  m_image_ctx->asio_engine->post(on_dispatched, 0);
  return true;
}
} // namespace io
} // namespace librbd
template class librbd::io::QueueImageDispatch<librbd::ImageCtx>;
| 5,004 | 31.290323 | 79 | cc |
null | ceph-main/src/librbd/io/QueueImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_QUEUE_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_IO_QUEUE_IMAGE_DISPATCH_H
#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "common/Throttle.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include <list>
#include <set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
template <typename> class FlushTracker;
// Image-dispatch layer that defers I/O to the ASIO engine when non-blocking
// AIO is configured, tracking writes so flushes are correctly ordered.
template <typename ImageCtxT>
class QueueImageDispatch : public ImageDispatchInterface {
public:
  QueueImageDispatch(ImageCtxT* image_ctx);
  ~QueueImageDispatch();
  ImageDispatchLayer get_dispatch_layer() const override {
    return IMAGE_DISPATCH_LAYER_QUEUE;
  }
  void shut_down(Context* on_finish) override;
  bool read(
      AioCompletion* aio_comp, Extents &&image_extents,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      AioCompletion* aio_comp, Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      AioCompletion* aio_comp, Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      AioCompletion* aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  // snapshot listing and cache invalidation are never queued here
  bool list_snaps(
      AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }
  bool invalidate_cache(Context* on_finish) override {
    return false;
  }
private:
  ImageCtxT* m_image_ctx;
  // owning raw pointer, deleted in the destructor
  FlushTracker<ImageCtxT>* m_flush_tracker;
  void handle_finished(int r, uint64_t tid);
  bool enqueue(bool read_op, uint64_t tid, DispatchResult* dispatch_result,
               Context** on_finish, Context* on_dispatched);
};
} // namespace io
} // namespace librbd
extern template class librbd::io::QueueImageDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_QUEUE_IMAGE_DISPATCH_H
| 3,749 | 32.783784 | 77 | h |
null | ceph-main/src/librbd/io/ReadResult.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/ReadResult.h"
#include "include/buffer.h"
#include "common/dout.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/Utils.h"
#include <boost/variant/apply_visitor.hpp>
#include <boost/variant/static_visitor.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::ReadResult: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
// Records the actual image extents of a read on the result variant: trims a
// linear buffer to the total extent length and stores the extents on a
// sparse-bufferlist result (other result types need no adjustment).
struct ReadResult::SetImageExtentsVisitor : public boost::static_visitor<void> {
  Extents image_extents;
  explicit SetImageExtentsVisitor(const Extents& image_extents)
    : image_extents(image_extents) {
  }
  void operator()(Linear &linear) const {
    // shrink the caller-supplied buffer length to what will be read
    uint64_t length = util::get_extents_length(image_extents);
    ceph_assert(length <= linear.buf_len);
    linear.buf_len = length;
  }
  void operator()(SparseBufferlist &sbl) const {
    sbl.image_extents = image_extents;
  }
  template <typename T>
  void operator()(T &t) const {
  }
};
// Copies/moves the destriped read data into whichever destination the result
// variant describes (raw buffer, iovec, bufferlist, or sparse bufferlist
// with an extent map).
struct ReadResult::AssembleResultVisitor : public boost::static_visitor<void> {
  CephContext *cct;
  Striper::StripedReadResult &destriper;
  AssembleResultVisitor(CephContext *cct, Striper::StripedReadResult &destriper)
    : cct(cct), destriper(destriper) {
  }
  void operator()(Empty &empty) const {
    ldout(cct, 20) << "dropping read result" << dendl;
  }
  void operator()(Linear &linear) const {
    ldout(cct, 20) << "copying resulting bytes to "
                   << reinterpret_cast<void*>(linear.buf) << dendl;
    destriper.assemble_result(cct, linear.buf, linear.buf_len);
  }
  void operator()(Vector &vector) const {
    // assemble into a temporary bufferlist, then scatter across the iovec
    bufferlist bl;
    destriper.assemble_result(cct, bl, true);
    ldout(cct, 20) << "copying resulting " << bl.length() << " bytes to iovec "
                   << reinterpret_cast<const void*>(vector.iov) << dendl;
    bufferlist::iterator it = bl.begin();
    size_t length = bl.length();
    size_t offset = 0;
    int idx = 0;
    for (; offset < length && idx < vector.iov_count; idx++) {
      size_t len = std::min(vector.iov[idx].iov_len, length - offset);
      it.copy(len, static_cast<char *>(vector.iov[idx].iov_base));
      offset += len;
    }
    // the iovec must be large enough to hold the entire result
    ceph_assert(offset == bl.length());
  }
  void operator()(Bufferlist &bufferlist) const {
    bufferlist.bl->clear();
    destriper.assemble_result(cct, *bufferlist.bl, true);
    ldout(cct, 20) << "moved resulting " << bufferlist.bl->length() << " "
                   << "bytes to bl " << reinterpret_cast<void*>(bufferlist.bl)
                   << dendl;
  }
  void operator()(SparseBufferlist &sparse_bufferlist) const {
    sparse_bufferlist.bl->clear();
    ExtentMap buffer_extent_map;
    auto buffer_extents_length = destriper.assemble_result(
      cct, &buffer_extent_map, sparse_bufferlist.bl);
    ldout(cct, 20) << "image_extents="
                   << sparse_bufferlist.image_extents << ", "
                   << "buffer_extent_map=" << buffer_extent_map << dendl;
    sparse_bufferlist.extent_map->clear();
    sparse_bufferlist.extent_map->reserve(buffer_extent_map.size());
    // The extent-map is logically addressed by buffer-extents not image- or
    // object-extents. Translate this address mapping to image-extent
    // logical addressing since it's tied to an image-extent read
    uint64_t buffer_offset = 0;
    auto bem_it = buffer_extent_map.begin();
    for (auto [image_offset, image_length] : sparse_bufferlist.image_extents) {
      while (bem_it != buffer_extent_map.end()) {
        auto [buffer_extent_offset, buffer_extent_length] = *bem_it;
        if (buffer_offset + image_length <= buffer_extent_offset) {
          // skip any image extent that is not included in the results
          break;
        }
        // current buffer-extent should be within the current image-extent
        ceph_assert(buffer_offset <= buffer_extent_offset &&
                    buffer_offset + image_length >=
                      buffer_extent_offset + buffer_extent_length);
        auto image_extent_offset =
          image_offset + (buffer_extent_offset - buffer_offset);
        ldout(cct, 20) << "mapping buffer extent " << buffer_extent_offset
                       << "~" << buffer_extent_length << " to image extent "
                       << image_extent_offset << "~" << buffer_extent_length
                       << dendl;
        sparse_bufferlist.extent_map->emplace_back(
          image_extent_offset, buffer_extent_length);
        ++bem_it;
      }
      buffer_offset += image_length;
    }
    // every buffer extent must have been consumed by the translation
    ceph_assert(buffer_offset == buffer_extents_length);
    ceph_assert(bem_it == buffer_extent_map.end());
    ldout(cct, 20) << "moved resulting " << *sparse_bufferlist.extent_map
                   << " extents of total " << sparse_bufferlist.bl->length()
                   << " bytes to bl "
                   << reinterpret_cast<void*>(sparse_bufferlist.bl) << dendl;
  }
};
// Registers one sub-request on the parent AioCompletion; buffer_offset is
// where this request's data lands within the overall read buffer.
ReadResult::C_ImageReadRequest::C_ImageReadRequest(
    AioCompletion *aio_completion, uint64_t buffer_offset,
    const Extents image_extents)
  : aio_completion(aio_completion), buffer_offset(buffer_offset),
    image_extents(image_extents) {
  aio_completion->add_request();
}
void ReadResult::C_ImageReadRequest::finish(int r) {
CephContext *cct = aio_completion->ictx->cct;
ldout(cct, 10) << "C_ImageReadRequest: r=" << r
<< dendl;
if (r >= 0 || (ignore_enoent && r == -ENOENT)) {
striper::LightweightBufferExtents buffer_extents;
size_t length = 0;
for (auto &image_extent : image_extents) {
buffer_extents.emplace_back(buffer_offset + length, image_extent.second);
length += image_extent.second;
}
ceph_assert(r == -ENOENT || length == bl.length());
aio_completion->lock.lock();
aio_completion->read_result.m_destriper.add_partial_result(
cct, std::move(bl), buffer_extents);
aio_completion->lock.unlock();
r = length;
}
aio_completion->complete_request(r);
}
// Registers itself as an outstanding sub-request on the AioCompletion.
ReadResult::C_ObjectReadRequest::C_ObjectReadRequest(
    AioCompletion *aio_completion, ReadExtents&& extents)
  : aio_completion(aio_completion), extents(std::move(extents)) {
  aio_completion->add_request();
}
// Completion callback for a sparse object read: -ENOENT is treated as a
// zero-length read; each extent's sparse result is folded into the
// destriper and the parent completion receives the total object length.
void ReadResult::C_ObjectReadRequest::finish(int r) {
  CephContext *cct = aio_completion->ictx->cct;
  ldout(cct, 10) << "C_ObjectReadRequest: r=" << r
                 << dendl;
  if (r == -ENOENT) {
    // missing object reads back as zeroes
    r = 0;
  }
  if (r >= 0) {
    uint64_t object_len = 0;
    // destriper is shared state -- hold the completion lock while adding
    aio_completion->lock.lock();
    for (auto& extent: extents) {
      ldout(cct, 10) << " got " << extent.extent_map
                     << " for " << extent.buffer_extents
                     << " bl " << extent.bl.length() << dendl;
      aio_completion->read_result.m_destriper.add_partial_sparse_result(
          cct, std::move(extent.bl), extent.extent_map, extent.offset,
          extent.buffer_extents);
      object_len += extent.length;
    }
    aio_completion->lock.unlock();
    r = object_len;
  }
  aio_completion->complete_request(r);
}
// Splits a single merged read buffer back into the per-extent buffers.
ReadResult::C_ObjectReadMergedExtents::C_ObjectReadMergedExtents(
    CephContext* cct, ReadExtents* extents, Context* on_finish)
  : cct(cct), extents(extents), on_finish(on_finish) {
}
// Completion callback: carve 'bl' into the individual extents in order.
// Any length mismatch (short or leftover data) is reported as -EIO.
void ReadResult::C_ObjectReadMergedExtents::finish(int r) {
  if (r >= 0) {
    for (auto& extent: *extents) {
      if (bl.length() < extent.length) {
        lderr(cct) << "Merged extents length is less than expected" << dendl;
        r = -EIO;
        break;
      }
      // move the leading extent.length bytes out of bl into the extent
      bl.splice(0, extent.length, &extent.bl);
    }
    if (bl.length() != 0) {
      // all bytes should have been consumed by the extents above
      lderr(cct) << "Merged extents length is greater than expected" << dendl;
      r = -EIO;
    }
  }
  on_finish->complete(r);
}
// Each constructor selects one of the variant destination buffer types.
// Discard-read: results are assembled but not copied anywhere.
ReadResult::ReadResult() : m_buffer(Empty()) {
}
// Copy into a caller-supplied flat buffer of buf_len bytes.
ReadResult::ReadResult(char *buf, size_t buf_len)
  : m_buffer(Linear(buf, buf_len)) {
}
// Scatter into a caller-supplied iovec array.
ReadResult::ReadResult(const struct iovec *iov, int iov_count)
  : m_buffer(Vector(iov, iov_count)) {
}
// Append into a caller-supplied bufferlist.
ReadResult::ReadResult(ceph::bufferlist *bl)
  : m_buffer(Bufferlist(bl)) {
}
// Sparse read: data goes to 'bl', present ranges to 'extent_map'.
ReadResult::ReadResult(Extents* extent_map, ceph::bufferlist* bl)
  : m_buffer(SparseBufferlist(extent_map, bl)) {
}
// Record the image extents being read (used by the sparse variant).
void ReadResult::set_image_extents(const Extents& image_extents) {
  boost::apply_visitor(SetImageExtentsVisitor(image_extents), m_buffer);
}
// Destripe the accumulated partial results into the destination buffer.
void ReadResult::assemble_result(CephContext *cct) {
  boost::apply_visitor(AssembleResultVisitor(cct, m_destriper), m_buffer);
}
} // namespace io
} // namespace librbd
| 8,592 | 31.673004 | 80 | cc |
null | ceph-main/src/librbd/io/ReadResult.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_READ_RESULT_H
#define CEPH_LIBRBD_IO_READ_RESULT_H
#include "include/common_fwd.h"
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/Context.h"
#include "librbd/io/Types.h"
#include "osdc/Striper.h"
#include <sys/uio.h>
#include <boost/variant/variant.hpp>
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
template <typename> struct ObjectReadRequest;
// Collects the (possibly striped/sparse) results of an image read and
// assembles them into one of several caller-provided destination buffer
// representations.
class ReadResult {
public:
  // Completion context for a read addressed in image-extent space.
  struct C_ImageReadRequest : public Context {
    AioCompletion *aio_completion;
    uint64_t buffer_offset = 0;
    Extents image_extents;
    bufferlist bl;
    // when set, -ENOENT from the lower layer is treated as success
    bool ignore_enoent = false;
    C_ImageReadRequest(AioCompletion *aio_completion,
                       uint64_t buffer_offset,
                       const Extents image_extents);
    void finish(int r) override;
  };
  // Completion context for a sparse read of a single backing object.
  struct C_ObjectReadRequest : public Context {
    AioCompletion *aio_completion;
    ReadExtents extents;
    C_ObjectReadRequest(AioCompletion *aio_completion, ReadExtents&& extents);
    void finish(int r) override;
  };
  // Completion context that splits one merged read buffer back into
  // its constituent extents.
  struct C_ObjectReadMergedExtents : public Context {
    CephContext* cct;
    ReadExtents* extents;
    Context *on_finish;
    bufferlist bl;
    C_ObjectReadMergedExtents(CephContext* cct, ReadExtents* extents,
                              Context* on_finish);
    void finish(int r) override;
  };
  // Destination buffer selection (see the variant types below).
  ReadResult();
  ReadResult(char *buf, size_t buf_len);
  ReadResult(const struct iovec *iov, int iov_count);
  ReadResult(ceph::bufferlist *bl);
  ReadResult(Extents* extent_map, ceph::bufferlist* bl);
  void set_image_extents(const Extents& image_extents);
  void assemble_result(CephContext *cct);
private:
  // discard destination: data is dropped after assembly
  struct Empty {
  };
  // flat caller-owned buffer
  struct Linear {
    char *buf;
    size_t buf_len;
    Linear(char *buf, size_t buf_len) : buf(buf), buf_len(buf_len) {
    }
  };
  // caller-owned iovec array
  struct Vector {
    const struct iovec *iov;
    int iov_count;
    Vector(const struct iovec *iov, int iov_count)
      : iov(iov), iov_count(iov_count) {
    }
  };
  // caller-owned bufferlist
  struct Bufferlist {
    ceph::bufferlist *bl;
    Bufferlist(ceph::bufferlist *bl) : bl(bl) {
    }
  };
  // sparse-read destination: data plus an image-extent presence map
  struct SparseBufferlist {
    Extents *extent_map;
    ceph::bufferlist *bl;
    Extents image_extents;
    SparseBufferlist(Extents* extent_map, ceph::bufferlist* bl)
      : extent_map(extent_map), bl(bl) {
    }
  };
  typedef boost::variant<Empty,
                         Linear,
                         Vector,
                         Bufferlist,
                         SparseBufferlist> Buffer;
  struct SetImageExtentsVisitor;
  struct AssembleResultVisitor;
  Buffer m_buffer;
  Striper::StripedReadResult m_destriper;
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_READ_RESULT_H
| 2,875 | 21.123077 | 78 | h |
null | ceph-main/src/librbd/io/RefreshImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/RefreshImageDispatch.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include <map>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::RefreshImageDispatch: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
template <typename I>
// Dispatch layer that parks I/O behind a pending image refresh.
RefreshImageDispatch<I>::RefreshImageDispatch(I* image_ctx)
  : m_image_ctx(image_ctx) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "ictx=" << image_ctx << dendl;
}
template <typename I>
// Nothing to tear down -- no queued state is owned by this layer.
void RefreshImageDispatch<I>::shut_down(Context* on_finish) {
  on_finish->complete(0);
}
template <typename I>
bool RefreshImageDispatch<I>::read(
    AioCompletion* aio_comp, Extents &&image_extents, ReadResult &&read_result,
    IOContext io_context, int op_flags, int read_flags,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  // Trace the request; intercept it only when a refresh must run first.
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;
  return needs_refresh(dispatch_result, on_dispatched);
}
template <typename I>
bool RefreshImageDispatch<I>::write(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  // Trace the request; intercept it only when a refresh must run first.
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;
  return needs_refresh(dispatch_result, on_dispatched);
}
template <typename I>
bool RefreshImageDispatch<I>::discard(
    AioCompletion* aio_comp, Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  // Trace the request; intercept it only when a refresh must run first.
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;
  return needs_refresh(dispatch_result, on_dispatched);
}
template <typename I>
bool RefreshImageDispatch<I>::write_same(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  // Trace the request; intercept it only when a refresh must run first.
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;
  return needs_refresh(dispatch_result, on_dispatched);
}
template <typename I>
bool RefreshImageDispatch<I>::compare_and_write(
    AioCompletion* aio_comp, Extents &&image_extents,
    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  // Trace the request; intercept it only when a refresh must run first.
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;
  return needs_refresh(dispatch_result, on_dispatched);
}
template <typename I>
bool RefreshImageDispatch<I>::flush(
    AioCompletion* aio_comp, FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;
  // Internally generated flushes must bypass the refresh check: the
  // refresh state machine itself flushes, and exclusive-lock acquisition
  // can also trigger a flush.
  switch (flush_source) {
  case FLUSH_SOURCE_REFRESH:
  case FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH:
  case FLUSH_SOURCE_SHUTDOWN:
    return false;
  default:
    break;
  }
  return needs_refresh(dispatch_result, on_dispatched);
}
template <typename I>
bool RefreshImageDispatch<I>::needs_refresh(
    DispatchResult* dispatch_result, Context* on_dispatched) {
  // Fast path: image state is current -- let the I/O continue.
  if (!m_image_ctx->state->is_refresh_required()) {
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 15) << "on_dispatched=" << on_dispatched << dendl;
  // Park the I/O behind a refresh; it is re-dispatched when the refresh
  // completes on_dispatched.
  *dispatch_result = DISPATCH_RESULT_CONTINUE;
  m_image_ctx->state->refresh(on_dispatched);
  return true;
}
} // namespace io
} // namespace librbd
template class librbd::io::RefreshImageDispatch<librbd::ImageCtx>;
| 5,028 | 29.113772 | 79 | cc |
null | ceph-main/src/librbd/io/RefreshImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_REFRESH_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_IO_REFRESH_IMAGE_DISPATCH_H
#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "common/Throttle.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
template <typename ImageCtxT>
// Image-dispatch layer that blocks I/O while an image refresh is pending;
// each intercepted request is re-queued once the refresh finishes.
class RefreshImageDispatch : public ImageDispatchInterface {
public:
  RefreshImageDispatch(ImageCtxT* image_ctx);
  ImageDispatchLayer get_dispatch_layer() const override {
    return IMAGE_DISPATCH_LAYER_REFRESH;
  }
  void shut_down(Context* on_finish) override;
  // Each I/O hook returns true when the request was intercepted to wait
  // for a refresh, false to let it continue down the dispatch chain.
  bool read(
      AioCompletion* aio_comp, Extents &&image_extents,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      AioCompletion* aio_comp, Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      AioCompletion* aio_comp, Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      AioCompletion* aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  // snapshot listing never requires a refresh here
  bool list_snaps(
      AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }
  bool invalidate_cache(Context* on_finish) override {
    return false;
  }
private:
  ImageCtxT* m_image_ctx;
  // true when the request was parked behind an in-progress refresh
  bool needs_refresh(DispatchResult* dispatch_result, Context* on_dispatched);
};
} // namespace io
} // namespace librbd
extern template class librbd::io::RefreshImageDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_REFRESH_IMAGE_DISPATCH_H
| 3,518 | 33.5 | 78 | h |
null | ceph-main/src/librbd/io/SimpleSchedulerObjectDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/SimpleSchedulerObjectDispatch.h"
#include "include/neorados/RADOS.hpp"
#include "common/ceph_time.h"
#include "common/Timer.h"
#include "common/errno.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/io/FlushTracker.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcher.h"
#include "librbd/io/Utils.h"
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics/rolling_count.hpp>
#include <boost/accumulators/statistics/rolling_sum.hpp>
#include <boost/accumulators/statistics/stats.hpp>
#include <array>
#include <cstddef>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::SimpleSchedulerObjectDispatch: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace io {
using namespace boost::accumulators;
using ceph::operator<<;
using librbd::util::data_object_name;
static const int LATENCY_STATS_WINDOW_SIZE = 10;

// Rolling-window latency tracker over the most recent
// LATENCY_STATS_WINDOW_SIZE samples (nanoseconds).
//
// BUG FIX: avg() previously returned the rolling *sum* of the window
// instead of the average (the division by the sample count was missing),
// so the computed dispatch delay was ~10x too large once the window
// filled.  Reimplemented with a simple std::array ring buffer that keeps
// a running sum, which also drops the Boost.Accumulators dependency for
// this class.
class LatencyStats {
private:
  std::array<uint64_t, LATENCY_STATS_WINDOW_SIZE> m_samples{};
  size_t m_next = 0;     // index of the slot the next sample overwrites
  size_t m_count = 0;    // valid samples collected (<= window size)
  uint64_t m_sum = 0;    // running sum of the valid samples

public:
  LatencyStats() = default;

  // true once a full window of samples has been collected
  bool is_ready() const {
    return m_count == LATENCY_STATS_WINDOW_SIZE;
  }

  // record a latency sample, evicting the oldest once the window is full
  void add(uint64_t latency) {
    if (m_count == LATENCY_STATS_WINDOW_SIZE) {
      m_sum -= m_samples[m_next];
    } else {
      ++m_count;
    }
    m_samples[m_next] = latency;
    m_sum += latency;
    m_next = (m_next + 1) % LATENCY_STATS_WINDOW_SIZE;
  }

  // average latency over the current window; 0 when no samples yet
  uint64_t avg() const {
    if (m_count > 0) {
      return m_sum / m_count;
    }
    return 0;
  }
};
template <typename I>
// Queue (or merge) a write against this object's pending batch.
// Returns false when the write cannot be batched (different io_context /
// op_flags, zero-length write into a non-empty batch, or overlap with an
// already-queued range) -- the caller must then dispatch immediately.
bool SimpleSchedulerObjectDispatch<I>::ObjectRequests::try_delay_request(
    uint64_t object_off, ceph::bufferlist&& data, IOContext io_context,
    int op_flags, int object_dispatch_flags, Context* on_dispatched) {
  if (!m_delayed_requests.empty()) {
    if (!m_io_context || *m_io_context != *io_context ||
        op_flags != m_op_flags || data.length() == 0 ||
        intersects(object_off, data.length())) {
      return false;
    }
  } else {
    // first delayed request fixes the batch's io_context and op_flags
    m_io_context = io_context;
    m_op_flags = op_flags;
  }
  if (data.length() == 0) {
    // a zero length write is usually a special case,
    // and we don't want it to be merged with others
    ceph_assert(m_delayed_requests.empty());
    m_delayed_request_extents.insert(0, UINT64_MAX);
  } else {
    m_delayed_request_extents.insert(object_off, data.length());
  }
  m_object_dispatch_flags |= object_dispatch_flags;
  if (!m_delayed_requests.empty()) {
    // try to merge front to an existing request
    auto iter = m_delayed_requests.find(object_off + data.length());
    if (iter != m_delayed_requests.end()) {
      // new data immediately precedes an existing entry: re-key at
      // object_off with new data + old data, then try merging with the
      // preceding entry as well
      auto new_iter = m_delayed_requests.insert({object_off, {}}).first;
      new_iter->second.data = std::move(data);
      new_iter->second.data.append(std::move(iter->second.data));
      new_iter->second.requests = std::move(iter->second.requests);
      new_iter->second.requests.push_back(on_dispatched);
      m_delayed_requests.erase(iter);
      if (new_iter != m_delayed_requests.begin()) {
        auto prev = new_iter;
        try_merge_delayed_requests(--prev, new_iter);
      }
      return true;
    }
    // try to merge back to an existing request
    iter = m_delayed_requests.lower_bound(object_off);
    if (iter != m_delayed_requests.begin() &&
        (iter == m_delayed_requests.end() || iter->first > object_off)) {
      iter--;
    }
    if (iter != m_delayed_requests.end() &&
        iter->first + iter->second.data.length() == object_off) {
      // new data immediately follows the candidate entry: append, then
      // try merging with the following entry
      iter->second.data.append(std::move(data));
      iter->second.requests.push_back(on_dispatched);
      auto next = iter;
      if (++next != m_delayed_requests.end()) {
        try_merge_delayed_requests(iter, next);
      }
      return true;
    }
  }
  // create a new request
  auto iter = m_delayed_requests.insert({object_off, {}}).first;
  iter->second.data = std::move(data);
  iter->second.requests.push_back(on_dispatched);
  return true;
}
template <typename I>
// Merge iter2 into iter1 when the two entries are exactly adjacent
// (iter1's data ends where iter2 starts); iter2 is erased on merge.
void SimpleSchedulerObjectDispatch<I>::ObjectRequests::try_merge_delayed_requests(
    typename std::map<uint64_t, MergedRequests>::iterator &iter1,
    typename std::map<uint64_t, MergedRequests>::iterator &iter2) {
  if (iter1->first + iter1->second.data.length() != iter2->first) {
    return;
  }
  iter1->second.data.append(std::move(iter2->second.data));
  iter1->second.requests.insert(iter1->second.requests.end(),
                                iter2->second.requests.begin(),
                                iter2->second.requests.end());
  m_delayed_requests.erase(iter2);
}
template <typename I>
// Issue one write per merged entry; each write's completion records its
// latency (when stats are enabled) and then completes all the client
// contexts that were folded into that entry.
void SimpleSchedulerObjectDispatch<I>::ObjectRequests::dispatch_delayed_requests(
    I *image_ctx, LatencyStats *latency_stats, ceph::mutex *latency_stats_lock) {
  for (auto &it : m_delayed_requests) {
    auto offset = it.first;
    auto &merged_requests = it.second;
    auto ctx = new LambdaContext(
        [requests=std::move(merged_requests.requests), latency_stats,
         latency_stats_lock, start_time=ceph_clock_now()](int r) {
          if (latency_stats) {
            std::lock_guard locker{*latency_stats_lock};
            auto latency = ceph_clock_now() - start_time;
            latency_stats->add(latency.to_nsec());
          }
          for (auto on_dispatched : requests) {
            on_dispatched->complete(r);
          }
        });
    auto req = ObjectDispatchSpec::create_write(
        image_ctx, OBJECT_DISPATCH_LAYER_SCHEDULER,
        m_object_no, offset, std::move(merged_requests.data), m_io_context,
        m_op_flags, 0, std::nullopt, 0, {}, ctx);
    req->object_dispatch_flags = m_object_dispatch_flags;
    req->send();
  }
  // clearing the dispatch time marks this batch as no longer scheduled
  m_dispatch_time = {};
}
template <typename I>
// Object-dispatch layer that briefly delays small writes so adjacent
// writes to the same object can be merged into a single request.
// When rbd_io_scheduler_simple_max_delay is 0 the delay is derived from
// measured write latency instead of a fixed value.
SimpleSchedulerObjectDispatch<I>::SimpleSchedulerObjectDispatch(
    I* image_ctx)
  : m_image_ctx(image_ctx),
    m_flush_tracker(new FlushTracker<I>(image_ctx)),
    m_lock(ceph::make_mutex(librbd::util::unique_lock_name(
        "librbd::io::SimpleSchedulerObjectDispatch::lock", this))),
    m_max_delay(image_ctx->config.template get_val<uint64_t>(
        "rbd_io_scheduler_simple_max_delay")) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 5) << "ictx=" << image_ctx << dendl;
  I::get_timer_instance(cct, &m_timer, &m_timer_lock);
  if (m_max_delay == 0) {
    // adaptive mode: delay half of the average measured write latency
    m_latency_stats = std::make_unique<LatencyStats>();
  }
}
template <typename I>
SimpleSchedulerObjectDispatch<I>::~SimpleSchedulerObjectDispatch() {
  delete m_flush_tracker;
}
template <typename I>
void SimpleSchedulerObjectDispatch<I>::init() {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  // add ourself to the IO object dispatcher chain
  m_image_ctx->io_object_dispatcher->register_dispatch(this);
}
template <typename I>
void SimpleSchedulerObjectDispatch<I>::shut_down(Context* on_finish) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << dendl;
  m_flush_tracker->shut_down();
  on_finish->complete(0);
}
template <typename I>
// A read must not observe stale data: flush any delayed writes that
// overlap the read extents before letting the read continue.
bool SimpleSchedulerObjectDispatch<I>::read(
    uint64_t object_no, ReadExtents* extents, IOContext io_context,
    int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
    uint64_t* version, int* object_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << extents
                 << dendl;
  std::lock_guard locker{m_lock};
  for (auto& extent : *extents) {
    if (intersects(object_no, extent.offset, extent.length)) {
      dispatch_delayed_requests(object_no);
      break;
    }
  }
  return false;
}
template <typename I>
// Discards are never batched: flush pending writes for the object and
// track this request so later writes can be delayed behind it.
bool SimpleSchedulerObjectDispatch<I>::discard(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    IOContext io_context, int discard_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;
  std::lock_guard locker{m_lock};
  dispatch_delayed_requests(object_no);
  register_in_flight_request(object_no, {}, on_finish);
  return false;
}
template <typename I>
// Either absorb the write into the object's delayed batch (returning
// true with DISPATCH_RESULT_COMPLETE) or dispatch the batch and let the
// write continue, registering it as the new in-flight request.
bool SimpleSchedulerObjectDispatch<I>::write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
    IOContext io_context, int op_flags, int write_flags,
    std::optional<uint64_t> assert_version,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << data.length() << dendl;
  std::lock_guard locker{m_lock};
  // don't try to batch assert version writes
  if (assert_version.has_value() ||
      (write_flags & OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE) != 0) {
    dispatch_delayed_requests(object_no);
    return false;
  }
  if (try_delay_write(object_no, object_off, std::move(data), io_context,
                      op_flags, *object_dispatch_flags, on_dispatched)) {
    // the delayed write still counts as in-flight for flush purposes
    auto dispatch_seq = ++m_dispatch_seq;
    m_flush_tracker->start_io(dispatch_seq);
    *on_finish = new LambdaContext(
        [this, dispatch_seq, ctx=*on_finish](int r) {
          ctx->complete(r);
          m_flush_tracker->finish_io(dispatch_seq);
        });
    *dispatch_result = DISPATCH_RESULT_COMPLETE;
    return true;
  }
  dispatch_delayed_requests(object_no);
  // pass the current time so write latency can be sampled on completion
  register_in_flight_request(object_no, ceph_clock_now(), on_finish);
  return false;
}
template <typename I>
// Writesames are never batched (same handling as discard).
bool SimpleSchedulerObjectDispatch<I>::write_same(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
    IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;
  std::lock_guard locker{m_lock};
  dispatch_delayed_requests(object_no);
  register_in_flight_request(object_no, {}, on_finish);
  return false;
}
template <typename I>
// Compare-and-writes are never batched (same handling as discard).
bool SimpleSchedulerObjectDispatch<I>::compare_and_write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
    ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
    int* object_dispatch_flags, uint64_t* journal_tid,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << cmp_data.length() << dendl;
  std::lock_guard locker{m_lock};
  dispatch_delayed_requests(object_no);
  register_in_flight_request(object_no, {}, on_finish);
  return false;
}
template <typename I>
// Push out every delayed batch, then defer the flush until all tracked
// in-flight writes have completed.
bool SimpleSchedulerObjectDispatch<I>::flush(
    FlushSource flush_source, const ZTracer::Trace &parent_trace,
    uint64_t* journal_tid, DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  {
    std::lock_guard locker{m_lock};
    dispatch_all_delayed_requests();
  }
  *dispatch_result = DISPATCH_RESULT_CONTINUE;
  m_flush_tracker->flush(on_dispatched);
  return true;
}
template <typename I>
bool SimpleSchedulerObjectDispatch<I>::intersects(
    uint64_t object_no, uint64_t object_off, uint64_t len) const {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto cct = m_image_ctx->cct;
  // The range overlaps only if the object has queued (delayed) writes
  // covering it.
  bool overlaps = false;
  if (auto it = m_requests.find(object_no); it != m_requests.end()) {
    overlaps = it->second->intersects(object_off, len);
  }
  ldout(cct, 20) << overlaps << dendl;
  return overlaps;
}
template <typename I>
// Attempt to queue a write behind the object's current in-flight request.
// Returns true when the write was delayed (its dispatch is scheduled by
// a timer); m_lock must be held.
bool SimpleSchedulerObjectDispatch<I>::try_delay_write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
    IOContext io_context, int op_flags, int object_dispatch_flags,
    Context* on_dispatched) {
  ceph_assert(ceph_mutex_is_locked(m_lock))
;
  auto cct = m_image_ctx->cct;
  if (m_latency_stats && !m_latency_stats->is_ready()) {
    // adaptive mode needs a full latency window before delaying anything
    ldout(cct, 20) << "latency stats not collected yet" << dendl;
    return false;
  }
  auto it = m_requests.find(object_no);
  if (it == m_requests.end()) {
    // only delay behind an already-tracked request for this object
    ldout(cct, 20) << "no pending requests" << dendl;
    return false;
  }
  auto &object_requests = it->second;
  bool delayed = object_requests->try_delay_request(
      object_off, std::move(data), io_context, op_flags, object_dispatch_flags,
      on_dispatched);
  ldout(cct, 20) << "delayed: " << delayed << dendl;
  // schedule dispatch on the first request added
  if (delayed && !object_requests->is_scheduled_dispatch()) {
    auto dispatch_time = ceph::real_clock::now();
    if (m_latency_stats) {
      // wait half the average write latency before dispatching the batch
      dispatch_time += std::chrono::nanoseconds(m_latency_stats->avg() / 2);
    } else {
      dispatch_time += std::chrono::milliseconds(m_max_delay);
    }
    object_requests->set_scheduled_dispatch(dispatch_time);
    m_dispatch_queue.push_back(object_requests);
    if (m_dispatch_queue.front() == object_requests) {
      schedule_dispatch_delayed_requests();
    }
  }
  return delayed;
}
template <typename I>
// Dispatch and drop every object's delayed batch; m_lock must be held.
void SimpleSchedulerObjectDispatch<I>::dispatch_all_delayed_requests() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  while (!m_requests.empty()) {
    auto it = m_requests.begin();
    dispatch_delayed_requests(it->second);
    m_requests.erase(it);
  }
}
template <typename I>
// Track a non-batched request for 'object_no' and wrap *on_finish so that
// its completion samples latency (when start_time is set) and dispatches
// any writes that were delayed behind it in the meantime.
void SimpleSchedulerObjectDispatch<I>::register_in_flight_request(
    uint64_t object_no, const utime_t &start_time, Context **on_finish) {
  auto res = m_requests.insert(
      {object_no, std::make_shared<ObjectRequests>(object_no)});
  ceph_assert(res.second);
  auto it = res.first;
  auto dispatch_seq = ++m_dispatch_seq;
  m_flush_tracker->start_io(dispatch_seq);
  it->second->set_dispatch_seq(dispatch_seq);
  *on_finish = new LambdaContext(
      [this, object_no, dispatch_seq, start_time, ctx=*on_finish](int r) {
        ctx->complete(r);
        std::unique_lock locker{m_lock};
        if (m_latency_stats && start_time != utime_t()) {
          auto latency = ceph_clock_now() - start_time;
          m_latency_stats->add(latency.to_nsec());
        }
        auto it = m_requests.find(object_no);
        if (it == m_requests.end() ||
            it->second->get_dispatch_seq() != dispatch_seq) {
          // a newer request replaced us; its completion will dispatch
          ldout(m_image_ctx->cct, 20) << "already dispatched" << dendl;
        } else {
          dispatch_delayed_requests(it->second);
          m_requests.erase(it);
        }
        locker.unlock();
        m_flush_tracker->finish_io(dispatch_seq);
      });
}
template <typename I>
// Dispatch the delayed batch for a single object (by number) and stop
// tracking it; m_lock must be held.
void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests(
    uint64_t object_no) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto cct = m_image_ctx->cct;
  auto it = m_requests.find(object_no);
  if (it == m_requests.end()) {
    ldout(cct, 20) << "object_no=" << object_no << ": not found" << dendl;
    return;
  }
  dispatch_delayed_requests(it->second);
  m_requests.erase(it);
}
template <typename I>
// Dispatch a specific delayed batch and, if it was at the head of the
// timer queue, re-arm the timer for the next batch; m_lock must be held.
void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests(
    ObjectRequestsRef object_requests) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "object_no=" << object_requests->get_object_no() << ", "
                 << object_requests->delayed_requests_size() << " requests, "
                 << "dispatch_time=" << object_requests->get_dispatch_time()
                 << dendl;
  if (!object_requests->is_scheduled_dispatch()) {
    // nothing queued (or already dispatched) for this object
    return;
  }
  object_requests->dispatch_delayed_requests(m_image_ctx, m_latency_stats.get(),
                                             &m_lock);
  ceph_assert(!m_dispatch_queue.empty());
  if (m_dispatch_queue.front() == object_requests) {
    m_dispatch_queue.pop_front();
    schedule_dispatch_delayed_requests();
  }
}
template <typename I>
// (Re)arm the timer for the earliest still-scheduled batch in the queue,
// canceling any previously armed task and skipping batches that were
// already dispatched out of band; m_lock must be held.
void SimpleSchedulerObjectDispatch<I>::schedule_dispatch_delayed_requests() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto cct = m_image_ctx->cct;
  std::lock_guard timer_locker{*m_timer_lock};
  if (m_timer_task != nullptr) {
    ldout(cct, 20) << "canceling task " << m_timer_task << dendl;
    bool canceled = m_timer->cancel_event(m_timer_task);
    ceph_assert(canceled);
    m_timer_task = nullptr;
  }
  if (m_dispatch_queue.empty()) {
    ldout(cct, 20) << "nothing to schedule" << dendl;
    return;
  }
  auto object_requests = m_dispatch_queue.front().get();
  // drop queue entries whose batches were already dispatched
  while (!object_requests->is_scheduled_dispatch()) {
    ldout(cct, 20) << "garbage collecting " << object_requests << dendl;
    m_dispatch_queue.pop_front();
    if (m_dispatch_queue.empty()) {
      ldout(cct, 20) << "nothing to schedule" << dendl;
      return;
    }
    object_requests = m_dispatch_queue.front().get();
  }
  // timer callback runs under m_timer_lock; hop to the asio engine so the
  // actual dispatch can take m_lock without lock-order issues
  m_timer_task = new LambdaContext(
      [this, object_no=object_requests->get_object_no()](int r) {
        ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
        auto cct = m_image_ctx->cct;
        ldout(cct, 20) << "running timer task " << m_timer_task << dendl;
        m_timer_task = nullptr;
        m_image_ctx->asio_engine->post(
            [this, object_no]() {
              std::lock_guard locker{m_lock};
              dispatch_delayed_requests(object_no);
            });
      });
  ldout(cct, 20) << "scheduling task " << m_timer_task << " at "
                 << object_requests->get_dispatch_time() << dendl;
  m_timer->add_event_at(object_requests->get_dispatch_time(), m_timer_task);
}
} // namespace io
} // namespace librbd
template class librbd::io::SimpleSchedulerObjectDispatch<librbd::ImageCtx>;
| 18,211 | 31.176678 | 82 | cc |
null | ceph-main/src/librbd/io/SimpleSchedulerObjectDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H
#include "common/ceph_mutex.h"
#include "include/interval_set.h"
#include "include/utime.h"
#include "librbd/io/ObjectDispatchInterface.h"
#include "librbd/io/TypeTraits.h"
#include <list>
#include <map>
#include <memory>
namespace librbd {
class ImageCtx;
namespace io {
template <typename> class FlushTracker;
class LatencyStats;
/**
* Simple scheduler plugin for object dispatcher layer.
*/
template <typename ImageCtxT = ImageCtx>
class SimpleSchedulerObjectDispatch : public ObjectDispatchInterface {
private:
  // mock unit testing support
  typedef ::librbd::io::TypeTraits<ImageCtxT> TypeTraits;
  typedef typename TypeTraits::SafeTimer SafeTimer;

public:
  // Factory helper; the caller owns the returned pointer.
  static SimpleSchedulerObjectDispatch* create(ImageCtxT* image_ctx) {
    return new SimpleSchedulerObjectDispatch(image_ctx);
  }

  SimpleSchedulerObjectDispatch(ImageCtxT* image_ctx);
  ~SimpleSchedulerObjectDispatch() override;

  ObjectDispatchLayer get_dispatch_layer() const override {
    return OBJECT_DISPATCH_LAYER_SCHEDULER;
  }

  void init();
  void shut_down(Context* on_finish) override;

  // Dispatch hooks.  Reads and the various write-type ops are intercepted
  // so that writes queued against the same object can be delayed/merged
  // before being passed down the dispatcher stack.
  bool read(
      uint64_t object_no, ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, int* object_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool discard(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write_same(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool compare_and_write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
      int* object_dispatch_flags, uint64_t* journal_tid,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool flush(
      FlushSource flush_source, const ZTracer::Trace &parent_trace,
      uint64_t* journal_tid, DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  // The remaining hooks are pass-through for this layer.
  bool list_snaps(
      uint64_t object_no, io::Extents&& extents, SnapIds&& snap_ids,
      int list_snap_flags, const ZTracer::Trace &parent_trace,
      SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }
  bool reset_existence_cache(Context* on_finish) override {
    return false;
  }

  void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) override {
  }

  int prepare_copyup(
      uint64_t object_no,
      SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
    return 0;
  }

private:
  // A contiguous run of merged write payloads plus the per-request
  // completion contexts that contributed to it.
  struct MergedRequests {
    ceph::bufferlist data;
    std::list<Context *> requests;
  };

  // Delayed write requests accumulated against a single RADOS object.
  class ObjectRequests {
  public:
    using clock_t = ceph::real_clock;

    ObjectRequests(uint64_t object_no) : m_object_no(object_no) {
    }

    uint64_t get_object_no() const {
      return m_object_no;
    }

    void set_dispatch_seq(uint64_t dispatch_seq) {
      m_dispatch_seq = dispatch_seq;
    }

    uint64_t get_dispatch_seq() const {
      return m_dispatch_seq;
    }

    clock_t::time_point get_dispatch_time() const {
      return m_dispatch_time;
    }

    void set_scheduled_dispatch(const clock_t::time_point &dispatch_time) {
      m_dispatch_time = dispatch_time;
    }

    // A zero time point means no dispatch has been scheduled yet.
    bool is_scheduled_dispatch() const {
      return !clock_t::is_zero(m_dispatch_time);
    }

    size_t delayed_requests_size() const {
      return m_delayed_requests.size();
    }

    bool intersects(uint64_t object_off, uint64_t len) const {
      return m_delayed_request_extents.intersects(object_off, len);
    }

    bool try_delay_request(uint64_t object_off, ceph::bufferlist&& data,
                           IOContext io_context, int op_flags,
                           int object_dispatch_flags, Context* on_dispatched);

    void dispatch_delayed_requests(ImageCtxT *image_ctx,
                                   LatencyStats *latency_stats,
                                   ceph::mutex *latency_stats_lock);

  private:
    uint64_t m_object_no;
    uint64_t m_dispatch_seq = 0;
    clock_t::time_point m_dispatch_time;
    IOContext m_io_context;
    int m_op_flags = 0;
    int m_object_dispatch_flags = 0;
    // keyed by object offset; extent set tracks covered byte ranges
    std::map<uint64_t, MergedRequests> m_delayed_requests;
    interval_set<uint64_t> m_delayed_request_extents;

    void try_merge_delayed_requests(
        typename std::map<uint64_t, MergedRequests>::iterator &iter,
        typename std::map<uint64_t, MergedRequests>::iterator &iter2);
  };

  typedef std::shared_ptr<ObjectRequests> ObjectRequestsRef;
  typedef std::map<uint64_t, ObjectRequestsRef> Requests;

  ImageCtxT *m_image_ctx;

  FlushTracker<ImageCtxT>* m_flush_tracker;

  ceph::mutex m_lock;
  SafeTimer *m_timer;
  ceph::mutex *m_timer_lock;
  uint64_t m_max_delay;
  uint64_t m_dispatch_seq = 0;

  // per-object delayed request state, keyed by object number
  Requests m_requests;
  std::list<ObjectRequestsRef> m_dispatch_queue;
  Context *m_timer_task = nullptr;
  std::unique_ptr<LatencyStats> m_latency_stats;

  bool try_delay_write(uint64_t object_no, uint64_t object_off,
                       ceph::bufferlist&& data, IOContext io_context,
                       int op_flags, int object_dispatch_flags,
                       Context* on_dispatched);
  bool intersects(uint64_t object_no, uint64_t object_off, uint64_t len) const;

  void dispatch_all_delayed_requests();
  void dispatch_delayed_requests(uint64_t object_no);
  void dispatch_delayed_requests(ObjectRequestsRef object_requests);
  void register_in_flight_request(uint64_t object_no, const utime_t &start_time,
                                  Context** on_finish);

  void schedule_dispatch_delayed_requests();
};
} // namespace io
} // namespace librbd
extern template class librbd::io::SimpleSchedulerObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H
| 7,396 | 31.442982 | 82 | h |
null | ceph-main/src/librbd/io/TypeTraits.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_TYPE_TRAITS_H
#define CEPH_LIBRBD_IO_TYPE_TRAITS_H
#include "common/Timer.h"
namespace librbd {
namespace io {
// Indirection point for types that unit tests replace with mocks; the
// production specialization resolves SafeTimer to the global ::SafeTimer.
template <typename IoCtxT>
struct TypeTraits {
  typedef ::SafeTimer SafeTimer;
};
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_TYPE_TRAITS_H
| 400 | 18.095238 | 70 | h |
null | ceph-main/src/librbd/io/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/Types.h"
#include <iostream>
namespace librbd {
namespace io {
const WriteReadSnapIds INITIAL_WRITE_READ_SNAP_IDS{0, 0};
// Render a SparseExtentState as its lowercase debug name.
std::ostream& operator<<(std::ostream& os, SparseExtentState state) {
  switch (state) {
  case SPARSE_EXTENT_STATE_DNE:
    return os << "dne";
  case SPARSE_EXTENT_STATE_ZEROED:
    return os << "zeroed";
  case SPARSE_EXTENT_STATE_DATA:
    return os << "data";
  default:
    // unexpected enumerator: fail hard rather than print garbage
    ceph_abort();
    return os;
  }
}
// Render a SparseExtent as "[state=<state>, length=<length>]".
std::ostream& operator<<(std::ostream& os, const SparseExtent& se) {
  return os << "[" << "state=" << se.state << ", "
            << "length=" << se.length << "]";
}
// Render an ImageArea as its lowercase debug name.
std::ostream& operator<<(std::ostream& os, ImageArea area) {
  if (area == ImageArea::DATA) {
    return os << "data";
  }
  if (area == ImageArea::CRYPTO_HEADER) {
    return os << "crypto_header";
  }
  // unexpected enumerator: fail hard rather than print garbage
  ceph_abort();
}
} // namespace io
} // namespace librbd
| 1,024 | 19.5 | 70 | cc |
null | ceph-main/src/librbd/io/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_TYPES_H
#define CEPH_LIBRBD_IO_TYPES_H
#include "include/int_types.h"
#include "include/rados/rados_types.hpp"
#include "common/interval_map.h"
#include "osdc/StriperTypes.h"
#include <iosfwd>
#include <map>
#include <vector>
struct Context;
namespace librbd {
namespace io {
typedef enum {
AIO_TYPE_NONE = 0,
AIO_TYPE_GENERIC,
AIO_TYPE_OPEN,
AIO_TYPE_CLOSE,
AIO_TYPE_READ,
AIO_TYPE_WRITE,
AIO_TYPE_DISCARD,
AIO_TYPE_FLUSH,
AIO_TYPE_WRITESAME,
AIO_TYPE_COMPARE_AND_WRITE,
} aio_type_t;
enum FlushSource {
FLUSH_SOURCE_USER,
FLUSH_SOURCE_INTERNAL,
FLUSH_SOURCE_SHUTDOWN,
FLUSH_SOURCE_EXCLUSIVE_LOCK,
FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH,
FLUSH_SOURCE_REFRESH,
FLUSH_SOURCE_WRITEBACK,
FLUSH_SOURCE_WRITE_BLOCK,
};
enum Direction {
DIRECTION_READ,
DIRECTION_WRITE,
DIRECTION_BOTH
};
enum DispatchResult {
DISPATCH_RESULT_INVALID,
DISPATCH_RESULT_RESTART,
DISPATCH_RESULT_CONTINUE,
DISPATCH_RESULT_COMPLETE
};
enum ImageDispatchLayer {
IMAGE_DISPATCH_LAYER_NONE = 0,
IMAGE_DISPATCH_LAYER_API_START = IMAGE_DISPATCH_LAYER_NONE,
IMAGE_DISPATCH_LAYER_QUEUE,
IMAGE_DISPATCH_LAYER_QOS,
IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK,
IMAGE_DISPATCH_LAYER_REFRESH,
IMAGE_DISPATCH_LAYER_INTERNAL_START = IMAGE_DISPATCH_LAYER_REFRESH,
IMAGE_DISPATCH_LAYER_MIGRATION,
IMAGE_DISPATCH_LAYER_JOURNAL,
IMAGE_DISPATCH_LAYER_WRITE_BLOCK,
IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE,
IMAGE_DISPATCH_LAYER_CRYPTO,
IMAGE_DISPATCH_LAYER_CORE,
IMAGE_DISPATCH_LAYER_LAST
};
enum {
IMAGE_DISPATCH_FLAG_QOS_IOPS_THROTTLE = 1 << 0,
IMAGE_DISPATCH_FLAG_QOS_BPS_THROTTLE = 1 << 1,
IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE = 1 << 2,
IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE = 1 << 3,
IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE = 1 << 4,
IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE = 1 << 5,
IMAGE_DISPATCH_FLAG_QOS_BPS_MASK = (
IMAGE_DISPATCH_FLAG_QOS_BPS_THROTTLE |
IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE |
IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE),
IMAGE_DISPATCH_FLAG_QOS_IOPS_MASK = (
IMAGE_DISPATCH_FLAG_QOS_IOPS_THROTTLE |
IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE |
IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE),
IMAGE_DISPATCH_FLAG_QOS_READ_MASK = (
IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE |
IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE),
IMAGE_DISPATCH_FLAG_QOS_WRITE_MASK = (
IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE |
IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE),
IMAGE_DISPATCH_FLAG_QOS_MASK = (
IMAGE_DISPATCH_FLAG_QOS_BPS_MASK |
IMAGE_DISPATCH_FLAG_QOS_IOPS_MASK),
// TODO: pass area through ImageDispatchInterface and remove
// this flag
IMAGE_DISPATCH_FLAG_CRYPTO_HEADER = 1 << 6
};
enum {
RBD_IO_OPERATIONS_DEFAULT = 0,
RBD_IO_OPERATION_READ = 1 << 0,
RBD_IO_OPERATION_WRITE = 1 << 1,
RBD_IO_OPERATION_DISCARD = 1 << 2,
RBD_IO_OPERATION_WRITE_SAME = 1 << 3,
RBD_IO_OPERATION_COMPARE_AND_WRITE = 1 << 4,
RBD_IO_OPERATIONS_ALL = (
RBD_IO_OPERATION_READ |
RBD_IO_OPERATION_WRITE |
RBD_IO_OPERATION_DISCARD |
RBD_IO_OPERATION_WRITE_SAME |
RBD_IO_OPERATION_COMPARE_AND_WRITE)
};
enum ObjectDispatchLayer {
OBJECT_DISPATCH_LAYER_NONE = 0,
OBJECT_DISPATCH_LAYER_CACHE,
OBJECT_DISPATCH_LAYER_CRYPTO,
OBJECT_DISPATCH_LAYER_JOURNAL,
OBJECT_DISPATCH_LAYER_PARENT_CACHE,
OBJECT_DISPATCH_LAYER_SCHEDULER,
OBJECT_DISPATCH_LAYER_CORE,
OBJECT_DISPATCH_LAYER_LAST
};
enum {
READ_FLAG_DISABLE_READ_FROM_PARENT = 1UL << 0,
READ_FLAG_DISABLE_CLIPPING = 1UL << 1,
};
enum {
OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE = 1UL << 0
};
enum {
OBJECT_DISCARD_FLAG_DISABLE_CLONE_REMOVE = 1UL << 0,
OBJECT_DISCARD_FLAG_DISABLE_OBJECT_MAP_UPDATE = 1UL << 1
};
enum {
OBJECT_DISPATCH_FLAG_FLUSH = 1UL << 0,
OBJECT_DISPATCH_FLAG_WILL_RETRY_ON_ERROR = 1UL << 1
};
enum {
LIST_SNAPS_FLAG_DISABLE_LIST_FROM_PARENT = 1UL << 0,
LIST_SNAPS_FLAG_WHOLE_OBJECT = 1UL << 1,
LIST_SNAPS_FLAG_IGNORE_ZEROED_EXTENTS = 1UL << 2,
};
enum SparseExtentState {
SPARSE_EXTENT_STATE_DNE, /* does not exist */
SPARSE_EXTENT_STATE_ZEROED,
SPARSE_EXTENT_STATE_DATA
};
std::ostream& operator<<(std::ostream& os, SparseExtentState state);
// One run of bytes in a sparse result: its state (dne/zeroed/data) plus
// the run length in bytes.
struct SparseExtent {
  SparseExtentState state;
  uint64_t length;

  SparseExtent(SparseExtentState state, uint64_t length)
    : state(state), length(length) {
  }

  // implicit conversion so an extent can be compared/switched on directly
  operator SparseExtentState() const {
    return state;
  }

  bool operator==(const SparseExtent& rhs) const {
    return state == rhs.state && length == rhs.length;
  }
};
std::ostream& operator<<(std::ostream& os, const SparseExtent& state);
// interval_map split/merge policy for SparseExtents: adjacent extents
// with the same state are coalesced.
struct SparseExtentSplitMerge {
  // NOTE(review): returns se.length rather than the requested sub-range
  // length -- presumably interval_map keeps its own length bookkeeping;
  // confirm against common/interval_map.h before relying on se.length.
  SparseExtent split(uint64_t offset, uint64_t length, SparseExtent &se) const {
    return SparseExtent(se.state, se.length);
  }

  bool can_merge(const SparseExtent& left, const SparseExtent& right) const {
    return left.state == right.state;
  }

  SparseExtent merge(SparseExtent&& left, SparseExtent&& right) const {
    SparseExtent se(left);
    se.length += right.length;
    return se;
  }

  uint64_t length(const SparseExtent& se) const {
    return se.length;
  }
};
typedef interval_map<uint64_t,
SparseExtent,
SparseExtentSplitMerge> SparseExtents;
typedef std::vector<uint64_t> SnapIds;
typedef std::pair<librados::snap_t, librados::snap_t> WriteReadSnapIds;
extern const WriteReadSnapIds INITIAL_WRITE_READ_SNAP_IDS;
typedef std::map<WriteReadSnapIds, SparseExtents> SnapshotDelta;
// A SparseExtent that additionally carries payload bytes.  Only DATA
// extents hold data; the constructors assert that invariant.
struct SparseBufferlistExtent : public SparseExtent {
  ceph::bufferlist bl;

  SparseBufferlistExtent(SparseExtentState state, uint64_t length)
    : SparseExtent(state, length) {
    // data-less constructor is only valid for DNE/ZEROED extents
    ceph_assert(state != SPARSE_EXTENT_STATE_DATA);
  }
  SparseBufferlistExtent(SparseExtentState state, uint64_t length,
                         ceph::bufferlist&& bl_)
    : SparseExtent(state, length), bl(std::move(bl_)) {
    // DATA extents must carry exactly 'length' payload bytes
    ceph_assert(state != SPARSE_EXTENT_STATE_DATA || length == bl.length());
  }

  bool operator==(const SparseBufferlistExtent& rhs) const {
    return (state == rhs.state &&
            length == rhs.length &&
            bl.contents_equal(rhs.bl));
  }
};
// interval_map split/merge policy for extents that may carry payload
// data: adjacent extents with the same state are coalesced, and DATA
// extent payloads are concatenated on merge / carved up on split.
struct SparseBufferlistExtentSplitMerge {
  // Return a new extent covering [offset, offset + length) of sbe.  For
  // DATA extents the matching byte range of the payload is carved out;
  // DNE/ZEROED extents carry no data.
  SparseBufferlistExtent split(uint64_t offset, uint64_t length,
                               SparseBufferlistExtent& sbe) const {
    ceph::bufferlist bl;
    if (sbe.state == SPARSE_EXTENT_STATE_DATA) {
      // bug fix: the substring must come from the source extent's
      // payload -- previously this read from the empty local 'bl',
      // which left the payload empty and tripped the
      // 'length == bl.length()' assertion in the constructor
      bl.substr_of(sbe.bl, offset, length);
    }
    return SparseBufferlistExtent(sbe.state, length, std::move(bl));
  }

  // Extents are mergeable iff their sparse states match.
  bool can_merge(const SparseBufferlistExtent& left,
                 const SparseBufferlistExtent& right) const {
    return left.state == right.state;
  }

  // Combine two adjacent extents, concatenating DATA payloads.
  SparseBufferlistExtent merge(SparseBufferlistExtent&& left,
                               SparseBufferlistExtent&& right) const {
    if (left.state == SPARSE_EXTENT_STATE_DATA) {
      ceph::bufferlist bl{std::move(left.bl)};
      bl.claim_append(std::move(right.bl));
      return SparseBufferlistExtent(SPARSE_EXTENT_STATE_DATA,
                                    bl.length(), std::move(bl));
    } else {
      return SparseBufferlistExtent(left.state, left.length + right.length, {});
    }
  }

  uint64_t length(const SparseBufferlistExtent& sbe) const {
    return sbe.length;
  }
};
typedef interval_map<uint64_t,
SparseBufferlistExtent,
SparseBufferlistExtentSplitMerge> SparseBufferlist;
typedef std::map<uint64_t, SparseBufferlist> SnapshotSparseBufferlist;
using striper::LightweightBufferExtents;
using striper::LightweightObjectExtent;
using striper::LightweightObjectExtents;
typedef std::pair<uint64_t,uint64_t> Extent;
typedef std::vector<Extent> Extents;
enum class ImageArea {
DATA,
CRYPTO_HEADER
};
std::ostream& operator<<(std::ostream& os, ImageArea area);
// One extent of an object read: the requested [offset, length) range,
// the image-buffer positions it maps back to, and (after completion)
// the returned data plus its sparse extent map.
struct ReadExtent {
  const uint64_t offset;
  const uint64_t length;
  const LightweightBufferExtents buffer_extents;
  ceph::bufferlist bl;       // filled in by the read
  Extents extent_map;        // sparseness of bl relative to offset

  ReadExtent(uint64_t offset,
             uint64_t length) : offset(offset), length(length) {};
  ReadExtent(uint64_t offset,
             uint64_t length,
             const LightweightBufferExtents&& buffer_extents)
    : offset(offset),
      length(length),
      buffer_extents(buffer_extents) {}
  ReadExtent(uint64_t offset,
             uint64_t length,
             const LightweightBufferExtents&& buffer_extents,
             ceph::bufferlist&& bl,
             Extents&& extent_map) : offset(offset),
                                     length(length),
                                     buffer_extents(buffer_extents),
                                     bl(bl),
                                     extent_map(extent_map) {};

  friend inline std::ostream& operator<<(
      std::ostream& os,
      const ReadExtent &extent) {
    os << "offset=" << extent.offset << ", "
       << "length=" << extent.length << ", "
       << "buffer_extents=" << extent.buffer_extents << ", "
       << "bl.length=" << extent.bl.length() << ", "
       << "extent_map=" << extent.extent_map;
    return os;
  }
};
typedef std::vector<ReadExtent> ReadExtents;
typedef std::map<uint64_t, uint64_t> ExtentMap;
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_TYPES_H
| 9,779 | 28.726444 | 80 | h |
null | ceph-main/src/librbd/io/Utils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/Utils.h"
#include "common/dout.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/neorados/RADOS.hpp"
#include "librbd/internal.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ObjectRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "osd/osd_types.h"
#include "osdc/Striper.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::util: " << __func__ << ": "
namespace librbd {
namespace io {
namespace util {
// Translate librados per-op fadvise flags (op_flags) and per-operation
// flags (flags) onto an in-flight neorados::Op.
void apply_op_flags(uint32_t op_flags, uint32_t flags, neorados::Op* op) {
  if (op_flags & LIBRADOS_OP_FLAG_FADVISE_RANDOM)
    op->set_fadvise_random();
  if (op_flags & LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL)
    op->set_fadvise_sequential();
  if (op_flags & LIBRADOS_OP_FLAG_FADVISE_WILLNEED)
    op->set_fadvise_willneed();
  if (op_flags & LIBRADOS_OP_FLAG_FADVISE_DONTNEED)
    op->set_fadvise_dontneed();
  if (op_flags & LIBRADOS_OP_FLAG_FADVISE_NOCACHE)
    op->set_fadvise_nocache();

  if (flags & librados::OPERATION_BALANCE_READS)
    op->balance_reads();
  if (flags & librados::OPERATION_LOCALIZE_READS)
    op->localize_reads();
}
// Build the payload for a write-same against one object extent.
//
// Returns true when the repeating 'data' pattern can be used directly
// (every buffer extent starts and ends on a pattern boundary), in which
// case *ws_data is just a copy of 'data'.  Otherwise (or when
// force_write is set) the pattern is fully unrolled across all buffer
// extents into *ws_data and false is returned, so the caller must issue
// a plain write instead.
bool assemble_write_same_extent(
    const LightweightObjectExtent &object_extent, const ceph::bufferlist& data,
    ceph::bufferlist *ws_data, bool force_write) {
  size_t data_len = data.length();

  if (!force_write) {
    bool may_writesame = true;
    for (auto& q : object_extent.buffer_extents) {
      // pattern must tile each buffer extent exactly
      if (!(q.first % data_len == 0 && q.second % data_len == 0)) {
        may_writesame = false;
        break;
      }
    }

    if (may_writesame) {
      ws_data->append(data);
      return true;
    }
  }

  // unroll the pattern: repeat it across each buffer extent, keeping the
  // pattern phase aligned to the extent's starting offset
  for (auto& q : object_extent.buffer_extents) {
    bufferlist sub_bl;
    uint64_t sub_off = q.first % data_len;
    uint64_t sub_len = data_len - sub_off;
    uint64_t extent_left = q.second;
    while (extent_left >= sub_len) {
      sub_bl.substr_of(data, sub_off, sub_len);
      ws_data->claim_append(sub_bl);
      extent_left -= sub_len;
      if (sub_off) {
        sub_off = 0;
        sub_len = data_len;
      }
    }
    if (extent_left) {
      // trailing partial repetition
      sub_bl.substr_of(data, sub_off, extent_left);
      ws_data->claim_append(sub_bl);
    }
  }
  return false;
}
// Satisfy object read extents from the parent (cloned-from) image.
//
// Reverse-maps the object extents onto the parent's image space, prunes
// them against the parent overlap and, if nothing overlaps, completes
// on_finish with -ENOENT.  Otherwise issues a single image read against
// the parent; when multiple extents are requested the merged result is
// re-split per extent by C_ObjectReadMergedExtents.
template <typename I>
void read_parent(I *image_ctx, uint64_t object_no, ReadExtents* read_extents,
                 librados::snap_t snap_id, const ZTracer::Trace &trace,
                 Context* on_finish) {

  auto cct = image_ctx->cct;

  std::shared_lock image_locker{image_ctx->image_lock};

  Extents parent_extents;
  ImageArea area;
  uint64_t raw_overlap = 0;
  uint64_t object_overlap = 0;
  image_ctx->get_parent_overlap(snap_id, &raw_overlap);
  if (raw_overlap > 0) {
    // calculate reverse mapping onto the parent image
    Extents extents;
    for (const auto& extent : *read_extents) {
      extents.emplace_back(extent.offset, extent.length);
    }
    std::tie(parent_extents, area) = object_to_area_extents(image_ctx,
                                                            object_no, extents);
    object_overlap = image_ctx->prune_parent_extents(parent_extents, area,
                                                     raw_overlap, false);
  }
  if (object_overlap == 0) {
    // nothing to read from the parent; drop the lock before completing
    image_locker.unlock();

    on_finish->complete(-ENOENT);
    return;
  }

  ldout(cct, 20) << dendl;

  ceph::bufferlist* parent_read_bl;
  if (read_extents->size() > 1) {
    // merged read: one parent I/O, results split back per extent
    auto parent_comp = new ReadResult::C_ObjectReadMergedExtents(
        cct, read_extents, on_finish);
    parent_read_bl = &parent_comp->bl;
    on_finish = parent_comp;
  } else {
    parent_read_bl = &read_extents->front().bl;
  }

  auto comp = AioCompletion::create_and_start(on_finish, image_ctx->parent,
                                              AIO_TYPE_READ);
  ldout(cct, 20) << "completion=" << comp
                 << " parent_extents=" << parent_extents
                 << " area=" << area << dendl;
  auto req = io::ImageDispatchSpec::create_read(
      *image_ctx->parent, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, comp,
      std::move(parent_extents), area, ReadResult{parent_read_bl},
      image_ctx->parent->get_data_io_context(), 0, 0, trace);
  req->send();
}
// Clip each image extent against the image bounds (via clip_io) under a
// shared image lock.  Extent lengths are shortened in place; returns the
// first error from clip_io, else 0.
template <typename I>
int clip_request(I* image_ctx, Extents* image_extents, ImageArea area) {
  std::shared_lock image_locker{image_ctx->image_lock};
  for (auto &image_extent : *image_extents) {
    auto clip_len = image_extent.second;
    int r = clip_io(librbd::util::get_image_ctx(image_ctx),
                    image_extent.first, &clip_len, area);
    if (r < 0) {
      return r;
    }
    image_extent.second = clip_len;
  }
  return 0;
}
// Expand a sparse read result in place: *bl (described by extent_map,
// starting at bl_off) is rewritten as a dense, zero-filled buffer of
// out_bl_len bytes using the striper's assembly helper.
void unsparsify(CephContext* cct, ceph::bufferlist* bl,
                const Extents& extent_map, uint64_t bl_off,
                uint64_t out_bl_len) {
  Striper::StripedReadResult destriper;
  bufferlist out_bl;

  destriper.add_partial_sparse_result(cct, std::move(*bl), extent_map, bl_off,
                                      {{0, out_bl_len}});
  destriper.assemble_result(cct, out_bl, true);
  *bl = out_bl;
}
// Issue a zero-length object write, presumably to drive the copy-up
// path for a cloned object (TODO confirm against ObjectWriteRequest).
// Returns false (and sends nothing) when the object has no parent.
template <typename I>
bool trigger_copyup(I* image_ctx, uint64_t object_no, IOContext io_context,
                    Context* on_finish) {
  bufferlist bl;
  auto req = new ObjectWriteRequest<I>(
      image_ctx, object_no, 0, std::move(bl), io_context, 0, 0,
      std::nullopt, {}, on_finish);
  if (!req->has_parent()) {
    // no parent: nothing to copy up; caller keeps ownership of on_finish
    delete req;
    return false;
  }

  req->send();
  return true;
}
// Map an image-area extent to per-object extents: first remap the
// logical (area, offset) to raw image offsets, then stripe each raw
// range across objects via Striper::file_to_extents.
template <typename I>
void area_to_object_extents(I* image_ctx, uint64_t offset, uint64_t length,
                            ImageArea area, uint64_t buffer_offset,
                            striper::LightweightObjectExtents* object_extents) {
  Extents extents = {{offset, length}};
  image_ctx->io_image_dispatcher->remap_to_physical(extents, area);
  for (auto [off, len] : extents) {
    Striper::file_to_extents(image_ctx->cct, &image_ctx->layout, off, len, 0,
                             buffer_offset, object_extents);
  }
}
// Reverse of area_to_object_extents: map per-object extents back to raw
// image extents (Striper::extent_to_file), then remap those to a
// logical area; returns the extents plus the area they belong to.
template <typename I>
std::pair<Extents, ImageArea> object_to_area_extents(
    I* image_ctx, uint64_t object_no, const Extents& object_extents) {
  Extents extents;
  for (auto [off, len] : object_extents) {
    Striper::extent_to_file(image_ctx->cct, &image_ctx->layout, object_no, off,
                            len, extents);
  }
  auto area = image_ctx->io_image_dispatcher->remap_to_logical(extents);
  return {std::move(extents), area};
}
// Convert a single (area, offset) position to a raw image offset using
// a zero-length extent as the remap vehicle.
template <typename I>
uint64_t area_to_raw_offset(const I& image_ctx, uint64_t offset,
                            ImageArea area) {
  Extents extents = {{offset, 0}};
  image_ctx.io_image_dispatcher->remap_to_physical(extents, area);
  return extents[0].first;
}
// Convert a raw image offset back to its (offset, area) pair using a
// zero-length extent as the remap vehicle.
template <typename I>
std::pair<uint64_t, ImageArea> raw_to_area_offset(const I& image_ctx,
                                                  uint64_t offset) {
  Extents extents = {{offset, 0}};
  auto area = image_ctx.io_image_dispatcher->remap_to_logical(extents);
  return {extents[0].first, area};
}
} // namespace util
} // namespace io
} // namespace librbd
template void librbd::io::util::read_parent(
librbd::ImageCtx *image_ctx, uint64_t object_no, ReadExtents* extents,
librados::snap_t snap_id, const ZTracer::Trace &trace, Context* on_finish);
template int librbd::io::util::clip_request(
librbd::ImageCtx* image_ctx, Extents* image_extents, ImageArea area);
template bool librbd::io::util::trigger_copyup(
librbd::ImageCtx *image_ctx, uint64_t object_no, IOContext io_context,
Context* on_finish);
template void librbd::io::util::area_to_object_extents(
librbd::ImageCtx* image_ctx, uint64_t offset, uint64_t length,
ImageArea area, uint64_t buffer_offset,
striper::LightweightObjectExtents* object_extents);
template auto librbd::io::util::object_to_area_extents(
librbd::ImageCtx* image_ctx, uint64_t object_no, const Extents& extents)
-> std::pair<Extents, ImageArea>;
template uint64_t librbd::io::util::area_to_raw_offset(
const librbd::ImageCtx& image_ctx, uint64_t offset, ImageArea area);
template auto librbd::io::util::raw_to_area_offset(
const librbd::ImageCtx& image_ctx, uint64_t offset)
-> std::pair<uint64_t, ImageArea>;
| 8,435 | 32.744 | 80 | cc |
null | ceph-main/src/librbd/io/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_UTILS_H
#define CEPH_LIBRBD_IO_UTILS_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/rados/rados_types.hpp"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
#include <map>
class ObjectExtent;
namespace neorados { struct Op; }
namespace librbd {
struct ImageCtx;
namespace io {
namespace util {
void apply_op_flags(uint32_t op_flags, uint32_t flags, neorados::Op* op);
bool assemble_write_same_extent(const LightweightObjectExtent &object_extent,
const ceph::bufferlist& data,
ceph::bufferlist *ws_data,
bool force_write);
template <typename ImageCtxT = librbd::ImageCtx>
void read_parent(ImageCtxT *image_ctx, uint64_t object_no,
ReadExtents* read_extents, librados::snap_t snap_id,
const ZTracer::Trace &trace, Context* on_finish);
template <typename ImageCtxT = librbd::ImageCtx>
int clip_request(ImageCtxT* image_ctx, Extents* image_extents, ImageArea area);
// Sum of all extent lengths; offsets are ignored.
inline uint64_t get_extents_length(const Extents &extents) {
  uint64_t sum = 0;
  for (const auto& extent : extents) {
    sum += extent.second;
  }
  return sum;
}
void unsparsify(CephContext* cct, ceph::bufferlist* bl,
const Extents& extent_map, uint64_t bl_off,
uint64_t out_bl_len);
template <typename ImageCtxT = librbd::ImageCtx>
bool trigger_copyup(ImageCtxT *image_ctx, uint64_t object_no,
IOContext io_context, Context* on_finish);
template <typename ImageCtxT = librbd::ImageCtx>
void area_to_object_extents(ImageCtxT* image_ctx, uint64_t offset,
uint64_t length, ImageArea area,
uint64_t buffer_offset,
striper::LightweightObjectExtents* object_extents);
template <typename ImageCtxT = librbd::ImageCtx>
std::pair<Extents, ImageArea> object_to_area_extents(
ImageCtxT* image_ctx, uint64_t object_no, const Extents& object_extents);
template <typename ImageCtxT = librbd::ImageCtx>
uint64_t area_to_raw_offset(const ImageCtxT& image_ctx, uint64_t offset,
ImageArea area);
template <typename ImageCtxT = librbd::ImageCtx>
std::pair<uint64_t, ImageArea> raw_to_area_offset(const ImageCtxT& image_ctx,
uint64_t offset);
// Return the enumerator immediately preceding 'layer' in the
// ObjectDispatchLayer ordering; caller must not pass the first
// enumerator.  Named casts replace the original C-style casts.
inline ObjectDispatchLayer get_previous_layer(ObjectDispatchLayer layer) {
  return static_cast<ObjectDispatchLayer>(static_cast<int>(layer) - 1);
}
} // namespace util
} // namespace io
} // namespace librbd
#endif // CEPH_LIBRBD_IO_UTILS_H
| 2,803 | 32.380952 | 79 | h |
null | ceph-main/src/librbd/io/WriteBlockImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/io/WriteBlockImageDispatch.h"
#include "common/dout.h"
#include "common/Cond.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::WriteBlockImageDispatch: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace io {
template <typename I>
struct WriteBlockImageDispatch<I>::C_BlockedWrites : public Context {
WriteBlockImageDispatch *dispatch;
explicit C_BlockedWrites(WriteBlockImageDispatch *dispatch)
: dispatch(dispatch) {
}
void finish(int r) override {
dispatch->handle_blocked_writes(r);
}
};
// Constructor: records the image context and creates the per-instance
// lock with a unique debug name.
template <typename I>
WriteBlockImageDispatch<I>::WriteBlockImageDispatch(I* image_ctx)
  : m_image_ctx(image_ctx),
    m_lock(ceph::make_shared_mutex(
      util::unique_lock_name("librbd::io::WriteBlockImageDispatch::m_lock",
                             this))) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "ictx=" << image_ctx << dendl;
}
// This layer holds no resources requiring asynchronous teardown, so
// shut-down completes immediately.
template <typename I>
void WriteBlockImageDispatch<I>::shut_down(Context* on_finish) {
  on_finish->complete(0);
}
// Synchronous variant: blocks the calling thread until writes are
// blocked and all in-flight writes have drained.
template <typename I>
int WriteBlockImageDispatch<I>::block_writes() {
  C_SaferCond cond_ctx;
  block_writes(&cond_ctx);
  return cond_ctx.wait();
}
// Asynchronous write blocking: bumps the blocker count and completes
// on_blocked once every in-flight write has drained (via flush_io /
// handle_blocked_writes).  Caller must hold owner_lock; the callback is
// made async so owner_lock is not held when it fires.
//
// Fix: removed the stray ';' that previously followed the closing brace
// of this function body (empty declaration at namespace scope).
template <typename I>
void WriteBlockImageDispatch<I>::block_writes(Context *on_blocked) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
  auto cct = m_image_ctx->cct;

  // ensure owner lock is not held after block_writes completes
  on_blocked = util::create_async_context_callback(
    *m_image_ctx, on_blocked);

  {
    std::unique_lock locker{m_lock};
    ++m_write_blockers;
    ldout(cct, 5) << m_image_ctx << ", "
                  << "num=" << m_write_blockers << dendl;
    if (!m_write_blocker_contexts.empty() || m_in_flight_writes > 0) {
      // defer completion until the in-flight writes drain
      ldout(cct, 5) << "waiting for in-flight writes to complete: "
                    << "in_flight_writes=" << m_in_flight_writes << dendl;
      m_write_blocker_contexts.push_back(on_blocked);
      return;
    }
  }

  // no writes in flight -- flush lower layers and complete immediately
  flush_io(on_blocked);
}
// Drop one write blocker; when the count reaches zero, complete any
// unblock waiters and restart the queued (restarted) dispatches.
// Contexts are completed outside m_lock to avoid lock cycles.
template <typename I>
void WriteBlockImageDispatch<I>::unblock_writes() {
  auto cct = m_image_ctx->cct;

  Contexts waiter_contexts;
  Contexts dispatch_contexts;
  {
    std::unique_lock locker{m_lock};
    ceph_assert(m_write_blockers > 0);
    --m_write_blockers;

    ldout(cct, 5) << m_image_ctx << ", "
                  << "num=" << m_write_blockers << dendl;
    if (m_write_blockers == 0) {
      std::swap(waiter_contexts, m_unblocked_write_waiter_contexts);
      std::swap(dispatch_contexts, m_on_dispatches);
    }
  }

  for (auto ctx : waiter_contexts) {
    ctx->complete(0);
  }

  for (auto ctx : dispatch_contexts) {
    ctx->complete(0);
  }
}
// Complete on_unblocked once no write blockers remain; queues the
// context (FIFO behind earlier waiters) if blockers are outstanding.
// Caller must hold owner_lock.
template <typename I>
void WriteBlockImageDispatch<I>::wait_on_writes_unblocked(
    Context *on_unblocked) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
  auto cct = m_image_ctx->cct;

  {
    std::unique_lock locker{m_lock};
    ldout(cct, 20) << m_image_ctx << ", "
                   << "write_blockers=" << m_write_blockers << dendl;
    if (!m_unblocked_write_waiter_contexts.empty() || m_write_blockers > 0) {
      m_unblocked_write_waiter_contexts.push_back(on_unblocked);
      return;
    }
  }

  on_unblocked->complete(0);
}
// Write: subject to write blocking via the shared process_io helper.
template <typename I>
bool WriteBlockImageDispatch<I>::write(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;

  return process_io(tid, dispatch_result, on_finish, on_dispatched);
}
// Discard: subject to write blocking via the shared process_io helper.
template <typename I>
bool WriteBlockImageDispatch<I>::discard(
    AioCompletion* aio_comp, Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;

  return process_io(tid, dispatch_result, on_finish, on_dispatched);
}
// Write-same: subject to write blocking via the shared process_io helper.
template <typename I>
bool WriteBlockImageDispatch<I>::write_same(
    AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;

  return process_io(tid, dispatch_result, on_finish, on_dispatched);
}
// Compare-and-write: subject to write blocking via the shared
// process_io helper.
template <typename I>
bool WriteBlockImageDispatch<I>::compare_and_write(
    AioCompletion* aio_comp, Extents &&image_extents,
    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;

  return process_io(tid, dispatch_result, on_finish, on_dispatched);
}
// Flush: only user-initiated flushes are subject to write blocking;
// internally-sourced flushes pass straight through to lower layers.
template <typename I>
bool WriteBlockImageDispatch<I>::flush(
    AioCompletion* aio_comp, FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;

  if (flush_source != FLUSH_SOURCE_USER) {
    return false;
  }

  return process_io(tid, dispatch_result, on_finish, on_dispatched);
}
// Invoked when a tracked write completes: decrement the in-flight
// count and, if blockers are now waiting with nothing left in flight,
// flush lower layers so the blocked-writes contexts can fire.
template <typename I>
void WriteBlockImageDispatch<I>::handle_finished(int r, uint64_t tid) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl;

  std::unique_lock locker{m_lock};
  ceph_assert(m_in_flight_writes > 0);
  --m_in_flight_writes;

  bool writes_blocked = false;
  if (m_write_blockers > 0 && m_in_flight_writes == 0) {
    ldout(cct, 10) << "flushing all in-flight IO for blocked writes" << dendl;
    writes_blocked = true;
  }
  locker.unlock();

  // flush_io issued outside the lock; its completion notifies blockers
  if (writes_blocked) {
    flush_io(new C_BlockedWrites(this));
  }
}
// Common gate for all modifying ops.  Returns true (op handled, will be
// restarted later) when writes are blocked or earlier ops are already
// queued -- queueing behind m_on_dispatches preserves dispatch order.
// Otherwise counts the op in flight, chains *on_finish through
// handle_finished, and returns false so dispatch continues.
template <typename I>
bool WriteBlockImageDispatch<I>::process_io(
    uint64_t tid, DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  std::unique_lock locker{m_lock};
  if (m_write_blockers > 0 || !m_on_dispatches.empty()) {
    *dispatch_result = DISPATCH_RESULT_RESTART;
    m_on_dispatches.push_back(on_dispatched);
    return true;
  }

  ++m_in_flight_writes;
  *on_finish = new LambdaContext([this, tid, on_finish=*on_finish](int r) {
      handle_finished(r, tid);
      on_finish->complete(r);
    });
  return false;
}
// Issue an internal flush (source WRITE_BLOCK) from this layer so all
// lower-layer in-flight I/O completes before on_finish fires.
template <typename I>
void WriteBlockImageDispatch<I>::flush_io(Context* on_finish) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 10) << dendl;

  // ensure that all in-flight IO is flushed
  auto aio_comp = AioCompletion::create_and_start(
    on_finish, util::get_image_ctx(m_image_ctx), librbd::io::AIO_TYPE_FLUSH);
  auto req = ImageDispatchSpec::create_flush(
    *m_image_ctx, IMAGE_DISPATCH_LAYER_WRITE_BLOCK, aio_comp,
    FLUSH_SOURCE_WRITE_BLOCK, {});
  req->send();
}
// The blocked-writes flush has completed: fire every queued
// block_writes() callback.  The waiter list is detached under the lock
// and the contexts are completed outside it.
template <typename I>
void WriteBlockImageDispatch<I>::handle_blocked_writes(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 10) << dendl;

  Contexts blocked_ctxs;
  {
    std::unique_lock locker{m_lock};
    blocked_ctxs = std::move(m_write_blocker_contexts);
    m_write_blocker_contexts.clear();
  }

  for (auto ctx : blocked_ctxs) {
    ctx->complete(0);
  }
}
} // namespace io
} // namespace librbd
template class librbd::io::WriteBlockImageDispatch<librbd::ImageCtx>;
| 8,189 | 29.221402 | 79 | cc |
null | ceph-main/src/librbd/io/WriteBlockImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_WRITE_BLOCK_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_IO_WRITE_BLOCK_IMAGE_DISPATCH_H
#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/ceph_mutex.h"
#include "common/zipkin_trace.h"
#include "common/Throttle.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include <list>
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
// Image-level dispatch layer that can temporarily block write-like IO
// (write, discard, write_same, compare_and_write and user-initiated
// flushes) while letting reads and snapshot listings pass straight
// through.  While blocked, incoming write-like IOs are queued and
// restarted once writes are unblocked.
template <typename ImageCtxT>
class WriteBlockImageDispatch : public ImageDispatchInterface {
public:
  WriteBlockImageDispatch(ImageCtxT* image_ctx);

  ImageDispatchLayer get_dispatch_layer() const override {
    return IMAGE_DISPATCH_LAYER_WRITE_BLOCK;
  }

  void shut_down(Context* on_finish) override;

  // increment the write-block counter (multiple blockers may stack)
  int block_writes();
  // as above, completing on_blocked once all in-flight writes drain
  void block_writes(Context *on_blocked);
  // decrement the write-block counter; queued IOs restart at zero
  void unblock_writes();

  // true while at least one write blocker is active
  inline bool writes_blocked() const {
    std::shared_lock locker{m_lock};
    return (m_write_blockers > 0);
  }

  // completes on_unblocked once no write blockers remain
  void wait_on_writes_unblocked(Context *on_unblocked);

  // reads are never blocked by this layer
  bool read(
      AioCompletion* aio_comp, Extents &&image_extents,
      ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool write(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      AioCompletion* aio_comp, Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      AioCompletion* aio_comp, Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      AioCompletion* aio_comp, FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  // snapshot listing is never blocked by this layer
  bool list_snaps(
      AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
      int list_snaps_flags, SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

private:
  struct C_BlockedWrites;

  typedef std::list<Context*> Contexts;

  ImageCtxT* m_image_ctx;

  // protects all state below
  mutable ceph::shared_mutex m_lock;
  // IOs deferred while writes were blocked; restarted on unblock
  Contexts m_on_dispatches;

  // number of active write blockers (block_writes / unblock_writes)
  uint32_t m_write_blockers = 0;
  // waiters from block_writes(Context*), fired once in-flight IO drains
  Contexts m_write_blocker_contexts;
  // waiters from wait_on_writes_unblocked()
  Contexts m_unblocked_write_waiter_contexts;
  // count of currently dispatched (not yet finished) write-like IOs
  uint64_t m_in_flight_writes = 0;

  void handle_finished(int r, uint64_t tid);

  bool process_io(uint64_t tid, DispatchResult* dispatch_result,
                  Context** on_finish, Context* on_dispatched);
  void flush_io(Context* on_finish);

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }

  void handle_blocked_writes(int r);
};
} // namespace io
} // namespace librbd
extern template class librbd::io::WriteBlockImageDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_WRITE_BLOCK_IMAGE_DISPATCH_H
| 4,333 | 31.103704 | 77 | h |
null | ceph-main/src/librbd/journal/CreateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/dout.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "librbd/Utils.h"
#include "common/Timer.h"
#include "journal/Settings.h"
#include "librbd/journal/CreateRequest.h"
#include "librbd/journal/RemoveRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Journal::CreateRequest: "
namespace librbd {
using util::create_context_callback;
namespace journal {
// Construct a journal-creation request.  No work is performed until
// send() is invoked; on_finish is completed with the final result and the
// request deletes itself.
template<typename I>
CreateRequest<I>::CreateRequest(IoCtx &ioctx, const std::string &imageid,
                                uint8_t order, uint8_t splay_width,
                                const std::string &object_pool,
                                uint64_t tag_class, TagData &tag_data,
                                const std::string &client_id,
                                ContextWQ *op_work_queue,
                                Context *on_finish)
  : m_ioctx(ioctx), m_image_id(imageid), m_order(order),
    m_splay_width(splay_width), m_object_pool(object_pool),
    m_tag_class(tag_class), m_tag_data(tag_data), m_image_client_id(client_id),
    m_op_work_queue(op_work_queue), m_on_finish(on_finish) {
  m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
}
// Entry point: validate the requested journal geometry, then start the
// asynchronous creation state machine (pool lookup -> journal creation ->
// tag allocation -> client registration).
template<typename I>
void CreateRequest<I>::send() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;

  if (m_order > 64 || m_order < 12) {
    lderr(m_cct) << "order must be in the range [12, 64]" << dendl;
    complete(-EDOM);
    return;
  }
  if (m_splay_width == 0) {
    // previously failed silently; log the reason for the -EINVAL to be
    // consistent with the order validation above
    lderr(m_cct) << "splay width cannot be zero" << dendl;
    complete(-EINVAL);
    return;
  }
  get_pool_id();
}
template <typename I>
void CreateRequest<I>::get_pool_id() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;

  // an empty pool name means the journal data stays in the image's pool
  if (m_object_pool.empty()) {
    create_journal();
    return;
  }

  // resolve the data pool name to its id, inheriting the image's namespace
  librados::Rados rados(m_ioctx);
  IoCtx pool_ioctx;
  int r = rados.ioctx_create(m_object_pool.c_str(), pool_ioctx);
  if (r != 0) {
    lderr(m_cct) << "failed to create journal: "
                 << "error opening journal object pool '" << m_object_pool
                 << "': " << cpp_strerror(r) << dendl;
    complete(r);
    return;
  }
  pool_ioctx.set_namespace(m_ioctx.get_namespace());
  m_pool_id = pool_ioctx.get_id();

  create_journal();
}
template <typename I>
void CreateRequest<I>::create_journal() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;

  // the journaler shares the image's timer instance for its scheduling
  ImageCtx::get_timer_instance(m_cct, &m_timer, &m_timer_lock);
  m_journaler = new Journaler(m_op_work_queue, m_timer, m_timer_lock, m_ioctx,
                              m_image_id, m_image_client_id, {}, nullptr);

  // asynchronously create the journal metadata/data objects
  auto ctx = create_context_callback<
    CreateRequest<I>, &CreateRequest<I>::handle_create_journal>(this);
  m_journaler->create(m_order, m_splay_width, m_pool_id, ctx);
}
template <typename I>
Context *CreateRequest<I>::handle_create_journal(int *result) {
  ldout(m_cct, 20) << __func__ << ": r=" << *result << dendl;

  // on failure, tear down the journaler (which also triggers rollback);
  // otherwise proceed to tag allocation
  if (*result < 0) {
    lderr(m_cct) << "failed to create journal: " << cpp_strerror(*result) << dendl;
    shut_down_journaler(*result);
  } else {
    allocate_journal_tag();
  }
  return nullptr;
}
template <typename I>
void CreateRequest<I>::allocate_journal_tag() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;

  // serialize the tag data; the allocated tag is returned through m_tag
  encode(m_tag_data, m_bl);

  auto ctx = create_context_callback<
    CreateRequest<I>, &CreateRequest<I>::handle_journal_tag>(this);
  m_journaler->allocate_tag(m_tag_class, m_bl, &m_tag, ctx);
}
template <typename I>
Context *CreateRequest<I>::handle_journal_tag(int *result) {
  ldout(m_cct, 20) << __func__ << ": r=" << *result << dendl;

  // on failure, shut down (and roll back); otherwise register the client
  if (*result < 0) {
    lderr(m_cct) << "failed to allocate tag: " << cpp_strerror(*result) << dendl;
    shut_down_journaler(*result);
  } else {
    register_client();
  }
  return nullptr;
}
template <typename I>
void CreateRequest<I>::register_client() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;

  // reuse the bufferlist for the serialized client metadata, which records
  // the tag class allocated in the previous step
  m_bl.clear();
  encode(ClientData{ImageClientMeta{m_tag.tag_class}}, m_bl);

  auto ctx = create_context_callback<
    CreateRequest<I>, &CreateRequest<I>::handle_register_client>(this);
  m_journaler->register_client(m_bl, ctx);
}
template<typename I>
Context *CreateRequest<I>::handle_register_client(int *result) {
  ldout(m_cct, 20) << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(m_cct) << "failed to register client: " << cpp_strerror(*result) << dendl;
  }
  // registration is the final step; success or failure, the journaler must
  // be shut down (a failure result triggers rollback afterwards)
  shut_down_journaler(*result);
  return nullptr;
}
template <typename I>
void CreateRequest<I>::shut_down_journaler(int r) {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;

  // remember the state-machine result so it survives the shutdown sequence
  m_r_saved = r;

  auto ctx = create_context_callback<
    CreateRequest<I>, &CreateRequest<I>::handle_journaler_shutdown>(this);
  m_journaler->shut_down(ctx);
}
template <typename I>
Context *CreateRequest<I>::handle_journaler_shutdown(int *result) {
  ldout(m_cct, 20) << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(m_cct) << "failed to shut down journaler: " << cpp_strerror(*result) << dendl;
  }

  delete m_journaler;

  if (m_r_saved == 0) {
    complete(0);
  } else {
    // creation failed part-way through: roll back whatever was done by
    // running the journal removal state machine.  Not the leanest approach
    // in terms of redundancy, but it keeps the failure path simple.
    remove_journal();
  }
  return nullptr;
}
template <typename I>
void CreateRequest<I>::remove_journal() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;

  // delegate cleanup to the journal removal state machine
  auto ctx = create_context_callback<
    CreateRequest<I>, &CreateRequest<I>::handle_remove_journal>(this);
  auto req = RemoveRequest<I>::create(m_ioctx, m_image_id, m_image_client_id,
                                      m_op_work_queue, ctx);
  req->send();
}
template<typename I>
Context *CreateRequest<I>::handle_remove_journal(int *result) {
  ldout(m_cct, 20) << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(m_cct) << "error cleaning up journal after creation failed: "
                 << cpp_strerror(*result) << dendl;
  }
  // report the original creation failure, not the cleanup result
  complete(m_r_saved);
  return nullptr;
}
// Complete the caller's context with the final result and self-destruct;
// the request is heap-allocated and owns its own lifetime.
template<typename I>
void CreateRequest<I>::complete(int r) {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;
  if (r == 0) {
    ldout(m_cct, 20) << "done." << dendl;
  }
  m_on_finish->complete(r);
  delete this;
}
} // namespace journal
} // namespace librbd
template class librbd::journal::CreateRequest<librbd::ImageCtx>;
| 6,623 | 27.187234 | 89 | cc |
null | ceph-main/src/librbd/journal/CreateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_CREATE_REQUEST_H
#define CEPH_LIBRBD_JOURNAL_CREATE_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "librbd/ImageCtx.h"
#include "journal/Journaler.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"
#include "cls/journal/cls_journal_types.h"
using librados::IoCtx;
using journal::Journaler;
class Context;
class ContextWQ;
namespace journal {
class Journaler;
}
namespace librbd {
class ImageCtx;
namespace journal {
// Asynchronous state machine that creates an image journal: resolves the
// (optional) dedicated object pool, creates the journal via a Journaler,
// allocates the initial tag and registers the image client.  On any
// failure, the partially created journal is removed before completing.
template<typename ImageCtxT = ImageCtx>
class CreateRequest {
public:
  static CreateRequest *create(IoCtx &ioctx, const std::string &imageid,
                               uint8_t order, uint8_t splay_width,
                               const std::string &object_pool,
                               uint64_t tag_class, TagData &tag_data,
                               const std::string &client_id,
                               ContextWQ *op_work_queue, Context *on_finish) {
    return new CreateRequest(ioctx, imageid, order, splay_width, object_pool,
                             tag_class, tag_data, client_id, op_work_queue,
                             on_finish);
  }

  void send();

private:
  typedef typename TypeTraits<ImageCtxT>::Journaler Journaler;

  CreateRequest(IoCtx &ioctx, const std::string &imageid, uint8_t order,
                uint8_t splay_width, const std::string &object_pool,
                uint64_t tag_class, TagData &tag_data,
                const std::string &client_id, ContextWQ *op_work_queue,
                Context *on_finish);

  IoCtx &m_ioctx;
  std::string m_image_id;
  // journal object size order and splay width (validated in send())
  uint8_t m_order;
  uint8_t m_splay_width;
  // optional pool for journal data objects; empty means the image's pool
  std::string m_object_pool;
  uint64_t m_tag_class;
  TagData m_tag_data;
  std::string m_image_client_id;
  ContextWQ *m_op_work_queue;
  Context *m_on_finish;

  CephContext *m_cct;
  // tag allocated during creation
  cls::journal::Tag m_tag;
  // scratch bufferlist reused for tag data and client metadata encoding
  bufferlist m_bl;
  Journaler *m_journaler;
  SafeTimer *m_timer;
  ceph::mutex *m_timer_lock;
  // first failure encountered, reported after rollback completes
  int m_r_saved;

  // resolved data pool id; -1 means use the image's pool
  int64_t m_pool_id = -1;

  void get_pool_id();

  void create_journal();
  Context *handle_create_journal(int *result);

  void allocate_journal_tag();
  Context *handle_journal_tag(int *result);

  void register_client();
  Context *handle_register_client(int *result);

  void shut_down_journaler(int r);
  Context *handle_journaler_shutdown(int *result);

  void remove_journal();
  Context *handle_remove_journal(int *result);

  void complete(int r);
};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::CreateRequest<librbd::ImageCtx>;
#endif /* CEPH_LIBRBD_JOURNAL_CREATE_REQUEST_H */
| 2,939 | 26.476636 | 85 | h |
null | ceph-main/src/librbd/journal/DemoteRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/DemoteRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "journal/Journaler.h"
#include "journal/Settings.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/OpenRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::DemoteRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace journal {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
// Construct a demotion request; on_finish is completed with the final
// result once the state machine (started via send()) finishes, at which
// point the request deletes itself.
template <typename I>
DemoteRequest<I>::DemoteRequest(I &image_ctx, Context *on_finish)
  : m_image_ctx(image_ctx), m_on_finish(on_finish),
    m_lock(ceph::make_mutex("DemoteRequest::m_lock")) {
}
template <typename I>
DemoteRequest<I>::~DemoteRequest() {
  // the journaler must have been shut down and released (see
  // handle_shut_down_journaler) before the request is destroyed
  ceph_assert(m_journaler == nullptr);
}
// Entry point: kick off the demotion state machine by opening the
// image's journaler.
template <typename I>
void DemoteRequest<I>::send() {
  open_journaler();
}
template <typename I>
void DemoteRequest<I>::open_journaler() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << dendl;

  m_journaler = new Journaler(m_image_ctx.md_ctx, m_image_ctx.id,
                              Journal<>::IMAGE_CLIENT_ID, {}, nullptr);

  // OpenRequest fills in the client meta, current tag tid and tag data
  auto ctx = create_async_context_callback(
    m_image_ctx, create_context_callback<
      DemoteRequest<I>, &DemoteRequest<I>::handle_open_journaler>(this));
  OpenRequest<I>::create(&m_image_ctx, m_journaler, &m_lock, &m_client_meta,
                         &m_tag_tid, &m_tag_data, ctx)->send();
}
template <typename I>
void DemoteRequest<I>::handle_open_journaler(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    m_ret_val = r;
    lderr(cct) << "failed to open journal: " << cpp_strerror(r) << dendl;
    shut_down_journaler();
    return;
  }

  // demotion only applies when the local image currently owns the journal
  if (m_tag_data.mirror_uuid != Journal<>::LOCAL_MIRROR_UUID) {
    m_ret_val = -EINVAL;
    lderr(cct) << "image is not currently the primary" << dendl;
    shut_down_journaler();
    return;
  }

  allocate_tag();
}
// Allocate a new journal tag with mirror uuid ORPHAN_MIRROR_UUID, chained
// via its predecessor to the local client's current commit position.
template <typename I>
void DemoteRequest<I>::allocate_tag() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << dendl;
  cls::journal::Client client;
  int r = m_journaler->get_cached_client(Journal<>::IMAGE_CLIENT_ID, &client);
  if (r < 0) {
    m_ret_val = r;
    lderr(cct) << "failed to retrieve client: " << cpp_strerror(r) << dendl;
    shut_down_journaler();
    return;
  }
  TagPredecessor predecessor;
  predecessor.mirror_uuid = Journal<>::LOCAL_MIRROR_UUID;
  if (!client.commit_position.object_positions.empty()) {
    // assumes the front entry is the most recent commit position --
    // TODO confirm against cls::journal ordering
    auto position = client.commit_position.object_positions.front();
    predecessor.commit_valid = true;
    predecessor.tag_tid = position.tag_tid;
    predecessor.entry_tid = position.entry_tid;
  }
  TagData tag_data;
  tag_data.mirror_uuid = Journal<>::ORPHAN_MIRROR_UUID;
  tag_data.predecessor = std::move(predecessor);
  bufferlist tag_bl;
  encode(tag_data, tag_bl);
  auto ctx = create_context_callback<
    DemoteRequest<I>, &DemoteRequest<I>::handle_allocate_tag>(this);
  m_journaler->allocate_tag(m_client_meta.tag_class, tag_bl, &m_tag, ctx);
}
template <typename I>
void DemoteRequest<I>::handle_allocate_tag(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    m_ret_val = r;
    lderr(cct) << "failed to allocate tag: " << cpp_strerror(r) << dendl;
    shut_down_journaler();
    return;
  }
  // the demotion event is appended under the newly allocated tag
  m_tag_tid = m_tag.tid;
  append_event();
}
// Append a demote/promote event under the new orphan tag and flush the
// resulting future so the event is safely on disk before proceeding.
template <typename I>
void DemoteRequest<I>::append_event() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << dendl;
  EventEntry event_entry{DemotePromoteEvent{}, {}};
  bufferlist event_entry_bl;
  encode(event_entry, event_entry_bl);
  // flush delay of 0: append immediately, no batching
  m_journaler->start_append(0);
  m_future = m_journaler->append(m_tag_tid, event_entry_bl);
  auto ctx = create_context_callback<
    DemoteRequest<I>, &DemoteRequest<I>::handle_append_event>(this);
  m_future.flush(ctx);
}
template <typename I>
void DemoteRequest<I>::handle_append_event(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    m_ret_val = r;
    lderr(cct) << "failed to append demotion journal event: " << cpp_strerror(r)
               << dendl;
    // skip the commit step but still stop appending and shut down cleanly
    stop_append();
    return;
  }
  commit_event();
}
// Mark the demotion event as committed and persist the updated commit
// position before tearing down the append path.
template <typename I>
void DemoteRequest<I>::commit_event() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << dendl;
  m_journaler->committed(m_future);
  auto ctx = create_context_callback<
    DemoteRequest<I>, &DemoteRequest<I>::handle_commit_event>(this);
  m_journaler->flush_commit_position(ctx);
}
template <typename I>
void DemoteRequest<I>::handle_commit_event(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    m_ret_val = r;
    lderr(cct) << "failed to flush demotion commit position: "
               << cpp_strerror(r) << dendl;
  }
  // success or failure, proceed to stop the append path
  stop_append();
}
// Stop the journaler's append path (started in append_event) before
// shutting the journaler down.
template <typename I>
void DemoteRequest<I>::stop_append() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << dendl;
  auto ctx = create_context_callback<
    DemoteRequest<I>, &DemoteRequest<I>::handle_stop_append>(this);
  m_journaler->stop_append(ctx);
}
template <typename I>
void DemoteRequest<I>::handle_stop_append(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    // preserve the first error encountered by the state machine
    if (m_ret_val == 0) {
      m_ret_val = r;
    }
    lderr(cct) << "failed to stop journal append: " << cpp_strerror(r) << dendl;
  }
  shut_down_journaler();
}
template <typename I>
void DemoteRequest<I>::shut_down_journaler() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << dendl;

  // complete the handler through the work queue rather than inline
  auto ctx = create_context_callback<
    DemoteRequest<I>, &DemoteRequest<I>::handle_shut_down_journaler>(this);
  m_journaler->shut_down(create_async_context_callback(m_image_ctx, ctx));
}
template <typename I>
void DemoteRequest<I>::handle_shut_down_journaler(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to shut down journal: " << cpp_strerror(r) << dendl;
  }
  // release the journaler before finishing; the destructor asserts it is
  // nullptr
  delete m_journaler;
  m_journaler = nullptr;
  finish(r);
}
// Report the final result and self-destruct.  An earlier state-machine
// error (m_ret_val) takes precedence over the shutdown result.
template <typename I>
void DemoteRequest<I>::finish(int r) {
  if (m_ret_val < 0) {
    r = m_ret_val;
  }
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace journal
} // namespace librbd
template class librbd::journal::DemoteRequest<librbd::ImageCtx>;
| 6,801 | 25.570313 | 80 | cc |
null | ceph-main/src/librbd/journal/DemoteRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H
#define CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H
#include "common/ceph_mutex.h"
#include "cls/journal/cls_journal_types.h"
#include "journal/Future.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace journal {
// Asynchronous state machine that demotes a primary image: it opens the
// image's journaler, allocates an "orphan" tag chained to the current
// commit position, appends and commits a demote/promote event, then tears
// the journaler back down.
template <typename ImageCtxT = librbd::ImageCtx>
class DemoteRequest {
public:
  static DemoteRequest *create(ImageCtxT &image_ctx, Context *on_finish) {
    return new DemoteRequest(image_ctx, on_finish);
  }

  DemoteRequest(ImageCtxT &image_ctx, Context *on_finish);
  ~DemoteRequest();

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * OPEN_JOURNALER * * * * *
   *    |                   *
   *    v                   *
   * ALLOCATE_TAG * * * * * *
   *    |                   *
   *    v                   *
   * APPEND_EVENT * * *     *
   *    |             *     *
   *    v             *     *
   * COMMIT_EVENT     *     *
   *    |             *     *
   *    v             *     *
   * STOP_APPEND <* * *     *
   *    |                   *
   *    v                   *
   * SHUT_DOWN_JOURNALER <* *
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  typedef typename TypeTraits<ImageCtxT>::Journaler Journaler;
  typedef typename TypeTraits<ImageCtxT>::Future Future;

  ImageCtxT &m_image_ctx;
  Context *m_on_finish;

  Journaler *m_journaler = nullptr;
  // first error encountered; overrides the shutdown result in finish()
  int m_ret_val = 0;

  // guards state shared with the journal open request
  ceph::mutex m_lock;
  ImageClientMeta m_client_meta;
  // tag tid under which the demotion event is appended
  uint64_t m_tag_tid = 0;
  TagData m_tag_data;

  // tag allocated for the demotion (orphan mirror uuid)
  cls::journal::Tag m_tag;
  // future for the appended demote/promote event
  Future m_future;

  void open_journaler();
  void handle_open_journaler(int r);

  void allocate_tag();
  void handle_allocate_tag(int r);

  void append_event();
  void handle_append_event(int r);

  void commit_event();
  void handle_commit_event(int r);

  void stop_append();
  void handle_stop_append(int r);

  void shut_down_journaler();
  void handle_shut_down_journaler(int r);

  void finish(int r);

};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::DemoteRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H
| 2,305 | 20.351852 | 74 | h |
null | ceph-main/src/librbd/journal/DisabledPolicy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_DISABLED_POLICY_H
#define CEPH_LIBRBD_JOURNAL_DISABLED_POLICY_H
#include "librbd/journal/Policy.h"
namespace librbd {
struct ImageCtx;
namespace journal {
// Journal policy used when journaling is disabled: both append and the
// journal itself are reported as disabled, and tag allocation must never
// be requested.
class DisabledPolicy : public Policy {
public:
  bool append_disabled() const override {
    return true;
  }
  bool journal_disabled() const override {
    return true;
  }
  void allocate_tag_on_lock(Context *on_finish) override {
    // unreachable when the journal is disabled
    ceph_abort();
  }
};
} // namespace journal
} // namespace librbd
#endif // CEPH_LIBRBD_JOURNAL_DISABLED_POLICY_H
| 640 | 19.03125 | 70 | h |
null | ceph-main/src/librbd/journal/ObjectDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/ObjectDispatch.h"
#include "common/dout.h"
#include "osdc/Striper.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::ObjectDispatch: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace journal {
using librbd::util::data_object_name;
using util::create_context_callback;
namespace {
// Completion wrapper: once an object IO finishes, commit the image extents
// it covered against the originating journal event, then chain to the
// original completion (if any).
template <typename I>
struct C_CommitIOEvent : public Context {
  I* image_ctx;
  Journal<I>* journal;
  uint64_t object_no;
  uint64_t object_off;
  uint64_t object_len;
  uint64_t journal_tid;
  int object_dispatch_flags;
  Context* on_finish;
  C_CommitIOEvent(I* image_ctx, Journal<I>* journal, uint64_t object_no,
                  uint64_t object_off, uint64_t object_len,
                  uint64_t journal_tid, int object_dispatch_flags,
                  Context* on_finish)
    : image_ctx(image_ctx), journal(journal), object_no(object_no),
      object_off(object_off), object_len(object_len), journal_tid(journal_tid),
      object_dispatch_flags(object_dispatch_flags), on_finish(on_finish) {
  }
  void finish(int r) override {
    // don't commit the IO extent if a previous dispatch handler will just
    // retry the failed IO
    if (r >= 0 ||
        (object_dispatch_flags &
           io::OBJECT_DISPATCH_FLAG_WILL_RETRY_ON_ERROR) == 0) {
      // map the object extent back to image extents and commit each one
      auto [image_extents, _] = io::util::object_to_area_extents(
        image_ctx, object_no, {{object_off, object_len}});
      for (const auto& extent : image_extents) {
        journal->commit_io_event_extent(journal_tid, extent.first,
                                        extent.second, r);
      }
    }
    // on_finish may be nullptr (e.g. when used from extent_overwritten)
    if (on_finish != nullptr) {
      on_finish->complete(r);
    }
  }
};
} // anonymous namespace
// Construct the journal object-dispatch layer; the journal pointer is
// used to wait on / flush / commit journal events for object IO.
template <typename I>
ObjectDispatch<I>::ObjectDispatch(I* image_ctx, Journal<I>* journal)
  : m_image_ctx(image_ctx), m_journal(journal) {
}
template <typename I>
void ObjectDispatch<I>::shut_down(Context* on_finish) {
  // no internal state to tear down; just defer completion to the work queue
  m_image_ctx->op_work_queue->queue(on_finish, 0);
}
template <typename I>
bool ObjectDispatch<I>::discard(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    IOContext io_context, int discard_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (*journal_tid == 0) {
    // non-journaled IO
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;
  // commit the extent against the journal event once the discard completes
  *on_finish = new C_CommitIOEvent<I>(m_image_ctx, m_journal, object_no,
                                      object_off, object_len, *journal_tid,
                                      *object_dispatch_flags, *on_finish);
  *on_finish = create_context_callback<
    Context, &Context::complete>(*on_finish, m_journal);
  // delay the object IO until the associated journal event is safe
  *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
  wait_or_flush_event(*journal_tid, *object_dispatch_flags, on_dispatched);
  return true;
}
template <typename I>
bool ObjectDispatch<I>::write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
    IOContext io_context, int op_flags, int write_flags,
    std::optional<uint64_t> assert_version,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (*journal_tid == 0) {
    // non-journaled IO
    return false;
  }

  auto cct = m_image_ctx->cct;
  uint64_t write_length = data.length();
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << write_length << dendl;

  // commit the written extent against the journal event once the object
  // write completes
  Context* commit_ctx = new C_CommitIOEvent<I>(
    m_image_ctx, m_journal, object_no, object_off, write_length,
    *journal_tid, *object_dispatch_flags, *on_finish);
  *on_finish = create_context_callback<
    Context, &Context::complete>(commit_ctx, m_journal);

  // delay the object IO until the associated journal event is safe
  *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
  wait_or_flush_event(*journal_tid, *object_dispatch_flags, on_dispatched);
  return true;
}
template <typename I>
bool ObjectDispatch<I>::write_same(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
    IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (*journal_tid == 0) {
    // non-journaled IO
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;
  // commit the extent against the journal event once the write-same
  // completes
  *on_finish = new C_CommitIOEvent<I>(m_image_ctx, m_journal, object_no,
                                      object_off, object_len, *journal_tid,
                                      *object_dispatch_flags, *on_finish);
  *on_finish = create_context_callback<
    Context, &Context::complete>(*on_finish, m_journal);
  // delay the object IO until the associated journal event is safe
  *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
  wait_or_flush_event(*journal_tid, *object_dispatch_flags, on_dispatched);
  return true;
}
template <typename I>
bool ObjectDispatch<I>::compare_and_write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
    ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
    int* object_dispatch_flags, uint64_t* journal_tid,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  if (*journal_tid == 0) {
    // non-journaled IO
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << write_data.length()
                 << dendl;
  // commit the written extent against the journal event once the
  // compare-and-write completes
  *on_finish = new C_CommitIOEvent<I>(m_image_ctx, m_journal, object_no,
                                      object_off, write_data.length(),
                                      *journal_tid, *object_dispatch_flags,
                                      *on_finish);
  *on_finish = create_context_callback<
    Context, &Context::complete>(*on_finish, m_journal);
  // delay the object IO until the associated journal event is safe
  *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
  wait_or_flush_event(*journal_tid, *object_dispatch_flags, on_dispatched);
  return true;
}
template <typename I>
bool ObjectDispatch<I>::flush(
    io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (*journal_tid == 0) {
    // non-journaled IO
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  // commit the entire journal event (not a per-extent commit) once the
  // flush completes
  auto ctx = *on_finish;
  *on_finish = new LambdaContext(
    [image_ctx=m_image_ctx, ctx, journal_tid=*journal_tid](int r) {
      image_ctx->journal->commit_io_event(journal_tid, r);
      ctx->complete(r);
    });
  // flushes always force the journal event to be committed first
  *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
  wait_or_flush_event(*journal_tid, io::OBJECT_DISPATCH_FLAG_FLUSH,
                      on_dispatched);
  return true;
}
// Notification that a previously journaled object extent was overwritten.
// Commits the old extent against its originating journal event; if the
// overwrite belongs to a newer journal event, the commit is deferred until
// that newer event is safely on disk.
template <typename I>
void ObjectDispatch<I>::extent_overwritten(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    uint64_t journal_tid, uint64_t new_journal_tid) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << object_no << " " << object_off << "~" << object_len
                 << dendl;

  // no dispatch flags apply here (the original code passed 'false',
  // relying on an implicit bool->int conversion) and there is no chained
  // completion
  Context *ctx = new C_CommitIOEvent<I>(m_image_ctx, m_journal, object_no,
                                        object_off, object_len, journal_tid,
                                        0, nullptr);
  if (new_journal_tid != 0) {
    // ensure new journal event is safely committed to disk before
    // committing old event
    m_journal->flush_event(new_journal_tid, ctx);
  } else {
    ctx = create_context_callback<
      Context, &Context::complete>(ctx, m_journal);
    ctx->complete(0);
  }
}
template <typename I>
void ObjectDispatch<I>::wait_or_flush_event(
    uint64_t journal_tid, int object_dispatch_flags, Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "journal_tid=" << journal_tid << dendl;

  // flush-flagged IO requires the journal event to be committed; all other
  // IO only waits for the event to become safe
  bool flush_requested =
    (object_dispatch_flags & io::OBJECT_DISPATCH_FLAG_FLUSH) != 0;
  if (flush_requested) {
    m_journal->flush_event(journal_tid, on_dispatched);
  } else {
    m_journal->wait_event(journal_tid, on_dispatched);
  }
}
} // namespace journal
} // namespace librbd
template class librbd::journal::ObjectDispatch<librbd::ImageCtx>;
| 9,041 | 34.046512 | 83 | cc |
null | ceph-main/src/librbd/journal/ObjectDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_JOURNAL_OBJECT_DISPATCH_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "common/zipkin_trace.h"
#include "librbd/io/Types.h"
#include "librbd/io/ObjectDispatchInterface.h"
struct Context;
namespace librbd {
struct ImageCtx;
template <typename> class Journal;
namespace journal {
// Object-level dispatch layer tying object IO to the image journal: each
// journaled write-like IO is held until its journal event is safe (or
// committed, for flushes), and the event/extent is committed once the
// object IO completes.  Reads and snapshot operations pass through.
template <typename ImageCtxT = librbd::ImageCtx>
class ObjectDispatch : public io::ObjectDispatchInterface {
public:
  static ObjectDispatch* create(ImageCtxT* image_ctx,
                                Journal<ImageCtxT>* journal) {
    return new ObjectDispatch(image_ctx, journal);
  }

  ObjectDispatch(ImageCtxT* image_ctx, Journal<ImageCtxT>* journal);

  io::ObjectDispatchLayer get_dispatch_layer() const override {
    return io::OBJECT_DISPATCH_LAYER_JOURNAL;
  }

  void shut_down(Context* on_finish) override;

  // reads are not journaled
  bool read(
      uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) {
    return false;
  }

  bool discard(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write_same(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool compare_and_write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
      int* object_dispatch_flags, uint64_t* journal_tid,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool flush(
      io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool list_snaps(
      uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
      int list_snap_flags, const ZTracer::Trace &parent_trace,
      io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }
  bool reset_existence_cache(Context* on_finish) override {
    return false;
  }

  // commit an overwritten extent against its originating journal event
  void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) override;

  int prepare_copyup(
      uint64_t object_no,
      io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
    return 0;
  }

private:
  ImageCtxT* m_image_ctx;
  Journal<ImageCtxT>* m_journal;

  // hold the object IO until the journal event identified by journal_tid
  // is safe (or committed, when the flush flag is set)
  void wait_or_flush_event(uint64_t journal_tid, int object_dispatch_flags,
                           Context* on_dispatched);
};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::ObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_OBJECT_DISPATCH_H
| 4,291 | 33.336 | 77 | h |
null | ceph-main/src/librbd/journal/OpenRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/OpenRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "journal/Journaler.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::OpenRequest: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace journal {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
using util::C_DecodeTags;
// Captures the output locations (client_meta/tag_tid/tag_data) that will be
// populated, under *lock, once the journal metadata has been retrieved.
template <typename I>
OpenRequest<I>::OpenRequest(I *image_ctx, Journaler *journaler, ceph::mutex *lock,
                            journal::ImageClientMeta *client_meta,
                            uint64_t *tag_tid, journal::TagData *tag_data,
                            Context *on_finish)
  : m_image_ctx(image_ctx), m_journaler(journaler), m_lock(lock),
    m_client_meta(client_meta), m_tag_tid(tag_tid), m_tag_data(tag_data),
    m_on_finish(on_finish) {
}
// Entry point: starts the INIT -> GET_TAGS state machine.
template <typename I>
void OpenRequest<I>::send() {
  send_init();
}
// Asynchronously initialize the journaler; continues in handle_init().
template <typename I>
void OpenRequest<I>::send_init() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  m_journaler->init(create_async_context_callback(
    *m_image_ctx, create_context_callback<
      OpenRequest<I>, &OpenRequest<I>::handle_init>(this)));
}
// Validates journaler init, extracts the master image client's metadata
// (publishing it to the caller under *m_lock) and proceeds to tag retrieval.
template <typename I>
void OpenRequest<I>::handle_init(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to initialize journal: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }
  // locate the master image client record
  cls::journal::Client client;
  r = m_journaler->get_cached_client(Journal<ImageCtx>::IMAGE_CLIENT_ID,
                                     &client);
  if (r < 0) {
    lderr(cct) << "failed to locate master image client" << dendl;
    finish(r);
    return;
  }
  // the client payload is opaque to the journaler -- decode it here
  librbd::journal::ClientData client_data;
  auto bl = client.data.cbegin();
  try {
    decode(client_data, bl);
  } catch (const buffer::error &err) {
    lderr(cct) << "failed to decode client meta data: " << err.what()
               << dendl;
    finish(-EINVAL);
    return;
  }
  // the master client record must carry image (not mirror-peer) metadata
  journal::ImageClientMeta *image_client_meta =
    boost::get<journal::ImageClientMeta>(&client_data.client_meta);
  if (image_client_meta == nullptr) {
    lderr(cct) << this << " " << __func__ << ": "
               << "failed to extract client meta data" << dendl;
    finish(-EINVAL);
    return;
  }
  ldout(cct, 20) << this << " " << __func__ << ": "
                 << "client: " << client << ", "
                 << "image meta: " << *image_client_meta << dendl;
  m_tag_class = image_client_meta->tag_class;
  {
    // publish the decoded metadata to the caller-owned output
    std::lock_guard locker{*m_lock};
    *m_client_meta = *image_client_meta;
  }
  send_get_tags();
}
// Fetch all tags for the image's tag class; C_DecodeTags decodes them into
// the caller-supplied tag_tid/tag_data (under m_lock) before the callback.
template <typename I>
void OpenRequest<I>::send_get_tags() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  C_DecodeTags *tags_ctx = new C_DecodeTags(
    cct, m_lock, m_tag_tid, m_tag_data, create_async_context_callback(
      *m_image_ctx, create_context_callback<
        OpenRequest<I>, &OpenRequest<I>::handle_get_tags>(this)));
  m_journaler->get_tags(m_tag_class, &tags_ctx->tags, tags_ctx);
}
// Final step: log any tag-decode failure and complete the request.
template <typename I>
void OpenRequest<I>::handle_get_tags(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << this << " " << __func__ << ": "
               << "failed to decode journal tags: " << cpp_strerror(r) << dendl;
  }
  finish(r);
}
// Complete the caller's context with the final status and self-destruct;
// the request object is single-use.
template <typename I>
void OpenRequest<I>::finish(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace journal
} // namespace librbd
template class librbd::journal::OpenRequest<librbd::ImageCtx>;
| 4,117 | 27.4 | 82 | cc |
null | ceph-main/src/librbd/journal/OpenRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H
#define CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H
#include "common/ceph_mutex.h"
#include "include/int_types.h"
#include "librbd/journal/TypeTraits.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace journal {
struct ImageClientMeta;
struct TagData;
// Async state machine that initializes a journaler, extracts the master
// image client metadata and decodes the newest tag for its tag class.
// The decoded results are written to caller-owned out-params under *lock.
template <typename ImageCtxT = ImageCtx>
class OpenRequest {
public:
  typedef typename TypeTraits<ImageCtxT>::Journaler Journaler;
  // Factory helper; the request deletes itself after completing on_finish.
  static OpenRequest* create(ImageCtxT *image_ctx, Journaler *journaler,
                             ceph::mutex *lock, journal::ImageClientMeta *client_meta,
                             uint64_t *tag_tid, journal::TagData *tag_data,
                             Context *on_finish) {
    return new OpenRequest(image_ctx, journaler, lock, client_meta, tag_tid,
                           tag_data, on_finish);
  }
  OpenRequest(ImageCtxT *image_ctx, Journaler *journaler, ceph::mutex *lock,
              journal::ImageClientMeta *client_meta, uint64_t *tag_tid,
              journal::TagData *tag_data, Context *on_finish);
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * INIT
   *    |
   *    v
   * GET_TAGS
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  ImageCtxT *m_image_ctx;
  Journaler *m_journaler;
  // Guards writes to the caller-owned out-params below.
  ceph::mutex *m_lock;
  journal::ImageClientMeta *m_client_meta;
  uint64_t *m_tag_tid;
  journal::TagData *m_tag_data;
  Context *m_on_finish;
  // Tag class extracted from the master client's metadata during INIT.
  uint64_t m_tag_class = 0;
  void send_init();
  void handle_init(int r);
  void send_get_tags();
  void handle_get_tags(int r);
  void finish(int r);
};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::OpenRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H
| 1,883 | 20.906977 | 86 | h |
null | ceph-main/src/librbd/journal/Policy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_POLICY_H
#define CEPH_LIBRBD_JOURNAL_POLICY_H
class Context;
namespace librbd {
namespace journal {
// Pluggable policy interface controlling how/when the image journal is
// used (e.g. disabling appends or deferring tag allocation on lock).
struct Policy {
  virtual ~Policy() {
  }
  // True if journal appends should be suppressed.
  virtual bool append_disabled() const = 0;
  // True if the journal should not be used at all.
  virtual bool journal_disabled() const = 0;
  // Invoked when the exclusive lock is acquired to allocate a new tag.
  virtual void allocate_tag_on_lock(Context *on_finish) = 0;
};
} // namespace journal
} // namespace librbd
#endif // CEPH_LIBRBD_JOURNAL_POLICY_H
| 513 | 18.769231 | 70 | h |
null | ceph-main/src/librbd/journal/PromoteRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/PromoteRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "journal/Journaler.h"
#include "journal/Settings.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/OpenRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::PromoteRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace journal {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
// force=true requests a forced promotion (creates an epoch peers cannot
// link against); force=false expects an orderly demote/promote handoff.
template <typename I>
PromoteRequest<I>::PromoteRequest(I *image_ctx, bool force, Context *on_finish)
  : m_image_ctx(image_ctx), m_force(force), m_on_finish(on_finish),
    m_lock(ceph::make_mutex("PromoteRequest::m_lock")) {
}
// Entry point: starts the OPEN -> ... -> SHUT_DOWN state machine.
template <typename I>
void PromoteRequest<I>::send() {
  send_open();
}
// Create a journaler for the master image client and open it via
// journal::OpenRequest, which also decodes the current tag state.
template <typename I>
void PromoteRequest<I>::send_open() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  m_journaler = new Journaler(m_image_ctx->md_ctx, m_image_ctx->id,
                              Journal<>::IMAGE_CLIENT_ID, {}, nullptr);
  Context *ctx = create_async_context_callback(
    *m_image_ctx, create_context_callback<
      PromoteRequest<I>, &PromoteRequest<I>::handle_open>(this));
  auto open_req = OpenRequest<I>::create(m_image_ctx, m_journaler,
                                         &m_lock, &m_client_meta,
                                         &m_tag_tid, &m_tag_data, ctx);
  open_req->send();
}
// On successful open, proceed to tag allocation; on failure, record the
// error and tear down the journaler.
template <typename I>
void PromoteRequest<I>::handle_open(int r) {
  auto *cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r >= 0) {
    allocate_tag();
    return;
  }
  lderr(cct) << "failed to open journal: " << cpp_strerror(r) << dendl;
  m_ret_val = r;
  shut_down();
}
// Allocate a new LOCAL_MIRROR_UUID tag whose predecessor depends on
// whether this is an orderly or a forced promotion.
template <typename I>
void PromoteRequest<I>::allocate_tag() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  journal::TagPredecessor predecessor;
  if (!m_force && m_tag_data.mirror_uuid == Journal<>::ORPHAN_MIRROR_UUID) {
    // orderly promotion -- demotion epoch will have a single entry
    // so link to our predecessor (demotion) epoch
    predecessor = TagPredecessor{Journal<>::ORPHAN_MIRROR_UUID, true, m_tag_tid,
                                 1};
  } else {
    // forced promotion -- create an epoch no peers can link against
    predecessor = TagPredecessor{Journal<>::LOCAL_MIRROR_UUID, true, m_tag_tid,
                                 0};
  }
  TagData tag_data;
  tag_data.mirror_uuid = Journal<>::LOCAL_MIRROR_UUID;
  tag_data.predecessor = predecessor;
  bufferlist tag_bl;
  encode(tag_data, tag_bl);
  Context *ctx = create_context_callback<
    PromoteRequest<I>, &PromoteRequest<I>::handle_allocate_tag>(this);
  m_journaler->allocate_tag(m_client_meta.tag_class, tag_bl, &m_tag, ctx);
}
// Record the newly allocated tag's tid and append the promote event.
template <typename I>
void PromoteRequest<I>::handle_allocate_tag(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    m_ret_val = r;
    lderr(cct) << "failed to allocate tag: " << cpp_strerror(r) << dendl;
    shut_down();
    return;
  }
  m_tag_tid = m_tag.tid;
  append_event();
}
// Append a DemotePromoteEvent under the new tag and flush it to disk.
template <typename I>
void PromoteRequest<I>::append_event() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  EventEntry event_entry{DemotePromoteEvent{}, {}};
  bufferlist event_entry_bl;
  encode(event_entry, event_entry_bl);
  m_journaler->start_append(0);
  m_future = m_journaler->append(m_tag_tid, event_entry_bl);
  auto ctx = create_context_callback<
    PromoteRequest<I>, &PromoteRequest<I>::handle_append_event>(this);
  m_future.flush(ctx);
}
// On success, commit the appended event; on failure, record the error and
// stop the append (which then tears everything down).
template <typename I>
void PromoteRequest<I>::handle_append_event(int r) {
  auto *cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r >= 0) {
    commit_event();
    return;
  }
  m_ret_val = r;
  lderr(cct) << "failed to append promotion journal event: "
             << cpp_strerror(r) << dendl;
  stop_append();
}
// Mark the appended event committed and flush the commit position.
template <typename I>
void PromoteRequest<I>::commit_event() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  m_journaler->committed(m_future);
  auto ctx = create_context_callback<
    PromoteRequest<I>, &PromoteRequest<I>::handle_commit_event>(this);
  m_journaler->flush_commit_position(ctx);
}
// Record any commit failure; always continue to stop_append() so the
// journaler is shut down cleanly either way.
template <typename I>
void PromoteRequest<I>::handle_commit_event(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    m_ret_val = r;
    lderr(cct) << "failed to flush promote commit position: "
               << cpp_strerror(r) << dendl;
  }
  stop_append();
}
// Stop the journaler's append mode before shutting it down.
template <typename I>
void PromoteRequest<I>::stop_append() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  auto ctx = create_context_callback<
    PromoteRequest<I>, &PromoteRequest<I>::handle_stop_append>(this);
  m_journaler->stop_append(ctx);
}
// Log stop-append failures, preserving any earlier error, then shut down.
template <typename I>
void PromoteRequest<I>::handle_stop_append(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    // only record this error if no earlier step already failed
    if (m_ret_val == 0) {
      m_ret_val = r;
    }
    lderr(cct) << "failed to stop journal append: " << cpp_strerror(r) << dendl;
  }
  shut_down();
}
// Asynchronously shut down the journaler; final step of the state machine.
template <typename I>
void PromoteRequest<I>::shut_down() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;
  Context *ctx = create_async_context_callback(
    *m_image_ctx, create_context_callback<
      PromoteRequest<I>, &PromoteRequest<I>::handle_shut_down>(this));
  m_journaler->shut_down(ctx);
}
// Release the journaler and finish with the shutdown status (finish()
// substitutes any earlier recorded error).
template <typename I>
void PromoteRequest<I>::handle_shut_down(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to shut down journal: " << cpp_strerror(r) << dendl;
  }
  delete m_journaler;
  finish(r);
}
// Complete the caller's context and self-destruct.  A failure recorded by
// an earlier step takes precedence over the final step's status.
template <typename I>
void PromoteRequest<I>::finish(int r) {
  const int result = (m_ret_val < 0) ? m_ret_val : r;
  ldout(m_image_ctx->cct, 20) << "r=" << result << dendl;
  m_on_finish->complete(result);
  delete this;
}
} // namespace journal
} // namespace librbd
template class librbd::journal::PromoteRequest<librbd::ImageCtx>;
| 6,417 | 25.966387 | 80 | cc |
null | ceph-main/src/librbd/journal/PromoteRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_PROMOTE_REQUEST_H
#define CEPH_LIBRBD_JOURNAL_PROMOTE_REQUEST_H
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include "cls/journal/cls_journal_types.h"
#include "journal/Future.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace journal {
// Async state machine that promotes the local image to primary by
// allocating a LOCAL_MIRROR_UUID tag and recording a DemotePromoteEvent
// in the image journal.  Self-deletes after completing on_finish.
template <typename ImageCtxT = ImageCtx>
class PromoteRequest {
public:
  static PromoteRequest* create(ImageCtxT *image_ctx, bool force,
                                Context *on_finish) {
    return new PromoteRequest(image_ctx, force, on_finish);
  }
  PromoteRequest(ImageCtxT *image_ctx, bool force, Context *on_finish);
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * OPEN * * * * * * * * * *
   *    |                   *
   *    v                   *
   * ALLOCATE_TAG * * * * * *
   *    |                   *
   *    v                   *
   * APPEND_EVENT * * *     *
   *    |             *     *
   *    v             *     *
   * COMMIT_EVENT     *     *
   *    |             *     *
   *    v             *     *
   * STOP_APPEND <* * *     *
   *    |                   *
   *    v                   *
   * SHUT_DOWN <* * * * * * *
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  typedef typename TypeTraits<ImageCtxT>::Journaler Journaler;
  typedef typename TypeTraits<ImageCtxT>::Future Future;
  ImageCtxT *m_image_ctx;
  // Forced promotion creates an epoch no peers can link against.
  bool m_force;
  Context *m_on_finish;
  Journaler *m_journaler = nullptr;
  // First error encountered; reported to the caller by finish().
  int m_ret_val = 0;
  // Guards the out-params populated by journal::OpenRequest.
  ceph::mutex m_lock;
  ImageClientMeta m_client_meta;
  uint64_t m_tag_tid = 0;
  TagData m_tag_data;
  cls::journal::Tag m_tag;
  Future m_future;
  void send_open();
  void handle_open(int r);
  void allocate_tag();
  void handle_allocate_tag(int r);
  void append_event();
  void handle_append_event(int r);
  void commit_event();
  void handle_commit_event(int r);
  void stop_append();
  void handle_stop_append(int r);
  void shut_down();
  void handle_shut_down(int r);
  void finish(int r);
};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::PromoteRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_PROMOTE_REQUEST_H
| 2,360 | 20.463636 | 72 | h |
null | ceph-main/src/librbd/journal/RemoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/dout.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "journal/Settings.h"
#include "include/ceph_assert.h"
#include "librbd/Utils.h"
#include "librbd/journal/RemoveRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Journal::RemoveRequest: "
namespace librbd {
using util::create_context_callback;
namespace journal {
// Captures the pool context, image/client ids and completion callback for
// removing the image's journal.
template<typename I>
RemoveRequest<I>::RemoveRequest(IoCtx &ioctx, const std::string &image_id,
                                const std::string &client_id,
                                ContextWQ *op_work_queue,
                                Context *on_finish)
  : m_ioctx(ioctx), m_image_id(image_id), m_image_client_id(client_id),
    m_op_work_queue(op_work_queue), m_on_finish(on_finish) {
  m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
}
// Entry point: starts the stat -> init -> remove -> shutdown sequence.
template<typename I>
void RemoveRequest<I>::send() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;
  stat_journal();
}
// Construct the journaler and check whether the journal header exists
// before attempting any removal.
template<typename I>
void RemoveRequest<I>::stat_journal() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;
  ImageCtx::get_timer_instance(m_cct, &m_timer, &m_timer_lock);
  m_journaler = new Journaler(m_op_work_queue, m_timer, m_timer_lock, m_ioctx,
                              m_image_id, m_image_client_id, {}, nullptr);
  using klass = RemoveRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_stat_journal>(this);
  m_journaler->exists(ctx);
}
// Handle the journal-header probe: a missing journal (-ENOENT) is treated
// as success, other errors abort, and on success the journaler is
// initialized for removal.
template<typename I>
Context *RemoveRequest<I>::handle_stat_journal(int *result) {
  const int r = *result;
  ldout(m_cct, 20) << __func__ << ": r=" << r << dendl;
  if (r == -ENOENT) {
    // nothing to remove -- tear down the journaler reporting success
    shut_down_journaler(0);
  } else if (r < 0) {
    lderr(m_cct) << "failed to stat journal header: " << cpp_strerror(r) << dendl;
    shut_down_journaler(r);
  } else {
    init_journaler();
  }
  return nullptr;
}
// Initialize the journaler so its metadata can be removed.
template<typename I>
void RemoveRequest<I>::init_journaler() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;
  using klass = RemoveRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_init_journaler>(this);
  m_journaler->init(ctx);
}
// Handle journaler init: -ENOENT is tolerated (proceed with removal
// anyway); any other failure aborts and shuts down the journaler.
template<typename I>
Context *RemoveRequest<I>::handle_init_journaler(int *result) {
  const int r = *result;
  ldout(m_cct, 20) << __func__ << ": r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "failed to init journaler: " << cpp_strerror(r) << dendl;
    shut_down_journaler(r);
  } else {
    remove_journal();
  }
  return nullptr;
}
// Remove the journal objects (force=true removes even with registered
// clients).
template<typename I>
void RemoveRequest<I>::remove_journal() {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;
  using klass = RemoveRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_remove_journal>(this);
  m_journaler->remove(true, ctx);
}
// Log any removal failure; the journaler is always shut down afterwards,
// propagating the removal status.
template<typename I>
Context *RemoveRequest<I>::handle_remove_journal(int *result) {
  const int r = *result;
  ldout(m_cct, 20) << __func__ << ": r=" << r << dendl;
  if (r < 0) {
    lderr(m_cct) << "failed to remove journal: " << cpp_strerror(r) << dendl;
  }
  shut_down_journaler(r);
  return nullptr;
}
// Save the request's final status and asynchronously shut the journaler
// down; completion continues in handle_journaler_shutdown().
template<typename I>
void RemoveRequest<I>::shut_down_journaler(int r) {
  ldout(m_cct, 20) << this << " " << __func__ << dendl;
  m_r_saved = r;
  using klass = RemoveRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_journaler_shutdown>(this);
  m_journaler->shut_down(ctx);
}
// Final step: release the journaler, report the previously saved status
// and self-destruct.  A shutdown failure is logged but does not override
// the saved status.
template<typename I>
Context *RemoveRequest<I>::handle_journaler_shutdown(int *result) {
  ldout(m_cct, 20) << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(m_cct) << "failed to shut down journaler: " << cpp_strerror(*result) << dendl;
  }
  delete m_journaler;
  if (m_r_saved == 0) {
    ldout(m_cct, 20) << "done." << dendl;
  }
  m_on_finish->complete(m_r_saved);
  delete this;
  return nullptr;
}
} // namespace journal
} // namespace librbd
template class librbd::journal::RemoveRequest<librbd::ImageCtx>;
| 4,141 | 25.896104 | 89 | cc |
null | ceph-main/src/librbd/journal/RemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_REMOVE_REQUEST_H
#define CEPH_LIBRBD_JOURNAL_REMOVE_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "journal/Journaler.h"
#include "librbd/journal/TypeTraits.h"
#include "common/Timer.h"
using librados::IoCtx;
using journal::Journaler;
class Context;
class ContextWQ;
namespace journal {
class Journaler;
}
namespace librbd {
class ImageCtx;
namespace journal {
// Async state machine that removes an image's journal: stat the header,
// init the journaler, remove the journal objects and shut the journaler
// down.  Self-deletes after completing on_finish.
template<typename ImageCtxT = ImageCtx>
class RemoveRequest {
public:
  static RemoveRequest *create(IoCtx &ioctx, const std::string &image_id,
                               const std::string &client_id,
                               ContextWQ *op_work_queue, Context *on_finish) {
    return new RemoveRequest(ioctx, image_id, client_id,
                             op_work_queue, on_finish);
  }
  void send();
private:
  typedef typename TypeTraits<ImageCtxT>::Journaler Journaler;
  RemoveRequest(IoCtx &ioctx, const std::string &image_id,
                const std::string &client_id,
                ContextWQ *op_work_queue, Context *on_finish);
  IoCtx &m_ioctx;
  std::string m_image_id;
  std::string m_image_client_id;
  ContextWQ *m_op_work_queue;
  Context *m_on_finish;
  // Default-initialize pointer/status members so the object is never in an
  // indeterminate state before the state machine assigns them.
  CephContext *m_cct = nullptr;
  Journaler *m_journaler = nullptr;
  SafeTimer *m_timer = nullptr;
  ceph::mutex *m_timer_lock = nullptr;
  // Status saved before journaler shutdown; reported to on_finish.
  int m_r_saved = 0;
  void stat_journal();
  Context *handle_stat_journal(int *result);
  void init_journaler();
  Context *handle_init_journaler(int *result);
  void remove_journal();
  Context *handle_remove_journal(int *result);
  void shut_down_journaler(int r);
  Context *handle_journaler_shutdown(int *result);
};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::RemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_REMOVE_REQUEST_H
| 2,045 | 23.95122 | 85 | h |
null | ceph-main/src/librbd/journal/Replay.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/Replay.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::Replay: " << this << " "
namespace librbd {
namespace journal {
namespace {
static const uint64_t IN_FLIGHT_IO_LOW_WATER_MARK(32);
static const uint64_t IN_FLIGHT_IO_HIGH_WATER_MARK(64);
static NoOpProgressContext no_op_progress_callback;
// Context adapter that replays one journaled maintenance operation: when
// completed (typically after a refresh), it re-validates the exclusive
// lock and dispatches the captured event to the matching execute()
// overload on image_ctx.operations.
template <typename I, typename E>
struct ExecuteOp : public Context {
  I &image_ctx;
  E event;
  Context *on_op_complete;
  ExecuteOp(I &image_ctx, const E &event, Context *on_op_complete)
    : image_ctx(image_ctx), event(event), on_op_complete(on_op_complete) {
  }
  void execute(const journal::SnapCreateEvent &_) {
    image_ctx.operations->execute_snap_create(event.snap_namespace,
                                              event.snap_name,
                                              on_op_complete,
                                              event.op_tid,
                                              SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE,
                                              no_op_progress_callback);
  }
  void execute(const journal::SnapRemoveEvent &_) {
    image_ctx.operations->execute_snap_remove(event.snap_namespace,
                                              event.snap_name,
                                              on_op_complete);
  }
  void execute(const journal::SnapRenameEvent &_) {
    image_ctx.operations->execute_snap_rename(event.snap_id,
                                              event.dst_snap_name,
                                              on_op_complete);
  }
  void execute(const journal::SnapProtectEvent &_) {
    image_ctx.operations->execute_snap_protect(event.snap_namespace,
                                               event.snap_name,
                                               on_op_complete);
  }
  void execute(const journal::SnapUnprotectEvent &_) {
    image_ctx.operations->execute_snap_unprotect(event.snap_namespace,
                                                 event.snap_name,
                                                 on_op_complete);
  }
  void execute(const journal::SnapRollbackEvent &_) {
    image_ctx.operations->execute_snap_rollback(event.snap_namespace,
                                                event.snap_name,
                                                no_op_progress_callback,
                                                on_op_complete);
  }
  void execute(const journal::RenameEvent &_) {
    image_ctx.operations->execute_rename(event.image_name,
                                         on_op_complete);
  }
  void execute(const journal::ResizeEvent &_) {
    image_ctx.operations->execute_resize(event.size, true, no_op_progress_callback,
                                         on_op_complete, event.op_tid);
  }
  void execute(const journal::FlattenEvent &_) {
    image_ctx.operations->execute_flatten(no_op_progress_callback,
                                          on_op_complete);
  }
  void execute(const journal::SnapLimitEvent &_) {
    image_ctx.operations->execute_snap_set_limit(event.limit, on_op_complete);
  }
  void execute(const journal::UpdateFeaturesEvent &_) {
    image_ctx.operations->execute_update_features(event.features, event.enabled,
                                                  on_op_complete, event.op_tid);
  }
  void execute(const journal::MetadataSetEvent &_) {
    image_ctx.operations->execute_metadata_set(event.key, event.value,
                                               on_op_complete);
  }
  void execute(const journal::MetadataRemoveEvent &_) {
    image_ctx.operations->execute_metadata_remove(event.key, on_op_complete);
  }
  void finish(int r) override {
    CephContext *cct = image_ctx.cct;
    if (r < 0) {
      lderr(cct) << ": ExecuteOp::" << __func__ << ": r=" << r << dendl;
      on_op_complete->complete(r);
      return;
    }
    ldout(cct, 20) << ": ExecuteOp::" << __func__ << dendl;
    std::shared_lock owner_locker{image_ctx.owner_lock};
    // the exclusive lock may have been lost while this op was queued
    if (image_ctx.exclusive_lock == nullptr ||
        !image_ctx.exclusive_lock->accept_ops()) {
      ldout(cct, 5) << ": lost exclusive lock -- skipping op" << dendl;
      on_op_complete->complete(-ECANCELED);
      return;
    }
    execute(event);
  }
};
// Context wrapper that refreshes the image (if a refresh is pending)
// before queueing on_finish on the op work queue.  Owns on_finish.
template <typename I>
struct C_RefreshIfRequired : public Context {
  I &image_ctx;
  Context *on_finish;
  C_RefreshIfRequired(I &image_ctx, Context *on_finish)
    : image_ctx(image_ctx), on_finish(on_finish) {
  }
  ~C_RefreshIfRequired() override {
    delete on_finish;
  }
  void finish(int r) override {
    CephContext *cct = image_ctx.cct;
    // transfer ownership so the destructor doesn't double-free
    Context *ctx = on_finish;
    on_finish = nullptr;
    if (r < 0) {
      lderr(cct) << ": C_RefreshIfRequired::" << __func__ << ": r=" << r << dendl;
      image_ctx.op_work_queue->queue(ctx, r);
      return;
    }
    if (image_ctx.state->is_refresh_required()) {
      ldout(cct, 20) << ": C_RefreshIfRequired::" << __func__ << ": "
                     << "refresh required" << dendl;
      image_ctx.state->refresh(ctx);
      return;
    }
    image_ctx.op_work_queue->queue(ctx, 0);
  }
};
} // anonymous namespace
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::Replay: " << this << " " \
<< __func__
// Replay engine for journaled image events; bound to a single image.
template <typename I>
Replay<I>::Replay(I &image_ctx)
  : m_image_ctx(image_ctx) {
}
// Destruction is only valid after shut_down() has fully drained all
// in-flight AIO and op events -- assert that invariant.
template <typename I>
Replay<I>::~Replay() {
  std::lock_guard locker{m_lock};
  ceph_assert(m_in_flight_aio_flush == 0);
  ceph_assert(m_in_flight_aio_modify == 0);
  ceph_assert(m_aio_modify_unsafe_contexts.empty());
  ceph_assert(m_aio_modify_safe_contexts.empty());
  ceph_assert(m_op_events.empty());
  ceph_assert(m_in_flight_op_events == 0);
}
// Decode a single journal event entry from the iterator.  Returns 0 on
// success or -EBADMSG if the payload is corrupt/truncated.
template <typename I>
int Replay<I>::decode(bufferlist::const_iterator *it, EventEntry *event_entry) {
  using ceph::decode;
  try {
    decode(*event_entry, *it);
  } catch (const buffer::error &) {
    return -EBADMSG;
  }
  return 0;
}
// Dispatch one decoded event: on_ready fires when replay can accept the
// next event, on_safe when this event's effects are committed.  Events
// are skipped (-ECANCELED) if the exclusive lock has been lost.
template <typename I>
void Replay<I>::process(const EventEntry &event_entry,
                        Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": on_ready=" << on_ready << ", on_safe=" << on_safe
                 << dendl;
  on_ready = util::create_async_context_callback(m_image_ctx, on_ready);
  std::shared_lock owner_lock{m_image_ctx.owner_lock};
  if (m_image_ctx.exclusive_lock == nullptr ||
      !m_image_ctx.exclusive_lock->accept_ops()) {
    ldout(cct, 5) << ": lost exclusive lock -- skipping event" << dendl;
    m_image_ctx.op_work_queue->queue(on_safe, -ECANCELED);
    on_ready->complete(0);
    return;
  }
  // boost::variant visitor routes to the matching handle_event() overload
  boost::apply_visitor(EventVisitor(this, on_ready, on_safe),
                       event_entry.event);
}
// Stop replay: flush outstanding AIO, cancel or release paused op state
// machines (cancel_ops selects -ERESTART cancellation vs. letting them
// run), and defer on_finish until all in-flight work drains.
template <typename I>
void Replay<I>::shut_down(bool cancel_ops, Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << dendl;
  io::AioCompletion *flush_comp = nullptr;
  on_finish = util::create_async_context_callback(
    m_image_ctx, on_finish);
  {
    std::lock_guard locker{m_lock};
    // safely commit any remaining AIO modify operations
    if ((m_in_flight_aio_flush + m_in_flight_aio_modify) != 0) {
      flush_comp = create_aio_flush_completion(nullptr);
      ceph_assert(flush_comp != nullptr);
    }
    for (auto &op_event_pair : m_op_events) {
      OpEvent &op_event = op_event_pair.second;
      if (cancel_ops) {
        // cancel ops that are waiting to start (waiting for
        // OpFinishEvent or waiting for ready)
        if (op_event.on_start_ready == nullptr &&
            op_event.on_op_finish_event != nullptr) {
          Context *on_op_finish_event = nullptr;
          std::swap(on_op_finish_event, op_event.on_op_finish_event);
          m_image_ctx.op_work_queue->queue(on_op_finish_event, -ERESTART);
        }
      } else if (op_event.on_op_finish_event != nullptr) {
        // start ops waiting for OpFinishEvent
        Context *on_op_finish_event = nullptr;
        std::swap(on_op_finish_event, op_event.on_op_finish_event);
        m_image_ctx.op_work_queue->queue(on_op_finish_event, 0);
      } else if (op_event.on_start_ready != nullptr) {
        // waiting for op ready
        op_event_pair.second.finish_on_ready = true;
      }
    }
    ceph_assert(!m_shut_down);
    m_shut_down = true;
    ceph_assert(m_flush_ctx == nullptr);
    // if work remains, stash on_finish to be fired when the last event
    // completes (see handle_op_complete / AIO completion paths)
    if (m_in_flight_op_events > 0 || flush_comp != nullptr) {
      std::swap(m_flush_ctx, on_finish);
    }
  }
  // execute the following outside of lock scope
  if (flush_comp != nullptr) {
    std::shared_lock owner_locker{m_image_ctx.owner_lock};
    io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
                                   io::FLUSH_SOURCE_INTERNAL, {});
  }
  if (on_finish != nullptr) {
    on_finish->complete(0);
  }
}
// Flush all in-flight replayed AIO; on_finish fires once everything
// currently outstanding is safe.  A null completion means nothing was
// in flight (create_aio_flush_completion already handled on_finish).
template <typename I>
void Replay<I>::flush(Context *on_finish) {
  io::AioCompletion *aio_comp;
  {
    std::lock_guard locker{m_lock};
    aio_comp = create_aio_flush_completion(
      util::create_async_context_callback(m_image_ctx, on_finish));
    if (aio_comp == nullptr) {
      return;
    }
  }
  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  io::ImageRequest<I>::aio_flush(&m_image_ctx, aio_comp,
                                 io::FLUSH_SOURCE_INTERNAL, {});
}
// Invoked by an op state machine once it has started: resumes event
// processing and parks the state machine until the matching
// OpFinishEvent arrives (or fails it with -ERESTART during shutdown).
template <typename I>
void Replay<I>::replay_op_ready(uint64_t op_tid, Context *on_resume) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": op_tid=" << op_tid << dendl;
  std::lock_guard locker{m_lock};
  auto op_it = m_op_events.find(op_tid);
  ceph_assert(op_it != m_op_events.end());
  OpEvent &op_event = op_it->second;
  ceph_assert(op_event.op_in_progress &&
              op_event.on_op_finish_event == nullptr &&
              op_event.on_finish_ready == nullptr &&
              op_event.on_finish_safe == nullptr);
  // resume processing replay events
  Context *on_start_ready = nullptr;
  std::swap(on_start_ready, op_event.on_start_ready);
  on_start_ready->complete(0);
  // cancel has been requested -- send error to paused state machine
  if (!op_event.finish_on_ready && m_flush_ctx != nullptr) {
    m_image_ctx.op_work_queue->queue(on_resume, -ERESTART);
    return;
  }
  // resume the op state machine once the associated OpFinishEvent
  // is processed
  op_event.on_op_finish_event = new LambdaContext(
    [on_resume](int r) {
      on_resume->complete(r);
    });
  // shut down request -- don't expect OpFinishEvent
  if (op_event.finish_on_ready) {
    m_image_ctx.op_work_queue->queue(on_resume, 0);
  }
}
// Replay a journaled discard.  A null completion means the event was
// already handled (e.g. replay shut down); flush_required throttles
// in-flight modifications via an interposed flush.
template <typename I>
void Replay<I>::handle_event(const journal::AioDiscardEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": AIO discard event" << dendl;
  bool flush_required;
  auto aio_comp = create_aio_modify_completion(on_ready, on_safe,
                                               io::AIO_TYPE_DISCARD,
                                               &flush_required,
                                               {});
  if (aio_comp == nullptr) {
    return;
  }
  // clipped_io() completes aio_comp itself if the extent is beyond EOF
  if (!clipped_io(event.offset, aio_comp)) {
    io::ImageRequest<I>::aio_discard(&m_image_ctx, aio_comp,
                                     {{event.offset, event.length}},
                                     io::ImageArea::DATA,
                                     event.discard_granularity_bytes, {});
  }
  if (flush_required) {
    m_lock.lock();
    auto flush_comp = create_aio_flush_completion(nullptr);
    m_lock.unlock();
    if (flush_comp != nullptr) {
      io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
                                     io::FLUSH_SOURCE_INTERNAL, {});
    }
  }
}
// Replay a journaled write.  Data is copied out of the event so the
// bufferlist can be handed to the AIO path by move.
template <typename I>
void Replay<I>::handle_event(const journal::AioWriteEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": AIO write event" << dendl;
  bufferlist data = event.data;
  bool flush_required;
  auto aio_comp = create_aio_modify_completion(on_ready, on_safe,
                                               io::AIO_TYPE_WRITE,
                                               &flush_required,
                                               {});
  if (aio_comp == nullptr) {
    return;
  }
  // clipped_io() completes aio_comp itself if the extent is beyond EOF
  if (!clipped_io(event.offset, aio_comp)) {
    io::ImageRequest<I>::aio_write(&m_image_ctx, aio_comp,
                                   {{event.offset, event.length}},
                                   io::ImageArea::DATA, std::move(data),
                                   0, {});
  }
  if (flush_required) {
    m_lock.lock();
    auto flush_comp = create_aio_flush_completion(nullptr);
    m_lock.unlock();
    if (flush_comp != nullptr) {
      io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
                                     io::FLUSH_SOURCE_INTERNAL, {});
    }
  }
}
// Replay a journaled flush: all previously batched modify on_safe contexts
// are attached to this flush and completed when it finishes.
template <typename I>
void Replay<I>::handle_event(const journal::AioFlushEvent &event,
                             Context *on_ready, Context *on_safe) {
  ldout(m_image_ctx.cct, 20) << ": AIO flush event" << dendl;
  io::AioCompletion *flush_comp = nullptr;
  {
    std::lock_guard locker{m_lock};
    flush_comp = create_aio_flush_completion(on_safe);
  }
  // nullptr indicates replay was shut down -- on_safe already handled
  if (flush_comp != nullptr) {
    io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
                                   io::FLUSH_SOURCE_INTERNAL, {});
  }
  // flushes never pause replay
  on_ready->complete(0);
}
// Replay a journaled writesame by re-issuing it against the local image.
template <typename I>
void Replay<I>::handle_event(const journal::AioWriteSameEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": AIO writesame event" << dendl;
  // copy the payload: the event is const but aio_writesame consumes it
  bufferlist data = event.data;
  bool flush_required;
  auto aio_comp = create_aio_modify_completion(on_ready, on_safe,
                                               io::AIO_TYPE_WRITESAME,
                                               &flush_required,
                                               {});
  // nullptr indicates replay was shut down -- contexts already handled
  if (aio_comp == nullptr) {
    return;
  }
  if (!clipped_io(event.offset, aio_comp)) {
    io::ImageRequest<I>::aio_writesame(&m_image_ctx, aio_comp,
                                       {{event.offset, event.length}},
                                       io::ImageArea::DATA, std::move(data),
                                       0, {});
  }
  if (flush_required) {
    // low-water mark hit: issue a flush to complete batched on_safe contexts
    m_lock.lock();
    auto flush_comp = create_aio_flush_completion(nullptr);
    m_lock.unlock();
    if (flush_comp != nullptr) {
      io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
                                     io::FLUSH_SOURCE_INTERNAL, {});
    }
  }
}
// Replay a journaled compare-and-write by re-issuing it against the local
// image.  -EILSEQ (compare mismatch) is filtered out as a non-fatal result.
//
// Fix: every other modify handler checks create_aio_modify_completion() for
// nullptr (returned after shut down) before use; this one did not, so a
// shutdown race dereferenced a null aio_comp in clipped_io() and read the
// uninitialized flush_required flag.  It also lacked the sibling handlers'
// nullptr guard on the flush completion.
template <typename I>
void Replay<I>::handle_event(const journal::AioCompareAndWriteEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": AIO CompareAndWrite event" << dendl;
  // copy the payloads: the event is const but the request consumes them
  bufferlist cmp_data = event.cmp_data;
  bufferlist write_data = event.write_data;
  bool flush_required;
  auto aio_comp = create_aio_modify_completion(on_ready, on_safe,
                                               io::AIO_TYPE_COMPARE_AND_WRITE,
                                               &flush_required,
                                               {-EILSEQ});
  // nullptr indicates replay was shut down -- contexts already handled
  if (aio_comp == nullptr) {
    return;
  }
  if (!clipped_io(event.offset, aio_comp)) {
    io::ImageRequest<I>::aio_compare_and_write(&m_image_ctx, aio_comp,
                                               {{event.offset, event.length}},
                                               io::ImageArea::DATA,
                                               std::move(cmp_data),
                                               std::move(write_data),
                                               nullptr, 0, {});
  }
  if (flush_required) {
    // low-water mark hit: issue a flush to complete batched on_safe contexts
    m_lock.lock();
    auto flush_comp = create_aio_flush_completion(nullptr);
    m_lock.unlock();
    if (flush_comp != nullptr) {
      io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
                                     io::FLUSH_SOURCE_INTERNAL, {});
    }
  }
}
// Handle the journal entry that records an op's (snap create, resize, ...)
// final result.  Pairs the entry with the OpEvent tracked since the op's
// start event and either triggers the deferred op (journal recorded
// success) or propagates the recorded failure.
template <typename I>
void Replay<I>::handle_event(const journal::OpFinishEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": Op finish event: "
                 << "op_tid=" << event.op_tid << dendl;
  bool op_in_progress;
  bool filter_ret_val;
  Context *on_op_complete = nullptr;
  Context *on_op_finish_event = nullptr;
  {
    std::lock_guard locker{m_lock};
    auto op_it = m_op_events.find(event.op_tid);
    if (op_it == m_op_events.end()) {
      // no tracked start event: treat as an already-committed op
      ldout(cct, 10) << ": unable to locate associated op: assuming previously "
                     << "committed." << dendl;
      on_ready->complete(0);
      m_image_ctx.op_work_queue->queue(on_safe, 0);
      return;
    }
    OpEvent &op_event = op_it->second;
    ceph_assert(op_event.on_finish_safe == nullptr);
    op_event.on_finish_ready = on_ready;
    op_event.on_finish_safe = on_safe;
    op_in_progress = op_event.op_in_progress;
    // take ownership of the callbacks so they can be fired outside the lock
    std::swap(on_op_complete, op_event.on_op_complete);
    std::swap(on_op_finish_event, op_event.on_op_finish_event);
    // special errors which indicate op never started but was recorded
    // as failed in the journal
    filter_ret_val = (op_event.op_finish_error_codes.count(event.r) != 0);
  }
  if (event.r < 0) {
    if (op_in_progress) {
      // bubble the error up to the in-progress op to cancel it
      on_op_finish_event->complete(event.r);
    } else {
      // op hasn't been started -- bubble the error up since
      // our image is now potentially in an inconsistent state
      // since simple errors should have been caught before
      // creating the op event
      delete on_op_complete;
      delete on_op_finish_event;
      handle_op_complete(event.op_tid, filter_ret_val ? 0 : event.r);
    }
    return;
  }
  // journal recorded success -- apply the op now
  on_op_finish_event->complete(0);
}
// Replay a snapshot-create start event.  Unlike most ops, snap create is
// kicked off immediately (not deferred to the OpFinishEvent) and replay is
// paused (on_ready withheld) until the op signals readiness via
// replay_op_ready(), since snap create quiesces in-flight IO.
template <typename I>
void Replay<I>::handle_event(const journal::SnapCreateEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": Snap create event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event;
  Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                       on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // ignore errors caused due to replay
  op_event->ignore_error_codes = {-EEXIST};
  // avoid lock cycles
  m_image_ctx.op_work_queue->queue(new C_RefreshIfRequired<I>(
    m_image_ctx, new ExecuteOp<I, journal::SnapCreateEvent>(m_image_ctx, event,
                                                            on_op_complete)),
                                   0);
  // do not process more events until the state machine is ready
  // since it will affect IO
  op_event->op_in_progress = true;
  op_event->on_start_ready = on_ready;
}
// Replay a snapshot-remove start event: execution is deferred until the
// paired OpFinishEvent is observed.
template <typename I>
void Replay<I>::handle_event(const journal::SnapRemoveEvent &event,
                             Context *on_ready, Context *on_safe) {
  ldout(m_image_ctx.cct, 20) << ": Snap remove event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event = nullptr;
  auto on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                   on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // a snapshot that is already gone means the op was previously replayed
  op_event->ignore_error_codes = {-ENOENT};
  auto execute_op = new ExecuteOp<I, journal::SnapRemoveEvent>(m_image_ctx,
                                                               event,
                                                               on_op_complete);
  op_event->on_op_finish_event = new C_RefreshIfRequired<I>(m_image_ctx,
                                                            execute_op);
  on_ready->complete(0);
}
// Replay a snapshot-rename start event: execution is deferred until the
// paired OpFinishEvent is observed.
template <typename I>
void Replay<I>::handle_event(const journal::SnapRenameEvent &event,
                             Context *on_ready, Context *on_safe) {
  ldout(m_image_ctx.cct, 20) << ": Snap rename event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event = nullptr;
  auto on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                   on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // a snapshot already carrying the new name means a previous replay
  op_event->ignore_error_codes = {-EEXIST};
  auto execute_op = new ExecuteOp<I, journal::SnapRenameEvent>(m_image_ctx,
                                                               event,
                                                               on_op_complete);
  op_event->on_op_finish_event = new C_RefreshIfRequired<I>(m_image_ctx,
                                                            execute_op);
  on_ready->complete(0);
}
// Replay a snapshot-protect start event: execution is deferred until the
// paired OpFinishEvent is observed.
template <typename I>
void Replay<I>::handle_event(const journal::SnapProtectEvent &event,
                             Context *on_ready, Context *on_safe) {
  ldout(m_image_ctx.cct, 20) << ": Snap protect event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event = nullptr;
  auto on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                   on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // an already-protected snapshot means the op was previously replayed
  op_event->ignore_error_codes = {-EBUSY};
  auto execute_op = new ExecuteOp<I, journal::SnapProtectEvent>(m_image_ctx,
                                                                event,
                                                                on_op_complete);
  op_event->on_op_finish_event = new C_RefreshIfRequired<I>(m_image_ctx,
                                                            execute_op);
  on_ready->complete(0);
}
// Replay a snapshot-unprotect start event: execution is deferred until the
// paired OpFinishEvent is observed.  Note the two distinct error sets:
// op_finish_error_codes filters results recorded in the journal, while
// ignore_error_codes filters results produced by re-running the op here.
template <typename I>
void Replay<I>::handle_event(const journal::SnapUnprotectEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": Snap unprotect event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event;
  Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                       on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  op_event->on_op_finish_event = new C_RefreshIfRequired<I>(
    m_image_ctx, new ExecuteOp<I, journal::SnapUnprotectEvent>(m_image_ctx,
                                                               event,
                                                               on_op_complete));
  // ignore errors recorded in the journal
  op_event->op_finish_error_codes = {-EBUSY};
  // ignore errors caused due to replay
  op_event->ignore_error_codes = {-EINVAL};
  on_ready->complete(0);
}
// Replay a snapshot-rollback start event: execution is deferred until the
// paired OpFinishEvent is observed.  Rollback is idempotent, so no replay
// error codes need to be filtered.
template <typename I>
void Replay<I>::handle_event(const journal::SnapRollbackEvent &event,
                             Context *on_ready, Context *on_safe) {
  ldout(m_image_ctx.cct, 20) << ": Snap rollback start event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event = nullptr;
  auto on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                   on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  auto execute_op = new ExecuteOp<I, journal::SnapRollbackEvent>(m_image_ctx,
                                                                 event,
                                                                 on_op_complete);
  op_event->on_op_finish_event = new C_RefreshIfRequired<I>(m_image_ctx,
                                                            execute_op);
  on_ready->complete(0);
}
// Replay an image-rename start event: execution is deferred until the
// paired OpFinishEvent is observed.
template <typename I>
void Replay<I>::handle_event(const journal::RenameEvent &event,
                             Context *on_ready, Context *on_safe) {
  ldout(m_image_ctx.cct, 20) << ": Rename event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event = nullptr;
  auto on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                   on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // an image already carrying the new name means a previous replay
  op_event->ignore_error_codes = {-EEXIST};
  auto execute_op = new ExecuteOp<I, journal::RenameEvent>(m_image_ctx,
                                                           event,
                                                           on_op_complete);
  op_event->on_op_finish_event = new C_RefreshIfRequired<I>(m_image_ctx,
                                                            execute_op);
  on_ready->complete(0);
}
// Replay a resize start event.  Like snap create, the op is kicked off
// immediately and replay is paused (on_ready withheld) until the resize
// state machine signals readiness via replay_op_ready(), since a shrink
// must adjust the clip boundary before more IO is replayed.
template <typename I>
void Replay<I>::handle_event(const journal::ResizeEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": Resize start event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event;
  Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                       on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // avoid lock cycles
  m_image_ctx.op_work_queue->queue(new C_RefreshIfRequired<I>(
    m_image_ctx, new ExecuteOp<I, journal::ResizeEvent>(m_image_ctx, event,
                                                        on_op_complete)), 0);
  // do not process more events until the state machine is ready
  // since it will affect IO
  op_event->op_in_progress = true;
  op_event->on_start_ready = on_ready;
}
// Replay a flatten start event: execution is deferred until the paired
// OpFinishEvent is observed.
template <typename I>
void Replay<I>::handle_event(const journal::FlattenEvent &event,
                             Context *on_ready, Context *on_safe) {
  ldout(m_image_ctx.cct, 20) << ": Flatten start event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event = nullptr;
  auto on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                   on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // an image that is already flat means the op was previously replayed
  op_event->ignore_error_codes = {-EINVAL};
  auto execute_op = new ExecuteOp<I, journal::FlattenEvent>(m_image_ctx,
                                                            event,
                                                            on_op_complete);
  op_event->on_op_finish_event = new C_RefreshIfRequired<I>(m_image_ctx,
                                                            execute_op);
  on_ready->complete(0);
}
// Demote/promote events are journal markers only -- nothing is re-applied
// to the image, so both contexts complete immediately.
template <typename I>
void Replay<I>::handle_event(const journal::DemotePromoteEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": Demote/Promote event" << dendl;
  on_ready->complete(0);
  on_safe->complete(0);
}
// Replay a snapshot-limit start event: execution is deferred until the
// paired OpFinishEvent is observed.
template <typename I>
void Replay<I>::handle_event(const journal::SnapLimitEvent &event,
                             Context *on_ready, Context *on_safe) {
  ldout(m_image_ctx.cct, 20) << ": Snap limit event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event = nullptr;
  auto on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                   on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // filter out-of-range results produced by re-running the op
  op_event->ignore_error_codes = {-ERANGE};
  auto execute_op = new ExecuteOp<I, journal::SnapLimitEvent>(m_image_ctx,
                                                              event,
                                                              on_op_complete);
  op_event->on_op_finish_event = new C_RefreshIfRequired<I>(m_image_ctx,
                                                            execute_op);
  on_ready->complete(0);
}
// Replay an update-features start event.  Kicked off immediately with
// replay paused (on_ready withheld) until the op state machine signals
// readiness via replay_op_ready(), since feature toggles affect IO.
template <typename I>
void Replay<I>::handle_event(const journal::UpdateFeaturesEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": Update features event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event;
  Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                       on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // avoid lock cycles
  m_image_ctx.op_work_queue->queue(new C_RefreshIfRequired<I>(
    m_image_ctx, new ExecuteOp<I, journal::UpdateFeaturesEvent>(
      m_image_ctx, event, on_op_complete)), 0);
  // do not process more events until the state machine is ready
  // since it will affect IO
  op_event->op_in_progress = true;
  op_event->on_start_ready = on_ready;
}
// Replay a metadata-set start event: execution is deferred until the
// paired OpFinishEvent is observed and dispatched via an async callback.
template <typename I>
void Replay<I>::handle_event(const journal::MetadataSetEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": Metadata set event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event;
  Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                       on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // refresh the image before applying the op so config overrides are current
  on_op_complete = new C_RefreshIfRequired<I>(m_image_ctx, on_op_complete);
  op_event->on_op_finish_event = util::create_async_context_callback(
    m_image_ctx, new ExecuteOp<I, journal::MetadataSetEvent>(
      m_image_ctx, event, on_op_complete));
  on_ready->complete(0);
}
// Replay a metadata-remove start event: execution is deferred until the
// paired OpFinishEvent is observed and dispatched via an async callback.
template <typename I>
void Replay<I>::handle_event(const journal::MetadataRemoveEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": Metadata remove event" << dendl;
  std::lock_guard locker{m_lock};
  OpEvent *op_event;
  Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                       on_safe, &op_event);
  if (on_op_complete == nullptr) {
    return;
  }
  // refresh the image before applying the op so config overrides are current
  on_op_complete = new C_RefreshIfRequired<I>(m_image_ctx, on_op_complete);
  op_event->on_op_finish_event = util::create_async_context_callback(
    m_image_ctx, new ExecuteOp<I, journal::MetadataRemoveEvent>(
      m_image_ctx, event, on_op_complete));
  // ignore errors caused due to replay
  op_event->ignore_error_codes = {-ENOENT};
  on_ready->complete(0);
}
// Unknown (future/unsupported) event types are skipped: both contexts
// complete immediately so replay can continue past them.
template <typename I>
void Replay<I>::handle_event(const journal::UnknownEvent &event,
                             Context *on_ready, Context *on_safe) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": unknown event" << dendl;
  on_ready->complete(0);
  on_safe->complete(0);
}
// Completion handler for replayed write/discard/writesame/compare-and-write.
// on_ready resumes event processing; on_safe is deferred to the next flush
// unless the op failed (after applying the per-op error filters).
template <typename I>
void Replay<I>::handle_aio_modify_complete(Context *on_ready, Context *on_safe,
                                           int r, std::set<int> &filters) {
  std::lock_guard locker{m_lock};
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": on_ready=" << on_ready << ", "
                 << "on_safe=" << on_safe << ", r=" << r << dendl;
  if (on_ready != nullptr) {
    on_ready->complete(0);
  }
  // treat filtered error codes (e.g. -EILSEQ for compare-and-write) as success
  if (filters.find(r) != filters.end())
    r = 0;
  if (r < 0) {
    lderr(cct) << ": AIO modify op failed: " << cpp_strerror(r) << dendl;
    m_image_ctx.op_work_queue->queue(on_safe, r);
    return;
  }
  // will be completed after next flush operation completes
  m_aio_modify_safe_contexts.insert(on_safe);
}
// Completion handler for a replayed/internal flush: fires the batched
// on_safe contexts of all modify ops the flush covered, resumes paused
// replay if the high-water mark had been hit, and fires the pending
// shut-down/flush context once nothing is left in flight.
template <typename I>
void Replay<I>::handle_aio_flush_complete(Context *on_flush_safe,
                                          Contexts &on_safe_ctxs, int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << ": AIO flush failed: " << cpp_strerror(r) << dendl;
  }
  Context *on_aio_ready = nullptr;
  Context *on_flush = nullptr;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_in_flight_aio_flush > 0);
    ceph_assert(m_in_flight_aio_modify >= on_safe_ctxs.size());
    --m_in_flight_aio_flush;
    m_in_flight_aio_modify -= on_safe_ctxs.size();
    std::swap(on_aio_ready, m_on_aio_ready);
    if (m_in_flight_op_events == 0 &&
        (m_in_flight_aio_flush + m_in_flight_aio_modify) == 0) {
      on_flush = m_flush_ctx;
    }
    // strip out previously failed on_safe contexts
    for (auto it = on_safe_ctxs.begin(); it != on_safe_ctxs.end(); ) {
      if (m_aio_modify_safe_contexts.erase(*it)) {
        ++it;
      } else {
        // already completed with an error by handle_aio_modify_complete
        it = on_safe_ctxs.erase(it);
      }
    }
  }
  if (on_aio_ready != nullptr) {
    ldout(cct, 10) << ": resuming paused AIO" << dendl;
    on_aio_ready->complete(0);
  }
  if (on_flush_safe != nullptr) {
    on_safe_ctxs.push_back(on_flush_safe);
  }
  for (auto ctx : on_safe_ctxs) {
    ldout(cct, 20) << ": completing safe context: " << ctx << dendl;
    ctx->complete(r);
  }
  if (on_flush != nullptr) {
    ldout(cct, 20) << ": completing flush context: " << on_flush << dendl;
    on_flush->complete(r);
  }
}
// Register a new OpEvent for op_tid and return the completion callback the
// op should fire when done.  Returns nullptr (after completing the provided
// contexts appropriately) on shut down or duplicate tid; caller must hold
// m_lock.  *op_event points into m_op_events on success.
template <typename I>
Context *Replay<I>::create_op_context_callback(uint64_t op_tid,
                                               Context *on_ready,
                                               Context *on_safe,
                                               OpEvent **op_event) {
  CephContext *cct = m_image_ctx.cct;
  if (m_shut_down) {
    ldout(cct, 5) << ": ignoring event after shut down" << dendl;
    on_ready->complete(0);
    m_image_ctx.op_work_queue->queue(on_safe, -ESHUTDOWN);
    return nullptr;
  }
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_op_events.count(op_tid) != 0) {
    lderr(cct) << ": duplicate op tid detected: " << op_tid << dendl;
    // on_ready is already async but on failure invoke on_safe async
    // as well
    on_ready->complete(0);
    m_image_ctx.op_work_queue->queue(on_safe, -EINVAL);
    return nullptr;
  }
  ++m_in_flight_op_events;
  *op_event = &m_op_events[op_tid];
  (*op_event)->on_start_safe = on_safe;
  Context *on_op_complete = new C_OpOnComplete(this, op_tid);
  (*op_event)->on_op_complete = on_op_complete;
  return on_op_complete;
}
// Final completion path for an op event: removes the OpEvent, fires its
// remaining ready/safe contexts (applying the ignore_error_codes filter),
// and kicks the pending shut-down/flush context once nothing is in flight.
template <typename I>
void Replay<I>::handle_op_complete(uint64_t op_tid, int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << ": op_tid=" << op_tid << ", "
                 << "r=" << r << dendl;
  OpEvent op_event;
  bool shutting_down = false;
  {
    std::lock_guard locker{m_lock};
    auto op_it = m_op_events.find(op_tid);
    ceph_assert(op_it != m_op_events.end());
    op_event = std::move(op_it->second);
    m_op_events.erase(op_it);
    if (m_shut_down) {
      ceph_assert(m_flush_ctx != nullptr);
      shutting_down = true;
    }
  }
  // on_start_ready still being set means the blocking op never became ready;
  // that is only legal when it failed (and not with a cancel request)
  ceph_assert(op_event.on_start_ready == nullptr || (r < 0 && r != -ERESTART));
  if (op_event.on_start_ready != nullptr) {
    // blocking op event failed before it became ready
    ceph_assert(op_event.on_finish_ready == nullptr &&
                op_event.on_finish_safe == nullptr);
    op_event.on_start_ready->complete(0);
  } else {
    // event kicked off by OpFinishEvent
    ceph_assert((op_event.on_finish_ready != nullptr &&
                 op_event.on_finish_safe != nullptr) || shutting_down);
  }
  if (op_event.on_op_finish_event != nullptr) {
    op_event.on_op_finish_event->complete(r);
  }
  if (op_event.on_finish_ready != nullptr) {
    op_event.on_finish_ready->complete(0);
  }
  // filter out errors caused by replay of the same op
  if (r < 0 && op_event.ignore_error_codes.count(r) != 0) {
    r = 0;
  }
  op_event.on_start_safe->complete(r);
  if (op_event.on_finish_safe != nullptr) {
    op_event.on_finish_safe->complete(r);
  }
  // shut down request might have occurred while lock was
  // dropped -- handle if pending
  Context *on_flush = nullptr;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_in_flight_op_events > 0);
    --m_in_flight_op_events;
    if (m_in_flight_op_events == 0 &&
        (m_in_flight_aio_flush + m_in_flight_aio_modify) == 0) {
      on_flush = m_flush_ctx;
    }
  }
  if (on_flush != nullptr) {
    m_image_ctx.op_work_queue->queue(on_flush, 0);
  }
}
// Build the AioCompletion used to replay a modify op (write/discard/etc.).
// Implements the low/high water mark flow control: *flush_required is set
// when the caller should issue an internal flush, and on_ready is withheld
// (stashed in m_on_aio_ready) when the in-flight limit is reached.
// Returns nullptr after shut down (contexts already handled).
template <typename I>
io::AioCompletion *
Replay<I>::create_aio_modify_completion(Context *on_ready,
                                        Context *on_safe,
                                        io::aio_type_t aio_type,
                                        bool *flush_required,
                                        std::set<int> &&filters) {
  std::lock_guard locker{m_lock};
  CephContext *cct = m_image_ctx.cct;
  ceph_assert(m_on_aio_ready == nullptr);
  if (m_shut_down) {
    ldout(cct, 5) << ": ignoring event after shut down" << dendl;
    on_ready->complete(0);
    m_image_ctx.op_work_queue->queue(on_safe, -ESHUTDOWN);
    return nullptr;
  }
  ++m_in_flight_aio_modify;
  m_aio_modify_unsafe_contexts.push_back(on_safe);
  // FLUSH if we hit the low-water mark -- on_safe contexts are
  // completed by flushes-only so that we don't move the journal
  // commit position until safely on-disk
  *flush_required = (m_aio_modify_unsafe_contexts.size() ==
                       IN_FLIGHT_IO_LOW_WATER_MARK);
  if (*flush_required) {
    ldout(cct, 10) << ": hit AIO replay low-water mark: scheduling flush"
                   << dendl;
  }
  // READY for more events if:
  // * not at high-water mark for IO
  // * in-flight ops are at a consistent point (snap create has IO flushed,
  //   shrink has adjusted clip boundary, etc) -- should have already been
  //   flagged not-ready
  if (m_in_flight_aio_modify == IN_FLIGHT_IO_HIGH_WATER_MARK) {
    ldout(cct, 10) << ": hit AIO replay high-water mark: pausing replay"
                   << dendl;
    ceph_assert(m_on_aio_ready == nullptr);
    std::swap(m_on_aio_ready, on_ready);
  }
  // when the modification is ACKed by librbd, we can process the next
  // event. when flushed, the completion of the next flush will fire the
  // on_safe callback
  auto aio_comp = io::AioCompletion::create_and_start<Context>(
    new C_AioModifyComplete(this, on_ready, on_safe, std::move(filters)),
    util::get_image_ctx(&m_image_ctx), aio_type);
  return aio_comp;
}
// Build the AioCompletion used to replay a flush, transferring all batched
// unsafe modify on_safe contexts to it.  Returns nullptr after shut down.
// Caller must hold m_lock.
template <typename I>
io::AioCompletion *Replay<I>::create_aio_flush_completion(Context *on_safe) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  CephContext *cct = m_image_ctx.cct;
  if (m_shut_down) {
    ldout(cct, 5) << ": ignoring event after shut down" << dendl;
    if (on_safe != nullptr) {
      m_image_ctx.op_work_queue->queue(on_safe, -ESHUTDOWN);
    }
    return nullptr;
  }
  ++m_in_flight_aio_flush;
  // associate all prior write/discard ops to this flush request
  auto aio_comp = io::AioCompletion::create_and_start<Context>(
      new C_AioFlushComplete(this, on_safe,
                             std::move(m_aio_modify_unsafe_contexts)),
      util::get_image_ctx(&m_image_ctx), io::AIO_TYPE_FLUSH);
  m_aio_modify_unsafe_contexts.clear();
  return aio_comp;
}
// Returns true (and completes aio_comp as a zero-request no-op) when the
// event's offset lies beyond the current image size, false otherwise.
template <typename I>
bool Replay<I>::clipped_io(uint64_t image_offset, io::AioCompletion *aio_comp) {
  m_image_ctx.image_lock.lock_shared();
  auto current_size = m_image_ctx.size;
  m_image_ctx.image_lock.unlock_shared();
  if (image_offset < current_size) {
    return false;
  }
  // rbd-mirror image sync might race an IO event w/ associated resize between
  // the point the peer is registered and the sync point is created, so no-op
  // IO events beyond the current image extents since under normal conditions
  // it wouldn't have been recorded in the journal
  ldout(m_image_ctx.cct, 5) << ": no-op IO event beyond image size" << dendl;
  aio_comp->get();
  aio_comp->set_request_count(0);
  aio_comp->put();
  return true;
}
} // namespace journal
} // namespace librbd
template class librbd::journal::Replay<librbd::ImageCtx>;
| 38,979 | 32.146259 | 83 | cc |
null | ceph-main/src/librbd/journal/Replay.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_REPLAY_H
#define CEPH_LIBRBD_JOURNAL_REPLAY_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/Context.h"
#include "common/ceph_mutex.h"
#include "librbd/io/Types.h"
#include "librbd/journal/Types.h"
#include <boost/variant.hpp>
#include <list>
#include <unordered_set>
#include <unordered_map>
namespace librbd {
class ImageCtx;
namespace io { struct AioCompletion; }
namespace journal {
// Replays decoded journal events against a local image: AIO events are
// re-issued directly while op events are tracked by tid and deferred/paused
// as required by their state machines.
template <typename ImageCtxT = ImageCtx>
class Replay {
public:
  static Replay *create(ImageCtxT &image_ctx) {
    return new Replay(image_ctx);
  }
  Replay(ImageCtxT &image_ctx);
  ~Replay();
  // decode a single journal entry payload into *event_entry
  int decode(bufferlist::const_iterator *it, EventEntry *event_entry);
  // replay one decoded event; on_ready fires when the next event may be
  // processed, on_safe once the event's effects are committed
  void process(const EventEntry &event_entry,
               Context *on_ready, Context *on_safe);
  void shut_down(bool cancel_ops, Context *on_finish);
  void flush(Context *on_finish);
  // resume replay once the paused op state machine for op_tid is ready
  void replay_op_ready(uint64_t op_tid, Context *on_resume);
private:
  typedef std::unordered_set<int> ReturnValues;
  // per-tid tracking for an in-flight op event
  struct OpEvent {
    bool op_in_progress = false;
    bool finish_on_ready = false;
    Context *on_op_finish_event = nullptr;
    Context *on_start_ready = nullptr;
    Context *on_start_safe = nullptr;
    Context *on_finish_ready = nullptr;
    Context *on_finish_safe = nullptr;
    Context *on_op_complete = nullptr;
    // journal-recorded errors which indicate the op never actually started
    ReturnValues op_finish_error_codes;
    // errors to treat as success because they result from replaying the op
    ReturnValues ignore_error_codes;
  };
  typedef std::list<uint64_t> OpTids;
  typedef std::list<Context *> Contexts;
  typedef std::unordered_set<Context *> ContextSet;
  typedef std::unordered_map<uint64_t, OpEvent> OpEvents;
  // routes an op's completion back to handle_op_complete()
  struct C_OpOnComplete : public Context {
    Replay *replay;
    uint64_t op_tid;
    C_OpOnComplete(Replay *replay, uint64_t op_tid)
      : replay(replay), op_tid(op_tid) {
    }
    void finish(int r) override {
      replay->handle_op_complete(op_tid, r);
    }
  };
  // routes a replayed modify op's completion to handle_aio_modify_complete()
  struct C_AioModifyComplete : public Context {
    Replay *replay;
    Context *on_ready;
    Context *on_safe;
    std::set<int> filters;
    C_AioModifyComplete(Replay *replay, Context *on_ready,
                        Context *on_safe, std::set<int> &&filters)
      : replay(replay), on_ready(on_ready), on_safe(on_safe),
        filters(std::move(filters)) {
    }
    void finish(int r) override {
      replay->handle_aio_modify_complete(on_ready, on_safe, r, filters);
    }
  };
  // routes a replayed flush's completion (with its batch of on_safe
  // contexts) to handle_aio_flush_complete()
  struct C_AioFlushComplete : public Context {
    Replay *replay;
    Context *on_flush_safe;
    Contexts on_safe_ctxs;
    C_AioFlushComplete(Replay *replay, Context *on_flush_safe,
                       Contexts &&on_safe_ctxs)
      : replay(replay), on_flush_safe(on_flush_safe),
        on_safe_ctxs(on_safe_ctxs) {
    }
    void finish(int r) override {
      replay->handle_aio_flush_complete(on_flush_safe, on_safe_ctxs, r);
    }
  };
  // dispatches a variant event entry to the matching handle_event overload
  struct EventVisitor : public boost::static_visitor<void> {
    Replay *replay;
    Context *on_ready;
    Context *on_safe;
    EventVisitor(Replay *_replay, Context *_on_ready, Context *_on_safe)
      : replay(_replay), on_ready(_on_ready), on_safe(_on_safe) {
    }
    template <typename Event>
    inline void operator()(const Event &event) const {
      replay->handle_event(event, on_ready, on_safe);
    }
  };
  ImageCtxT &m_image_ctx;
  // protects all mutable state below
  ceph::mutex m_lock = ceph::make_mutex("Replay<I>::m_lock");
  uint64_t m_in_flight_aio_flush = 0;
  uint64_t m_in_flight_aio_modify = 0;
  // on_safe contexts not yet attached to a flush request
  Contexts m_aio_modify_unsafe_contexts;
  // on_safe contexts awaiting completion of the next flush
  ContextSet m_aio_modify_safe_contexts;
  OpEvents m_op_events;
  uint64_t m_in_flight_op_events = 0;
  bool m_shut_down = false;
  // completed once all in-flight ops/AIO have drained (flush or shut down)
  Context *m_flush_ctx = nullptr;
  // on_ready context stashed when replay pauses at the AIO high-water mark
  Context *m_on_aio_ready = nullptr;
  void handle_event(const AioDiscardEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const AioWriteEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const AioWriteSameEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const AioCompareAndWriteEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const AioFlushEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const OpFinishEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const SnapCreateEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const SnapRemoveEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const SnapRenameEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const SnapProtectEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const SnapUnprotectEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const SnapRollbackEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const RenameEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const ResizeEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const FlattenEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const DemotePromoteEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const SnapLimitEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const UpdateFeaturesEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const MetadataSetEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const MetadataRemoveEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_event(const UnknownEvent &event, Context *on_ready,
                    Context *on_safe);
  void handle_aio_modify_complete(Context *on_ready, Context *on_safe,
                                  int r, std::set<int> &filters);
  void handle_aio_flush_complete(Context *on_flush_safe, Contexts &on_safe_ctxs,
                                 int r);
  Context *create_op_context_callback(uint64_t op_tid, Context *on_ready,
                                      Context *on_safe, OpEvent **op_event);
  void handle_op_complete(uint64_t op_tid, int r);
  io::AioCompletion *create_aio_modify_completion(Context *on_ready,
                                                  Context *on_safe,
                                                  io::aio_type_t aio_type,
                                                  bool *flush_required,
                                                  std::set<int> &&filters);
  io::AioCompletion *create_aio_flush_completion(Context *on_safe);
  void handle_aio_completion(io::AioCompletion *aio_comp);
  bool clipped_io(uint64_t image_offset, io::AioCompletion *aio_comp);
};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::Replay<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_REPLAY_H
| 7,233 | 34.116505 | 80 | h |
null | ceph-main/src/librbd/journal/ResetRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/ResetRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "journal/Journaler.h"
#include "journal/Settings.h"
#include "include/ceph_assert.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/journal/CreateRequest.h"
#include "librbd/journal/RemoveRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::ResetRequest: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace journal {
using util::create_async_context_callback;
using util::create_context_callback;
// Kick off the reset state machine (see the diagram in ResetRequest.h).
template<typename I>
void ResetRequest<I>::send() {
  init_journaler();
}
// Open the existing journal so its metadata can be salvaged before removal.
template<typename I>
void ResetRequest<I>::init_journaler() {
  ldout(m_cct, 10) << dendl;
  m_journaler = new Journaler(m_io_ctx, m_image_id, m_client_id, {}, nullptr);
  Context *ctx = create_context_callback<
    ResetRequest<I>, &ResetRequest<I>::handle_init_journaler>(this);
  m_journaler->init(ctx);
}
// Capture the journal's order/splay-width/data-pool so the recreated
// journal matches; any error is stashed in m_ret_val and the journaler is
// shut down regardless so cleanup always runs.
template<typename I>
void ResetRequest<I>::handle_init_journaler(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r == -ENOENT) {
    ldout(m_cct, 5) << "journal does not exist" << dendl;
    m_ret_val = r;
  } else if (r < 0) {
    lderr(m_cct) << "failed to init journaler: " << cpp_strerror(r) << dendl;
    m_ret_val = r;
  } else {
    int64_t pool_id;
    m_journaler->get_metadata(&m_order, &m_splay_width, &pool_id);
    // -1 means journal data objects live in the image's own pool
    if (pool_id != -1) {
      // translate the pool id back to a name for CreateRequest
      librados::Rados rados(m_io_ctx);
      r = rados.pool_reverse_lookup(pool_id, &m_object_pool_name);
      if (r < 0) {
        lderr(m_cct) << "failed to lookup data pool: " << cpp_strerror(r)
                     << dendl;
        m_ret_val = r;
      }
    }
  }
  shut_down_journaler();
}
// Close the journaler; the callback is dispatched async to avoid completing
// in the journaler's own callback context.
template<typename I>
void ResetRequest<I>::shut_down_journaler() {
  ldout(m_cct, 10) << dendl;
  Context *ctx = create_async_context_callback(
    m_op_work_queue, create_context_callback<
      ResetRequest<I>, &ResetRequest<I>::handle_journaler_shutdown>(this));
  m_journaler->shut_down(ctx);
}
// Release the journaler and bail out with the first recorded error (from
// init or shutdown); otherwise proceed to removing the old journal.
template<typename I>
void ResetRequest<I>::handle_journaler_shutdown(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  delete m_journaler;
  if (r < 0) {
    lderr(m_cct) << "failed to shut down journaler: " << cpp_strerror(r)
                 << dendl;
    // preserve the earlier error, if any
    if (m_ret_val == 0) {
      m_ret_val = r;
    }
  }
  if (m_ret_val < 0) {
    finish(m_ret_val);
    return;
  }
  remove_journal();
}
// Delete the existing journal's objects and metadata.
template<typename I>
void ResetRequest<I>::remove_journal() {
  ldout(m_cct, 10) << dendl;
  Context *ctx = create_context_callback<
    ResetRequest<I>, &ResetRequest<I>::handle_remove_journal>(this);
  auto req = RemoveRequest<I>::create(m_io_ctx, m_image_id, m_client_id,
                                      m_op_work_queue, ctx);
  req->send();
}
// Proceed to recreating the journal once the old one has been removed.
template<typename I>
void ResetRequest<I>::handle_remove_journal(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_cct) << "failed to remove journal: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  create_journal();
}
// Recreate the journal with the salvaged layout (order, splay width, data
// pool), tagged with the provided mirror uuid and a fresh tag class.
template<typename I>
void ResetRequest<I>::create_journal() {
  ldout(m_cct, 10) << dendl;
  Context *ctx = create_context_callback<
    ResetRequest<I>, &ResetRequest<I>::handle_create_journal>(this);
  journal::TagData tag_data(m_mirror_uuid);
  auto req = CreateRequest<I>::create(m_io_ctx, m_image_id, m_order,
                                      m_splay_width, m_object_pool_name,
                                      cls::journal::Tag::TAG_CLASS_NEW,
                                      tag_data, m_client_id, m_op_work_queue,
                                      ctx);
  req->send();
}
// Terminal step: propagate the create result to the caller.
template<typename I>
void ResetRequest<I>::handle_create_journal(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_cct) << "failed to create journal: " << cpp_strerror(r) << dendl;
  }
  finish(r);
}
// Complete the caller's context and self-destruct -- the request object is
// heap-allocated and owns itself; nothing may touch *this after delete.
template<typename I>
void ResetRequest<I>::finish(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace journal
} // namespace librbd
template class librbd::journal::ResetRequest<librbd::ImageCtx>;
| 4,348 | 25.680982 | 80 | cc |
null | ceph-main/src/librbd/journal/ResetRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_RESET_REQUEST_H
#define CEPH_LIBRBD_JOURNAL_RESET_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "librbd/journal/TypeTraits.h"
#include "common/Timer.h"
#include <string>
class Context;
class ContextWQ;
namespace journal { class Journaler; }
namespace librbd {
class ImageCtx;
namespace journal {
// Asynchronous state machine that resets an image journal: the existing
// journaler is initialized and shut down, the journal is removed, and a new
// empty journal is created with a tag seeded from the supplied mirror uuid
// (see the state diagram below).  The request completes on_finish and
// deletes itself when done.
template<typename ImageCtxT = ImageCtx>
class ResetRequest {
public:
  // Factory helper; the returned request owns itself and is destroyed on
  // completion.
  static ResetRequest *create(librados::IoCtx &io_ctx,
                              const std::string &image_id,
                              const std::string &client_id,
                              const std::string &mirror_uuid,
                              ContextWQ *op_work_queue, Context *on_finish) {
    return new ResetRequest(io_ctx, image_id, client_id, mirror_uuid,
                            op_work_queue, on_finish);
  }
  ResetRequest(librados::IoCtx &io_ctx, const std::string &image_id,
               const std::string &client_id, const std::string &mirror_uuid,
               ContextWQ *op_work_queue, Context *on_finish)
    : m_io_ctx(io_ctx), m_image_id(image_id), m_client_id(client_id),
      m_mirror_uuid(mirror_uuid), m_op_work_queue(op_work_queue),
      m_on_finish(on_finish),
      m_cct(reinterpret_cast<CephContext *>(m_io_ctx.cct())) {
  }
  // Starts the state machine.
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * INIT_JOURNALER
   *    |
   *    v
   * SHUT_DOWN_JOURNALER
   *    |
   *    v
   * REMOVE_JOURNAL
   *    |
   *    v
   * CREATE_JOURNAL
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  typedef typename TypeTraits<ImageCtxT>::Journaler Journaler;
  librados::IoCtx &m_io_ctx;
  std::string m_image_id;
  std::string m_client_id;
  std::string m_mirror_uuid;
  ContextWQ *m_op_work_queue;
  Context *m_on_finish;
  CephContext *m_cct;
  Journaler *m_journaler = nullptr;
  int m_ret_val = 0;  // first error observed; preserved through cleanup
  // journal layout parameters reused when recreating the journal
  // (presumably captured during init_journaler -- confirm)
  uint8_t m_order = 0;
  uint8_t m_splay_width = 0;
  std::string m_object_pool_name;
  void init_journaler();
  void handle_init_journaler(int r);
  void shut_down_journaler();
  void handle_journaler_shutdown(int r);
  void remove_journal();
  void handle_remove_journal(int r);
  void create_journal();
  void handle_create_journal(int r);
  void finish(int r);
};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::ResetRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_RESET_REQUEST_H
| 2,630 | 22.702703 | 77 | h |
null | ceph-main/src/librbd/journal/StandardPolicy.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/StandardPolicy.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/asio/ContextWQ.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::StandardPolicy: "
namespace librbd {
namespace journal {
// Allocate a local journal tag via the image's journal; if the local image
// has not been promoted (i.e. it is not the tag owner), the completion is
// queued with -EPERM instead.
template<typename I>
void StandardPolicy<I>::allocate_tag_on_lock(Context *on_finish) {
  auto journal = m_image_ctx->journal;
  ceph_assert(journal != nullptr);

  if (journal->is_tag_owner()) {
    journal->allocate_local_tag(on_finish);
    return;
  }

  lderr(m_image_ctx->cct) << "local image not promoted" << dendl;
  m_image_ctx->op_work_queue->queue(on_finish, -EPERM);
}
} // namespace journal
} // namespace librbd
template class librbd::journal::StandardPolicy<librbd::ImageCtx>;
| 894 | 26.121212 | 70 | cc |
null | ceph-main/src/librbd/journal/StandardPolicy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_STANDARD_POLICY_H
#define CEPH_LIBRBD_JOURNAL_STANDARD_POLICY_H
#include "librbd/journal/Policy.h"
namespace librbd {
struct ImageCtx;
namespace journal {
// Default journal policy: appends and journaling are always enabled; tag
// allocation is delegated to the image's journal (see StandardPolicy.cc,
// which queues -EPERM when the local image is not the tag owner).
template<typename ImageCtxT = ImageCtx>
class StandardPolicy : public Policy {
public:
  StandardPolicy(ImageCtxT *image_ctx) : m_image_ctx(image_ctx) {
  }
  // Journal appends are never disabled under the standard policy.
  bool append_disabled() const override {
    return false;
  }
  // The journal itself is never disabled under the standard policy.
  bool journal_disabled() const override {
    return false;
  }
  void allocate_tag_on_lock(Context *on_finish) override;
private:
  ImageCtxT *m_image_ctx;  // non-owning back-pointer to the image context
};
} // namespace journal
} // namespace librbd
extern template class librbd::journal::StandardPolicy<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_STANDARD_POLICY_H
| 840 | 20.564103 | 72 | h |
null | ceph-main/src/librbd/journal/TypeTraits.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_TYPE_TRAITS_H
#define CEPH_LIBRBD_JOURNAL_TYPE_TRAITS_H
struct ContextWQ;
namespace journal {
class Future;
class Journaler;
class ReplayEntry;
}
namespace librbd {
namespace journal {
// Maps the image context template parameter to the concrete journal types
// used at runtime; a specialization can substitute alternates (presumably
// mocks for unit tests -- confirm against the test tree).
template <typename ImageCtxT>
struct TypeTraits {
  typedef ::journal::Journaler Journaler;
  typedef ::journal::Future Future;
  typedef ::journal::ReplayEntry ReplayEntry;
  typedef ::ContextWQ ContextWQ;
};
} // namespace journal
} // namespace librbd
#endif // CEPH_LIBRBD_JOURNAL_TYPE_TRAITS_H
| 617 | 19.6 | 70 | h |
null | ceph-main/src/librbd/journal/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/Types.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "include/types.h"
#include "common/Formatter.h"
namespace librbd {
namespace journal {
using ceph::encode;
using ceph::decode;
namespace {

// Extracts the static TYPE discriminator (e.g. EventType/ClientMetaType)
// from whichever alternative a boost::variant currently holds.
template <typename E>
class GetTypeVisitor : public boost::static_visitor<E> {
public:
  template <typename T>
  inline E operator()(const T&) const {
    return T::TYPE;
  }
};

// Serializes the active alternative: first its TYPE tag as uint32, then the
// payload, so the decoder can re-select the correct alternative later.
class EncodeVisitor : public boost::static_visitor<void> {
public:
  explicit EncodeVisitor(bufferlist &bl) : m_bl(bl) {
  }
  template <typename T>
  inline void operator()(const T& t) const {
    encode(static_cast<uint32_t>(T::TYPE), m_bl);
    t.encode(m_bl);
  }
private:
  bufferlist &m_bl;
};

// Decodes the payload of the already-selected alternative; the caller must
// read the type tag and assign the matching alternative before visiting.
class DecodeVisitor : public boost::static_visitor<void> {
public:
  DecodeVisitor(__u8 version, bufferlist::const_iterator &iter)
    : m_version(version), m_iter(iter) {
  }
  template <typename T>
  inline void operator()(T& t) const {
    t.decode(m_version, m_iter);
  }
private:
  __u8 m_version;
  bufferlist::const_iterator &m_iter;
};

// Dumps the active alternative to a Formatter, preceded by its stringified
// type under the supplied key.
class DumpVisitor : public boost::static_visitor<void> {
public:
  explicit DumpVisitor(Formatter *formatter, const std::string &key)
    : m_formatter(formatter), m_key(key) {}
  template <typename T>
  inline void operator()(const T& t) const {
    auto type = T::TYPE;
    m_formatter->dump_string(m_key.c_str(), stringify(type));
    t.dump(m_formatter);
  }
private:
  ceph::Formatter *m_formatter;
  std::string m_key;
};

} // anonymous namespace
void AioDiscardEvent::encode(bufferlist& bl) const {
using ceph::encode;
encode(offset, bl);
encode(length, bl);
bool skip_partial_discard = (discard_granularity_bytes > 0);
encode(skip_partial_discard, bl);
encode(discard_granularity_bytes, bl);
}
// Decode a discard event, reconciling three encoding generations:
//   < v4: neither skip_partial_discard nor granularity were encoded
//     v4: a boolean skip_partial_discard flag was encoded
//  >= v5: the explicit discard_granularity_bytes value is encoded
void AioDiscardEvent::decode(__u8 version, bufferlist::const_iterator& it) {
  using ceph::decode;
  decode(offset, it);
  decode(length, it);
  bool skip_partial_discard = false;
  if (version >= 4) {
    decode(skip_partial_discard, it);
  }
  if (version >= 5) {
    decode(discard_granularity_bytes, it);
  } else {
    // map the legacy boolean onto the granularity field
    if (skip_partial_discard) {
      // use a size larger than the maximum object size which will be
      // truncated down to object size during IO processing
      discard_granularity_bytes = std::numeric_limits<uint32_t>::max();
    } else {
      discard_granularity_bytes = 0;
    }
  }
}
void AioDiscardEvent::dump(Formatter *f) const {
f->dump_unsigned("offset", offset);
f->dump_unsigned("length", length);
f->dump_unsigned("discard_granularity_bytes", discard_granularity_bytes);
}
uint32_t AioWriteEvent::get_fixed_size() {
return EventEntry::get_fixed_size() + 16 /* offset, length */;
}
void AioWriteEvent::encode(bufferlist& bl) const {
using ceph::encode;
encode(offset, bl);
encode(length, bl);
encode(data, bl);
}
void AioWriteEvent::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
decode(offset, it);
decode(length, it);
decode(data, it);
}
void AioWriteEvent::dump(Formatter *f) const {
f->dump_unsigned("offset", offset);
f->dump_unsigned("length", length);
}
void AioWriteSameEvent::encode(bufferlist& bl) const {
using ceph::encode;
encode(offset, bl);
encode(length, bl);
encode(data, bl);
}
void AioWriteSameEvent::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
decode(offset, it);
decode(length, it);
decode(data, it);
}
void AioWriteSameEvent::dump(Formatter *f) const {
f->dump_unsigned("offset", offset);
f->dump_unsigned("length", length);
}
uint32_t AioCompareAndWriteEvent::get_fixed_size() {
return EventEntry::get_fixed_size() + 32 /* offset, length */;
}
void AioCompareAndWriteEvent::encode(bufferlist& bl) const {
using ceph::encode;
encode(offset, bl);
encode(length, bl);
encode(cmp_data, bl);
encode(write_data, bl);
}
void AioCompareAndWriteEvent::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
decode(offset, it);
decode(length, it);
decode(cmp_data, it);
decode(write_data, it);
}
void AioCompareAndWriteEvent::dump(Formatter *f) const {
f->dump_unsigned("offset", offset);
f->dump_unsigned("length", length);
}
void AioFlushEvent::encode(bufferlist& bl) const {
}
void AioFlushEvent::decode(__u8 version, bufferlist::const_iterator& it) {
}
void AioFlushEvent::dump(Formatter *f) const {
}
void OpEventBase::encode(bufferlist& bl) const {
using ceph::encode;
encode(op_tid, bl);
}
void OpEventBase::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
decode(op_tid, it);
}
void OpEventBase::dump(Formatter *f) const {
f->dump_unsigned("op_tid", op_tid);
}
// Encode the op-finish event.  NOTE(review): op_tid is encoded twice --
// once by OpEventBase::encode() and again here -- and decode() mirrors
// this.  Presumably historical wire format; do not remove one side only.
void OpFinishEvent::encode(bufferlist& bl) const {
  OpEventBase::encode(bl);
  using ceph::encode;
  encode(op_tid, bl);
  encode(r, bl);
}
// Decode the op-finish event.  NOTE(review): op_tid is decoded twice to
// match the doubled encode() above; both sides must stay in sync.
void OpFinishEvent::decode(__u8 version, bufferlist::const_iterator& it) {
  OpEventBase::decode(version, it);
  using ceph::decode;
  decode(op_tid, it);
  decode(r, it);
}
void OpFinishEvent::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_unsigned("op_tid", op_tid);
f->dump_int("result", r);
}
void SnapEventBase::encode(bufferlist& bl) const {
using ceph::encode;
OpEventBase::encode(bl);
encode(snap_name, bl);
encode(snap_namespace, bl);
}
// Decode base snapshot-op state.  snap_namespace was appended in version 4
// of the event encoding; older entries carry only the snapshot name.
// (Removed a redundant duplicate `using ceph::decode;` declaration.)
void SnapEventBase::decode(__u8 version, bufferlist::const_iterator& it) {
  using ceph::decode;
  OpEventBase::decode(version, it);
  decode(snap_name, it);
  if (version >= 4) {
    decode(snap_namespace, it);
  }
}
void SnapEventBase::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_string("snap_name", snap_name);
snap_namespace.dump(f);
}
void SnapCreateEvent::encode(bufferlist &bl) const {
SnapEventBase::encode(bl);
}
void SnapCreateEvent::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
SnapEventBase::decode(version, it);
if (version == 3) {
decode(snap_namespace, it);
}
}
void SnapCreateEvent::dump(Formatter *f) const {
SnapEventBase::dump(f);
}
void SnapLimitEvent::encode(bufferlist &bl) const {
OpEventBase::encode(bl);
using ceph::encode;
encode(limit, bl);
}
void SnapLimitEvent::decode(__u8 version, bufferlist::const_iterator& it) {
OpEventBase::decode(version, it);
using ceph::decode;
decode(limit, it);
}
void SnapLimitEvent::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_unsigned("limit", limit);
}
void SnapRenameEvent::encode(bufferlist& bl) const {
OpEventBase::encode(bl);
using ceph::encode;
encode(dst_snap_name, bl);
encode(snap_id, bl);
encode(src_snap_name, bl);
}
void SnapRenameEvent::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
OpEventBase::decode(version, it);
decode(dst_snap_name, it);
decode(snap_id, it);
if (version >= 2) {
decode(src_snap_name, it);
}
}
void SnapRenameEvent::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_unsigned("src_snap_id", snap_id);
f->dump_string("src_snap_name", src_snap_name);
f->dump_string("dest_snap_name", dst_snap_name);
}
void RenameEvent::encode(bufferlist& bl) const {
OpEventBase::encode(bl);
using ceph::encode;
encode(image_name, bl);
}
void RenameEvent::decode(__u8 version, bufferlist::const_iterator& it) {
OpEventBase::decode(version, it);
using ceph::decode;
decode(image_name, it);
}
void RenameEvent::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_string("image_name", image_name);
}
void ResizeEvent::encode(bufferlist& bl) const {
OpEventBase::encode(bl);
using ceph::encode;
encode(size, bl);
}
void ResizeEvent::decode(__u8 version, bufferlist::const_iterator& it) {
OpEventBase::decode(version, it);
using ceph::decode;
decode(size, it);
}
void ResizeEvent::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_unsigned("size", size);
}
void DemotePromoteEvent::encode(bufferlist& bl) const {
}
void DemotePromoteEvent::decode(__u8 version, bufferlist::const_iterator& it) {
}
void DemotePromoteEvent::dump(Formatter *f) const {
}
void UpdateFeaturesEvent::encode(bufferlist& bl) const {
OpEventBase::encode(bl);
using ceph::encode;
encode(features, bl);
encode(enabled, bl);
}
void UpdateFeaturesEvent::decode(__u8 version, bufferlist::const_iterator& it) {
OpEventBase::decode(version, it);
using ceph::decode;
decode(features, it);
decode(enabled, it);
}
void UpdateFeaturesEvent::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_unsigned("features", features);
f->dump_bool("enabled", enabled);
}
void MetadataSetEvent::encode(bufferlist& bl) const {
OpEventBase::encode(bl);
using ceph::encode;
encode(key, bl);
encode(value, bl);
}
void MetadataSetEvent::decode(__u8 version, bufferlist::const_iterator& it) {
OpEventBase::decode(version, it);
using ceph::decode;
decode(key, it);
decode(value, it);
}
void MetadataSetEvent::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_string("key", key);
f->dump_string("value", value);
}
void MetadataRemoveEvent::encode(bufferlist& bl) const {
OpEventBase::encode(bl);
using ceph::encode;
encode(key, bl);
}
void MetadataRemoveEvent::decode(__u8 version, bufferlist::const_iterator& it) {
OpEventBase::decode(version, it);
using ceph::decode;
decode(key, it);
}
void MetadataRemoveEvent::dump(Formatter *f) const {
OpEventBase::dump(f);
f->dump_string("key", key);
}
void UnknownEvent::encode(bufferlist& bl) const {
ceph_abort();
}
void UnknownEvent::decode(__u8 version, bufferlist::const_iterator& it) {
}
void UnknownEvent::dump(Formatter *f) const {
}
EventType EventEntry::get_event_type() const {
return boost::apply_visitor(GetTypeVisitor<EventType>(), event);
}
void EventEntry::encode(bufferlist& bl) const {
ENCODE_START(5, 1, bl);
boost::apply_visitor(EncodeVisitor(bl), event);
ENCODE_FINISH(bl);
encode_metadata(bl);
}
void EventEntry::decode(bufferlist::const_iterator& it) {
DECODE_START(1, it);
uint32_t event_type;
decode(event_type, it);
// select the correct payload variant based upon the encoded op
switch (event_type) {
case EVENT_TYPE_AIO_DISCARD:
event = AioDiscardEvent();
break;
case EVENT_TYPE_AIO_WRITE:
event = AioWriteEvent();
break;
case EVENT_TYPE_AIO_FLUSH:
event = AioFlushEvent();
break;
case EVENT_TYPE_OP_FINISH:
event = OpFinishEvent();
break;
case EVENT_TYPE_SNAP_CREATE:
event = SnapCreateEvent();
break;
case EVENT_TYPE_SNAP_REMOVE:
event = SnapRemoveEvent();
break;
case EVENT_TYPE_SNAP_RENAME:
event = SnapRenameEvent();
break;
case EVENT_TYPE_SNAP_PROTECT:
event = SnapProtectEvent();
break;
case EVENT_TYPE_SNAP_UNPROTECT:
event = SnapUnprotectEvent();
break;
case EVENT_TYPE_SNAP_ROLLBACK:
event = SnapRollbackEvent();
break;
case EVENT_TYPE_RENAME:
event = RenameEvent();
break;
case EVENT_TYPE_RESIZE:
event = ResizeEvent();
break;
case EVENT_TYPE_FLATTEN:
event = FlattenEvent();
break;
case EVENT_TYPE_DEMOTE_PROMOTE:
event = DemotePromoteEvent();
break;
case EVENT_TYPE_SNAP_LIMIT:
event = SnapLimitEvent();
break;
case EVENT_TYPE_UPDATE_FEATURES:
event = UpdateFeaturesEvent();
break;
case EVENT_TYPE_METADATA_SET:
event = MetadataSetEvent();
break;
case EVENT_TYPE_METADATA_REMOVE:
event = MetadataRemoveEvent();
break;
case EVENT_TYPE_AIO_WRITESAME:
event = AioWriteSameEvent();
break;
case EVENT_TYPE_AIO_COMPARE_AND_WRITE:
event = AioCompareAndWriteEvent();
break;
default:
event = UnknownEvent();
break;
}
boost::apply_visitor(DecodeVisitor(struct_v, it), event);
DECODE_FINISH(it);
if (struct_v >= 4) {
decode_metadata(it);
}
}
void EventEntry::dump(Formatter *f) const {
boost::apply_visitor(DumpVisitor(f, "event_type"), event);
f->dump_stream("timestamp") << timestamp;
}
void EventEntry::encode_metadata(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(timestamp, bl);
ENCODE_FINISH(bl);
}
void EventEntry::decode_metadata(bufferlist::const_iterator& it) {
DECODE_START(1, it);
decode(timestamp, it);
DECODE_FINISH(it);
}
void EventEntry::generate_test_instances(std::list<EventEntry *> &o) {
o.push_back(new EventEntry(AioDiscardEvent()));
o.push_back(new EventEntry(AioDiscardEvent(123, 345, 4096), utime_t(1, 1)));
bufferlist bl;
bl.append(std::string(32, '1'));
o.push_back(new EventEntry(AioWriteEvent()));
o.push_back(new EventEntry(AioWriteEvent(123, 456, bl), utime_t(1, 1)));
o.push_back(new EventEntry(AioFlushEvent()));
o.push_back(new EventEntry(OpFinishEvent(123, -1), utime_t(1, 1)));
o.push_back(new EventEntry(SnapCreateEvent(), utime_t(1, 1)));
o.push_back(new EventEntry(SnapCreateEvent(234, cls::rbd::UserSnapshotNamespace(), "snap"), utime_t(1, 1)));
o.push_back(new EventEntry(SnapRemoveEvent()));
o.push_back(new EventEntry(SnapRemoveEvent(345, cls::rbd::UserSnapshotNamespace(), "snap"), utime_t(1, 1)));
o.push_back(new EventEntry(SnapRenameEvent()));
o.push_back(new EventEntry(SnapRenameEvent(456, 1, "src snap", "dest snap"),
utime_t(1, 1)));
o.push_back(new EventEntry(SnapProtectEvent()));
o.push_back(new EventEntry(SnapProtectEvent(567, cls::rbd::UserSnapshotNamespace(), "snap"), utime_t(1, 1)));
o.push_back(new EventEntry(SnapUnprotectEvent()));
o.push_back(new EventEntry(SnapUnprotectEvent(678, cls::rbd::UserSnapshotNamespace(), "snap"), utime_t(1, 1)));
o.push_back(new EventEntry(SnapRollbackEvent()));
o.push_back(new EventEntry(SnapRollbackEvent(789, cls::rbd::UserSnapshotNamespace(), "snap"), utime_t(1, 1)));
o.push_back(new EventEntry(RenameEvent()));
o.push_back(new EventEntry(RenameEvent(890, "image name"), utime_t(1, 1)));
o.push_back(new EventEntry(ResizeEvent()));
o.push_back(new EventEntry(ResizeEvent(901, 1234), utime_t(1, 1)));
o.push_back(new EventEntry(FlattenEvent(123), utime_t(1, 1)));
o.push_back(new EventEntry(DemotePromoteEvent()));
o.push_back(new EventEntry(UpdateFeaturesEvent()));
o.push_back(new EventEntry(UpdateFeaturesEvent(123, 127, true), utime_t(1, 1)));
o.push_back(new EventEntry(MetadataSetEvent()));
o.push_back(new EventEntry(MetadataSetEvent(123, "key", "value"), utime_t(1, 1)));
o.push_back(new EventEntry(MetadataRemoveEvent()));
o.push_back(new EventEntry(MetadataRemoveEvent(123, "key"), utime_t(1, 1)));
}
// Journal Client
void ImageClientMeta::encode(bufferlist& bl) const {
using ceph::encode;
encode(tag_class, bl);
encode(resync_requested, bl);
}
void ImageClientMeta::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
decode(tag_class, it);
decode(resync_requested, it);
}
void ImageClientMeta::dump(Formatter *f) const {
f->dump_unsigned("tag_class", tag_class);
f->dump_bool("resync_requested", resync_requested);
}
void MirrorPeerSyncPoint::encode(bufferlist& bl) const {
using ceph::encode;
encode(snap_name, bl);
encode(from_snap_name, bl);
encode(object_number, bl);
encode(snap_namespace, bl);
}
void MirrorPeerSyncPoint::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
decode(snap_name, it);
decode(from_snap_name, it);
decode(object_number, it);
if (version >= 2) {
decode(snap_namespace, it);
}
}
void MirrorPeerSyncPoint::dump(Formatter *f) const {
f->dump_string("snap_name", snap_name);
f->dump_string("from_snap_name", from_snap_name);
if (object_number) {
f->dump_unsigned("object_number", *object_number);
}
snap_namespace.dump(f);
}
void MirrorPeerClientMeta::encode(bufferlist& bl) const {
using ceph::encode;
encode(image_id, bl);
encode(static_cast<uint32_t>(state), bl);
encode(sync_object_count, bl);
encode(static_cast<uint32_t>(sync_points.size()), bl);
for (auto &sync_point : sync_points) {
sync_point.encode(bl);
}
encode(snap_seqs, bl);
}
void MirrorPeerClientMeta::decode(__u8 version, bufferlist::const_iterator& it) {
using ceph::decode;
decode(image_id, it);
uint32_t decode_state;
decode(decode_state, it);
state = static_cast<MirrorPeerState>(decode_state);
decode(sync_object_count, it);
uint32_t sync_point_count;
decode(sync_point_count, it);
sync_points.resize(sync_point_count);
for (auto &sync_point : sync_points) {
sync_point.decode(version, it);
}
decode(snap_seqs, it);
}
void MirrorPeerClientMeta::dump(Formatter *f) const {
f->dump_string("image_id", image_id);
f->dump_stream("state") << state;
f->dump_unsigned("sync_object_count", sync_object_count);
f->open_array_section("sync_points");
for (auto &sync_point : sync_points) {
f->open_object_section("sync_point");
sync_point.dump(f);
f->close_section();
}
f->close_section();
f->open_array_section("snap_seqs");
for (auto &pair : snap_seqs) {
f->open_object_section("snap_seq");
f->dump_unsigned("local_snap_seq", pair.first);
f->dump_unsigned("peer_snap_seq", pair.second);
f->close_section();
}
f->close_section();
}
void CliClientMeta::encode(bufferlist& bl) const {
}
void CliClientMeta::decode(__u8 version, bufferlist::const_iterator& it) {
}
void CliClientMeta::dump(Formatter *f) const {
}
void UnknownClientMeta::encode(bufferlist& bl) const {
ceph_abort();
}
void UnknownClientMeta::decode(__u8 version, bufferlist::const_iterator& it) {
}
void UnknownClientMeta::dump(Formatter *f) const {
}
ClientMetaType ClientData::get_client_meta_type() const {
return boost::apply_visitor(GetTypeVisitor<ClientMetaType>(), client_meta);
}
void ClientData::encode(bufferlist& bl) const {
ENCODE_START(2, 1, bl);
boost::apply_visitor(EncodeVisitor(bl), client_meta);
ENCODE_FINISH(bl);
}
void ClientData::decode(bufferlist::const_iterator& it) {
DECODE_START(1, it);
uint32_t client_meta_type;
decode(client_meta_type, it);
// select the correct payload variant based upon the encoded op
switch (client_meta_type) {
case IMAGE_CLIENT_META_TYPE:
client_meta = ImageClientMeta();
break;
case MIRROR_PEER_CLIENT_META_TYPE:
client_meta = MirrorPeerClientMeta();
break;
case CLI_CLIENT_META_TYPE:
client_meta = CliClientMeta();
break;
default:
client_meta = UnknownClientMeta();
break;
}
boost::apply_visitor(DecodeVisitor(struct_v, it), client_meta);
DECODE_FINISH(it);
}
void ClientData::dump(Formatter *f) const {
boost::apply_visitor(DumpVisitor(f, "client_meta_type"), client_meta);
}
void ClientData::generate_test_instances(std::list<ClientData *> &o) {
o.push_back(new ClientData(ImageClientMeta()));
o.push_back(new ClientData(ImageClientMeta(123)));
o.push_back(new ClientData(MirrorPeerClientMeta()));
o.push_back(new ClientData(MirrorPeerClientMeta("image_id",
{{{}, "snap 2", "snap 1", 123}},
{{1, 2}, {3, 4}})));
o.push_back(new ClientData(CliClientMeta()));
}
// Journal Tag
void TagPredecessor::encode(bufferlist& bl) const {
using ceph::encode;
encode(mirror_uuid, bl);
encode(commit_valid, bl);
encode(tag_tid, bl);
encode(entry_tid, bl);
}
void TagPredecessor::decode(bufferlist::const_iterator& it) {
using ceph::decode;
decode(mirror_uuid, it);
decode(commit_valid, it);
decode(tag_tid, it);
decode(entry_tid, it);
}
void TagPredecessor::dump(Formatter *f) const {
f->dump_string("mirror_uuid", mirror_uuid);
f->dump_string("commit_valid", commit_valid ? "true" : "false");
f->dump_unsigned("tag_tid", tag_tid);
f->dump_unsigned("entry_tid", entry_tid);
}
void TagData::encode(bufferlist& bl) const {
using ceph::encode;
encode(mirror_uuid, bl);
predecessor.encode(bl);
}
void TagData::decode(bufferlist::const_iterator& it) {
using ceph::decode;
decode(mirror_uuid, it);
predecessor.decode(it);
}
void TagData::dump(Formatter *f) const {
f->dump_string("mirror_uuid", mirror_uuid);
f->open_object_section("predecessor");
predecessor.dump(f);
f->close_section();
}
void TagData::generate_test_instances(std::list<TagData *> &o) {
o.push_back(new TagData());
o.push_back(new TagData("mirror-uuid"));
o.push_back(new TagData("mirror-uuid", "remote-mirror-uuid", true, 123, 234));
}
std::ostream &operator<<(std::ostream &out, const EventType &type) {
using namespace librbd::journal;
switch (type) {
case EVENT_TYPE_AIO_DISCARD:
out << "AioDiscard";
break;
case EVENT_TYPE_AIO_WRITE:
out << "AioWrite";
break;
case EVENT_TYPE_AIO_FLUSH:
out << "AioFlush";
break;
case EVENT_TYPE_OP_FINISH:
out << "OpFinish";
break;
case EVENT_TYPE_SNAP_CREATE:
out << "SnapCreate";
break;
case EVENT_TYPE_SNAP_REMOVE:
out << "SnapRemove";
break;
case EVENT_TYPE_SNAP_RENAME:
out << "SnapRename";
break;
case EVENT_TYPE_SNAP_PROTECT:
out << "SnapProtect";
break;
case EVENT_TYPE_SNAP_UNPROTECT:
out << "SnapUnprotect";
break;
case EVENT_TYPE_SNAP_ROLLBACK:
out << "SnapRollback";
break;
case EVENT_TYPE_RENAME:
out << "Rename";
break;
case EVENT_TYPE_RESIZE:
out << "Resize";
break;
case EVENT_TYPE_FLATTEN:
out << "Flatten";
break;
case EVENT_TYPE_DEMOTE_PROMOTE:
out << "Demote/Promote";
break;
case EVENT_TYPE_SNAP_LIMIT:
out << "SnapLimit";
break;
case EVENT_TYPE_UPDATE_FEATURES:
out << "UpdateFeatures";
break;
case EVENT_TYPE_METADATA_SET:
out << "MetadataSet";
break;
case EVENT_TYPE_METADATA_REMOVE:
out << "MetadataRemove";
break;
case EVENT_TYPE_AIO_WRITESAME:
out << "AioWriteSame";
break;
case EVENT_TYPE_AIO_COMPARE_AND_WRITE:
out << "AioCompareAndWrite";
break;
default:
out << "Unknown (" << static_cast<uint32_t>(type) << ")";
break;
}
return out;
}
std::ostream &operator<<(std::ostream &out, const ClientMetaType &type) {
using namespace librbd::journal;
switch (type) {
case IMAGE_CLIENT_META_TYPE:
out << "Master Image";
break;
case MIRROR_PEER_CLIENT_META_TYPE:
out << "Mirror Peer";
break;
case CLI_CLIENT_META_TYPE:
out << "CLI Tool";
break;
default:
out << "Unknown (" << static_cast<uint32_t>(type) << ")";
break;
}
return out;
}
std::ostream &operator<<(std::ostream &out, const ImageClientMeta &meta) {
out << "[tag_class=" << meta.tag_class << "]";
return out;
}
std::ostream &operator<<(std::ostream &out, const MirrorPeerSyncPoint &sync) {
out << "[snap_name=" << sync.snap_name << ", "
<< "from_snap_name=" << sync.from_snap_name;
if (sync.object_number) {
out << ", " << *sync.object_number;
}
out << "]";
return out;
}
std::ostream &operator<<(std::ostream &out, const MirrorPeerState &state) {
switch (state) {
case MIRROR_PEER_STATE_SYNCING:
out << "Syncing";
break;
case MIRROR_PEER_STATE_REPLAYING:
out << "Replaying";
break;
default:
out << "Unknown (" << static_cast<uint32_t>(state) << ")";
break;
}
return out;
}
// Pretty-print a MirrorPeerClientMeta for logging: image id, state, sync
// object count, in-progress sync points and the local->peer snap-seq map.
std::ostream &operator<<(std::ostream &out, const MirrorPeerClientMeta &meta) {
  out << "[image_id=" << meta.image_id << ", "
      << "state=" << meta.state << ", "
      << "sync_object_count=" << meta.sync_object_count << ", "
      << "sync_points=[";
  std::string delimiter;
  for (auto &sync_point : meta.sync_points) {
    out << delimiter << "[" << sync_point << "]";
    delimiter = ", ";
  }
  out << "], snap_seqs=[";
  delimiter = "";
  for (auto &pair : meta.snap_seqs) {
    out << delimiter << "["
        << "local_snap_seq=" << pair.first << ", "
        // fixed: the missing '=' previously produced "peer_snap_seq<N>"
        << "peer_snap_seq=" << pair.second << "]";
    delimiter = ", ";
  }
  out << "]";
  return out;
}
std::ostream &operator<<(std::ostream &out, const TagPredecessor &predecessor) {
out << "["
<< "mirror_uuid=" << predecessor.mirror_uuid;
if (predecessor.commit_valid) {
out << ", "
<< "tag_tid=" << predecessor.tag_tid << ", "
<< "entry_tid=" << predecessor.entry_tid;
}
out << "]";
return out;
}
std::ostream &operator<<(std::ostream &out, const TagData &tag_data) {
out << "["
<< "mirror_uuid=" << tag_data.mirror_uuid << ", "
<< "predecessor=" << tag_data.predecessor
<< "]";
return out;
}
} // namespace journal
} // namespace librbd
| 24,589 | 24.69488 | 113 | cc |
null | ceph-main/src/librbd/journal/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_TYPES_H
#define CEPH_LIBRBD_JOURNAL_TYPES_H
#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include "include/types.h"
#include "include/utime.h"
#include "librbd/Types.h"
#include <iosfwd>
#include <list>
#include <boost/none.hpp>
#include <boost/optional.hpp>
#include <boost/variant.hpp>
#include <boost/mpl/vector.hpp>
namespace ceph {
class Formatter;
}
namespace librbd {
namespace journal {
// Discriminator values for journal event payloads.  Each value is encoded
// as a uint32 tag ahead of its payload (see EventEntry and the encode /
// decode visitors in Types.cc); the values are persisted, so never
// renumber or reuse them -- only append new ones.
enum EventType {
  EVENT_TYPE_AIO_DISCARD = 0,
  EVENT_TYPE_AIO_WRITE = 1,
  EVENT_TYPE_AIO_FLUSH = 2,
  EVENT_TYPE_OP_FINISH = 3,
  EVENT_TYPE_SNAP_CREATE = 4,
  EVENT_TYPE_SNAP_REMOVE = 5,
  EVENT_TYPE_SNAP_RENAME = 6,
  EVENT_TYPE_SNAP_PROTECT = 7,
  EVENT_TYPE_SNAP_UNPROTECT = 8,
  EVENT_TYPE_SNAP_ROLLBACK = 9,
  EVENT_TYPE_RENAME = 10,
  EVENT_TYPE_RESIZE = 11,
  EVENT_TYPE_FLATTEN = 12,
  EVENT_TYPE_DEMOTE_PROMOTE = 13,
  EVENT_TYPE_SNAP_LIMIT = 14,
  EVENT_TYPE_UPDATE_FEATURES = 15,
  EVENT_TYPE_METADATA_SET = 16,
  EVENT_TYPE_METADATA_REMOVE = 17,
  EVENT_TYPE_AIO_WRITESAME = 18,
  EVENT_TYPE_AIO_COMPARE_AND_WRITE = 19,
};
struct AioDiscardEvent {
static const EventType TYPE = EVENT_TYPE_AIO_DISCARD;
uint64_t offset = 0;
uint64_t length = 0;
uint32_t discard_granularity_bytes = 0;
AioDiscardEvent() {
}
AioDiscardEvent(uint64_t _offset, uint64_t _length,
uint32_t discard_granularity_bytes)
: offset(_offset), length(_length),
discard_granularity_bytes(discard_granularity_bytes) {
}
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
struct AioWriteEvent {
static const EventType TYPE = EVENT_TYPE_AIO_WRITE;
uint64_t offset;
uint64_t length;
bufferlist data;
static uint32_t get_fixed_size();
AioWriteEvent() : offset(0), length(0) {
}
AioWriteEvent(uint64_t _offset, uint64_t _length, const bufferlist &_data)
: offset(_offset), length(_length), data(_data) {
}
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
struct AioWriteSameEvent {
static const EventType TYPE = EVENT_TYPE_AIO_WRITESAME;
uint64_t offset;
uint64_t length;
bufferlist data;
AioWriteSameEvent() : offset(0), length(0) {
}
AioWriteSameEvent(uint64_t _offset, uint64_t _length,
const bufferlist &_data)
: offset(_offset), length(_length), data(_data) {
}
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
struct AioCompareAndWriteEvent {
static const EventType TYPE = EVENT_TYPE_AIO_COMPARE_AND_WRITE;
uint64_t offset;
uint64_t length;
bufferlist cmp_data;
bufferlist write_data;
static uint32_t get_fixed_size();
AioCompareAndWriteEvent() : offset(0), length(0) {
}
AioCompareAndWriteEvent(uint64_t _offset, uint64_t _length,
const bufferlist &_cmp_data, const bufferlist &_write_data)
: offset(_offset), length(_length), cmp_data(_cmp_data), write_data(_write_data) {
}
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
struct AioFlushEvent {
static const EventType TYPE = EVENT_TYPE_AIO_FLUSH;
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
struct OpEventBase {
uint64_t op_tid;
protected:
OpEventBase() : op_tid(0) {
}
OpEventBase(uint64_t op_tid) : op_tid(op_tid) {
}
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
struct OpFinishEvent : public OpEventBase {
static const EventType TYPE = EVENT_TYPE_OP_FINISH;
int r;
OpFinishEvent() : r(0) {
}
OpFinishEvent(uint64_t op_tid, int r) : OpEventBase(op_tid), r(r) {
}
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
struct SnapEventBase : public OpEventBase {
cls::rbd::SnapshotNamespace snap_namespace;
std::string snap_name;
protected:
SnapEventBase() {
}
SnapEventBase(uint64_t op_tid, const cls::rbd::SnapshotNamespace& _snap_namespace,
const std::string &_snap_name)
: OpEventBase(op_tid),
snap_namespace(_snap_namespace),
snap_name(_snap_name) {
}
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
/// Journal record for snapshot creation.
struct SnapCreateEvent : public SnapEventBase {
  static const EventType TYPE = EVENT_TYPE_SNAP_CREATE;

  SnapCreateEvent() {
  }
  SnapCreateEvent(uint64_t op_tid, const cls::rbd::SnapshotNamespace& snap_namespace,
                  const std::string &snap_name)
    : SnapEventBase(op_tid, snap_namespace, snap_name) {
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Journal record for snapshot removal.
struct SnapRemoveEvent : public SnapEventBase {
  static const EventType TYPE = EVENT_TYPE_SNAP_REMOVE;

  SnapRemoveEvent() {
  }
  SnapRemoveEvent(uint64_t op_tid, const cls::rbd::SnapshotNamespace& snap_namespace,
                  const std::string &snap_name)
    : SnapEventBase(op_tid, snap_namespace, snap_name) {
  }

  // base-class wire format is sufficient; re-export it
  using SnapEventBase::encode;
  using SnapEventBase::decode;
  using SnapEventBase::dump;
};

/// Journal record for renaming a snapshot, identified by its snap id.
struct SnapRenameEvent : public OpEventBase {
  static const EventType TYPE = EVENT_TYPE_SNAP_RENAME;

  uint64_t snap_id;            // id of the snapshot being renamed
  std::string src_snap_name;   // name before the rename
  std::string dst_snap_name;   // name after the rename

  SnapRenameEvent() : snap_id(CEPH_NOSNAP) {
  }
  SnapRenameEvent(uint64_t op_tid, uint64_t src_snap_id,
                  const std::string &src_snap_name,
                  const std::string &dest_snap_name)
    : OpEventBase(op_tid),
      snap_id(src_snap_id),
      src_snap_name(src_snap_name),
      dst_snap_name(dest_snap_name) {
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Journal record for protecting a snapshot.
struct SnapProtectEvent : public SnapEventBase {
  static const EventType TYPE = EVENT_TYPE_SNAP_PROTECT;

  SnapProtectEvent() {
  }
  SnapProtectEvent(uint64_t op_tid, const cls::rbd::SnapshotNamespace& snap_namespace,
                   const std::string &snap_name)
    : SnapEventBase(op_tid, snap_namespace, snap_name) {
  }

  using SnapEventBase::encode;
  using SnapEventBase::decode;
  using SnapEventBase::dump;
};

/// Journal record for unprotecting a snapshot.
struct SnapUnprotectEvent : public SnapEventBase {
  static const EventType TYPE = EVENT_TYPE_SNAP_UNPROTECT;

  SnapUnprotectEvent() {
  }
  SnapUnprotectEvent(uint64_t op_tid, const cls::rbd::SnapshotNamespace &snap_namespace,
                     const std::string &snap_name)
    : SnapEventBase(op_tid, snap_namespace, snap_name) {
  }

  using SnapEventBase::encode;
  using SnapEventBase::decode;
  using SnapEventBase::dump;
};
struct SnapLimitEvent : public OpEventBase {
static const EventType TYPE = EVENT_TYPE_SNAP_LIMIT;
uint64_t limit;
SnapLimitEvent() {
}
SnapLimitEvent(uint64_t op_tid, const uint64_t _limit)
: OpEventBase(op_tid), limit(_limit) {
}
void encode(bufferlist& bl) const;
void decode(__u8 version, bufferlist::const_iterator& it);
void dump(Formatter *f) const;
};
/// Journal record for rolling the image back to a snapshot.
struct SnapRollbackEvent : public SnapEventBase {
  static const EventType TYPE = EVENT_TYPE_SNAP_ROLLBACK;

  SnapRollbackEvent() {
  }
  SnapRollbackEvent(uint64_t op_tid, const cls::rbd::SnapshotNamespace& snap_namespace,
                    const std::string &snap_name)
    : SnapEventBase(op_tid, snap_namespace, snap_name) {
  }

  // base-class wire format is sufficient; re-export it
  using SnapEventBase::encode;
  using SnapEventBase::decode;
  using SnapEventBase::dump;
};

/// Journal record for renaming the image itself.
struct RenameEvent : public OpEventBase {
  static const EventType TYPE = EVENT_TYPE_RENAME;

  std::string image_name;  // new image name

  RenameEvent() {
  }
  RenameEvent(uint64_t op_tid, const std::string &_image_name)
    : OpEventBase(op_tid), image_name(_image_name) {
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Journal record for resizing the image.
struct ResizeEvent : public OpEventBase {
  static const EventType TYPE = EVENT_TYPE_RESIZE;

  uint64_t size;  // new image size in bytes

  ResizeEvent() : size(0) {
  }
  ResizeEvent(uint64_t op_tid, uint64_t _size)
    : OpEventBase(op_tid), size(_size) {
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Journal record for flattening a cloned image (no payload beyond op_tid).
struct FlattenEvent : public OpEventBase {
  static const EventType TYPE = EVENT_TYPE_FLATTEN;

  FlattenEvent() {
  }
  FlattenEvent(uint64_t op_tid) : OpEventBase(op_tid) {
  }

  using OpEventBase::encode;
  using OpEventBase::decode;
  using OpEventBase::dump;
};
/// Journal record marking a mirroring demotion/promotion boundary.
struct DemotePromoteEvent {
  static const EventType TYPE = static_cast<EventType>(
    EVENT_TYPE_DEMOTE_PROMOTE);

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Journal record for enabling/disabling image features.
struct UpdateFeaturesEvent : public OpEventBase {
  static const EventType TYPE = EVENT_TYPE_UPDATE_FEATURES;

  uint64_t features;  // feature bitmask being toggled
  bool enabled;       // true to enable, false to disable

  UpdateFeaturesEvent() : features(0), enabled(false) {
  }
  UpdateFeaturesEvent(uint64_t op_tid, uint64_t _features, bool _enabled)
    : OpEventBase(op_tid), features(_features), enabled(_enabled) {
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Journal record for setting an image metadata key/value pair.
struct MetadataSetEvent : public OpEventBase {
  static const EventType TYPE = EVENT_TYPE_METADATA_SET;

  std::string key;
  std::string value;

  MetadataSetEvent() {
  }
  MetadataSetEvent(uint64_t op_tid, const std::string &_key, const std::string &_value)
    : OpEventBase(op_tid), key(_key), value(_value) {
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Journal record for removing an image metadata key.
struct MetadataRemoveEvent : public OpEventBase {
  static const EventType TYPE = EVENT_TYPE_METADATA_REMOVE;

  std::string key;

  MetadataRemoveEvent() {
  }
  MetadataRemoveEvent(uint64_t op_tid, const std::string &_key)
    : OpEventBase(op_tid), key(_key) {
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Fallback for event types this build does not recognize.
struct UnknownEvent {
  static const EventType TYPE = static_cast<EventType>(-1);

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};
/// Compile-time list of all concrete event types; used to build the
/// Event variant below.
typedef boost::mpl::vector<AioDiscardEvent,
                           AioWriteEvent,
                           AioFlushEvent,
                           OpFinishEvent,
                           SnapCreateEvent,
                           SnapRemoveEvent,
                           SnapRenameEvent,
                           SnapProtectEvent,
                           SnapUnprotectEvent,
                           SnapRollbackEvent,
                           RenameEvent,
                           ResizeEvent,
                           FlattenEvent,
                           DemotePromoteEvent,
                           SnapLimitEvent,
                           UpdateFeaturesEvent,
                           MetadataSetEvent,
                           MetadataRemoveEvent,
                           AioWriteSameEvent,
                           AioCompareAndWriteEvent,
                           UnknownEvent> EventVector;
typedef boost::make_variant_over<EventVector>::type Event;

/// A single journal entry: one Event plus entry metadata (timestamp).
/// Handles the versioned wire encoding of both.
struct EventEntry {
  // upper bound of the fixed-size (non-payload) portion of an entry
  static uint32_t get_fixed_size() {
    return EVENT_FIXED_SIZE + METADATA_FIXED_SIZE;
  }

  EventEntry() : event(UnknownEvent()) {
  }
  EventEntry(const Event &_event, const utime_t &_timestamp = utime_t())
    : event(_event), timestamp(_timestamp) {
  }

  Event event;
  utime_t timestamp;

  EventType get_event_type() const;

  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;

  static void generate_test_instances(std::list<EventEntry *> &o);

private:
  static const uint32_t EVENT_FIXED_SIZE = 14; /// version encoding, type
  static const uint32_t METADATA_FIXED_SIZE = 14; /// version encoding, timestamp

  void encode_metadata(bufferlist& bl) const;
  void decode_metadata(bufferlist::const_iterator& it);
};
// Journal Client data structures

/// Discriminator for the ClientMeta variant stored in a journal client's
/// registration data.
enum ClientMetaType {
  IMAGE_CLIENT_META_TYPE = 0,
  MIRROR_PEER_CLIENT_META_TYPE = 1,
  CLI_CLIENT_META_TYPE = 2
};

/// Metadata for the local image's own journal client.
struct ImageClientMeta {
  static const ClientMetaType TYPE = IMAGE_CLIENT_META_TYPE;

  uint64_t tag_class = 0;         // journal tag class
  bool resync_requested = false;  // set when a mirror resync was requested

  ImageClientMeta() {
  }
  ImageClientMeta(uint64_t tag_class) : tag_class(tag_class) {
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Snapshot-based sync position for a mirror peer.
struct MirrorPeerSyncPoint {
  typedef boost::optional<uint64_t> ObjectNumber;

  cls::rbd::SnapshotNamespace snap_namespace;
  std::string snap_name;
  std::string from_snap_name;
  ObjectNumber object_number;  // optional object-number marker for the sync

  // delegating constructors progressively fill in defaults
  MirrorPeerSyncPoint() : MirrorPeerSyncPoint({}, "", "", boost::none) {
  }
  MirrorPeerSyncPoint(const cls::rbd::SnapshotNamespace& snap_namespace,
                      const std::string &snap_name,
                      const ObjectNumber &object_number)
    : MirrorPeerSyncPoint(snap_namespace, snap_name, "", object_number) {
  }
  MirrorPeerSyncPoint(const cls::rbd::SnapshotNamespace& snap_namespace,
                      const std::string &snap_name,
                      const std::string &from_snap_name,
                      const ObjectNumber &object_number)
    : snap_namespace(snap_namespace), snap_name(snap_name),
      from_snap_name(from_snap_name), object_number(object_number) {
  }

  inline bool operator==(const MirrorPeerSyncPoint &sync) const {
    return (snap_name == sync.snap_name &&
            from_snap_name == sync.from_snap_name &&
            object_number == sync.object_number &&
            snap_namespace == sync.snap_namespace);
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Replay state of a mirror peer client.
enum MirrorPeerState {
  MIRROR_PEER_STATE_SYNCING,
  MIRROR_PEER_STATE_REPLAYING
};
/// Metadata for a remote mirror-peer journal client.
struct MirrorPeerClientMeta {
  typedef std::list<MirrorPeerSyncPoint> SyncPoints;

  static const ClientMetaType TYPE = MIRROR_PEER_CLIENT_META_TYPE;

  std::string image_id;
  MirrorPeerState state = MIRROR_PEER_STATE_SYNCING; ///< replay state
  uint64_t sync_object_count = 0; ///< maximum number of objects ever sync'ed
  SyncPoints sync_points; ///< max two in-use snapshots for sync
  SnapSeqs snap_seqs; ///< local to peer snap seq mapping

  MirrorPeerClientMeta() {
  }
  MirrorPeerClientMeta(const std::string &image_id,
                       const SyncPoints &sync_points = SyncPoints(),
                       const SnapSeqs &snap_seqs = SnapSeqs())
    : image_id(image_id), sync_points(sync_points), snap_seqs(snap_seqs) {
  }

  inline bool operator==(const MirrorPeerClientMeta &meta) const {
    return (image_id == meta.image_id &&
            state == meta.state &&
            sync_object_count == meta.sync_object_count &&
            sync_points == meta.sync_points &&
            snap_seqs == meta.snap_seqs);
  }

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Metadata for a CLI-registered journal client (no payload).
struct CliClientMeta {
  static const ClientMetaType TYPE = CLI_CLIENT_META_TYPE;

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Fallback for client meta types this build does not recognize.
struct UnknownClientMeta {
  static const ClientMetaType TYPE = static_cast<ClientMetaType>(-1);

  void encode(bufferlist& bl) const;
  void decode(__u8 version, bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Variant holding whichever client meta type was registered.
typedef boost::variant<ImageClientMeta,
                       MirrorPeerClientMeta,
                       CliClientMeta,
                       UnknownClientMeta> ClientMeta;

/// Wrapper handling versioned encode/decode of the ClientMeta variant.
struct ClientData {
  ClientData() {
  }
  ClientData(const ClientMeta &client_meta) : client_meta(client_meta) {
  }

  ClientMeta client_meta;

  ClientMetaType get_client_meta_type() const;

  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;

  static void generate_test_instances(std::list<ClientData *> &o);
};
// Journal Tag data structures

/// Reference to the last committed record of the preceding journal tag.
struct TagPredecessor {
  std::string mirror_uuid; // empty if local
  bool commit_valid = false;  // whether tag_tid/entry_tid below are meaningful
  uint64_t tag_tid = 0;
  uint64_t entry_tid = 0;

  TagPredecessor() {
  }
  TagPredecessor(const std::string &mirror_uuid, bool commit_valid,
                 uint64_t tag_tid, uint64_t entry_tid)
    : mirror_uuid(mirror_uuid), commit_valid(commit_valid), tag_tid(tag_tid),
      entry_tid(entry_tid) {
  }

  inline bool operator==(const TagPredecessor &rhs) const {
    return (mirror_uuid == rhs.mirror_uuid &&
            commit_valid == rhs.commit_valid &&
            tag_tid == rhs.tag_tid &&
            entry_tid == rhs.entry_tid);
  }

  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
};

/// Payload stored with each journal tag: the current owner plus a link to
/// the predecessor tag's last committed position.
struct TagData {
  // owner of the tag (exclusive lock epoch)
  std::string mirror_uuid; // empty if local

  // mapping to last committed record of previous tag
  TagPredecessor predecessor;

  TagData() {
  }
  TagData(const std::string &mirror_uuid) : mirror_uuid(mirror_uuid) {
  }
  TagData(const std::string &mirror_uuid,
          const std::string &predecessor_mirror_uuid,
          bool predecessor_commit_valid,
          uint64_t predecessor_tag_tid, uint64_t predecessor_entry_tid)
    : mirror_uuid(mirror_uuid),
      predecessor(predecessor_mirror_uuid, predecessor_commit_valid,
                  predecessor_tag_tid, predecessor_entry_tid) {
  }

  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;

  static void generate_test_instances(std::list<TagData *> &o);
};
// stream operators for logging/debug output of the journal types
std::ostream &operator<<(std::ostream &out, const EventType &type);
std::ostream &operator<<(std::ostream &out, const ClientMetaType &type);
std::ostream &operator<<(std::ostream &out, const ImageClientMeta &meta);
std::ostream &operator<<(std::ostream &out, const MirrorPeerSyncPoint &sync);
std::ostream &operator<<(std::ostream &out, const MirrorPeerState &meta);
std::ostream &operator<<(std::ostream &out, const MirrorPeerClientMeta &meta);
std::ostream &operator<<(std::ostream &out, const TagPredecessor &predecessor);
std::ostream &operator<<(std::ostream &out, const TagData &tag_data);

/// Callback interface for journal state-change notifications.
struct Listener {
  virtual ~Listener() {
  }

  /// invoked when journal close is requested
  virtual void handle_close() = 0;

  /// invoked when journal is promoted to primary
  virtual void handle_promoted() = 0;

  /// invoked when journal resync is requested
  virtual void handle_resync() = 0;
};

// register the standard encode/decode wrappers for the versioned types
WRITE_CLASS_ENCODER(EventEntry);
WRITE_CLASS_ENCODER(ClientData);
WRITE_CLASS_ENCODER(TagData);
} // namespace journal
} // namespace librbd
#endif // CEPH_LIBRBD_JOURNAL_TYPES_H
| 19,656 | 27.654519 | 88 | h |
null | ceph-main/src/librbd/journal/Utils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/journal/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/journal/Types.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::journal::"
namespace librbd {
namespace journal {
namespace util {
// Decode a serialized TagData payload from the supplied iterator.
// Returns 0 on success or -EBADMSG when the buffer is malformed.
int C_DecodeTag::decode(bufferlist::const_iterator *it, TagData *tag_data) {
  using ceph::decode;
  try {
    decode(*tag_data, *it);
  } catch (const buffer::error &) {
    return -EBADMSG;
  }
  return 0;
}
// Invoked (via complete()) once the tag allocation/retrieval op finishes:
// publishes the tag's tid and decoded payload to the caller-provided
// outputs under the caller's lock. Returns the result passed to on_finish.
int C_DecodeTag::process(int r) {
  if (r < 0) {
    lderr(cct) << "C_DecodeTag: " << this << " " << __func__ << ": "
               << "failed to allocate tag: " << cpp_strerror(r)
               << dendl;
    return r;
  }

  // guard the shared output locations while updating them
  std::lock_guard locker{*lock};
  *tag_tid = tag.tid;

  auto data_it = tag.data.cbegin();
  r = decode(&data_it, tag_data);
  if (r < 0) {
    lderr(cct) << "C_DecodeTag: " << this << " " << __func__ << ": "
               << "failed to decode allocated tag" << dendl;
    return r;
  }

  ldout(cct, 20) << "C_DecodeTag: " << this << " " << __func__ << ": "
                 << "allocated journal tag: "
                 << "tid=" << tag.tid << ", "
                 << "data=" << *tag_data << dendl;
  return 0;
}
// Invoked (via complete()) once the tag-list retrieval finishes: decodes the
// most recent (last) tag in the list and publishes its tid and payload under
// the caller's lock. Fails with -ENOENT when no tags were returned.
int C_DecodeTags::process(int r) {
  if (r < 0) {
    lderr(cct) << "C_DecodeTags: " << this << " " << __func__ << ": "
               << "failed to retrieve journal tags: " << cpp_strerror(r)
               << dendl;
    return r;
  }

  if (tags.empty()) {
    lderr(cct) << "C_DecodeTags: " << this << " " << __func__ << ": "
               << "no journal tags retrieved" << dendl;
    return -ENOENT;
  }

  // guard the shared output locations while updating them
  std::lock_guard locker{*lock};
  *tag_tid = tags.back().tid;
  auto data_it = tags.back().data.cbegin();
  r = C_DecodeTag::decode(&data_it, tag_data);
  if (r < 0) {
    lderr(cct) << "C_DecodeTags: " << this << " " << __func__ << ": "
               << "failed to decode journal tag" << dendl;
    return r;
  }

  ldout(cct, 20) << "C_DecodeTags: " << this << " " << __func__ << ": "
                 << "most recent journal tag: "
                 << "tid=" << *tag_tid << ", "
                 << "data=" << *tag_data << dendl;
  return 0;
}
} // namespace util
} // namespace journal
} // namespace librbd
| 2,361 | 26.149425 | 76 | cc |
null | ceph-main/src/librbd/journal/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_UTILS_H
#define CEPH_LIBRBD_JOURNAL_UTILS_H
#include "include/common_fwd.h"
#include "include/int_types.h"
#include "include/Context.h"
#include "cls/journal/cls_journal_types.h"
#include <list>
namespace librbd {
namespace journal {
struct TagData;
namespace util {
/// Completion that decodes a single journal tag and stores its tid and
/// decoded TagData into caller-provided output locations (guarded by lock).
struct C_DecodeTag : public Context {
  CephContext *cct;
  ceph::mutex *lock;    // guards *tag_tid and *tag_data
  uint64_t *tag_tid;    // out: tid of the decoded tag
  TagData *tag_data;    // out: decoded tag payload
  Context *on_finish;   // user callback fired with the decode result

  cls::journal::Tag tag;  // filled in by the tag retrieval/allocation op

  C_DecodeTag(CephContext *cct, ceph::mutex *lock, uint64_t *tag_tid,
              TagData *tag_data, Context *on_finish)
    : cct(cct), lock(lock), tag_tid(tag_tid), tag_data(tag_data),
      on_finish(on_finish) {
  }

  // Decode first, then forward the result to on_finish;
  // Context::complete(0) then invokes finish() and deletes this.
  void complete(int r) override {
    on_finish->complete(process(r));
    Context::complete(0);
  }
  // intentionally empty: all work happens in complete()/process()
  void finish(int r) override {
  }

  // Publish tag.tid and the decoded tag data; returns 0 or an error code.
  int process(int r);

  // Decode a TagData payload; returns -EBADMSG on malformed input.
  static int decode(bufferlist::const_iterator *it, TagData *tag_data);
};
/// Completion that decodes the most recent tag from a retrieved tag list and
/// stores its tid and decoded TagData into caller-provided outputs.
struct C_DecodeTags : public Context {
  typedef std::list<cls::journal::Tag> Tags;

  CephContext *cct;
  ceph::mutex *lock;    // guards *tag_tid and *tag_data
  uint64_t *tag_tid;    // out: tid of the most recent tag
  TagData *tag_data;    // out: decoded payload of the most recent tag
  Context *on_finish;   // user callback fired with the decode result

  Tags tags;  // filled in by the tag-list retrieval op

  C_DecodeTags(CephContext *cct, ceph::mutex *lock, uint64_t *tag_tid,
               TagData *tag_data, Context *on_finish)
    : cct(cct), lock(lock), tag_tid(tag_tid), tag_data(tag_data),
      on_finish(on_finish) {
  }

  // Decode first, then forward the result to on_finish;
  // Context::complete(0) then invokes finish() and deletes this.
  void complete(int r) override {
    on_finish->complete(process(r));
    Context::complete(0);
  }
  // intentionally empty: all work happens in complete()/process()
  void finish(int r) override {
  }

  // Publish the last tag's tid and decoded data; returns 0 or an error code.
  int process(int r);
};
} // namespace util
} // namespace journal
} // namespace librbd
#endif // CEPH_LIBRBD_JOURNAL_UTILS_H
| 1,729 | 20.358025 | 71 | h |
null | ceph-main/src/librbd/managed_lock/AcquireRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/managed_lock/AcquireRequest.h"
#include "librbd/Watcher.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/lock/cls_lock_types.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/stringify.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/managed_lock/BreakRequest.h"
#include "librbd/managed_lock/GetLockerRequest.h"
#include "librbd/managed_lock/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::managed_lock::AcquireRequest: " << this \
<< " " << __func__ << ": "
using std::string;
namespace librbd {
using librbd::util::detail::C_AsyncCallback;
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
namespace managed_lock {
/// Factory: allocate a new AcquireRequest (the request deletes itself once
/// on_finish has been completed).
template <typename I>
AcquireRequest<I>* AcquireRequest<I>::create(librados::IoCtx& ioctx,
                                             Watcher *watcher,
                                             AsioEngine& asio_engine,
                                             const string& oid,
                                             const string& cookie,
                                             bool exclusive,
                                             bool blocklist_on_break_lock,
                                             uint32_t blocklist_expire_seconds,
                                             Context *on_finish) {
  return new AcquireRequest(ioctx, watcher, asio_engine, oid, cookie,
                            exclusive, blocklist_on_break_lock,
                            blocklist_expire_seconds, on_finish);
}
// Construct the request. on_finish is wrapped in C_AsyncCallback so the
// caller's completion fires from the AsioEngine work queue rather than
// directly from a librados callback thread.
template <typename I>
AcquireRequest<I>::AcquireRequest(librados::IoCtx& ioctx, Watcher *watcher,
                                  AsioEngine& asio_engine,
                                  const string& oid,
                                  const string& cookie, bool exclusive,
                                  bool blocklist_on_break_lock,
                                  uint32_t blocklist_expire_seconds,
                                  Context *on_finish)
  : m_ioctx(ioctx), m_watcher(watcher),
    m_cct(reinterpret_cast<CephContext *>(m_ioctx.cct())),
    m_asio_engine(asio_engine), m_oid(oid), m_cookie(cookie),
    m_exclusive(exclusive),
    m_blocklist_on_break_lock(blocklist_on_break_lock),
    m_blocklist_expire_seconds(blocklist_expire_seconds),
    m_on_finish(new C_AsyncCallback<asio::ContextWQ>(
      asio_engine.get_work_queue(), on_finish)) {
}

template <typename I>
AcquireRequest<I>::~AcquireRequest() {
}
// Entry point: start by discovering any existing lock owner.
template <typename I>
void AcquireRequest<I>::send() {
  send_get_locker();
}
// Query the object's current lock owner (if any) before attempting to
// acquire the lock ourselves.
template <typename I>
void AcquireRequest<I>::send_get_locker() {
  ldout(m_cct, 10) << dendl;

  auto on_locker = create_context_callback<
    AcquireRequest<I>, &AcquireRequest<I>::handle_get_locker>(this);
  GetLockerRequest<I>::create(m_ioctx, m_oid, m_exclusive, &m_locker,
                              on_locker)->send();
}
// Process the locker query: continue to the lock attempt when no owner (or
// a retrievable owner) exists; bail out on incompatible locks or errors.
template <typename I>
void AcquireRequest<I>::handle_get_locker(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r == -ENOENT) {
    ldout(m_cct, 20) << "no lockers detected" << dendl;
    m_locker = {};
  } else if (r == -EBUSY) {
    // a lock of an incompatible type is held on the object
    ldout(m_cct, 5) << "incompatible lock detected" << dendl;
    finish(r);
    return;
  } else if (r < 0) {
    lderr(m_cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  send_lock();
}
// Attempt to take the cls_lock on the object using this client's cookie and
// the shared watcher lock tag (exclusive or shared per m_exclusive).
template <typename I>
void AcquireRequest<I>::send_lock() {
  ldout(m_cct, 10) << "entity=client." << m_ioctx.get_instance_id() << ", "
                   << "cookie=" << m_cookie << dendl;

  librados::ObjectWriteOperation op;
  rados::cls::lock::lock(&op, RBD_LOCK_NAME,
                         m_exclusive ? ClsLockType::EXCLUSIVE : ClsLockType::SHARED, m_cookie,
                         util::get_watcher_lock_tag(), "", utime_t(), 0);

  using klass = AcquireRequest;
  librados::AioCompletion *rados_completion =
    create_rados_callback<klass, &klass::handle_lock>(this);
  // aio_operate only fails on programming errors here, hence the assert
  int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Process the lock attempt: success finishes the request; -EBUSY either
// triggers a locker refresh (when we had no owner snapshot) or a break-lock
// attempt; any other error aborts.
template <typename I>
void AcquireRequest<I>::handle_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r == 0) {
    finish(0);
    return;
  } else if (r == -EBUSY && m_locker.cookie.empty()) {
    // raced with another client acquiring the lock after our query showed
    // no owner -- re-query before deciding whether to break the lock
    ldout(m_cct, 5) << "already locked, refreshing locker" << dendl;
    send_get_locker();
    return;
  } else if (r != -EBUSY) {
    lderr(m_cct) << "failed to lock: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  // -EBUSY with a known owner: attempt to break the existing lock
  send_break_lock();
}
// Attempt to break the currently-held lock (optionally blocklisting the
// stale owner) so that acquisition can be retried.
template <typename I>
void AcquireRequest<I>::send_break_lock() {
  ldout(m_cct, 10) << dendl;

  auto on_break = create_context_callback<
    AcquireRequest<I>, &AcquireRequest<I>::handle_break_lock>(this);
  BreakRequest<I>::create(
    m_ioctx, m_asio_engine, m_oid, m_locker, m_exclusive,
    m_blocklist_on_break_lock, m_blocklist_expire_seconds, false,
    on_break)->send();
}
// Process the break-lock attempt: -EAGAIN means the owner is still alive so
// the lock must not be stolen; on success clear the stale locker record and
// retry acquisition.
template <typename I>
void AcquireRequest<I>::handle_break_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r == -EAGAIN) {
    ldout(m_cct, 5) << "lock owner is still alive" << dendl;
    finish(r);
    return;
  } else if (r < 0) {
    lderr(m_cct) << "failed to break lock : " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  m_locker = {};
  send_lock();
}
// Complete the request via the async-wrapped user callback, then
// self-destruct (the request owns its own lifetime).
template <typename I>
void AcquireRequest<I>::finish(int r) {
  m_on_finish->complete(r);
  delete this;
}
} // namespace managed_lock
} // namespace librbd
template class librbd::managed_lock::AcquireRequest<librbd::ImageCtx>;
| 5,791 | 30.308108 | 94 | cc |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.