ceph-main/src/librbd/managed_lock/AcquireRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_ACQUIRE_REQUEST_H
#define CEPH_LIBRBD_MANAGED_LOCK_ACQUIRE_REQUEST_H
#include "include/rados/librados.hpp"
#include "include/int_types.h"
#include "include/buffer.h"
#include "msg/msg_types.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/watcher/Types.h"
#include <string>
class Context;
namespace librbd {
class AsioEngine;
class Watcher;
namespace managed_lock {
template <typename ImageCtxT>
class AcquireRequest {
private:
typedef watcher::Traits<ImageCtxT> TypeTraits;
typedef typename TypeTraits::Watcher Watcher;
public:
static AcquireRequest* create(librados::IoCtx& ioctx, Watcher *watcher,
AsioEngine& asio_engine,
const std::string& oid,
const std::string& cookie,
bool exclusive,
bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds,
Context *on_finish);
~AcquireRequest();
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_LOCKER
* | ^
* | . (EBUSY && no cached locker)
* | .
* | . (EBUSY && cached locker)
* \--> LOCK_IMAGE * * * * * * * * > BREAK_LOCK . . . . .
* | ^ | .
* | | | (success) .
* | \-------------------------/ .
* v .
* <finish> < . . . . . . . . . . . . . . . . . . .
*
* @endverbatim
*/
AcquireRequest(librados::IoCtx& ioctx, Watcher *watcher,
AsioEngine& asio_engine, const std::string& oid,
const std::string& cookie, bool exclusive,
bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds, Context *on_finish);
librados::IoCtx& m_ioctx;
Watcher *m_watcher;
CephContext *m_cct;
AsioEngine& m_asio_engine;
std::string m_oid;
std::string m_cookie;
bool m_exclusive;
bool m_blocklist_on_break_lock;
uint32_t m_blocklist_expire_seconds;
Context *m_on_finish;
bufferlist m_out_bl;
Locker m_locker;
void send_get_locker();
void handle_get_locker(int r);
void send_lock();
void handle_lock(int r);
void send_break_lock();
void handle_break_lock(int r);
void finish(int r);
};
} // namespace managed_lock
} // namespace librbd
#endif // CEPH_LIBRBD_MANAGED_LOCK_ACQUIRE_REQUEST_H
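
A minimal usage sketch of the request above (illustrative only, not part of the tree). It assumes an open librados IoCtx, a librbd::Watcher and an AsioEngine are already at hand, that watcher::Traits<librbd::ImageCtx>::Watcher resolves to librbd::Watcher, and that the usual librbd::ImageCtx instantiation is linked in; the object ID, cookie and expiry values are hypothetical.

#include "include/Context.h"
#include "librbd/ImageCtx.h"
#include "librbd/managed_lock/AcquireRequest.h"

void acquire_lock_example(librados::IoCtx& ioctx, librbd::Watcher* watcher,
                          librbd::AsioEngine& asio_engine, Context* on_finish) {
  auto req = librbd::managed_lock::AcquireRequest<librbd::ImageCtx>::create(
    ioctx, watcher, asio_engine,
    "rbd_header.1234",  // hypothetical lock object
    "auto 94557022",    // cookie format from managed_lock/Utils.cc encode_lock_cookie()
    true,               // exclusive
    true,               // blocklist_on_break_lock
    30,                 // blocklist_expire_seconds
    on_finish);
  req->send();          // on_finish->complete(r) is invoked when the state machine ends
}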

ceph-main/src/librbd/managed_lock/BreakRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/managed_lock/BreakRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/neorados/RADOS.hpp"
#include "include/stringify.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/lock/cls_lock_types.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/asio/Utils.h"
#include "librbd/managed_lock/GetLockerRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::managed_lock::BreakRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace managed_lock {
using util::create_context_callback;
using util::create_rados_callback;
template <typename I>
BreakRequest<I>::BreakRequest(librados::IoCtx& ioctx,
AsioEngine& asio_engine,
const std::string& oid, const Locker &locker,
bool exclusive, bool blocklist_locker,
uint32_t blocklist_expire_seconds,
bool force_break_lock, Context *on_finish)
: m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(m_ioctx.cct())),
m_asio_engine(asio_engine), m_oid(oid), m_locker(locker),
m_exclusive(exclusive), m_blocklist_locker(blocklist_locker),
m_blocklist_expire_seconds(blocklist_expire_seconds),
m_force_break_lock(force_break_lock), m_on_finish(on_finish) {
}
template <typename I>
void BreakRequest<I>::send() {
send_get_watchers();
}
template <typename I>
void BreakRequest<I>::send_get_watchers() {
ldout(m_cct, 10) << dendl;
librados::ObjectReadOperation op;
op.list_watchers(&m_watchers, &m_watchers_ret_val);
using klass = BreakRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_get_watchers>(this);
m_out_bl.clear();
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, &m_out_bl);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void BreakRequest<I>::handle_get_watchers(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r == 0) {
r = m_watchers_ret_val;
}
if (r < 0) {
lderr(m_cct) << "failed to retrieve watchers: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
bool found_alive_locker = false;
for (auto &watcher : m_watchers) {
ldout(m_cct, 20) << "watcher=["
<< "addr=" << watcher.addr << ", "
<< "entity=client." << watcher.watcher_id << "]" << dendl;
if ((strncmp(m_locker.address.c_str(),
watcher.addr, sizeof(watcher.addr)) == 0) &&
(m_locker.handle == watcher.cookie)) {
ldout(m_cct, 10) << "lock owner is still alive" << dendl;
found_alive_locker = true;
}
}
if (!m_force_break_lock && found_alive_locker) {
finish(-EAGAIN);
return;
}
send_get_locker();
}
template <typename I>
void BreakRequest<I>::send_get_locker() {
ldout(m_cct, 10) << dendl;
using klass = BreakRequest<I>;
Context *ctx = create_context_callback<klass, &klass::handle_get_locker>(
this);
auto req = GetLockerRequest<I>::create(m_ioctx, m_oid, m_exclusive,
&m_refreshed_locker, ctx);
req->send();
}
template <typename I>
void BreakRequest<I>::handle_get_locker(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r == -ENOENT) {
ldout(m_cct, 5) << "no lock owner" << dendl;
finish(0);
return;
} else if (r < 0 && r != -EBUSY) {
lderr(m_cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl;
finish(r);
return;
} else if (r < 0) {
m_refreshed_locker = {};
}
if (m_refreshed_locker != m_locker || m_refreshed_locker == Locker{}) {
ldout(m_cct, 5) << "no longer lock owner" << dendl;
finish(-EAGAIN);
return;
}
send_blocklist();
}
template <typename I>
void BreakRequest<I>::send_blocklist() {
if (!m_blocklist_locker) {
send_break_lock();
return;
}
entity_name_t entity_name = entity_name_t::CLIENT(m_ioctx.get_instance_id());
ldout(m_cct, 10) << "local entity=" << entity_name << ", "
<< "locker entity=" << m_locker.entity << dendl;
if (m_locker.entity == entity_name) {
lderr(m_cct) << "attempting to self-blocklist" << dendl;
finish(-EINVAL);
return;
}
entity_addr_t locker_addr;
if (!locker_addr.parse(m_locker.address)) {
lderr(m_cct) << "unable to parse locker address: " << m_locker.address
<< dendl;
finish(-EINVAL);
return;
}
std::optional<std::chrono::seconds> expire;
if (m_blocklist_expire_seconds != 0) {
expire = std::chrono::seconds(m_blocklist_expire_seconds);
}
m_asio_engine.get_rados_api().blocklist_add(
m_locker.address, expire,
librbd::asio::util::get_callback_adapter(
[this](int r) { handle_blocklist(r); }));
}
template <typename I>
void BreakRequest<I>::handle_blocklist(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to blocklist lock owner: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
wait_for_osd_map();
}
template <typename I>
void BreakRequest<I>::wait_for_osd_map() {
ldout(m_cct, 10) << dendl;
m_asio_engine.get_rados_api().wait_for_latest_osd_map(
librbd::asio::util::get_callback_adapter(
[this](int r) { handle_wait_for_osd_map(r); }));
}
template <typename I>
void BreakRequest<I>::handle_wait_for_osd_map(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to wait for updated OSD map: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
send_break_lock();
}
template <typename I>
void BreakRequest<I>::send_break_lock() {
ldout(m_cct, 10) << dendl;
librados::ObjectWriteOperation op;
rados::cls::lock::break_lock(&op, RBD_LOCK_NAME, m_locker.cookie,
m_locker.entity);
using klass = BreakRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_break_lock>(this);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void BreakRequest<I>::handle_break_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(m_cct) << "failed to break lock: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
void BreakRequest<I>::finish(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace managed_lock
} // namespace librbd
template class librbd::managed_lock::BreakRequest<librbd::ImageCtx>;

ceph-main/src/librbd/managed_lock/BreakRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H
#define CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include "msg/msg_types.h"
#include <list>
#include <string>
#include <boost/optional.hpp>
#include "librbd/managed_lock/Types.h"
class Context;
class ContextWQ;
class obj_watch_t;
namespace librbd {
class AsioEngine;
class ImageCtx;
template <typename> class Journal;
namespace asio { struct ContextWQ; }
namespace managed_lock {
template <typename ImageCtxT = ImageCtx>
class BreakRequest {
public:
static BreakRequest* create(librados::IoCtx& ioctx,
AsioEngine& asio_engine,
const std::string& oid, const Locker &locker,
bool exclusive, bool blocklist_locker,
uint32_t blocklist_expire_seconds,
bool force_break_lock, Context *on_finish) {
return new BreakRequest(ioctx, asio_engine, oid, locker, exclusive,
blocklist_locker, blocklist_expire_seconds,
force_break_lock, on_finish);
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_WATCHERS
* |
* v
* GET_LOCKER
* |
* v
* BLOCKLIST (skip if disabled)
* |
* v
* WAIT_FOR_OSD_MAP
* |
* v
* BREAK_LOCK
* |
* v
* <finish>
*
* @endverbatim
*/
librados::IoCtx &m_ioctx;
CephContext *m_cct;
AsioEngine& m_asio_engine;
std::string m_oid;
Locker m_locker;
bool m_exclusive;
bool m_blocklist_locker;
uint32_t m_blocklist_expire_seconds;
bool m_force_break_lock;
Context *m_on_finish;
bufferlist m_out_bl;
std::list<obj_watch_t> m_watchers;
int m_watchers_ret_val;
Locker m_refreshed_locker;
BreakRequest(librados::IoCtx& ioctx, AsioEngine& asio_engine,
const std::string& oid, const Locker &locker,
bool exclusive, bool blocklist_locker,
uint32_t blocklist_expire_seconds, bool force_break_lock,
Context *on_finish);
void send_get_watchers();
void handle_get_watchers(int r);
void send_get_locker();
void handle_get_locker(int r);
void send_blocklist();
void handle_blocklist(int r);
void wait_for_osd_map();
void handle_wait_for_osd_map(int r);
void send_break_lock();
void handle_break_lock(int r);
void finish(int r);
};
} // namespace managed_lock
} // namespace librbd
extern template class librbd::managed_lock::BreakRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H
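
A similarly hedged sketch for evicting a stale lock owner with the interface above. The Locker is assumed to have been fetched beforehand (for example via GetLockerRequest, shown further below), and the object ID and expiry are hypothetical; the example wrapper function is not part of the tree.

#include "include/Context.h"
#include "librbd/ImageCtx.h"
#include "librbd/managed_lock/BreakRequest.h"

void break_stale_lock(librados::IoCtx& ioctx, librbd::AsioEngine& asio_engine,
                      const librbd::managed_lock::Locker& locker,
                      Context* on_finish) {
  auto req = librbd::managed_lock::BreakRequest<librbd::ImageCtx>::create(
    ioctx, asio_engine,
    "rbd_header.1234",  // hypothetical lock object
    locker,             // owner to evict, e.g. filled in by GetLockerRequest
    true,               // exclusive
    true,               // blocklist_locker
    30,                 // blocklist_expire_seconds
    false,              // force_break_lock: fail with -EAGAIN if the owner is alive
    on_finish);
  req->send();
}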

ceph-main/src/librbd/managed_lock/GetLockerRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/managed_lock/GetLockerRequest.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/lock/cls_lock_types.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/stringify.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/managed_lock/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::managed_lock::GetLockerRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace managed_lock {
using librbd::util::create_rados_callback;
template <typename I>
GetLockerRequest<I>::GetLockerRequest(librados::IoCtx& ioctx,
const std::string& oid, bool exclusive,
Locker *locker, Context *on_finish)
: m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(m_ioctx.cct())),
m_oid(oid), m_exclusive(exclusive), m_locker(locker),
m_on_finish(on_finish) {
}
template <typename I>
void GetLockerRequest<I>::send() {
send_get_lockers();
}
template <typename I>
void GetLockerRequest<I>::send_get_lockers() {
ldout(m_cct, 10) << dendl;
librados::ObjectReadOperation op;
rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
using klass = GetLockerRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_get_lockers>(this);
m_out_bl.clear();
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, &m_out_bl);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void GetLockerRequest<I>::handle_get_lockers(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> lockers;
ClsLockType lock_type = ClsLockType::NONE;
std::string lock_tag;
if (r == 0) {
auto it = m_out_bl.cbegin();
r = rados::cls::lock::get_lock_info_finish(&it, &lockers, &lock_type,
&lock_tag);
}
if (r < 0) {
lderr(m_cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
if (lockers.empty()) {
ldout(m_cct, 20) << "no lockers detected" << dendl;
finish(-ENOENT);
return;
}
if (lock_tag != util::get_watcher_lock_tag()) {
ldout(m_cct, 5) <<"locked by external mechanism: tag=" << lock_tag << dendl;
finish(-EBUSY);
return;
}
if (m_exclusive && lock_type == ClsLockType::SHARED) {
ldout(m_cct, 5) << "incompatible shared lock type detected" << dendl;
finish(-EBUSY);
return;
} else if (!m_exclusive && lock_type == ClsLockType::EXCLUSIVE) {
ldout(m_cct, 5) << "incompatible exclusive lock type detected" << dendl;
finish(-EBUSY);
return;
}
std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t>::iterator iter = lockers.begin();
if (!util::decode_lock_cookie(iter->first.cookie, &m_locker->handle)) {
ldout(m_cct, 5) << "locked by external mechanism: "
<< "cookie=" << iter->first.cookie << dendl;
finish(-EBUSY);
return;
}
if (iter->second.addr.is_blank_ip()) {
ldout(m_cct, 5) << "locker has a blank address" << dendl;
finish(-EBUSY);
return;
}
m_locker->entity = iter->first.locker;
m_locker->cookie = iter->first.cookie;
m_locker->address = iter->second.addr.get_legacy_str();
ldout(m_cct, 10) << "retrieved exclusive locker: "
<< m_locker->entity << "@" << m_locker->address << dendl;
finish(0);
}
template <typename I>
void GetLockerRequest<I>::finish(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace managed_lock
} // namespace librbd
template class librbd::managed_lock::GetLockerRequest<librbd::ImageCtx>;

ceph-main/src/librbd/managed_lock/GetLockerRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H
#define CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados_fwd.hpp"
#include <string>
class Context;
namespace librbd {
struct ImageCtx;
namespace managed_lock {
struct Locker;
template <typename ImageCtxT = ImageCtx>
class GetLockerRequest {
public:
static GetLockerRequest* create(librados::IoCtx& ioctx,
const std::string& oid, bool exclusive,
Locker *locker, Context *on_finish) {
return new GetLockerRequest(ioctx, oid, exclusive, locker, on_finish);
}
void send();
private:
librados::IoCtx &m_ioctx;
CephContext *m_cct;
std::string m_oid;
bool m_exclusive;
Locker *m_locker;
Context *m_on_finish;
bufferlist m_out_bl;
GetLockerRequest(librados::IoCtx& ioctx, const std::string& oid,
bool exclusive, Locker *locker, Context *on_finish);
void send_get_lockers();
void handle_get_lockers(int r);
void finish(int r);
};
} // namespace managed_lock
} // namespace librbd
extern template class librbd::managed_lock::GetLockerRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H

ceph-main/src/librbd/managed_lock/ReacquireRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/managed_lock/ReacquireRequest.h"
#include "librbd/Watcher.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/lock/cls_lock_types.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/managed_lock/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::managed_lock::ReacquireRequest: " \
<< this << ": " << __func__
using std::string;
namespace librbd {
namespace managed_lock {
using librbd::util::create_rados_callback;
template <typename I>
ReacquireRequest<I>::ReacquireRequest(librados::IoCtx& ioctx,
const string& oid,
const string& old_cookie,
const string &new_cookie,
bool exclusive,
Context *on_finish)
: m_ioctx(ioctx), m_oid(oid), m_old_cookie(old_cookie),
m_new_cookie(new_cookie), m_exclusive(exclusive), m_on_finish(on_finish) {
}
template <typename I>
void ReacquireRequest<I>::send() {
set_cookie();
}
template <typename I>
void ReacquireRequest<I>::set_cookie() {
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
ldout(cct, 10) << dendl;
librados::ObjectWriteOperation op;
rados::cls::lock::set_cookie(&op, RBD_LOCK_NAME,
m_exclusive ? ClsLockType::EXCLUSIVE : ClsLockType::SHARED,
m_old_cookie, util::get_watcher_lock_tag(),
m_new_cookie);
librados::AioCompletion *rados_completion = create_rados_callback<
ReacquireRequest, &ReacquireRequest::handle_set_cookie>(this);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void ReacquireRequest<I>::handle_set_cookie(int r) {
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
ldout(cct, 10) << ": r=" << r << dendl;
if (r == -EOPNOTSUPP) {
ldout(cct, 10) << ": OSD doesn't support updating lock" << dendl;
} else if (r < 0) {
lderr(cct) << ": failed to update lock: " << cpp_strerror(r) << dendl;
}
m_on_finish->complete(r);
delete this;
}
} // namespace managed_lock
} // namespace librbd
template class librbd::managed_lock::ReacquireRequest<librbd::ImageCtx>;

ceph-main/src/librbd/managed_lock/ReacquireRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_REACQUIRE_REQUEST_H
#define CEPH_LIBRBD_MANAGED_LOCK_REACQUIRE_REQUEST_H
#include "include/rados/librados.hpp"
#include "include/int_types.h"
#include <string>
class Context;
namespace librbd {
class Watcher;
namespace managed_lock {
template <typename ImageCtxT>
class ReacquireRequest {
public:
static ReacquireRequest *create(librados::IoCtx& ioctx,
const std::string& oid,
const std::string& old_cookie,
const std::string &new_cookie,
bool exclusive,
Context *on_finish) {
return new ReacquireRequest(ioctx, oid, old_cookie, new_cookie, exclusive,
on_finish);
}
ReacquireRequest(librados::IoCtx& ioctx, const std::string& oid,
const std::string& old_cookie,
const std::string &new_cookie, bool exclusive,
Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* SET_COOKIE
* |
* v
* <finish>
*
* @endverbatim
*/
librados::IoCtx& m_ioctx;
std::string m_oid;
std::string m_old_cookie;
std::string m_new_cookie;
bool m_exclusive;
Context *m_on_finish;
void set_cookie();
void handle_set_cookie(int r);
};
} // namespace managed_lock
} // namespace librbd
#endif // CEPH_LIBRBD_MANAGED_LOCK_REACQUIRE_REQUEST_H

ceph-main/src/librbd/managed_lock/ReleaseRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/managed_lock/ReleaseRequest.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/lock/cls_lock_types.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/Watcher.h"
#include "librbd/asio/ContextWQ.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::managed_lock::ReleaseRequest: " \
<< this << " " << __func__ << ": "
using std::string;
namespace librbd {
namespace managed_lock {
using util::detail::C_AsyncCallback;
using util::create_context_callback;
using util::create_rados_callback;
template <typename I>
ReleaseRequest<I>* ReleaseRequest<I>::create(librados::IoCtx& ioctx,
Watcher *watcher,
asio::ContextWQ *work_queue,
const string& oid,
const string& cookie,
Context *on_finish) {
return new ReleaseRequest(ioctx, watcher, work_queue, oid, cookie,
on_finish);
}
template <typename I>
ReleaseRequest<I>::ReleaseRequest(librados::IoCtx& ioctx, Watcher *watcher,
asio::ContextWQ *work_queue,
const string& oid, const string& cookie,
Context *on_finish)
: m_ioctx(ioctx), m_watcher(watcher), m_oid(oid), m_cookie(cookie),
m_on_finish(new C_AsyncCallback<asio::ContextWQ>(work_queue, on_finish)) {
}
template <typename I>
ReleaseRequest<I>::~ReleaseRequest() {
}
template <typename I>
void ReleaseRequest<I>::send() {
send_unlock();
}
template <typename I>
void ReleaseRequest<I>::send_unlock() {
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
ldout(cct, 10) << "entity=client." << m_ioctx.get_instance_id() << ", "
<< "cookie=" << m_cookie << dendl;
librados::ObjectWriteOperation op;
rados::cls::lock::unlock(&op, RBD_LOCK_NAME, m_cookie);
using klass = ReleaseRequest;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_unlock>(this);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void ReleaseRequest<I>::handle_unlock(int r) {
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to unlock: " << cpp_strerror(r) << dendl;
}
finish();
}
template <typename I>
void ReleaseRequest<I>::finish() {
m_on_finish->complete(0);
delete this;
}
} // namespace managed_lock
} // namespace librbd
template class librbd::managed_lock::ReleaseRequest<librbd::ImageCtx>;

ceph-main/src/librbd/managed_lock/ReleaseRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_RELEASE_REQUEST_H
#define CEPH_LIBRBD_MANAGED_LOCK_RELEASE_REQUEST_H
#include "include/rados/librados.hpp"
#include "librbd/watcher/Types.h"
#include <string>
class Context;
class ContextWQ;
namespace librbd {
class Watcher;
namespace asio { struct ContextWQ; }
namespace managed_lock {
template <typename ImageCtxT>
class ReleaseRequest {
private:
typedef watcher::Traits<ImageCtxT> TypeTraits;
typedef typename TypeTraits::Watcher Watcher;
public:
static ReleaseRequest* create(librados::IoCtx& ioctx, Watcher *watcher,
asio::ContextWQ *work_queue,
const std::string& oid,
const std::string& cookie,
Context *on_finish);
~ReleaseRequest();
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* UNLOCK
* |
* v
* <finish>
*
* @endverbatim
*/
ReleaseRequest(librados::IoCtx& ioctx, Watcher *watcher,
asio::ContextWQ *work_queue, const std::string& oid,
const std::string& cookie, Context *on_finish);
librados::IoCtx& m_ioctx;
Watcher *m_watcher;
std::string m_oid;
std::string m_cookie;
Context *m_on_finish;
void send_unlock();
void handle_unlock(int r);
void finish();
};
} // namespace managed_lock
} // namespace librbd
#endif // CEPH_LIBRBD_MANAGED_LOCK_RELEASE_REQUEST_H

ceph-main/src/librbd/managed_lock/Types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_TYPES_H
#define CEPH_LIBRBD_MANAGED_LOCK_TYPES_H
#include "msg/msg_types.h"
#include <string>
namespace librbd {
namespace managed_lock {
struct Locker {
entity_name_t entity;
std::string cookie;
std::string address;
uint64_t handle = 0;
Locker() {
}
Locker(const entity_name_t& entity, const std::string &cookie,
const std::string &address, uint64_t handle)
: entity(entity), cookie(cookie), address(address), handle(handle) {
}
inline bool operator==(const Locker &rhs) const {
return (entity == rhs.entity &&
cookie == rhs.cookie &&
address == rhs.address &&
handle == rhs.handle);
}
inline bool operator!=(const Locker &rhs) const {
return !(*this == rhs);
}
};
enum Mode {
EXCLUSIVE,
SHARED
};
} // namespace managed_lock
} // namespace librbd
#endif // CEPH_LIBRBD_MANAGED_LOCK_TYPES_H
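
A short illustration (not from the tree) of the equality semantics above, which BreakRequest relies on: a default-constructed Locker acts as the "no owner" sentinel, and a lock re-acquired under a new watch handle counts as a different owner. The cookie and address strings below are hypothetical.

#include "include/ceph_assert.h"
#include "librbd/managed_lock/Types.h"

void locker_identity_example() {
  librbd::managed_lock::Locker a(entity_name_t::CLIENT(4100),
                                 "auto 94557022",              // hypothetical cookie
                                 "192.168.0.10:0/3241426372",  // hypothetical address
                                 94557022);
  librbd::managed_lock::Locker b = a;
  b.handle = 123;                            // same client, new watch handle
  ceph_assert(a != b);                       // treated as a different owner
  ceph_assert(a != librbd::managed_lock::Locker{});  // non-empty vs "no owner" sentinel
}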

ceph-main/src/librbd/managed_lock/Utils.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/ceph_assert.h"
#include "librbd/managed_lock/Utils.h"
#include <sstream>
namespace librbd {
namespace managed_lock {
namespace util {
namespace {
const std::string WATCHER_LOCK_COOKIE_PREFIX = "auto";
const std::string WATCHER_LOCK_TAG("internal");
} // anonymous namespace
const std::string &get_watcher_lock_tag() {
return WATCHER_LOCK_TAG;
}
bool decode_lock_cookie(const std::string &tag, uint64_t *handle) {
std::string prefix;
std::istringstream ss(tag);
if (!(ss >> prefix >> *handle) || prefix != WATCHER_LOCK_COOKIE_PREFIX) {
return false;
}
return true;
}
std::string encode_lock_cookie(uint64_t watch_handle) {
ceph_assert(watch_handle != 0);
std::ostringstream ss;
ss << WATCHER_LOCK_COOKIE_PREFIX << " " << watch_handle;
return ss.str();
}
} // namespace util
} // namespace managed_lock
} // namespace librbd
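
A quick round trip through the helpers above (illustrative only, not from the tree): the cookie is simply the "auto" prefix followed by the watch handle, and anything else fails to decode. The watch handle value is hypothetical.

#include "librbd/managed_lock/Utils.h"
#include <cassert>

void cookie_round_trip() {
  uint64_t watch_handle = 94557022;  // hypothetical watch handle
  std::string cookie =
    librbd::managed_lock::util::encode_lock_cookie(watch_handle);
  // cookie == "auto 94557022"
  uint64_t decoded = 0;
  assert(librbd::managed_lock::util::decode_lock_cookie(cookie, &decoded));
  assert(decoded == watch_handle);
  // cookies written by an external mechanism do not decode
  assert(!librbd::managed_lock::util::decode_lock_cookie("external", &decoded));
}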

ceph-main/src/librbd/managed_lock/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_UTILS_H
#define CEPH_LIBRBD_MANAGED_LOCK_UTILS_H
#include "include/int_types.h"
#include <string>
namespace librbd {
namespace managed_lock {
namespace util {
const std::string &get_watcher_lock_tag();
bool decode_lock_cookie(const std::string &tag, uint64_t *handle);
std::string encode_lock_cookie(uint64_t watch_handle);
} // namespace util
} // namespace managed_lock
} // namespace librbd
#endif // CEPH_LIBRBD_MANAGED_LOCK_UTILS_H

ceph-main/src/librbd/migration/FileStream.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif // _LARGEFILE64_SOURCE
#include "librbd/migration/FileStream.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/Utils.h"
#include <boost/asio/buffer.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/read.hpp>
#include <fcntl.h>
#include <unistd.h>
namespace librbd {
namespace migration {
namespace {
const std::string FILE_PATH {"file_path"};
} // anonymous namespace
#ifdef BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::FileStream::ReadRequest " \
<< this << " " << __func__ << ": "
template <typename I>
struct FileStream<I>::ReadRequest {
FileStream* file_stream;
io::Extents byte_extents;
bufferlist* data;
Context* on_finish;
size_t index = 0;
ReadRequest(FileStream* file_stream, io::Extents&& byte_extents,
bufferlist* data, Context* on_finish)
: file_stream(file_stream), byte_extents(std::move(byte_extents)),
data(data), on_finish(on_finish) {
auto cct = file_stream->m_cct;
ldout(cct, 20) << dendl;
}
void send() {
data->clear();
read();
}
void read() {
auto cct = file_stream->m_cct;
if (index >= byte_extents.size()) {
finish(0);
return;
}
auto& byte_extent = byte_extents[index++];
ldout(cct, 20) << "byte_extent=" << byte_extent << dendl;
auto ptr = buffer::ptr_node::create(buffer::create_small_page_aligned(
byte_extent.second));
auto buffer = boost::asio::mutable_buffer(
ptr->c_str(), byte_extent.second);
data->push_back(std::move(ptr));
int r;
auto offset = lseek64(file_stream->m_file_no, byte_extent.first, SEEK_SET);
if (offset == -1) {
r = -errno;
lderr(cct) << "failed to seek file stream: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
boost::system::error_code ec;
size_t bytes_read = boost::asio::read(
*file_stream->m_stream_descriptor, std::move(buffer), ec);
r = -ec.value();
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to read from file stream: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
} else if (bytes_read < byte_extent.second) {
lderr(cct) << "failed to read " << byte_extent.second << " bytes from "
<< "file stream" << dendl;
finish(-ERANGE);
return;
}
// re-queue the remainder of the read requests
boost::asio::post(file_stream->m_strand, [this]() { read(); });
}
void finish(int r) {
auto cct = file_stream->m_cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0) {
data->clear();
}
on_finish->complete(r);
delete this;
}
};
#endif // BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::FileStream: " << this \
<< " " << __func__ << ": "
template <typename I>
FileStream<I>::FileStream(I* image_ctx, const json_spirit::mObject& json_object)
: m_cct(image_ctx->cct), m_asio_engine(image_ctx->asio_engine),
m_json_object(json_object),
m_strand(boost::asio::make_strand(*m_asio_engine)) {
}
template <typename I>
FileStream<I>::~FileStream() {
if (m_file_no != -1) {
::close(m_file_no);
}
}
#ifdef BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR
template <typename I>
void FileStream<I>::open(Context* on_finish) {
auto& file_path_value = m_json_object[FILE_PATH];
if (file_path_value.type() != json_spirit::str_type) {
lderr(m_cct) << "failed to locate '" << FILE_PATH << "' key" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& file_path = file_path_value.get_str();
ldout(m_cct, 10) << "file_path=" << file_path << dendl;
m_file_no = ::open(file_path.c_str(), O_RDONLY);
if (m_file_no < 0) {
int r = -errno;
lderr(m_cct) << "failed to open file stream '" << file_path << "': "
<< cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
m_stream_descriptor = std::make_optional<
boost::asio::posix::stream_descriptor>(m_strand, m_file_no);
on_finish->complete(0);
}
template <typename I>
void FileStream<I>::close(Context* on_finish) {
ldout(m_cct, 10) << dendl;
m_stream_descriptor.reset();
on_finish->complete(0);
}
template <typename I>
void FileStream<I>::get_size(uint64_t* size, Context* on_finish) {
ldout(m_cct, 10) << dendl;
// execute IO operations in a single strand to prevent seek races
boost::asio::post(
m_strand, [this, size, on_finish]() {
auto offset = lseek64(m_file_no, 0, SEEK_END);
if (offset == -1) {
int r = -errno;
lderr(m_cct) << "failed to seek to file end: " << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
ldout(m_cct, 10) << "size=" << offset << dendl;
*size = offset;
on_finish->complete(0);
});
}
template <typename I>
void FileStream<I>::read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) {
ldout(m_cct, 20) << byte_extents << dendl;
auto ctx = new ReadRequest(this, std::move(byte_extents), data, on_finish);
// execute IO operations in a single strand to prevent seek races
boost::asio::post(m_strand, [ctx]() { ctx->send(); });
}
#else // BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR
template <typename I>
void FileStream<I>::open(Context* on_finish) {
on_finish->complete(-EIO);
}
template <typename I>
void FileStream<I>::close(Context* on_finish) {
on_finish->complete(-EIO);
}
template <typename I>
void FileStream<I>::get_size(uint64_t* size, Context* on_finish) {
on_finish->complete(-EIO);
}
template <typename I>
void FileStream<I>::read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) {
on_finish->complete(-EIO);
}
#endif // BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR
} // namespace migration
} // namespace librbd
template class librbd::migration::FileStream<librbd::ImageCtx>;
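
A hedged sketch of wiring the stream above into a caller: the only key FileStream looks for in its JSON parameters is "file_path", and open() completes with -EINVAL when it is missing. The example function, the file path, and the ownership/cleanup of the stream (elided here) are not from the tree.

#include "include/Context.h"
#include "librbd/ImageCtx.h"
#include "librbd/migration/FileStream.h"

void open_file_stream(librbd::ImageCtx* image_ctx, Context* on_open) {
  json_spirit::mObject source;
  source["file_path"] = std::string("/mnt/export/image.raw");  // hypothetical path
  auto stream = librbd::migration::FileStream<librbd::ImageCtx>::create(
    image_ctx, source);
  stream->open(on_open);  // then get_size()/read(), and close() when done
}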

ceph-main/src/librbd/migration/FileStream.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_FILE_STREAM_H
#define CEPH_LIBRBD_MIGRATION_FILE_STREAM_H
#include "include/int_types.h"
#include "librbd/migration/StreamInterface.h"
#include <boost/asio/io_context.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/posix/basic_stream_descriptor.hpp>
#include <json_spirit/json_spirit.h>
#include <memory>
#include <string>
struct Context;
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace migration {
template <typename ImageCtxT>
class FileStream : public StreamInterface {
public:
static FileStream* create(ImageCtxT* image_ctx,
const json_spirit::mObject& json_object) {
return new FileStream(image_ctx, json_object);
}
FileStream(ImageCtxT* image_ctx, const json_spirit::mObject& json_object);
~FileStream() override;
FileStream(const FileStream&) = delete;
FileStream& operator=(const FileStream&) = delete;
void open(Context* on_finish) override;
void close(Context* on_finish) override;
void get_size(uint64_t* size, Context* on_finish) override;
void read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) override;
private:
CephContext* m_cct;
std::shared_ptr<AsioEngine> m_asio_engine;
json_spirit::mObject m_json_object;
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
#ifdef BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR
std::optional<boost::asio::posix::stream_descriptor> m_stream_descriptor;
struct ReadRequest;
#endif // BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR
int m_file_no = -1;
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::FileStream<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_FILE_STREAM_H

ceph-main/src/librbd/migration/FormatInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_FORMAT_INTERFACE_H
#define CEPH_LIBRBD_MIGRATION_FORMAT_INTERFACE_H
#include "include/buffer_fwd.h"
#include "include/int_types.h"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
#include <map>
struct Context;
namespace librbd {
namespace io {
struct AioCompletion;
struct ReadResult;
} // namespace io
namespace migration {
struct FormatInterface {
typedef std::map<uint64_t, SnapInfo> SnapInfos;
virtual ~FormatInterface() {
}
virtual void open(Context* on_finish) = 0;
virtual void close(Context* on_finish) = 0;
virtual void get_snapshots(SnapInfos* snap_infos, Context* on_finish) = 0;
virtual void get_image_size(uint64_t snap_id, uint64_t* size,
Context* on_finish) = 0;
virtual bool read(io::AioCompletion* aio_comp, uint64_t snap_id,
io::Extents&& image_extents, io::ReadResult&& read_result,
int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) = 0;
virtual void list_snaps(io::Extents&& image_extents, io::SnapIds&& snap_ids,
int list_snaps_flags,
io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace,
Context* on_finish) = 0;
};
} // namespace migration
} // namespace librbd
#endif // CEPH_LIBRBD_MIGRATION_FORMAT_INTERFACE_H

ceph-main/src/librbd/migration/HttpClient.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/HttpClient.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include "librbd/migration/Utils.h"
#include <boost/asio/buffer.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/http/read.hpp>
#include <boost/lexical_cast.hpp>
#include <deque>
namespace librbd {
namespace migration {
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::HttpClient::" \
<< "HttpSession " << this << " " << __func__ \
<< ": "
/**
* boost::beast utilizes non-inheriting template classes for handling plain vs
* encrypted TCP streams. Utilize a base class to handle the majority of the
* logic for connecting, disconnecting, resetting, and sending requests.
*/
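// The two stream types used below (boost::beast::tcp_stream and
// boost::beast::ssl_stream<boost::beast::tcp_stream>) do not share a useful
// base class, so the shared session logic reaches the concrete stream via
// CRTP, i.e. derived().stream() (see derived() further down). A minimal
// sketch of the idiom, illustrative only and not part of this file:
//
//   template <typename D>
//   struct SessionBase {
//     D& derived() { return static_cast<D&>(*this); }
//     void shut_down() { derived().stream().close(); }  // resolved at compile time
//   };
//   struct PlainSession : SessionBase<PlainSession> {
//     boost::beast::tcp_stream& stream();  // returns the plain TCP stream
//   };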
template <typename I>
template <typename D>
class HttpClient<I>::HttpSession : public HttpSessionInterface {
public:
void init(Context* on_finish) override {
ceph_assert(m_http_client->m_strand.running_in_this_thread());
auto cct = m_http_client->m_cct;
ldout(cct, 15) << dendl;
ceph_assert(m_state == STATE_UNINITIALIZED);
m_state = STATE_CONNECTING;
resolve_host(on_finish);
}
void shut_down(Context* on_finish) override {
ceph_assert(m_http_client->m_strand.running_in_this_thread());
auto cct = m_http_client->m_cct;
ldout(cct, 15) << dendl;
ceph_assert(on_finish != nullptr);
ceph_assert(m_on_shutdown == nullptr);
m_on_shutdown = on_finish;
auto current_state = m_state;
if (current_state == STATE_UNINITIALIZED) {
// never initialized or resolve/connect failed
on_finish->complete(0);
return;
}
m_state = STATE_SHUTTING_DOWN;
if (current_state != STATE_READY) {
// delay shutdown until current state transition completes
return;
}
disconnect(new LambdaContext([this](int r) { handle_shut_down(r); }));
}
void issue(std::shared_ptr<Work>&& work) override {
ceph_assert(m_http_client->m_strand.running_in_this_thread());
auto cct = m_http_client->m_cct;
ldout(cct, 20) << "work=" << work.get() << dendl;
if (is_shutdown()) {
lderr(cct) << "cannot issue HTTP request, client is shutdown"
<< dendl;
work->complete(-ESHUTDOWN, {});
return;
}
bool first_issue = m_issue_queue.empty();
m_issue_queue.emplace_back(work);
if (m_state == STATE_READY && first_issue) {
ldout(cct, 20) << "sending http request: work=" << work.get() << dendl;
finalize_issue(std::move(work));
} else if (m_state == STATE_UNINITIALIZED) {
ldout(cct, 20) << "resetting HTTP session: work=" << work.get() << dendl;
m_state = STATE_RESET_CONNECTING;
resolve_host(nullptr);
} else {
ldout(cct, 20) << "queueing HTTP request: work=" << work.get() << dendl;
}
}
void finalize_issue(std::shared_ptr<Work>&& work) {
auto cct = m_http_client->m_cct;
ldout(cct, 20) << "work=" << work.get() << dendl;
++m_in_flight_requests;
(*work)(derived().stream());
}
void handle_issue(boost::system::error_code ec,
std::shared_ptr<Work>&& work) override {
ceph_assert(m_http_client->m_strand.running_in_this_thread());
auto cct = m_http_client->m_cct;
ldout(cct, 20) << "work=" << work.get() << ", r=" << -ec.value() << dendl;
ceph_assert(m_in_flight_requests > 0);
--m_in_flight_requests;
if (maybe_finalize_reset()) {
// previous request is attempting reset so this request will be resent
return;
}
ceph_assert(!m_issue_queue.empty());
m_issue_queue.pop_front();
if (is_shutdown()) {
lderr(cct) << "client shutdown during in-flight request" << dendl;
work->complete(-ESHUTDOWN, {});
maybe_finalize_shutdown();
return;
}
if (ec) {
if (ec == boost::asio::error::bad_descriptor ||
ec == boost::asio::error::broken_pipe ||
ec == boost::asio::error::connection_reset ||
ec == boost::asio::error::operation_aborted ||
ec == boost::asio::ssl::error::stream_truncated ||
ec == boost::beast::http::error::end_of_stream ||
ec == boost::beast::http::error::partial_message) {
ldout(cct, 5) << "remote peer stream closed, retrying request" << dendl;
m_issue_queue.push_front(work);
} else if (ec == boost::beast::error::timeout) {
lderr(cct) << "timed-out while issuing request" << dendl;
work->complete(-ETIMEDOUT, {});
} else {
lderr(cct) << "failed to issue request: " << ec.message() << dendl;
work->complete(-ec.value(), {});
}
// attempt to recover the connection
reset();
return;
}
bool first_receive = m_receive_queue.empty();
m_receive_queue.push_back(work);
if (first_receive) {
receive(std::move(work));
}
// TODO disable pipelining for non-idempotent requests
// pipeline the next request into the stream
if (!m_issue_queue.empty()) {
work = m_issue_queue.front();
ldout(cct, 20) << "sending http request: work=" << work.get() << dendl;
finalize_issue(std::move(work));
}
}
protected:
HttpClient* m_http_client;
HttpSession(HttpClient* http_client)
: m_http_client(http_client), m_resolver(http_client->m_strand) {
}
virtual void connect(boost::asio::ip::tcp::resolver::results_type results,
Context* on_finish) = 0;
virtual void disconnect(Context* on_finish) = 0;
void close_socket() {
auto cct = m_http_client->m_cct;
ldout(cct, 15) << dendl;
boost::system::error_code ec;
boost::beast::get_lowest_layer(derived().stream()).socket().close(ec);
}
private:
enum State {
STATE_UNINITIALIZED,
STATE_CONNECTING,
STATE_READY,
STATE_RESET_PENDING,
STATE_RESET_DISCONNECTING,
STATE_RESET_CONNECTING,
STATE_SHUTTING_DOWN,
STATE_SHUTDOWN,
};
State m_state = STATE_UNINITIALIZED;
boost::asio::ip::tcp::resolver m_resolver;
Context* m_on_shutdown = nullptr;
uint64_t m_in_flight_requests = 0;
std::deque<std::shared_ptr<Work>> m_issue_queue;
std::deque<std::shared_ptr<Work>> m_receive_queue;
boost::beast::flat_buffer m_buffer;
std::optional<boost::beast::http::parser<false, EmptyBody>> m_header_parser;
std::optional<boost::beast::http::parser<false, StringBody>> m_parser;
D& derived() {
return static_cast<D&>(*this);
}
void resolve_host(Context* on_finish) {
auto cct = m_http_client->m_cct;
ldout(cct, 15) << dendl;
shutdown_socket();
m_resolver.async_resolve(
m_http_client->m_url_spec.host, m_http_client->m_url_spec.port,
[this, on_finish](boost::system::error_code ec, auto results) {
handle_resolve_host(ec, results, on_finish); });
}
void handle_resolve_host(
boost::system::error_code ec,
boost::asio::ip::tcp::resolver::results_type results,
Context* on_finish) {
auto cct = m_http_client->m_cct;
int r = -ec.value();
ldout(cct, 15) << "r=" << r << dendl;
if (ec) {
if (ec == boost::asio::error::host_not_found) {
r = -ENOENT;
} else if (ec == boost::asio::error::host_not_found_try_again) {
// TODO: add retry throttle
r = -EAGAIN;
}
lderr(cct) << "failed to resolve host '"
<< m_http_client->m_url_spec.host << "': "
<< cpp_strerror(r) << dendl;
advance_state(STATE_UNINITIALIZED, r, on_finish);
return;
}
connect(results, new LambdaContext([this, on_finish](int r) {
handle_connect(r, on_finish); }));
}
void handle_connect(int r, Context* on_finish) {
auto cct = m_http_client->m_cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to connect to host '"
<< m_http_client->m_url_spec.host << "': "
<< cpp_strerror(r) << dendl;
advance_state(STATE_UNINITIALIZED, r, on_finish);
return;
}
advance_state(STATE_READY, 0, on_finish);
}
void handle_shut_down(int r) {
auto cct = m_http_client->m_cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to disconnect stream: '" << cpp_strerror(r)
<< dendl;
}
// cancel all in-flight send/receives (if any)
shutdown_socket();
maybe_finalize_shutdown();
}
void maybe_finalize_shutdown() {
if (m_in_flight_requests > 0) {
return;
}
// cancel any queued IOs
fail_queued_work(-ESHUTDOWN);
advance_state(STATE_SHUTDOWN, 0, nullptr);
}
bool is_shutdown() const {
ceph_assert(m_http_client->m_strand.running_in_this_thread());
return (m_state == STATE_SHUTTING_DOWN || m_state == STATE_SHUTDOWN);
}
void reset() {
ceph_assert(m_http_client->m_strand.running_in_this_thread());
ceph_assert(m_state == STATE_READY);
auto cct = m_http_client->m_cct;
ldout(cct, 15) << dendl;
m_state = STATE_RESET_PENDING;
maybe_finalize_reset();
}
bool maybe_finalize_reset() {
if (m_state != STATE_RESET_PENDING) {
return false;
}
if (m_in_flight_requests > 0) {
return true;
}
ceph_assert(m_http_client->m_strand.running_in_this_thread());
auto cct = m_http_client->m_cct;
ldout(cct, 15) << dendl;
m_buffer.clear();
// move in-flight request back to the front of the issue queue
m_issue_queue.insert(m_issue_queue.begin(),
m_receive_queue.begin(), m_receive_queue.end());
m_receive_queue.clear();
m_state = STATE_RESET_DISCONNECTING;
disconnect(new LambdaContext([this](int r) { handle_reset(r); }));
return true;
}
void handle_reset(int r) {
auto cct = m_http_client->m_cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to disconnect stream: '" << cpp_strerror(r)
<< dendl;
}
advance_state(STATE_RESET_CONNECTING, r, nullptr);
}
int shutdown_socket() {
if (!boost::beast::get_lowest_layer(
derived().stream()).socket().is_open()) {
return 0;
}
auto cct = m_http_client->m_cct;
ldout(cct, 15) << dendl;
boost::system::error_code ec;
boost::beast::get_lowest_layer(derived().stream()).socket().shutdown(
boost::asio::ip::tcp::socket::shutdown_both, ec);
if (ec && ec != boost::beast::errc::not_connected) {
lderr(cct) << "failed to shutdown socket: " << ec.message() << dendl;
return -ec.value();
}
close_socket();
return 0;
}
void receive(std::shared_ptr<Work>&& work) {
auto cct = m_http_client->m_cct;
ldout(cct, 15) << "work=" << work.get() << dendl;
ceph_assert(!m_receive_queue.empty());
++m_in_flight_requests;
// receive the response for this request
m_parser.emplace();
if (work->header_only()) {
// HEAD requests don't transfer data but the parser still cares about max
// content-length
m_header_parser.emplace();
m_header_parser->body_limit(std::numeric_limits<uint64_t>::max());
boost::beast::http::async_read_header(
derived().stream(), m_buffer, *m_header_parser,
[this, work=std::move(work)]
(boost::beast::error_code ec, std::size_t) mutable {
handle_receive(ec, std::move(work));
});
} else {
m_parser->body_limit(1 << 25); // max RBD object size
boost::beast::http::async_read(
derived().stream(), m_buffer, *m_parser,
[this, work=std::move(work)]
(boost::beast::error_code ec, std::size_t) mutable {
handle_receive(ec, std::move(work));
});
}
}
void handle_receive(boost::system::error_code ec,
std::shared_ptr<Work>&& work) {
auto cct = m_http_client->m_cct;
ldout(cct, 15) << "work=" << work.get() << ", r=" << -ec.value() << dendl;
ceph_assert(m_in_flight_requests > 0);
--m_in_flight_requests;
if (maybe_finalize_reset()) {
// previous request is attempting reset so this request will be resent
return;
}
ceph_assert(!m_receive_queue.empty());
m_receive_queue.pop_front();
if (is_shutdown()) {
lderr(cct) << "client shutdown with in-flight request" << dendl;
work->complete(-ESHUTDOWN, {});
maybe_finalize_shutdown();
return;
}
if (ec) {
if (ec == boost::asio::error::bad_descriptor ||
ec == boost::asio::error::broken_pipe ||
ec == boost::asio::error::connection_reset ||
ec == boost::asio::error::operation_aborted ||
ec == boost::asio::ssl::error::stream_truncated ||
ec == boost::beast::http::error::end_of_stream ||
ec == boost::beast::http::error::partial_message) {
ldout(cct, 5) << "remote peer stream closed, retrying request" << dendl;
m_receive_queue.push_front(work);
} else if (ec == boost::beast::error::timeout) {
lderr(cct) << "timed-out while issuing request" << dendl;
work->complete(-ETIMEDOUT, {});
} else {
lderr(cct) << "failed to issue request: " << ec.message() << dendl;
work->complete(-ec.value(), {});
}
reset();
return;
}
Response response;
if (work->header_only()) {
m_parser.emplace(std::move(*m_header_parser));
}
response = m_parser->release();
// basic response code handling in a common location
int r = 0;
auto result = response.result();
if (result == boost::beast::http::status::not_found) {
lderr(cct) << "requested resource does not exist" << dendl;
r = -ENOENT;
} else if (result == boost::beast::http::status::forbidden) {
lderr(cct) << "permission denied attempting to access resource" << dendl;
r = -EACCES;
} else if (boost::beast::http::to_status_class(result) !=
boost::beast::http::status_class::successful) {
lderr(cct) << "failed to retrieve size: HTTP " << result << dendl;
r = -EIO;
}
bool need_eof = response.need_eof();
if (r < 0) {
work->complete(r, {});
} else {
work->complete(0, std::move(response));
}
if (need_eof) {
ldout(cct, 20) << "reset required for non-pipelined response: "
<< "work=" << work.get() << dendl;
reset();
} else if (!m_receive_queue.empty()) {
auto work = m_receive_queue.front();
receive(std::move(work));
}
}
void advance_state(State next_state, int r, Context* on_finish) {
auto cct = m_http_client->m_cct;
auto current_state = m_state;
ldout(cct, 15) << "current_state=" << current_state << ", "
<< "next_state=" << next_state << ", "
<< "r=" << r << dendl;
m_state = next_state;
if (current_state == STATE_CONNECTING) {
if (next_state == STATE_UNINITIALIZED) {
shutdown_socket();
on_finish->complete(r);
return;
} else if (next_state == STATE_READY) {
on_finish->complete(r);
return;
}
} else if (current_state == STATE_SHUTTING_DOWN) {
if (next_state == STATE_READY) {
// shut down requested while connecting/resetting
disconnect(new LambdaContext([this](int r) { handle_shut_down(r); }));
return;
} else if (next_state == STATE_UNINITIALIZED ||
next_state == STATE_SHUTDOWN ||
next_state == STATE_RESET_CONNECTING) {
ceph_assert(m_on_shutdown != nullptr);
m_on_shutdown->complete(r);
return;
}
} else if (current_state == STATE_RESET_DISCONNECTING) {
// disconnected from peer -- ignore errors and reconnect
ceph_assert(next_state == STATE_RESET_CONNECTING);
ceph_assert(on_finish == nullptr);
shutdown_socket();
resolve_host(nullptr);
return;
} else if (current_state == STATE_RESET_CONNECTING) {
ceph_assert(on_finish == nullptr);
if (next_state == STATE_READY) {
// restart queued IO
if (!m_issue_queue.empty()) {
auto& work = m_issue_queue.front();
finalize_issue(std::move(work));
}
return;
} else if (next_state == STATE_UNINITIALIZED) {
shutdown_socket();
// fail all queued IO
fail_queued_work(r);
return;
}
}
lderr(cct) << "unexpected state transition: "
<< "current_state=" << current_state << ", "
<< "next_state=" << next_state << dendl;
ceph_assert(false);
}
void complete_work(std::shared_ptr<Work> work, int r, Response&& response) {
auto cct = m_http_client->m_cct;
ldout(cct, 20) << "work=" << work.get() << ", r=" << r << dendl;
work->complete(r, std::move(response));
}
void fail_queued_work(int r) {
auto cct = m_http_client->m_cct;
ldout(cct, 10) << "r=" << r << dendl;
for (auto& work : m_issue_queue) {
complete_work(work, r, {});
}
m_issue_queue.clear();
ceph_assert(m_receive_queue.empty());
}
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::HttpClient::" \
<< "PlainHttpSession " << this << " " << __func__ \
<< ": "
template <typename I>
class HttpClient<I>::PlainHttpSession : public HttpSession<PlainHttpSession> {
public:
PlainHttpSession(HttpClient* http_client)
: HttpSession<PlainHttpSession>(http_client),
m_stream(http_client->m_strand) {
}
~PlainHttpSession() override {
this->close_socket();
}
inline boost::beast::tcp_stream&
stream() {
return m_stream;
}
protected:
void connect(boost::asio::ip::tcp::resolver::results_type results,
Context* on_finish) override {
auto http_client = this->m_http_client;
auto cct = http_client->m_cct;
ldout(cct, 15) << dendl;
m_stream.async_connect(
results,
[on_finish](boost::system::error_code ec, const auto& endpoint) {
on_finish->complete(-ec.value());
});
}
void disconnect(Context* on_finish) override {
on_finish->complete(0);
}
private:
boost::beast::tcp_stream m_stream;
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::HttpClient::" \
<< "SslHttpSession " << this << " " << __func__ \
<< ": "
template <typename I>
class HttpClient<I>::SslHttpSession : public HttpSession<SslHttpSession> {
public:
SslHttpSession(HttpClient* http_client)
: HttpSession<SslHttpSession>(http_client),
m_stream(http_client->m_strand, http_client->m_ssl_context) {
}
~SslHttpSession() override {
this->close_socket();
}
inline boost::beast::ssl_stream<boost::beast::tcp_stream>&
stream() {
return m_stream;
}
protected:
void connect(boost::asio::ip::tcp::resolver::results_type results,
Context* on_finish) override {
auto http_client = this->m_http_client;
auto cct = http_client->m_cct;
ldout(cct, 15) << dendl;
boost::beast::get_lowest_layer(m_stream).async_connect(
results,
[this, on_finish](boost::system::error_code ec, const auto& endpoint) {
handle_connect(-ec.value(), on_finish);
});
}
void disconnect(Context* on_finish) override {
auto http_client = this->m_http_client;
auto cct = http_client->m_cct;
ldout(cct, 15) << dendl;
if (!m_ssl_enabled) {
on_finish->complete(0);
return;
}
m_stream.async_shutdown(
asio::util::get_callback_adapter([this, on_finish](int r) {
shutdown(r, on_finish); }));
}
private:
boost::beast::ssl_stream<boost::beast::tcp_stream> m_stream;
bool m_ssl_enabled = false;
void handle_connect(int r, Context* on_finish) {
auto http_client = this->m_http_client;
auto cct = http_client->m_cct;
ldout(cct, 15) << dendl;
if (r < 0) {
lderr(cct) << "failed to connect to host '"
<< http_client->m_url_spec.host << "': "
<< cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
handshake(on_finish);
}
void handshake(Context* on_finish) {
auto http_client = this->m_http_client;
auto cct = http_client->m_cct;
ldout(cct, 15) << dendl;
auto& host = http_client->m_url_spec.host;
m_stream.set_verify_mode(
boost::asio::ssl::verify_peer |
boost::asio::ssl::verify_fail_if_no_peer_cert);
m_stream.set_verify_callback(
[host, next=boost::asio::ssl::host_name_verification(host),
ignore_self_signed=http_client->m_ignore_self_signed_cert]
(bool preverified, boost::asio::ssl::verify_context& ctx) {
if (!preverified && ignore_self_signed) {
auto ec = X509_STORE_CTX_get_error(ctx.native_handle());
switch (ec) {
case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT:
case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN:
// ignore self-signed cert issues
preverified = true;
break;
default:
break;
}
}
return next(preverified, ctx);
});
// Set SNI Hostname (many hosts need this to handshake successfully)
if (!SSL_set_tlsext_host_name(m_stream.native_handle(),
http_client->m_url_spec.host.c_str())) {
int r = -::ERR_get_error();
lderr(cct) << "failed to initialize SNI hostname: " << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
// Perform the SSL/TLS handshake
m_stream.async_handshake(
boost::asio::ssl::stream_base::client,
asio::util::get_callback_adapter(
[this, on_finish](int r) { handle_handshake(r, on_finish); }));
}
void handle_handshake(int r, Context* on_finish) {
auto http_client = this->m_http_client;
auto cct = http_client->m_cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to complete handshake: " << cpp_strerror(r)
<< dendl;
disconnect(new LambdaContext([r, on_finish](int) {
on_finish->complete(r); }));
return;
}
m_ssl_enabled = true;
on_finish->complete(0);
}
void shutdown(int r, Context* on_finish) {
auto http_client = this->m_http_client;
auto cct = http_client->m_cct;
ldout(cct, 15) << "r=" << r << dendl;
on_finish->complete(r);
}
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::HttpClient: " << this \
<< " " << __func__ << ": "
template <typename I>
HttpClient<I>::HttpClient(I* image_ctx, const std::string& url)
: m_cct(image_ctx->cct), m_image_ctx(image_ctx),
m_asio_engine(image_ctx->asio_engine), m_url(url),
m_strand(boost::asio::make_strand(*m_asio_engine)),
m_ssl_context(boost::asio::ssl::context::sslv23_client) {
m_ssl_context.set_default_verify_paths();
}
template <typename I>
void HttpClient<I>::open(Context* on_finish) {
ldout(m_cct, 10) << "url=" << m_url << dendl;
int r = util::parse_url(m_cct, m_url, &m_url_spec);
if (r < 0) {
lderr(m_cct) << "failed to parse url '" << m_url << "': " << cpp_strerror(r)
<< dendl;
on_finish->complete(-EINVAL);
return;
}
boost::asio::post(m_strand, [this, on_finish]() mutable {
create_http_session(on_finish); });
}
template <typename I>
void HttpClient<I>::close(Context* on_finish) {
boost::asio::post(m_strand, [this, on_finish]() mutable {
shut_down_http_session(on_finish); });
}
template <typename I>
void HttpClient<I>::get_size(uint64_t* size, Context* on_finish) {
ldout(m_cct, 10) << dendl;
Request req;
req.method(boost::beast::http::verb::head);
issue(
std::move(req), [this, size, on_finish](int r, Response&& response) {
handle_get_size(r, std::move(response), size, on_finish);
});
}
template <typename I>
void HttpClient<I>::handle_get_size(int r, Response&& response, uint64_t* size,
Context* on_finish) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to retrieve size: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
} else if (!response.has_content_length()) {
lderr(m_cct) << "failed to retrieve size: missing content-length" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto content_length = response[boost::beast::http::field::content_length];
try {
*size = boost::lexical_cast<uint64_t>(content_length);
} catch (boost::bad_lexical_cast&) {
lderr(m_cct) << "invalid content-length in response" << dendl;
on_finish->complete(-EBADMSG);
return;
}
on_finish->complete(0);
}
template <typename I>
void HttpClient<I>::read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) {
ldout(m_cct, 20) << dendl;
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, librbd::util::get_image_ctx(m_image_ctx), io::AIO_TYPE_READ);
aio_comp->set_request_count(byte_extents.size());
// utilize ReadResult to assemble multiple byte extents into a single bl
// since boost::beast doesn't support multipart responses out-of-the-box
io::ReadResult read_result{data};
aio_comp->read_result = std::move(read_result);
aio_comp->read_result.set_image_extents(byte_extents);
// issue a range get request for each extent
uint64_t buffer_offset = 0;
for (auto [byte_offset, byte_length] : byte_extents) {
auto ctx = new io::ReadResult::C_ImageReadRequest(
aio_comp, buffer_offset, {{byte_offset, byte_length}});
buffer_offset += byte_length;
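    // HTTP range requests use inclusive byte positions (RFC 7233), hence
    // the "- 1" when computing the end of the requested window below.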
Request req;
req.method(boost::beast::http::verb::get);
std::stringstream range;
ceph_assert(byte_length > 0);
range << "bytes=" << byte_offset << "-" << (byte_offset + byte_length - 1);
req.set(boost::beast::http::field::range, range.str());
issue(
std::move(req),
[this, byte_offset=byte_offset, byte_length=byte_length, ctx]
(int r, Response&& response) {
handle_read(r, std::move(response), byte_offset, byte_length, &ctx->bl,
ctx);
});
}
}
template <typename I>
void HttpClient<I>::handle_read(int r, Response&& response,
uint64_t byte_offset, uint64_t byte_length,
bufferlist* data, Context* on_finish) {
ldout(m_cct, 20) << "bytes=" << byte_offset << "~" << byte_length << ", "
<< "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to read requested byte range: "
<< cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
} else if (response.result() != boost::beast::http::status::partial_content) {
lderr(m_cct) << "failed to retrieve requested byte range: HTTP "
<< response.result() << dendl;
on_finish->complete(-EIO);
return;
} else if (byte_length != response.body().size()) {
lderr(m_cct) << "unexpected short range read: "
<< "wanted=" << byte_length << ", "
<< "received=" << response.body().size() << dendl;
on_finish->complete(-EINVAL);
return;
}
data->clear();
data->append(response.body());
on_finish->complete(data->length());
}
template <typename I>
void HttpClient<I>::issue(std::shared_ptr<Work>&& work) {
boost::asio::post(m_strand, [this, work=std::move(work)]() mutable {
m_http_session->issue(std::move(work)); });
}
template <typename I>
void HttpClient<I>::create_http_session(Context* on_finish) {
ldout(m_cct, 15) << dendl;
ceph_assert(m_http_session == nullptr);
switch (m_url_spec.scheme) {
case URL_SCHEME_HTTP:
m_http_session = std::make_unique<PlainHttpSession>(this);
break;
case URL_SCHEME_HTTPS:
m_http_session = std::make_unique<SslHttpSession>(this);
break;
default:
ceph_assert(false);
break;
}
m_http_session->init(on_finish);
}
template <typename I>
void HttpClient<I>::shut_down_http_session(Context* on_finish) {
ldout(m_cct, 15) << dendl;
if (m_http_session == nullptr) {
on_finish->complete(0);
return;
}
m_http_session->shut_down(on_finish);
}
} // namespace migration
} // namespace librbd
template class librbd::migration::HttpClient<librbd::ImageCtx>;
| 28,892 | 29.477848 | 80 | cc |
null | ceph-main/src/librbd/migration/HttpClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_HTTP_CLIENT_H
#define CEPH_LIBRBD_MIGRATION_HTTP_CLIENT_H
#include "include/common_fwd.h"
#include "include/int_types.h"
#include "librbd/io/Types.h"
#include "librbd/migration/HttpProcessorInterface.h"
#include "librbd/migration/Types.h"
#include <boost/asio/io_context.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl/context.hpp>
#include <boost/beast/version.hpp>
#include <boost/beast/core/tcp_stream.hpp>
#include <boost/beast/http/empty_body.hpp>
#include <boost/beast/http/message.hpp>
#include <boost/beast/http/string_body.hpp>
#include <boost/beast/http/write.hpp>
#include <boost/beast/ssl/ssl_stream.hpp>
#include <functional>
#include <memory>
#include <string>
#include <utility>
struct Context;
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace migration {
template <typename ImageCtxT>
class HttpClient {
public:
using EmptyBody = boost::beast::http::empty_body;
using StringBody = boost::beast::http::string_body;
using Request = boost::beast::http::request<EmptyBody>;
using Response = boost::beast::http::response<StringBody>;
using RequestPreprocessor = std::function<void(Request&)>;
static HttpClient* create(ImageCtxT* image_ctx, const std::string& url) {
return new HttpClient(image_ctx, url);
}
HttpClient(ImageCtxT* image_ctx, const std::string& url);
HttpClient(const HttpClient&) = delete;
HttpClient& operator=(const HttpClient&) = delete;
void open(Context* on_finish);
void close(Context* on_finish);
void get_size(uint64_t* size, Context* on_finish);
void read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish);
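  // Illustrative usage sketch (callback names are hypothetical and not part
  // of this API):
  //   auto* client = HttpClient<ImageCtx>::create(image_ctx, url);
  //   client->open(on_open);    // then get_size()/read() as required and
  //   client->close(on_close);  // close() once finished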
void set_ignore_self_signed_cert(bool ignore) {
m_ignore_self_signed_cert = ignore;
}
void set_http_processor(HttpProcessorInterface* http_processor) {
m_http_processor = http_processor;
}
template <class Body, typename Completion>
void issue(boost::beast::http::request<Body>&& request,
Completion&& completion) {
struct WorkImpl : Work {
HttpClient* http_client;
boost::beast::http::request<Body> request;
Completion completion;
WorkImpl(HttpClient* http_client,
boost::beast::http::request<Body>&& request,
Completion&& completion)
: http_client(http_client), request(std::move(request)),
completion(std::move(completion)) {
}
WorkImpl(const WorkImpl&) = delete;
WorkImpl& operator=(const WorkImpl&) = delete;
bool need_eof() const override {
return request.need_eof();
}
bool header_only() const override {
return (request.method() == boost::beast::http::verb::head);
}
void complete(int r, Response&& response) override {
completion(r, std::move(response));
}
void operator()(boost::beast::tcp_stream& stream) override {
preprocess_request();
boost::beast::http::async_write(
stream, request,
[http_session=http_client->m_http_session.get(),
work=this->shared_from_this()]
(boost::beast::error_code ec, std::size_t) mutable {
http_session->handle_issue(ec, std::move(work));
});
}
void operator()(
boost::beast::ssl_stream<boost::beast::tcp_stream>& stream) override {
preprocess_request();
boost::beast::http::async_write(
stream, request,
[http_session=http_client->m_http_session.get(),
work=this->shared_from_this()]
(boost::beast::error_code ec, std::size_t) mutable {
http_session->handle_issue(ec, std::move(work));
});
}
void preprocess_request() {
if (http_client->m_http_processor) {
http_client->m_http_processor->process_request(request);
}
}
};
initialize_default_fields(request);
issue(std::make_shared<WorkImpl>(this, std::move(request),
std::move(completion)));
}
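  // Minimal sketch of issuing a custom request (values are illustrative
  // only; the completion signature mirrors the one used by get_size() and
  // read()):
  //   Request req;
  //   req.method(boost::beast::http::verb::head);
  //   issue(std::move(req), [](int r, Response&& rsp) {
  //     // r < 0 indicates a transport error; otherwise inspect rsp.result()
  //   });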
private:
struct Work;
struct HttpSessionInterface {
virtual ~HttpSessionInterface() {}
virtual void init(Context* on_finish) = 0;
virtual void shut_down(Context* on_finish) = 0;
virtual void issue(std::shared_ptr<Work>&& work) = 0;
virtual void handle_issue(boost::system::error_code ec,
std::shared_ptr<Work>&& work) = 0;
};
struct Work : public std::enable_shared_from_this<Work> {
virtual ~Work() {}
virtual bool need_eof() const = 0;
virtual bool header_only() const = 0;
virtual void complete(int r, Response&&) = 0;
virtual void operator()(boost::beast::tcp_stream& stream) = 0;
virtual void operator()(
boost::beast::ssl_stream<boost::beast::tcp_stream>& stream) = 0;
};
template <typename D> struct HttpSession;
struct PlainHttpSession;
struct SslHttpSession;
CephContext* m_cct;
ImageCtxT* m_image_ctx;
std::shared_ptr<AsioEngine> m_asio_engine;
std::string m_url;
UrlSpec m_url_spec;
bool m_ignore_self_signed_cert = false;
HttpProcessorInterface* m_http_processor = nullptr;
boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
boost::asio::ssl::context m_ssl_context;
std::unique_ptr<HttpSessionInterface> m_http_session;
template <typename Fields>
void initialize_default_fields(Fields& fields) const {
fields.target(m_url_spec.path);
fields.set(boost::beast::http::field::host, m_url_spec.host);
fields.set(boost::beast::http::field::user_agent,
BOOST_BEAST_VERSION_STRING);
}
void handle_get_size(int r, Response&& response, uint64_t* size,
Context* on_finish);
void handle_read(int r, Response&& response, uint64_t byte_offset,
uint64_t byte_length, bufferlist* data, Context* on_finish);
void issue(std::shared_ptr<Work>&& work);
void create_http_session(Context* on_finish);
void shut_down_http_session(Context* on_finish);
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::HttpClient<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_HTTP_CLIENT_H
| 6,322 | 29.694175 | 80 | h |
null | ceph-main/src/librbd/migration/HttpProcessorInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_HTTP_PROCESSOR_INTERFACE_H
#define CEPH_LIBRBD_MIGRATION_HTTP_PROCESSOR_INTERFACE_H
#include <boost/beast/http/empty_body.hpp>
#include <boost/beast/http/message.hpp>
namespace librbd {
namespace migration {
struct HttpProcessorInterface {
using EmptyBody = boost::beast::http::empty_body;
using EmptyRequest = boost::beast::http::request<EmptyBody>;
virtual ~HttpProcessorInterface() {
}
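  // Invoked for every outbound request (when a processor is registered with
  // HttpClient) before the request is written to the wire, e.g. to inject
  // authentication or other custom headers.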
virtual void process_request(EmptyRequest& request) = 0;
};
} // namespace migration
} // namespace librbd
#endif // CEPH_LIBRBD_MIGRATION_HTTP_PROCESSOR_INTERFACE_H
| 702 | 24.107143 | 70 | h |
null | ceph-main/src/librbd/migration/HttpStream.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/HttpStream.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/Utils.h"
#include "librbd/migration/HttpClient.h"
#include <boost/beast/http.hpp>
namespace librbd {
namespace migration {
namespace {
const std::string URL_KEY {"url"};
} // anonymous namespace
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::HttpStream: " << this \
<< " " << __func__ << ": "
template <typename I>
HttpStream<I>::HttpStream(I* image_ctx, const json_spirit::mObject& json_object)
: m_image_ctx(image_ctx), m_cct(image_ctx->cct),
m_asio_engine(image_ctx->asio_engine), m_json_object(json_object) {
}
template <typename I>
HttpStream<I>::~HttpStream() {
}
template <typename I>
void HttpStream<I>::open(Context* on_finish) {
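  // only the "url" property of the stream spec is consumed here, e.g.
  // (illustrative value): {"url": "https://example.com/image.raw"}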
auto& url_value = m_json_object[URL_KEY];
if (url_value.type() != json_spirit::str_type) {
lderr(m_cct) << "failed to locate '" << URL_KEY << "' key" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_url = url_value.get_str();
ldout(m_cct, 10) << "url=" << m_url << dendl;
m_http_client.reset(HttpClient<I>::create(m_image_ctx, m_url));
m_http_client->open(on_finish);
}
template <typename I>
void HttpStream<I>::close(Context* on_finish) {
ldout(m_cct, 10) << dendl;
if (!m_http_client) {
on_finish->complete(0);
return;
}
m_http_client->close(on_finish);
}
template <typename I>
void HttpStream<I>::get_size(uint64_t* size, Context* on_finish) {
ldout(m_cct, 10) << dendl;
m_http_client->get_size(size, on_finish);
}
template <typename I>
void HttpStream<I>::read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) {
ldout(m_cct, 20) << "byte_extents=" << byte_extents << dendl;
m_http_client->read(std::move(byte_extents), data, on_finish);
}
} // namespace migration
} // namespace librbd
template class librbd::migration::HttpStream<librbd::ImageCtx>;
| 2,171 | 24.857143 | 80 | cc |
null | ceph-main/src/librbd/migration/HttpStream.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_HTTP_STREAM_H
#define CEPH_LIBRBD_MIGRATION_HTTP_STREAM_H
#include "include/int_types.h"
#include "librbd/migration/StreamInterface.h"
#include <boost/beast/http/message.hpp>
#include <boost/beast/http/string_body.hpp>
#include <json_spirit/json_spirit.h>
#include <memory>
#include <string>
struct Context;
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace migration {
template <typename> class HttpClient;
template <typename ImageCtxT>
class HttpStream : public StreamInterface {
public:
static HttpStream* create(ImageCtxT* image_ctx,
const json_spirit::mObject& json_object) {
return new HttpStream(image_ctx, json_object);
}
HttpStream(ImageCtxT* image_ctx, const json_spirit::mObject& json_object);
~HttpStream() override;
HttpStream(const HttpStream&) = delete;
HttpStream& operator=(const HttpStream&) = delete;
void open(Context* on_finish) override;
void close(Context* on_finish) override;
void get_size(uint64_t* size, Context* on_finish) override;
void read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) override;
private:
using HttpResponse = boost::beast::http::response<
boost::beast::http::string_body>;
ImageCtxT* m_image_ctx;
CephContext* m_cct;
std::shared_ptr<AsioEngine> m_asio_engine;
json_spirit::mObject m_json_object;
std::string m_url;
std::unique_ptr<HttpClient<ImageCtxT>> m_http_client;
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::HttpStream<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_HTTP_STREAM_H
| 1,750 | 24.376812 | 76 | h |
null | ceph-main/src/librbd/migration/ImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/ImageDispatch.h"
#include "include/neorados/RADOS.hpp"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/migration/FormatInterface.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::ImageDispatch: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace migration {
template <typename I>
ImageDispatch<I>::ImageDispatch(I* image_ctx,
std::unique_ptr<FormatInterface> format)
: m_image_ctx(image_ctx), m_format(std::move(format)) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "ictx=" << image_ctx << dendl;
}
template <typename I>
void ImageDispatch<I>::shut_down(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
on_finish->complete(0);
}
template <typename I>
bool ImageDispatch<I>::read(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
io::ReadResult &&read_result, IOContext io_context, int op_flags,
int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
return m_format->read(aio_comp, io_context->read_snap().value_or(CEPH_NOSNAP),
std::move(image_extents), std::move(read_result),
op_flags, read_flags, parent_trace);
}
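// All mutating I/O paths below (write/discard/write_same/compare_and_write)
// fail with -EROFS: a migration source is only ever read while its data is
// being copied to the destination image.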
template <typename I>
bool ImageDispatch<I>::write(
io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
lderr(cct) << dendl;
fail_io(-EROFS, aio_comp, dispatch_result);
return true;
}
template <typename I>
bool ImageDispatch<I>::discard(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
lderr(cct) << dendl;
fail_io(-EROFS, aio_comp, dispatch_result);
return true;
}
template <typename I>
bool ImageDispatch<I>::write_same(
io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
lderr(cct) << dendl;
fail_io(-EROFS, aio_comp, dispatch_result);
return true;
}
template <typename I>
bool ImageDispatch<I>::compare_and_write(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
lderr(cct) << dendl;
fail_io(-EROFS, aio_comp, dispatch_result);
return true;
}
template <typename I>
bool ImageDispatch<I>::flush(
io::AioCompletion* aio_comp, io::FlushSource flush_source,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
aio_comp->set_request_count(0);
return true;
}
template <typename I>
bool ImageDispatch<I>::list_snaps(
io::AioCompletion* aio_comp, io::Extents&& image_extents,
io::SnapIds&& snap_ids, int list_snaps_flags,
io::SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
aio_comp->set_request_count(1);
auto ctx = new io::C_AioRequest(aio_comp);
m_format->list_snaps(std::move(image_extents), std::move(snap_ids),
list_snaps_flags, snapshot_delta, parent_trace,
ctx);
return true;
}
template <typename I>
void ImageDispatch<I>::fail_io(int r, io::AioCompletion* aio_comp,
io::DispatchResult* dispatch_result) {
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
aio_comp->fail(r);
}
} // namespace migration
} // namespace librbd
template class librbd::migration::ImageDispatch<librbd::ImageCtx>;
| 5,231 | 32.324841 | 80 | cc |
null | ceph-main/src/librbd/migration/ImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_MIGRATION_IMAGE_DISPATCH_H
#include "librbd/io/ImageDispatchInterface.h"
#include <memory>
struct Context;
namespace librbd {
struct ImageCtx;
namespace migration {
struct FormatInterface;
template <typename ImageCtxT>
class ImageDispatch : public io::ImageDispatchInterface {
public:
static ImageDispatch* create(ImageCtxT* image_ctx,
std::unique_ptr<FormatInterface> source) {
return new ImageDispatch(image_ctx, std::move(source));
}
ImageDispatch(ImageCtxT* image_ctx, std::unique_ptr<FormatInterface> source);
void shut_down(Context* on_finish) override;
io::ImageDispatchLayer get_dispatch_layer() const override {
return io::IMAGE_DISPATCH_LAYER_MIGRATION;
}
bool read(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
io::ReadResult &&read_result, IOContext io_context, int op_flags,
int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool write(
io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool discard(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool write_same(
io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool compare_and_write(
io::AioCompletion* aio_comp, io::Extents &&image_extents,
bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool flush(
io::AioCompletion* aio_comp, io::FlushSource flush_source,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool list_snaps(
io::AioCompletion* aio_comp, io::Extents&& image_extents,
io::SnapIds&& snap_ids, int list_snaps_flags,
io::SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool invalidate_cache(Context* on_finish) override {
return false;
}
private:
ImageCtxT* m_image_ctx;
std::unique_ptr<FormatInterface> m_format;
void fail_io(int r, io::AioCompletion* aio_comp,
io::DispatchResult* dispatch_result);
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::ImageDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_IMAGE_DISPATCH_H
| 3,741 | 35.686275 | 80 | h |
null | ceph-main/src/librbd/migration/NativeFormat.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/NativeFormat.h"
#include "include/neorados/RADOS.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "json_spirit/json_spirit.h"
#include "boost/lexical_cast.hpp"
#include <sstream>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::NativeFormat: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace migration {
namespace {
const std::string TYPE_KEY{"type"};
const std::string POOL_ID_KEY{"pool_id"};
const std::string POOL_NAME_KEY{"pool_name"};
const std::string POOL_NAMESPACE_KEY{"pool_namespace"};
const std::string IMAGE_NAME_KEY{"image_name"};
const std::string IMAGE_ID_KEY{"image_id"};
const std::string SNAP_NAME_KEY{"snap_name"};
const std::string SNAP_ID_KEY{"snap_id"};
} // anonymous namespace
template <typename I>
std::string NativeFormat<I>::build_source_spec(
int64_t pool_id, const std::string& pool_namespace,
const std::string& image_name, const std::string& image_id) {
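  // produces a JSON document such as (illustrative values):
  //   {"type":"native","pool_id":2,"pool_namespace":"",
  //    "image_name":"foo","image_id":"10abcdef1234"}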
json_spirit::mObject source_spec;
source_spec[TYPE_KEY] = "native";
source_spec[POOL_ID_KEY] = pool_id;
source_spec[POOL_NAMESPACE_KEY] = pool_namespace;
source_spec[IMAGE_NAME_KEY] = image_name;
if (!image_id.empty()) {
source_spec[IMAGE_ID_KEY] = image_id;
}
return json_spirit::write(source_spec);
}
template <typename I>
NativeFormat<I>::NativeFormat(
I* image_ctx, const json_spirit::mObject& json_object, bool import_only)
: m_image_ctx(image_ctx), m_json_object(json_object),
m_import_only(import_only) {
}
template <typename I>
void NativeFormat<I>::open(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto& pool_name_val = m_json_object[POOL_NAME_KEY];
if (pool_name_val.type() == json_spirit::str_type) {
librados::Rados rados(m_image_ctx->md_ctx);
librados::IoCtx io_ctx;
int r = rados.ioctx_create(pool_name_val.get_str().c_str(), io_ctx);
    if (r < 0) {
lderr(cct) << "invalid pool name" << dendl;
on_finish->complete(r);
return;
}
m_pool_id = io_ctx.get_id();
} else if (pool_name_val.type() != json_spirit::null_type) {
lderr(cct) << "invalid pool name" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& pool_id_val = m_json_object[POOL_ID_KEY];
if (m_pool_id != -1 && pool_id_val.type() != json_spirit::null_type) {
lderr(cct) << "cannot specify both pool name and pool id" << dendl;
on_finish->complete(-EINVAL);
return;
} else if (pool_id_val.type() == json_spirit::int_type) {
m_pool_id = pool_id_val.get_int64();
} else if (pool_id_val.type() == json_spirit::str_type) {
try {
m_pool_id = boost::lexical_cast<int64_t>(pool_id_val.get_str());
} catch (boost::bad_lexical_cast &) {
}
}
if (m_pool_id == -1) {
lderr(cct) << "missing or invalid pool id" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& pool_namespace_val = m_json_object[POOL_NAMESPACE_KEY];
if (pool_namespace_val.type() == json_spirit::str_type) {
m_pool_namespace = pool_namespace_val.get_str();
} else if (pool_namespace_val.type() != json_spirit::null_type) {
lderr(cct) << "invalid pool namespace" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& image_name_val = m_json_object[IMAGE_NAME_KEY];
if (image_name_val.type() != json_spirit::str_type) {
lderr(cct) << "missing or invalid image name" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_image_name = image_name_val.get_str();
auto& image_id_val = m_json_object[IMAGE_ID_KEY];
if (image_id_val.type() == json_spirit::str_type) {
m_image_id = image_id_val.get_str();
} else if (image_id_val.type() != json_spirit::null_type) {
lderr(cct) << "invalid image id" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& snap_name_val = m_json_object[SNAP_NAME_KEY];
if (snap_name_val.type() == json_spirit::str_type) {
m_snap_name = snap_name_val.get_str();
} else if (snap_name_val.type() != json_spirit::null_type) {
lderr(cct) << "invalid snap name" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& snap_id_val = m_json_object[SNAP_ID_KEY];
if (!m_snap_name.empty() && snap_id_val.type() != json_spirit::null_type) {
lderr(cct) << "cannot specify both snap name and snap id" << dendl;
on_finish->complete(-EINVAL);
return;
} else if (snap_id_val.type() == json_spirit::str_type) {
try {
m_snap_id = boost::lexical_cast<uint64_t>(snap_id_val.get_str());
} catch (boost::bad_lexical_cast &) {
}
} else if (snap_id_val.type() == json_spirit::int_type) {
m_snap_id = snap_id_val.get_uint64();
}
if (snap_id_val.type() != json_spirit::null_type &&
m_snap_id == CEPH_NOSNAP) {
lderr(cct) << "invalid snap id" << dendl;
on_finish->complete(-EINVAL);
return;
}
// snapshot is required for import to keep source read-only
if (m_import_only && m_snap_name.empty() && m_snap_id == CEPH_NOSNAP) {
lderr(cct) << "snapshot required for import" << dendl;
on_finish->complete(-EINVAL);
return;
}
// TODO add support for external clusters
librados::IoCtx io_ctx;
int r = util::create_ioctx(m_image_ctx->md_ctx, "source image",
m_pool_id, m_pool_namespace, &io_ctx);
if (r < 0) {
on_finish->complete(r);
return;
}
m_image_ctx->md_ctx.dup(io_ctx);
m_image_ctx->data_ctx.dup(io_ctx);
m_image_ctx->name = m_image_name;
uint64_t flags = 0;
if (m_image_id.empty() && !m_import_only) {
flags |= OPEN_FLAG_OLD_FORMAT;
} else {
m_image_ctx->id = m_image_id;
}
if (m_image_ctx->child != nullptr) {
// set rados flags for reading the parent image
if (m_image_ctx->child->config.template get_val<bool>("rbd_balance_parent_reads")) {
m_image_ctx->set_read_flag(librados::OPERATION_BALANCE_READS);
} else if (m_image_ctx->child->config.template get_val<bool>("rbd_localize_parent_reads")) {
m_image_ctx->set_read_flag(librados::OPERATION_LOCALIZE_READS);
}
}
// open the source RBD image
on_finish = new LambdaContext([this, on_finish](int r) {
handle_open(r, on_finish); });
m_image_ctx->state->open(flags, on_finish);
}
template <typename I>
void NativeFormat<I>::handle_open(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
if (m_snap_id == CEPH_NOSNAP && m_snap_name.empty()) {
on_finish->complete(0);
return;
}
if (!m_snap_name.empty()) {
std::shared_lock image_locker{m_image_ctx->image_lock};
m_snap_id = m_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace{},
m_snap_name);
}
if (m_snap_id == CEPH_NOSNAP) {
lderr(cct) << "failed to locate snapshot " << m_snap_name << dendl;
on_finish = new LambdaContext([on_finish](int) {
on_finish->complete(-ENOENT); });
m_image_ctx->state->close(on_finish);
return;
}
on_finish = new LambdaContext([this, on_finish](int r) {
handle_snap_set(r, on_finish); });
m_image_ctx->state->snap_set(m_snap_id, on_finish);
}
template <typename I>
void NativeFormat<I>::handle_snap_set(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to set snapshot " << m_snap_id << ": "
<< cpp_strerror(r) << dendl;
on_finish = new LambdaContext([r, on_finish](int) {
on_finish->complete(r); });
m_image_ctx->state->close(on_finish);
return;
}
on_finish->complete(0);
}
template <typename I>
void NativeFormat<I>::close(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
// the native librbd::image::CloseRequest handles all cleanup
on_finish->complete(0);
}
template <typename I>
void NativeFormat<I>::get_snapshots(SnapInfos* snap_infos, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
m_image_ctx->image_lock.lock_shared();
*snap_infos = m_image_ctx->snap_info;
m_image_ctx->image_lock.unlock_shared();
on_finish->complete(0);
}
template <typename I>
void NativeFormat<I>::get_image_size(uint64_t snap_id, uint64_t* size,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
m_image_ctx->image_lock.lock_shared();
*size = m_image_ctx->get_image_size(snap_id);
m_image_ctx->image_lock.unlock_shared();
on_finish->complete(0);
}
template <typename I>
void NativeFormat<I>::list_snaps(io::Extents&& image_extents,
io::SnapIds&& snap_ids, int list_snaps_flags,
io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, util::get_image_ctx(m_image_ctx), io::AIO_TYPE_GENERIC);
auto req = io::ImageDispatchSpec::create_list_snaps(
*m_image_ctx, io::IMAGE_DISPATCH_LAYER_MIGRATION, aio_comp,
std::move(image_extents), io::ImageArea::DATA, std::move(snap_ids),
list_snaps_flags, snapshot_delta, {});
req->send();
}
} // namespace migration
} // namespace librbd
template class librbd::migration::NativeFormat<librbd::ImageCtx>;
| 9,909 | 30.967742 | 96 | cc |
null | ceph-main/src/librbd/migration/NativeFormat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_NATIVE_FORMAT_H
#define CEPH_LIBRBD_MIGRATION_NATIVE_FORMAT_H
#include "include/int_types.h"
#include "librbd/Types.h"
#include "librbd/migration/FormatInterface.h"
#include "json_spirit/json_spirit.h"
#include <memory>
struct Context;
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace migration {
template <typename ImageCtxT>
class NativeFormat : public FormatInterface {
public:
static std::string build_source_spec(int64_t pool_id,
const std::string& pool_namespace,
const std::string& image_name,
const std::string& image_id);
static NativeFormat* create(ImageCtxT* image_ctx,
const json_spirit::mObject& json_object,
bool import_only) {
return new NativeFormat(image_ctx, json_object, import_only);
}
NativeFormat(ImageCtxT* image_ctx, const json_spirit::mObject& json_object,
bool import_only);
NativeFormat(const NativeFormat&) = delete;
NativeFormat& operator=(const NativeFormat&) = delete;
void open(Context* on_finish) override;
void close(Context* on_finish) override;
void get_snapshots(SnapInfos* snap_infos, Context* on_finish) override;
void get_image_size(uint64_t snap_id, uint64_t* size,
Context* on_finish) override;
bool read(io::AioCompletion* aio_comp, uint64_t snap_id,
io::Extents&& image_extents, io::ReadResult&& read_result,
int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) override {
return false;
}
void list_snaps(io::Extents&& image_extents, io::SnapIds&& snap_ids,
int list_snaps_flags, io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace,
Context* on_finish) override;
private:
ImageCtxT* m_image_ctx;
json_spirit::mObject m_json_object;
bool m_import_only;
int64_t m_pool_id = -1;
std::string m_pool_namespace;
std::string m_image_name;
std::string m_image_id;
std::string m_snap_name;
uint64_t m_snap_id = CEPH_NOSNAP;
void handle_open(int r, Context* on_finish);
void handle_snap_set(int r, Context* on_finish);
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::NativeFormat<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_NATIVE_FORMAT_H
| 2,579 | 30.084337 | 77 | h |
null | ceph-main/src/librbd/migration/OpenSourceImageRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/OpenSourceImageRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Utils.h"
#include "librbd/io/ImageDispatcher.h"
#include "librbd/migration/ImageDispatch.h"
#include "librbd/migration/NativeFormat.h"
#include "librbd/migration/SourceSpecBuilder.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::OpenSourceImageRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace migration {
template <typename I>
OpenSourceImageRequest<I>::OpenSourceImageRequest(
librados::IoCtx& io_ctx, I* dst_image_ctx, uint64_t src_snap_id,
const MigrationInfo &migration_info, I** src_image_ctx, Context* on_finish)
: m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())), m_io_ctx(io_ctx),
m_dst_image_ctx(dst_image_ctx), m_src_snap_id(src_snap_id),
m_migration_info(migration_info), m_src_image_ctx(src_image_ctx),
m_on_finish(on_finish) {
ldout(m_cct, 10) << dendl;
}
template <typename I>
void OpenSourceImageRequest<I>::send() {
open_source();
}
template <typename I>
void OpenSourceImageRequest<I>::open_source() {
ldout(m_cct, 10) << dendl;
// note that all source image ctx properties are placeholders
*m_src_image_ctx = I::create("", "", CEPH_NOSNAP, m_io_ctx, true);
auto src_image_ctx = *m_src_image_ctx;
src_image_ctx->child = m_dst_image_ctx;
// use default layout values (can be overridden by source layers later)
src_image_ctx->order = 22;
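  // (order 22 => 4 MiB stripe unit / object size below)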
src_image_ctx->layout = file_layout_t();
src_image_ctx->layout.stripe_count = 1;
src_image_ctx->layout.stripe_unit = 1ULL << src_image_ctx->order;
  src_image_ctx->layout.object_size = 1ULL << src_image_ctx->order;
src_image_ctx->layout.pool_id = -1;
bool import_only = true;
auto source_spec = m_migration_info.source_spec;
if (source_spec.empty()) {
    // implies legacy migration from an RBD image in the same cluster
source_spec = NativeFormat<I>::build_source_spec(
m_migration_info.pool_id, m_migration_info.pool_namespace,
m_migration_info.image_name, m_migration_info.image_id);
import_only = false;
}
ldout(m_cct, 15) << "source_spec=" << source_spec << ", "
<< "source_snap_id=" << m_src_snap_id << ", "
<< "import_only=" << import_only << dendl;
SourceSpecBuilder<I> source_spec_builder{src_image_ctx};
json_spirit::mObject source_spec_object;
int r = source_spec_builder.parse_source_spec(source_spec,
&source_spec_object);
if (r < 0) {
lderr(m_cct) << "failed to parse migration source-spec:" << cpp_strerror(r)
<< dendl;
(*m_src_image_ctx)->state->close();
finish(r);
return;
}
r = source_spec_builder.build_format(source_spec_object, import_only,
&m_format);
if (r < 0) {
lderr(m_cct) << "failed to build migration format handler: "
<< cpp_strerror(r) << dendl;
(*m_src_image_ctx)->state->close();
finish(r);
return;
}
auto ctx = util::create_context_callback<
OpenSourceImageRequest<I>,
&OpenSourceImageRequest<I>::handle_open_source>(this);
m_format->open(ctx);
}
template <typename I>
void OpenSourceImageRequest<I>::handle_open_source(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to open migration source: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
get_image_size();
}
template <typename I>
void OpenSourceImageRequest<I>::get_image_size() {
ldout(m_cct, 10) << dendl;
auto ctx = util::create_context_callback<
OpenSourceImageRequest<I>,
&OpenSourceImageRequest<I>::handle_get_image_size>(this);
m_format->get_image_size(CEPH_NOSNAP, &m_image_size, ctx);
}
template <typename I>
void OpenSourceImageRequest<I>::handle_get_image_size(int r) {
ldout(m_cct, 10) << "r=" << r << ", "
<< "image_size=" << m_image_size << dendl;
if (r < 0) {
lderr(m_cct) << "failed to retrieve image size: " << cpp_strerror(r)
<< dendl;
close_image(r);
return;
}
auto src_image_ctx = *m_src_image_ctx;
src_image_ctx->image_lock.lock();
src_image_ctx->size = m_image_size;
src_image_ctx->image_lock.unlock();
get_snapshots();
}
template <typename I>
void OpenSourceImageRequest<I>::get_snapshots() {
ldout(m_cct, 10) << dendl;
auto ctx = util::create_context_callback<
OpenSourceImageRequest<I>,
&OpenSourceImageRequest<I>::handle_get_snapshots>(this);
m_format->get_snapshots(&m_snap_infos, ctx);
}
template <typename I>
void OpenSourceImageRequest<I>::handle_get_snapshots(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to retrieve snapshots: " << cpp_strerror(r)
<< dendl;
close_image(r);
return;
}
// copy snapshot metadata to image ctx
auto src_image_ctx = *m_src_image_ctx;
src_image_ctx->image_lock.lock();
src_image_ctx->snaps.clear();
src_image_ctx->snap_info.clear();
src_image_ctx->snap_ids.clear();
::SnapContext snapc;
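  // SnapContext expects snapshot ids in descending order with "seq" set to
  // the newest id, hence the reverse iteration over the id-ordered map.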
for (auto it = m_snap_infos.rbegin(); it != m_snap_infos.rend(); ++it) {
auto& [snap_id, snap_info] = *it;
snapc.snaps.push_back(snap_id);
ldout(m_cct, 10) << "adding snap: ns=" << snap_info.snap_namespace << ", "
<< "name=" << snap_info.name << ", "
<< "id=" << snap_id << dendl;
src_image_ctx->add_snap(
snap_info.snap_namespace, snap_info.name, snap_id,
snap_info.size, snap_info.parent, snap_info.protection_status,
snap_info.flags, snap_info.timestamp);
}
if (!snapc.snaps.empty()) {
snapc.seq = snapc.snaps[0];
}
src_image_ctx->snapc = snapc;
ldout(m_cct, 15) << "read snap id: " << m_src_snap_id << ", "
<< "write snapc={"
<< "seq=" << snapc.seq << ", "
<< "snaps=" << snapc.snaps << "}" << dendl;
// ensure data_ctx and data_io_context are pointing to correct snapshot
if (m_src_snap_id != CEPH_NOSNAP) {
int r = src_image_ctx->snap_set(m_src_snap_id);
if (r < 0) {
src_image_ctx->image_lock.unlock();
lderr(m_cct) << "error setting source image snap id: "
<< cpp_strerror(r) << dendl;
finish(r);
return;
}
}
src_image_ctx->image_lock.unlock();
finish(0);
}
template <typename I>
void OpenSourceImageRequest<I>::close_image(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
auto ctx = new LambdaContext([this, r](int) {
finish(r);
});
(*m_src_image_ctx)->state->close(ctx);
}
template <typename I>
void OpenSourceImageRequest<I>::register_image_dispatch() {
ldout(m_cct, 10) << dendl;
// intercept any IO requests to the source image
auto io_image_dispatch = ImageDispatch<I>::create(
*m_src_image_ctx, std::move(m_format));
(*m_src_image_ctx)->io_image_dispatcher->register_dispatch(io_image_dispatch);
}
template <typename I>
void OpenSourceImageRequest<I>::finish(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
*m_src_image_ctx = nullptr;
} else {
register_image_dispatch();
}
m_on_finish->complete(r);
delete this;
}
} // namespace migration
} // namespace librbd
template class librbd::migration::OpenSourceImageRequest<librbd::ImageCtx>;
| 7,603 | 29.416 | 80 | cc |
null | ceph-main/src/librbd/migration/OpenSourceImageRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_OPEN_SOURCE_IMAGE_REQUEST_H
#define CEPH_LIBRBD_MIGRATION_OPEN_SOURCE_IMAGE_REQUEST_H
#include "include/rados/librados_fwd.hpp"
#include "librbd/Types.h"
#include <map>
#include <memory>
struct Context;
namespace librbd {
struct ImageCtx;
namespace migration {
struct FormatInterface;
template <typename ImageCtxT>
class OpenSourceImageRequest {
public:
static OpenSourceImageRequest* create(librados::IoCtx& io_ctx,
ImageCtxT* destination_image_ctx,
uint64_t src_snap_id,
const MigrationInfo &migration_info,
ImageCtxT** source_image_ctx,
Context* on_finish) {
return new OpenSourceImageRequest(io_ctx, destination_image_ctx,
src_snap_id, migration_info,
source_image_ctx, on_finish);
}
OpenSourceImageRequest(librados::IoCtx& io_ctx,
ImageCtxT* destination_image_ctx,
uint64_t src_snap_id,
const MigrationInfo &migration_info,
ImageCtxT** source_image_ctx,
Context* on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* OPEN_SOURCE
* |
* v
* GET_IMAGE_SIZE * * * * * * *
* | *
* v v
* GET_SNAPSHOTS * * * * > CLOSE_IMAGE
* | |
* v |
* <finish> <------------------/
*
* @endverbatim
*/
typedef std::map<uint64_t, SnapInfo> SnapInfos;
CephContext* m_cct;
librados::IoCtx& m_io_ctx;
ImageCtxT* m_dst_image_ctx;
uint64_t m_src_snap_id;
MigrationInfo m_migration_info;
ImageCtxT** m_src_image_ctx;
Context* m_on_finish;
std::unique_ptr<FormatInterface> m_format;
uint64_t m_image_size = 0;
SnapInfos m_snap_infos;
void open_source();
void handle_open_source(int r);
void get_image_size();
void handle_get_image_size(int r);
void get_snapshots();
void handle_get_snapshots(int r);
void close_image(int r);
void register_image_dispatch();
void finish(int r);
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::OpenSourceImageRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_OPEN_SOURCE_IMAGE_REQUEST_H
| 2,644 | 24.432692 | 82 | h |
null | ceph-main/src/librbd/migration/QCOW.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/* Based on QEMU block/qcow.cc and block/qcow2.h, which has this license: */
/*
* Block driver for the QCOW version 2 format
*
* Copyright (c) 2004-2006 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef CEPH_LIBRBD_MIGRATION_QCOW2_H
#define CEPH_LIBRBD_MIGRATION_QCOW2_H
#include "include/ceph_assert.h"
#include "include/int_types.h"
#include "librbd/migration/QCOW.h"
#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
#define QCOW_CRYPT_NONE 0
#define QCOW_CRYPT_AES 1
#define QCOW_CRYPT_LUKS 2
#define QCOW_MAX_CRYPT_CLUSTERS 32
#define QCOW_MAX_SNAPSHOTS 65536
/* Field widths in qcow2 mean normal cluster offsets cannot reach
* 64PB; depending on cluster size, compressed clusters can have a
* smaller limit (64PB for up to 16k clusters, then ramps down to
* 512TB for 2M clusters). */
#define QCOW_MAX_CLUSTER_OFFSET ((1ULL << 56) - 1)
/* 8 MB refcount table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
#define QCOW_MAX_REFTABLE_SIZE (1ULL << 23)
/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
#define QCOW_MAX_L1_SIZE (1ULL << 25)
/* Allow for an average of 1k per snapshot table entry, should be plenty of
* space for snapshot names and IDs */
#define QCOW_MAX_SNAPSHOTS_SIZE (1024 * QCOW_MAX_SNAPSHOTS)
/* Maximum amount of extra data per snapshot table entry to accept */
#define QCOW_MAX_SNAPSHOT_EXTRA_DATA 1024
/* Bitmap header extension constraints */
#define QCOW2_MAX_BITMAPS 65535
#define QCOW2_MAX_BITMAP_DIRECTORY_SIZE (1024 * QCOW2_MAX_BITMAPS)
/* Maximum of parallel sub-request per guest request */
#define QCOW2_MAX_WORKERS 8
/* indicate that the refcount of the referenced cluster is exactly one. */
#define QCOW_OFLAG_COPIED (1ULL << 63)
/* indicate that the cluster is compressed (they never have the copied flag) */
#define QCOW_OFLAG_COMPRESSED (1ULL << 62)
/* The cluster reads as all zeros */
#define QCOW_OFLAG_ZERO (1ULL << 0)
#define QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER 32
/* The subcluster X [0..31] is allocated */
#define QCOW_OFLAG_SUB_ALLOC(X) (1ULL << (X))
/* The subcluster X [0..31] reads as zeroes */
#define QCOW_OFLAG_SUB_ZERO(X) (QCOW_OFLAG_SUB_ALLOC(X) << 32)
/* Subclusters [X, Y) (0 <= X <= Y <= 32) are allocated */
#define QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y) \
(QCOW_OFLAG_SUB_ALLOC(Y) - QCOW_OFLAG_SUB_ALLOC(X))
/* Subclusters [X, Y) (0 <= X <= Y <= 32) read as zeroes */
#define QCOW_OFLAG_SUB_ZERO_RANGE(X, Y) \
(QCOW_OFLAG_SUB_ALLOC_RANGE(X, Y) << 32)
/* L2 entry bitmap with all allocation bits set */
#define QCOW_L2_BITMAP_ALL_ALLOC (QCOW_OFLAG_SUB_ALLOC_RANGE(0, 32))
/* L2 entry bitmap with all "read as zeroes" bits set */
#define QCOW_L2_BITMAP_ALL_ZEROES (QCOW_OFLAG_SUB_ZERO_RANGE(0, 32))
/* Size of normal and extended L2 entries */
#define QCOW_L2E_SIZE_NORMAL (sizeof(uint64_t))
#define QCOW_L2E_SIZE_EXTENDED (sizeof(uint64_t) * 2)
/* Size of L1 table entries */
#define QCOW_L1E_SIZE (sizeof(uint64_t))
/* Size of reftable entries */
#define QCOW_REFTABLE_ENTRY_SIZE (sizeof(uint64_t))
#define QCOW_MIN_CLUSTER_BITS 9
#define QCOW_MAX_CLUSTER_BITS 21
/* Defined in the qcow2 spec (compressed cluster descriptor) */
#define QCOW2_COMPRESSED_SECTOR_SIZE 512U
#define QCOW2_COMPRESSED_SECTOR_MASK (~(QCOW2_COMPRESSED_SECTOR_SIZE - 1ULL))
#define QCOW_L2_CACHE_SIZE 16
/* Must be at least 2 to cover COW */
#define QCOW_MIN_L2_CACHE_SIZE 2 /* cache entries */
/* Must be at least 4 to cover all cases of refcount table growth */
#define QCOW_MIN_REFCOUNT_CACHE_SIZE 4 /* clusters */
#define QCOW_DEFAULT_L2_CACHE_MAX_SIZE (1ULL << 25)
#define QCOW_DEFAULT_CACHE_CLEAN_INTERVAL 600 /* seconds */
#define QCOW_DEFAULT_CLUSTER_SIZE 65536
#define QCOW2_OPT_DATA_FILE "data-file"
#define QCOW2_OPT_LAZY_REFCOUNTS "lazy-refcounts"
#define QCOW2_OPT_DISCARD_REQUEST "pass-discard-request"
#define QCOW2_OPT_DISCARD_SNAPSHOT "pass-discard-snapshot"
#define QCOW2_OPT_DISCARD_OTHER "pass-discard-other"
#define QCOW2_OPT_OVERLAP "overlap-check"
#define QCOW2_OPT_OVERLAP_TEMPLATE "overlap-check.template"
#define QCOW2_OPT_OVERLAP_MAIN_HEADER "overlap-check.main-header"
#define QCOW2_OPT_OVERLAP_ACTIVE_L1 "overlap-check.active-l1"
#define QCOW2_OPT_OVERLAP_ACTIVE_L2 "overlap-check.active-l2"
#define QCOW2_OPT_OVERLAP_REFCOUNT_TABLE "overlap-check.refcount-table"
#define QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK "overlap-check.refcount-block"
#define QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE "overlap-check.snapshot-table"
#define QCOW2_OPT_OVERLAP_INACTIVE_L1 "overlap-check.inactive-l1"
#define QCOW2_OPT_OVERLAP_INACTIVE_L2 "overlap-check.inactive-l2"
#define QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY "overlap-check.bitmap-directory"
#define QCOW2_OPT_CACHE_SIZE "cache-size"
#define QCOW2_OPT_L2_CACHE_SIZE "l2-cache-size"
#define QCOW2_OPT_L2_CACHE_ENTRY_SIZE "l2-cache-entry-size"
#define QCOW2_OPT_REFCOUNT_CACHE_SIZE "refcount-cache-size"
#define QCOW2_OPT_CACHE_CLEAN_INTERVAL "cache-clean-interval"
typedef struct QCowHeaderProbe {
uint32_t magic;
uint32_t version;
} __attribute__((__packed__)) QCowHeaderProbe;
typedef struct QCowHeaderV1
{
uint32_t magic;
uint32_t version;
uint64_t backing_file_offset;
uint32_t backing_file_size;
uint32_t mtime;
uint64_t size; /* in bytes */
uint8_t cluster_bits;
uint8_t l2_bits;
uint16_t padding;
uint32_t crypt_method;
uint64_t l1_table_offset;
} __attribute__((__packed__)) QCowHeaderV1;
typedef struct QCowHeader {
uint32_t magic;
uint32_t version;
uint64_t backing_file_offset;
uint32_t backing_file_size;
uint32_t cluster_bits;
uint64_t size; /* in bytes */
uint32_t crypt_method;
uint32_t l1_size; /* XXX: save number of clusters instead ? */
uint64_t l1_table_offset;
uint64_t refcount_table_offset;
uint32_t refcount_table_clusters;
uint32_t nb_snapshots;
uint64_t snapshots_offset;
/* The following fields are only valid for version >= 3 */
uint64_t incompatible_features;
uint64_t compatible_features;
uint64_t autoclear_features;
uint32_t refcount_order;
uint32_t header_length;
/* Additional fields */
uint8_t compression_type;
/* header must be a multiple of 8 */
uint8_t padding[7];
} __attribute__((__packed__)) QCowHeader;
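/* All multi-byte fields above are stored big-endian on disk; readers are
 * expected to byte-swap them after loading (QCOWFormat.cc uses
 * boost::endian::big_to_native for this). */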
typedef struct QCowSnapshotHeader {
/* header is 8 byte aligned */
uint64_t l1_table_offset;
uint32_t l1_size;
uint16_t id_str_size;
uint16_t name_size;
uint32_t date_sec;
uint32_t date_nsec;
uint64_t vm_clock_nsec;
uint32_t vm_state_size;
uint32_t extra_data_size; /* for extension */
/* extra data follows */
/* id_str follows */
/* name follows */
} __attribute__((__packed__)) QCowSnapshotHeader;
typedef struct QCowSnapshotExtraData {
uint64_t vm_state_size_large;
uint64_t disk_size;
uint64_t icount;
} __attribute__((__packed__)) QCowSnapshotExtraData;
typedef struct QCowSnapshot {
uint64_t l1_table_offset;
uint32_t l1_size;
char *id_str;
char *name;
uint64_t disk_size;
uint64_t vm_state_size;
uint32_t date_sec;
uint32_t date_nsec;
uint64_t vm_clock_nsec;
/* icount value for the moment when snapshot was taken */
uint64_t icount;
/* Size of all extra data, including QCowSnapshotExtraData if available */
uint32_t extra_data_size;
/* Data beyond QCowSnapshotExtraData, if any */
void *unknown_extra_data;
} QCowSnapshot;
typedef struct Qcow2CryptoHeaderExtension {
uint64_t offset;
uint64_t length;
} __attribute__((__packed__)) Qcow2CryptoHeaderExtension;
typedef struct Qcow2UnknownHeaderExtension {
uint32_t magic;
uint32_t len;
uint8_t data[];
} Qcow2UnknownHeaderExtension;
enum {
QCOW2_FEAT_TYPE_INCOMPATIBLE = 0,
QCOW2_FEAT_TYPE_COMPATIBLE = 1,
QCOW2_FEAT_TYPE_AUTOCLEAR = 2,
};
/* Incompatible feature bits */
enum {
QCOW2_INCOMPAT_DIRTY_BITNR = 0,
QCOW2_INCOMPAT_CORRUPT_BITNR = 1,
QCOW2_INCOMPAT_DATA_FILE_BITNR = 2,
QCOW2_INCOMPAT_COMPRESSION_BITNR = 3,
QCOW2_INCOMPAT_EXTL2_BITNR = 4,
QCOW2_INCOMPAT_DIRTY = 1 << QCOW2_INCOMPAT_DIRTY_BITNR,
QCOW2_INCOMPAT_CORRUPT = 1 << QCOW2_INCOMPAT_CORRUPT_BITNR,
QCOW2_INCOMPAT_DATA_FILE = 1 << QCOW2_INCOMPAT_DATA_FILE_BITNR,
QCOW2_INCOMPAT_COMPRESSION = 1 << QCOW2_INCOMPAT_COMPRESSION_BITNR,
QCOW2_INCOMPAT_EXTL2 = 1 << QCOW2_INCOMPAT_EXTL2_BITNR,
QCOW2_INCOMPAT_MASK = QCOW2_INCOMPAT_DIRTY
| QCOW2_INCOMPAT_CORRUPT
| QCOW2_INCOMPAT_DATA_FILE
| QCOW2_INCOMPAT_COMPRESSION
| QCOW2_INCOMPAT_EXTL2,
};
/* Compatible feature bits */
enum {
QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR = 0,
QCOW2_COMPAT_LAZY_REFCOUNTS = 1 << QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
QCOW2_COMPAT_FEAT_MASK = QCOW2_COMPAT_LAZY_REFCOUNTS,
};
/* Autoclear feature bits */
enum {
QCOW2_AUTOCLEAR_BITMAPS_BITNR = 0,
QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR = 1,
QCOW2_AUTOCLEAR_BITMAPS = 1 << QCOW2_AUTOCLEAR_BITMAPS_BITNR,
QCOW2_AUTOCLEAR_DATA_FILE_RAW = 1 << QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR,
QCOW2_AUTOCLEAR_MASK = QCOW2_AUTOCLEAR_BITMAPS
| QCOW2_AUTOCLEAR_DATA_FILE_RAW,
};
enum qcow2_discard_type {
QCOW2_DISCARD_NEVER = 0,
QCOW2_DISCARD_ALWAYS,
QCOW2_DISCARD_REQUEST,
QCOW2_DISCARD_SNAPSHOT,
QCOW2_DISCARD_OTHER,
QCOW2_DISCARD_MAX
};
typedef struct Qcow2Feature {
uint8_t type;
uint8_t bit;
char name[46];
} __attribute__((__packed__)) Qcow2Feature;
typedef struct Qcow2DiscardRegion {
uint64_t offset;
uint64_t bytes;
} Qcow2DiscardRegion;
typedef uint64_t Qcow2GetRefcountFunc(const void *refcount_array,
uint64_t index);
typedef void Qcow2SetRefcountFunc(void *refcount_array,
uint64_t index, uint64_t value);
typedef struct Qcow2BitmapHeaderExt {
uint32_t nb_bitmaps;
uint32_t reserved32;
uint64_t bitmap_directory_size;
uint64_t bitmap_directory_offset;
} __attribute__((__packed__)) Qcow2BitmapHeaderExt;
#define QCOW_RC_CACHE_SIZE QCOW_L2_CACHE_SIZE;
typedef struct Qcow2COWRegion {
/**
* Offset of the COW region in bytes from the start of the first cluster
* touched by the request.
*/
unsigned offset;
/** Number of bytes to copy */
unsigned nb_bytes;
} Qcow2COWRegion;
/**
* Describes an in-flight (part of a) write request that writes to clusters
* that are not referenced in their L2 table yet.
*/
typedef struct QCowL2Meta
{
/** Guest offset of the first newly allocated cluster */
uint64_t offset;
/** Host offset of the first newly allocated cluster */
uint64_t alloc_offset;
/** Number of newly allocated clusters */
int nb_clusters;
/** Do not free the old clusters */
bool keep_old_clusters;
/**
* The COW Region between the start of the first allocated cluster and the
* area the guest actually writes to.
*/
Qcow2COWRegion cow_start;
/**
* The COW Region between the area the guest actually writes to and the
* end of the last allocated cluster.
*/
Qcow2COWRegion cow_end;
/*
* Indicates that COW regions are already handled and do not require
* any more processing.
*/
bool skip_cow;
/**
* Indicates that this is not a normal write request but a preallocation.
* If the image has extended L2 entries this means that no new individual
* subclusters will be marked as allocated in the L2 bitmap (but any
* existing contents of that bitmap will be kept).
*/
bool prealloc;
/** Pointer to next L2Meta of the same write request */
struct QCowL2Meta *next;
} QCowL2Meta;
typedef enum QCow2ClusterType {
QCOW2_CLUSTER_UNALLOCATED,
QCOW2_CLUSTER_ZERO_PLAIN,
QCOW2_CLUSTER_ZERO_ALLOC,
QCOW2_CLUSTER_NORMAL,
QCOW2_CLUSTER_COMPRESSED,
} QCow2ClusterType;
typedef enum QCow2MetadataOverlap {
QCOW2_OL_MAIN_HEADER_BITNR = 0,
QCOW2_OL_ACTIVE_L1_BITNR = 1,
QCOW2_OL_ACTIVE_L2_BITNR = 2,
QCOW2_OL_REFCOUNT_TABLE_BITNR = 3,
QCOW2_OL_REFCOUNT_BLOCK_BITNR = 4,
QCOW2_OL_SNAPSHOT_TABLE_BITNR = 5,
QCOW2_OL_INACTIVE_L1_BITNR = 6,
QCOW2_OL_INACTIVE_L2_BITNR = 7,
QCOW2_OL_BITMAP_DIRECTORY_BITNR = 8,
QCOW2_OL_MAX_BITNR = 9,
QCOW2_OL_NONE = 0,
QCOW2_OL_MAIN_HEADER = (1 << QCOW2_OL_MAIN_HEADER_BITNR),
QCOW2_OL_ACTIVE_L1 = (1 << QCOW2_OL_ACTIVE_L1_BITNR),
QCOW2_OL_ACTIVE_L2 = (1 << QCOW2_OL_ACTIVE_L2_BITNR),
QCOW2_OL_REFCOUNT_TABLE = (1 << QCOW2_OL_REFCOUNT_TABLE_BITNR),
QCOW2_OL_REFCOUNT_BLOCK = (1 << QCOW2_OL_REFCOUNT_BLOCK_BITNR),
QCOW2_OL_SNAPSHOT_TABLE = (1 << QCOW2_OL_SNAPSHOT_TABLE_BITNR),
QCOW2_OL_INACTIVE_L1 = (1 << QCOW2_OL_INACTIVE_L1_BITNR),
/* NOTE: Checking overlaps with inactive L2 tables will result in bdrv
* reads. */
QCOW2_OL_INACTIVE_L2 = (1 << QCOW2_OL_INACTIVE_L2_BITNR),
QCOW2_OL_BITMAP_DIRECTORY = (1 << QCOW2_OL_BITMAP_DIRECTORY_BITNR),
} QCow2MetadataOverlap;
/* Perform all overlap checks which can be done in constant time */
#define QCOW2_OL_CONSTANT \
(QCOW2_OL_MAIN_HEADER | QCOW2_OL_ACTIVE_L1 | QCOW2_OL_REFCOUNT_TABLE | \
QCOW2_OL_SNAPSHOT_TABLE | QCOW2_OL_BITMAP_DIRECTORY)
/* Perform all overlap checks which don't require disk access */
#define QCOW2_OL_CACHED \
(QCOW2_OL_CONSTANT | QCOW2_OL_ACTIVE_L2 | QCOW2_OL_REFCOUNT_BLOCK | \
QCOW2_OL_INACTIVE_L1)
/* Perform all overlap checks */
#define QCOW2_OL_ALL \
(QCOW2_OL_CACHED | QCOW2_OL_INACTIVE_L2)
#define QCOW_L1E_OFFSET_MASK 0x00fffffffffffe00ULL
#define QCOW_L2E_OFFSET_MASK 0x00fffffffffffe00ULL
#define QCOW_L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL
#define REFT_OFFSET_MASK 0xfffffffffffffe00ULL
#define INV_OFFSET (-1ULL)
static inline uint64_t l2meta_cow_start(QCowL2Meta *m)
{
return m->offset + m->cow_start.offset;
}
static inline uint64_t l2meta_cow_end(QCowL2Meta *m)
{
return m->offset + m->cow_end.offset + m->cow_end.nb_bytes;
}
static inline uint64_t refcount_diff(uint64_t r1, uint64_t r2)
{
return r1 > r2 ? r1 - r2 : r2 - r1;
}
#endif // CEPH_LIBRBD_MIGRATION_QCOW2_H
| 15,822 | 32.882227 | 83 | h |
null | ceph-main/src/librbd/migration/QCOWFormat.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/QCOWFormat.h"
#include "common/Clock.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/intarith.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include "librbd/migration/SnapshotInterface.h"
#include "librbd/migration/SourceSpecBuilder.h"
#include "librbd/migration/StreamInterface.h"
#include "librbd/migration/Utils.h"
#include <boost/asio/dispatch.hpp>
#include <boost/asio/post.hpp>
#include <boost/endian/conversion.hpp>
#include <deque>
#include <tuple>
#include <unordered_map>
#include <vector>
#define dout_subsys ceph_subsys_rbd
namespace librbd {
namespace migration {
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::QCOWFormat: " \
<< __func__ << ": "
using boost::endian::big_to_native;
namespace qcow_format {
struct ClusterExtent {
uint64_t cluster_offset;
uint64_t cluster_length;
uint64_t intra_cluster_offset;
uint64_t image_offset;
uint64_t buffer_offset;
ClusterExtent(uint64_t cluster_offset, uint64_t cluster_length,
uint64_t intra_cluster_offset, uint64_t image_offset,
uint64_t buffer_offset)
: cluster_offset(cluster_offset), cluster_length(cluster_length),
intra_cluster_offset(intra_cluster_offset), image_offset(image_offset),
buffer_offset(buffer_offset) {
}
};
typedef std::vector<ClusterExtent> ClusterExtents;
void LookupTable::init() {
if (cluster_offsets == nullptr) {
cluster_offsets = reinterpret_cast<uint64_t*>(bl.c_str());
}
}
void LookupTable::decode() {
init();
// L2 tables are selectively byte-swapped on demand if only requesting a
// single cluster offset
if (decoded) {
return;
}
// translate the lookup table (big-endian -> CPU endianness)
for (auto idx = 0UL; idx < size; ++idx) {
cluster_offsets[idx] = big_to_native(cluster_offsets[idx]);
}
decoded = true;
}
void populate_cluster_extents(CephContext* cct, uint64_t cluster_size,
const io::Extents& image_extents,
ClusterExtents* cluster_extents) {
uint64_t buffer_offset = 0;
for (auto [image_offset, image_length] : image_extents) {
while (image_length > 0) {
auto intra_cluster_offset = image_offset & (cluster_size - 1);
auto intra_cluster_length = cluster_size - intra_cluster_offset;
auto cluster_length = std::min(image_length, intra_cluster_length);
ldout(cct, 20) << "image_offset=" << image_offset << ", "
<< "image_length=" << image_length << ", "
<< "cluster_length=" << cluster_length << dendl;
cluster_extents->emplace_back(0, cluster_length, intra_cluster_offset,
image_offset, buffer_offset);
image_offset += cluster_length;
image_length -= cluster_length;
buffer_offset += cluster_length;
}
}
}
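/*
 * Illustrative example for populate_cluster_extents() (hypothetical values):
 * with cluster_size = 0x10000 (64 KiB), the image extent {0x1f000, 0x3000}
 * splits into two ClusterExtents -- {intra_cluster_offset: 0xf000,
 * image_offset: 0x1f000, cluster_length: 0x1000, buffer_offset: 0} and
 * {intra_cluster_offset: 0, image_offset: 0x20000, cluster_length: 0x2000,
 * buffer_offset: 0x1000}; the cluster_offset fields are filled in later from
 * the L2 tables.
 */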
} // namespace qcow_format
using namespace qcow_format;
template <typename I>
struct QCOWFormat<I>::Cluster {
const uint64_t cluster_offset;
bufferlist cluster_data_bl;
Cluster(uint64_t cluster_offset) : cluster_offset(cluster_offset) {
}
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::QCOWFormat::ClusterCache: " \
<< this << " " << __func__ << ": "
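// Serializes cluster reads on a strand, de-duplicating concurrent requests
// for the same cluster and keeping the most recently read cluster around for
// sequential IO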
template <typename I>
class QCOWFormat<I>::ClusterCache {
public:
ClusterCache(QCOWFormat* qcow_format)
: qcow_format(qcow_format),
m_strand(*qcow_format->m_image_ctx->asio_engine) {
}
void get_cluster(uint64_t cluster_offset, uint64_t cluster_length,
uint64_t intra_cluster_offset, bufferlist* bl,
Context* on_finish) {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "cluster_offset=" << cluster_offset << dendl;
// cache state machine runs in a single strand thread
boost::asio::dispatch(
m_strand,
[this, cluster_offset, cluster_length, intra_cluster_offset, bl,
on_finish]() {
execute_get_cluster(cluster_offset, cluster_length,
intra_cluster_offset, bl, on_finish);
});
}
private:
typedef std::tuple<uint64_t, uint64_t, bufferlist*, Context*> Completion;
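  // Completion above packs: intra-cluster offset, length to copy, destination
  // bufferlist and completion context (accessed via std::get<0..3> below)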
typedef std::list<Completion> Completions;
QCOWFormat* qcow_format;
boost::asio::io_context::strand m_strand;
std::shared_ptr<Cluster> cluster;
std::unordered_map<uint64_t, Completions> cluster_completions;
void execute_get_cluster(uint64_t cluster_offset, uint64_t cluster_length,
uint64_t intra_cluster_offset, bufferlist* bl,
Context* on_finish) {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "cluster_offset=" << cluster_offset << dendl;
if (cluster && cluster->cluster_offset == cluster_offset) {
// most-recent cluster matches
bl->substr_of(cluster->cluster_data_bl, intra_cluster_offset,
cluster_length);
boost::asio::post(*qcow_format->m_image_ctx->asio_engine,
[on_finish]() { on_finish->complete(0); });
return;
}
// record callback for cluster
bool new_request = (cluster_completions.count(cluster_offset) == 0);
cluster_completions[cluster_offset].emplace_back(
intra_cluster_offset, cluster_length, bl, on_finish);
if (new_request) {
// start the new read request
read_cluster(std::make_shared<Cluster>(cluster_offset));
}
}
void read_cluster(std::shared_ptr<Cluster> cluster) {
auto cct = qcow_format->m_image_ctx->cct;
uint64_t stream_offset = cluster->cluster_offset;
uint64_t stream_length = qcow_format->m_cluster_size;
if ((cluster->cluster_offset & QCOW_OFLAG_COMPRESSED) != 0) {
      // compressed clusters encode the host offset in the low bits and the
      // compressed length in the bits just below the compressed flag
stream_offset = cluster->cluster_offset &
qcow_format->m_cluster_offset_mask;
stream_length = (cluster->cluster_offset >>
(63 - qcow_format->m_cluster_bits)) &
(qcow_format->m_cluster_size - 1);
}
ldout(cct, 20) << "cluster_offset=" << cluster->cluster_offset << ", "
<< "stream_offset=" << stream_offset << ", "
<< "stream_length=" << stream_length << dendl;
// read the cluster into the cache entry
auto ctx = new LambdaContext([this, cluster](int r) {
boost::asio::post(m_strand, [this, cluster, r]() {
handle_read_cluster(r, cluster); }); });
qcow_format->m_stream->read({{stream_offset, stream_length}},
&cluster->cluster_data_bl, ctx);
}
void handle_read_cluster(int r, std::shared_ptr<Cluster> cluster) {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", "
<< "cluster_offset=" << cluster->cluster_offset << dendl;
auto completions = std::move(cluster_completions[cluster->cluster_offset]);
cluster_completions.erase(cluster->cluster_offset);
if (r < 0) {
lderr(cct) << "failed to read cluster offset " << cluster->cluster_offset
<< ": " << cpp_strerror(r) << dendl;
} else {
if ((cluster->cluster_offset & QCOW_OFLAG_COMPRESSED) != 0) {
bufferlist compressed_bl{std::move(cluster->cluster_data_bl)};
cluster->cluster_data_bl.clear();
// TODO
lderr(cct) << "support for compressed clusters is not available"
<< dendl;
r = -EINVAL;
} else {
// cache the MRU cluster in case of sequential IO
this->cluster = cluster;
}
}
// complete the IO back to caller
boost::asio::post(*qcow_format->m_image_ctx->asio_engine,
[r, cluster, completions=std::move(completions)]() {
for (auto completion : completions) {
if (r >= 0) {
std::get<2>(completion)->substr_of(
cluster->cluster_data_bl,
std::get<0>(completion),
std::get<1>(completion));
}
std::get<3>(completion)->complete(r);
}
});
}
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::QCOWFormat::L2TableCache: " \
<< this << " " << __func__ << ": "
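// Small cache of L2 lookup tables (QCOW_L2_CACHE_SIZE entries) with
// frequency/age-based eviction; all state transitions run on a strand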
template <typename I>
class QCOWFormat<I>::L2TableCache {
public:
L2TableCache(QCOWFormat* qcow_format)
: qcow_format(qcow_format),
m_strand(*qcow_format->m_image_ctx->asio_engine),
l2_cache_entries(QCOW_L2_CACHE_SIZE) {
}
void get_l2_table(const LookupTable* l1_table, uint64_t l2_table_offset,
std::shared_ptr<const LookupTable>* l2_table,
Context* on_finish) {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "l2_table_offset=" << l2_table_offset << dendl;
// cache state machine runs in a single strand thread
Request request{l1_table, l2_table_offset, l2_table, on_finish};
boost::asio::dispatch(
m_strand, [this, request=std::move(request)]() {
requests.push_back(std::move(request));
});
dispatch_request();
}
void get_cluster_offset(const LookupTable* l1_table,
uint64_t image_offset, uint64_t* cluster_offset,
Context* on_finish) {
auto cct = qcow_format->m_image_ctx->cct;
uint32_t l1_table_index = image_offset >> qcow_format->m_l1_shift;
uint64_t l2_table_offset = l1_table->cluster_offsets[std::min<uint32_t>(
l1_table_index, l1_table->size - 1)] &
qcow_format->m_cluster_mask;
uint32_t l2_table_index = (image_offset >> qcow_format->m_cluster_bits) &
(qcow_format->m_l2_size - 1);
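    // e.g. (hypothetical geometry) with cluster_bits=16 and l2_bits=13,
    // m_l1_shift=29 so each L1 entry spans 512 MiB; image_offset=0x40030000
    // then maps to l1_table_index=2 and l2_table_index=3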
ldout(cct, 20) << "image_offset=" << image_offset << ", "
<< "l1_table_index=" << l1_table_index << ", "
<< "l2_table_offset=" << l2_table_offset << ", "
<< "l2_table_index=" << l2_table_index << dendl;
if (l1_table_index >= l1_table->size) {
lderr(cct) << "L1 index " << l1_table_index << " out-of-bounds" << dendl;
on_finish->complete(-ERANGE);
return;
} else if (l2_table_offset == 0) {
// L2 table has not been allocated for specified offset
ldout(cct, 20) << "image_offset=" << image_offset << ", "
<< "cluster_offset=DNE" << dendl;
*cluster_offset = 0;
on_finish->complete(-ENOENT);
return;
}
// cache state machine runs in a single strand thread
Request request{l1_table, l2_table_offset, l2_table_index, cluster_offset,
on_finish};
boost::asio::dispatch(
m_strand, [this, request=std::move(request)]() {
requests.push_back(std::move(request));
});
dispatch_request();
}
private:
QCOWFormat* qcow_format;
boost::asio::io_context::strand m_strand;
struct Request {
const LookupTable* l1_table;
uint64_t l2_table_offset;
// get_cluster_offset request
uint32_t l2_table_index;
uint64_t* cluster_offset = nullptr;
// get_l2_table request
std::shared_ptr<const LookupTable>* l2_table;
Context* on_finish;
Request(const LookupTable* l1_table, uint64_t l2_table_offset,
uint32_t l2_table_index, uint64_t* cluster_offset,
Context* on_finish)
: l1_table(l1_table), l2_table_offset(l2_table_offset),
l2_table_index(l2_table_index), cluster_offset(cluster_offset),
on_finish(on_finish) {
}
Request(const LookupTable* l1_table, uint64_t l2_table_offset,
std::shared_ptr<const LookupTable>* l2_table, Context* on_finish)
: l1_table(l1_table), l2_table_offset(l2_table_offset),
l2_table(l2_table), on_finish(on_finish) {
}
};
typedef std::deque<Request> Requests;
struct L2Cache {
uint64_t l2_offset = 0;
std::shared_ptr<LookupTable> l2_table;
utime_t timestamp;
uint32_t count = 0;
bool in_flight = false;
int ret_val = 0;
};
std::vector<L2Cache> l2_cache_entries;
Requests requests;
void dispatch_request() {
boost::asio::dispatch(m_strand, [this]() { execute_request(); });
}
void execute_request() {
auto cct = qcow_format->m_image_ctx->cct;
if (requests.empty()) {
return;
}
auto request = requests.front();
ldout(cct, 20) << "l2_table_offset=" << request.l2_table_offset << dendl;
std::shared_ptr<LookupTable> l2_table;
int r = l2_table_lookup(request.l2_table_offset, &l2_table);
if (r < 0) {
lderr(cct) << "failed to load L2 table: l2_table_offset="
<< request.l2_table_offset << ": "
<< cpp_strerror(r) << dendl;
} else if (l2_table == nullptr) {
      // table not in cache -- the request will restart once it's loaded
return;
} else if (request.cluster_offset != nullptr) {
auto cluster_offset = l2_table->cluster_offsets[request.l2_table_index];
if (!l2_table->decoded) {
// table hasn't been byte-swapped
cluster_offset = big_to_native(cluster_offset);
}
*request.cluster_offset = cluster_offset & qcow_format->m_cluster_mask;
if (*request.cluster_offset == QCOW_OFLAG_ZERO) {
ldout(cct, 20) << "l2_table_offset=" << request.l2_table_offset << ", "
<< "l2_table_index=" << request.l2_table_index << ", "
<< "cluster_offset=zeroed" << dendl;
} else {
ldout(cct, 20) << "l2_table_offset=" << request.l2_table_offset << ", "
<< "l2_table_index=" << request.l2_table_index << ", "
<< "cluster_offset=" << *request.cluster_offset
<< dendl;
}
} else if (request.l2_table != nullptr) {
// ensure it's in the correct byte-order
l2_table->decode();
*request.l2_table = l2_table;
} else {
ceph_assert(false);
}
// complete the L2 cache request
boost::asio::post(*qcow_format->m_image_ctx->asio_engine,
[r, ctx=request.on_finish]() { ctx->complete(r); });
requests.pop_front();
// process next request (if any)
dispatch_request();
}
int l2_table_lookup(uint64_t l2_offset,
std::shared_ptr<LookupTable>* l2_table) {
auto cct = qcow_format->m_image_ctx->cct;
l2_table->reset();
// find a match in the existing cache
for (auto idx = 0U; idx < l2_cache_entries.size(); ++idx) {
auto& l2_cache = l2_cache_entries[idx];
if (l2_cache.l2_offset == l2_offset) {
if (l2_cache.in_flight) {
ldout(cct, 20) << "l2_offset=" << l2_offset << ", "
<< "index=" << idx << " (in-flight)" << dendl;
return 0;
}
if (l2_cache.ret_val < 0) {
ldout(cct, 20) << "l2_offset=" << l2_offset << ", "
<< "index=" << idx << " (error): "
<< cpp_strerror(l2_cache.ret_val) << dendl;
int r = l2_cache.ret_val;
l2_cache = L2Cache{};
return r;
}
++l2_cache.count;
if (l2_cache.count == std::numeric_limits<uint32_t>::max()) {
for (auto& entry : l2_cache_entries) {
entry.count >>= 1;
}
}
ldout(cct, 20) << "l2_offset=" << l2_offset << ", " << "index=" << idx
<< dendl;
*l2_table = l2_cache.l2_table;
return 0;
}
}
// find the least used entry
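    // (approximately least-frequently-used with aging: each scan decays the
    // hit counts, ties are broken by the oldest timestamp, and in-flight
    // loads are never evicted)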
int32_t min_idx = -1;
uint32_t min_count = std::numeric_limits<uint32_t>::max();
utime_t min_timestamp;
for (uint32_t idx = 0U; idx < l2_cache_entries.size(); ++idx) {
auto& l2_cache = l2_cache_entries[idx];
if (l2_cache.in_flight) {
continue;
}
if (l2_cache.count > 0) {
--l2_cache.count;
}
if (l2_cache.count <= min_count) {
if (min_idx == -1 || l2_cache.timestamp < min_timestamp) {
min_timestamp = l2_cache.timestamp;
min_count = l2_cache.count;
min_idx = idx;
}
}
}
if (min_idx == -1) {
// no space in the cache due to in-flight requests
ldout(cct, 20) << "l2_offset=" << l2_offset << ", "
<< "index=DNE (cache busy)" << dendl;
return 0;
}
ldout(cct, 20) << "l2_offset=" << l2_offset << ", "
<< "index=" << min_idx << " (loading)" << dendl;
auto& l2_cache = l2_cache_entries[min_idx];
l2_cache.l2_table = std::make_shared<LookupTable>(qcow_format->m_l2_size);
l2_cache.l2_offset = l2_offset;
l2_cache.timestamp = ceph_clock_now();
l2_cache.count = 1;
l2_cache.in_flight = true;
// read the L2 table into the L2 cache entry
auto ctx = new LambdaContext([this, index=min_idx, l2_offset](int r) {
boost::asio::post(m_strand, [this, index, l2_offset, r]() {
handle_l2_table_lookup(r, index, l2_offset); }); });
qcow_format->m_stream->read(
{{l2_offset, qcow_format->m_l2_size * sizeof(uint64_t)}},
&l2_cache.l2_table->bl, ctx);
return 0;
}
void handle_l2_table_lookup(int r, uint32_t index, uint64_t l2_offset) {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", "
<< "l2_offset=" << l2_offset << ", "
<< "index=" << index << dendl;
auto& l2_cache = l2_cache_entries[index];
ceph_assert(l2_cache.in_flight);
l2_cache.in_flight = false;
if (r < 0) {
lderr(cct) << "failed to load L2 table: "
<< "l2_offset=" << l2_cache.l2_offset << ": "
<< cpp_strerror(r) << dendl;
l2_cache.ret_val = r;
} else {
// keep the L2 table in big-endian byte-order until the full table
// is requested
l2_cache.l2_table->init();
}
// restart the state machine
dispatch_request();
}
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::QCOWFormat::ReadRequest: " \
<< this << " " << __func__ << ": "
template <typename I>
class QCOWFormat<I>::ReadRequest {
public:
ReadRequest(QCOWFormat* qcow_format, io::AioCompletion* aio_comp,
const LookupTable* l1_table, io::Extents&& image_extents)
: qcow_format(qcow_format), aio_comp(aio_comp), l1_table(l1_table),
image_extents(std::move(image_extents)) {
}
void send() {
get_cluster_offsets();
}
private:
QCOWFormat* qcow_format;
io::AioCompletion* aio_comp;
const LookupTable* l1_table;
io::Extents image_extents;
size_t image_extents_idx = 0;
uint32_t image_extent_offset = 0;
ClusterExtents cluster_extents;
void get_cluster_offsets() {
auto cct = qcow_format->m_image_ctx->cct;
populate_cluster_extents(cct, qcow_format->m_cluster_size, image_extents,
&cluster_extents);
ldout(cct, 20) << dendl;
auto ctx = new LambdaContext([this](int r) {
handle_get_cluster_offsets(r); });
auto gather_ctx = new C_Gather(cct, ctx);
for (auto& cluster_extent : cluster_extents) {
auto sub_ctx = new LambdaContext(
[this, &cluster_extent, on_finish=gather_ctx->new_sub()](int r) {
handle_get_cluster_offset(r, cluster_extent, on_finish); });
qcow_format->m_l2_table_cache->get_cluster_offset(
l1_table, cluster_extent.image_offset,
&cluster_extent.cluster_offset, sub_ctx);
}
gather_ctx->activate();
}
void handle_get_cluster_offset(int r, const ClusterExtent& cluster_extent,
Context* on_finish) {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", "
<< "image_offset=" << cluster_extent.image_offset << ", "
<< "cluster_offset=" << cluster_extent.cluster_offset
<< dendl;
if (r == -ENOENT) {
ldout(cct, 20) << "image offset DNE in QCOW image" << dendl;
r = 0;
} else if (r < 0) {
lderr(cct) << "failed to map image offset " << cluster_extent.image_offset
<< ": " << cpp_strerror(r) << dendl;
}
on_finish->complete(r);
}
void handle_get_cluster_offsets(int r) {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to retrieve cluster extents: " << cpp_strerror(r)
<< dendl;
aio_comp->fail(r);
delete this;
return;
}
read_clusters();
}
void read_clusters() {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << dendl;
aio_comp->set_request_count(cluster_extents.size());
for (auto& cluster_extent : cluster_extents) {
auto read_ctx = new io::ReadResult::C_ImageReadRequest(
aio_comp, cluster_extent.buffer_offset,
{{cluster_extent.image_offset, cluster_extent.cluster_length}});
read_ctx->ignore_enoent = true;
auto log_ctx = new LambdaContext(
[this, cct=qcow_format->m_image_ctx->cct,
image_offset=cluster_extent.image_offset,
image_length=cluster_extent.cluster_length, ctx=read_ctx](int r) {
handle_read_cluster(cct, r, image_offset, image_length, ctx);
});
if (cluster_extent.cluster_offset == 0) {
// QCOW header is at offset 0, implies cluster DNE
log_ctx->complete(-ENOENT);
} else if (cluster_extent.cluster_offset == QCOW_OFLAG_ZERO) {
// explicitly zeroed section
read_ctx->bl.append_zero(cluster_extent.cluster_length);
log_ctx->complete(0);
} else {
// request the (sub)cluster from the cluster cache
qcow_format->m_cluster_cache->get_cluster(
cluster_extent.cluster_offset, cluster_extent.cluster_length,
cluster_extent.intra_cluster_offset, &read_ctx->bl, log_ctx);
}
}
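    // all reads have been dispatched and are tracked by aio_comp, so the
    // request object is no longer needed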
delete this;
}
void handle_read_cluster(CephContext* cct, int r, uint64_t image_offset,
uint64_t image_length, Context* on_finish) const {
    // NOTE: treat as a static function -- the request object has already been
    // deleted by read_clusters()
ldout(cct, 20) << "r=" << r << ", "
<< "image_offset=" << image_offset << ", "
<< "image_length=" << image_length << dendl;
if (r != -ENOENT && r < 0) {
lderr(cct) << "failed to read image extent " << image_offset << "~"
<< image_length << ": " << cpp_strerror(r) << dendl;
}
on_finish->complete(r);
}
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::QCOWFormat::" \
<< "ListSnapsRequest: " << this << " " \
<< __func__ << ": "
template <typename I>
class QCOWFormat<I>::ListSnapsRequest {
public:
ListSnapsRequest(
QCOWFormat* qcow_format, uint32_t l1_table_index,
ClusterExtents&& cluster_extents,
const std::map<uint64_t, const LookupTable*>& snap_id_to_l1_table,
io::SnapshotDelta* snapshot_delta, Context* on_finish)
: qcow_format(qcow_format), l1_table_index(l1_table_index),
cluster_extents(std::move(cluster_extents)),
snap_id_to_l1_table(snap_id_to_l1_table), snapshot_delta(snapshot_delta),
on_finish(on_finish) {
}
void send() {
get_l2_table();
}
private:
QCOWFormat* qcow_format;
uint32_t l1_table_index;
ClusterExtents cluster_extents;
std::map<uint64_t, const LookupTable*> snap_id_to_l1_table;
io::SnapshotDelta* snapshot_delta;
Context* on_finish;
std::shared_ptr<const LookupTable> previous_l2_table;
std::shared_ptr<const LookupTable> l2_table;
void get_l2_table() {
auto cct = qcow_format->m_image_ctx->cct;
if (snap_id_to_l1_table.empty()) {
finish(0);
return;
}
auto it = snap_id_to_l1_table.begin();
auto [snap_id, l1_table] = *it;
snap_id_to_l1_table.erase(it);
previous_l2_table = l2_table;
l2_table.reset();
auto ctx = new LambdaContext([this, snap_id = snap_id](int r) {
boost::asio::post(qcow_format->m_strand, [this, snap_id, r]() {
handle_get_l2_table(r, snap_id);
});
});
if (l1_table_index >= l1_table->size ||
l1_table->cluster_offsets[l1_table_index] == 0) {
ldout(cct, 20) << "l1_table_index=" << l1_table_index << ", "
<< "snap_id=" << snap_id << ": DNE" << dendl;
ctx->complete(-ENOENT);
return;
}
uint64_t l2_table_offset = l1_table->cluster_offsets[l1_table_index] &
qcow_format->m_cluster_mask;
ldout(cct, 20) << "l1_table_index=" << l1_table_index << ", "
<< "snap_id=" << snap_id << ", "
<< "l2_table_offset=" << l2_table_offset << dendl;
qcow_format->m_l2_table_cache->get_l2_table(l1_table, l2_table_offset,
&l2_table, ctx);
}
void handle_get_l2_table(int r, uint64_t snap_id) {
ceph_assert(qcow_format->m_strand.running_in_this_thread());
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", "
<< "snap_id=" << snap_id << dendl;
if (r == -ENOENT) {
l2_table.reset();
} else if (r < 0) {
lderr(cct) << "failed to retrieve L2 table for snapshot " << snap_id
<< ": " << cpp_strerror(r) << dendl;
finish(r);
return;
}
// compare the cluster offsets at each requested L2 offset between
// the previous snapshot's L2 table and the current L2 table.
auto& sparse_extents = (*snapshot_delta)[{snap_id, snap_id}];
for (auto& cluster_extent : cluster_extents) {
uint32_t l2_table_index =
(cluster_extent.image_offset >> qcow_format->m_cluster_bits) &
(qcow_format->m_l2_size - 1);
std::optional<uint64_t> cluster_offset;
if (l2_table && l2_table_index < l2_table->size) {
cluster_offset = l2_table->cluster_offsets[l2_table_index] &
qcow_format->m_cluster_offset_mask;
}
std::optional<uint64_t> prev_cluster_offset;
if (previous_l2_table && l2_table_index < previous_l2_table->size) {
prev_cluster_offset =
previous_l2_table->cluster_offsets[l2_table_index] &
qcow_format->m_cluster_offset_mask;
}
ldout(cct, 20) << "l1_table_index=" << l1_table_index << ", "
<< "snap_id=" << snap_id << ", "
<< "image_offset=" << cluster_extent.image_offset << ", "
<< "l2_table_index=" << l2_table_index << ", "
<< "cluster_offset=" << cluster_offset << ", "
<< "prev_cluster_offset=" << prev_cluster_offset << dendl;
auto state = io::SPARSE_EXTENT_STATE_DATA;
if (cluster_offset == prev_cluster_offset) {
continue;
} else if ((prev_cluster_offset && !cluster_offset) ||
*cluster_offset == QCOW_OFLAG_ZERO) {
// explicitly zeroed or deallocated
state = io::SPARSE_EXTENT_STATE_ZEROED;
}
sparse_extents.insert(
cluster_extent.image_offset, cluster_extent.cluster_length,
{state, cluster_extent.cluster_length});
}
ldout(cct, 20) << "l1_table_index=" << l1_table_index << ", "
<< "snap_id=" << snap_id << ", "
<< "sparse_extents=" << sparse_extents << dendl;
// continue processing the L2 table at this index for all snapshots
boost::asio::post(*qcow_format->m_image_ctx->asio_engine,
[this]() { get_l2_table(); });
}
void finish(int r) {
auto cct = qcow_format->m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << dendl;
on_finish->complete(r);
delete this;
}
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::QCOWFormat: " << this \
<< " " << __func__ << ": "
template <typename I>
QCOWFormat<I>::QCOWFormat(
I* image_ctx, const json_spirit::mObject& json_object,
const SourceSpecBuilder<I>* source_spec_builder)
: m_image_ctx(image_ctx), m_json_object(json_object),
m_source_spec_builder(source_spec_builder),
m_strand(*image_ctx->asio_engine) {
}
template <typename I>
void QCOWFormat<I>::open(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
int r = m_source_spec_builder->build_stream(m_json_object, &m_stream);
if (r < 0) {
lderr(cct) << "failed to build migration stream handler" << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_open(r, on_finish); });
m_stream->open(ctx);
}
template <typename I>
void QCOWFormat<I>::handle_open(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to open QCOW image: " << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
probe(on_finish);
}
template <typename I>
void QCOWFormat<I>::probe(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_probe(r, on_finish); });
m_bl.clear();
m_stream->read({{0, 8}}, &m_bl, ctx);
}
template <typename I>
void QCOWFormat<I>::handle_probe(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to probe QCOW image: " << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
auto header_probe = *reinterpret_cast<QCowHeaderProbe*>(
m_bl.c_str());
header_probe.magic = big_to_native(header_probe.magic);
header_probe.version = big_to_native(header_probe.version);
if (header_probe.magic != QCOW_MAGIC) {
lderr(cct) << "invalid QCOW header magic" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_bl.clear();
if (header_probe.version == 1) {
#ifdef WITH_RBD_MIGRATION_FORMAT_QCOW_V1
read_v1_header(on_finish);
#else // WITH_RBD_MIGRATION_FORMAT_QCOW_V1
lderr(cct) << "QCOW is not supported" << dendl;
on_finish->complete(-ENOTSUP);
#endif // WITH_RBD_MIGRATION_FORMAT_QCOW_V1
return;
} else if (header_probe.version >= 2 && header_probe.version <= 3) {
read_v2_header(on_finish);
return;
} else {
lderr(cct) << "invalid QCOW header version " << header_probe.version
<< dendl;
on_finish->complete(-EINVAL);
return;
}
}
#ifdef WITH_RBD_MIGRATION_FORMAT_QCOW_V1
template <typename I>
void QCOWFormat<I>::read_v1_header(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_read_v1_header(r, on_finish); });
m_bl.clear();
m_stream->read({{0, sizeof(QCowHeaderV1)}}, &m_bl, ctx);
}
template <typename I>
void QCOWFormat<I>::handle_read_v1_header(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to read QCOW header: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
auto header = *reinterpret_cast<QCowHeaderV1*>(m_bl.c_str());
// byte-swap important fields
header.magic = big_to_native(header.magic);
header.version = big_to_native(header.version);
header.backing_file_offset = big_to_native(header.backing_file_offset);
header.backing_file_size = big_to_native(header.backing_file_size);
header.size = big_to_native(header.size);
header.crypt_method = big_to_native(header.crypt_method);
header.l1_table_offset = big_to_native(header.l1_table_offset);
if (header.magic != QCOW_MAGIC || header.version != 1) {
// honestly shouldn't happen since we've already validated it
lderr(cct) << "header is not QCOW" << dendl;
on_finish->complete(-EINVAL);
return;
}
if (header.cluster_bits < QCOW_MIN_CLUSTER_BITS ||
header.cluster_bits > QCOW_MAX_CLUSTER_BITS) {
lderr(cct) << "invalid cluster bits: " << header.cluster_bits << dendl;
on_finish->complete(-EINVAL);
return;
}
if (header.l2_bits < (QCOW_MIN_CLUSTER_BITS - 3) ||
header.l2_bits > (QCOW_MAX_CLUSTER_BITS - 3)) {
lderr(cct) << "invalid L2 bits: " << header.l2_bits << dendl;
on_finish->complete(-EINVAL);
return;
}
if (header.crypt_method != QCOW_CRYPT_NONE) {
lderr(cct) << "invalid or unsupported encryption method" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_size = header.size;
if (p2roundup(m_size, static_cast<uint64_t>(512)) != m_size) {
lderr(cct) << "image size is not a multiple of block size" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_backing_file_offset = header.backing_file_offset;
m_backing_file_size = header.backing_file_size;
m_cluster_bits = header.cluster_bits;
m_cluster_size = 1UL << header.cluster_bits;
m_cluster_offset_mask = (1ULL << (63 - header.cluster_bits)) - 1;
m_cluster_mask = ~QCOW_OFLAG_COMPRESSED;
m_l2_bits = header.l2_bits;
m_l2_size = (1UL << m_l2_bits);
m_l1_shift = m_cluster_bits + m_l2_bits;
m_l1_table.size = (m_size + (1LL << m_l1_shift) - 1) >> m_l1_shift;
m_l1_table_offset = header.l1_table_offset;
if (m_size > (std::numeric_limits<uint64_t>::max() - (1ULL << m_l1_shift)) ||
m_l1_table.size >
(std::numeric_limits<int32_t>::max() / sizeof(uint64_t))) {
lderr(cct) << "image size too big: " << m_size << dendl;
on_finish->complete(-EINVAL);
return;
}
ldout(cct, 15) << "size=" << m_size << ", "
<< "cluster_bits=" << m_cluster_bits << ", "
<< "l2_bits=" << m_l2_bits << dendl;
// allocate memory for L1 table and L2 + cluster caches
m_l2_table_cache = std::make_unique<L2TableCache>(this);
m_cluster_cache = std::make_unique<ClusterCache>(this);
read_l1_table(on_finish);
}
#endif // WITH_RBD_MIGRATION_FORMAT_QCOW_V1
template <typename I>
void QCOWFormat<I>::read_v2_header(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_read_v2_header(r, on_finish); });
m_bl.clear();
m_stream->read({{0, sizeof(QCowHeader)}}, &m_bl, ctx);
}
template <typename I>
void QCOWFormat<I>::handle_read_v2_header(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to read QCOW2 header: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
auto header = *reinterpret_cast<QCowHeader*>(m_bl.c_str());
// byte-swap important fields
header.magic = big_to_native(header.magic);
header.version = big_to_native(header.version);
header.backing_file_offset = big_to_native(header.backing_file_offset);
header.backing_file_size = big_to_native(header.backing_file_size);
header.cluster_bits = big_to_native(header.cluster_bits);
header.size = big_to_native(header.size);
header.crypt_method = big_to_native(header.crypt_method);
header.l1_size = big_to_native(header.l1_size);
header.l1_table_offset = big_to_native(header.l1_table_offset);
header.nb_snapshots = big_to_native(header.nb_snapshots);
header.snapshots_offset = big_to_native(header.snapshots_offset);
if (header.version == 2) {
// valid only for version >= 3
header.incompatible_features = 0;
header.compatible_features = 0;
header.autoclear_features = 0;
header.header_length = 72;
header.compression_type = 0;
} else {
header.incompatible_features = big_to_native(header.incompatible_features);
header.compatible_features = big_to_native(header.compatible_features);
header.autoclear_features = big_to_native(header.autoclear_features);
header.header_length = big_to_native(header.header_length);
}
if (header.magic != QCOW_MAGIC || header.version < 2 || header.version > 3) {
// honestly shouldn't happen since we've already validated it
lderr(cct) << "header is not QCOW2" << dendl;
on_finish->complete(-EINVAL);
return;
}
if (header.cluster_bits < QCOW_MIN_CLUSTER_BITS ||
header.cluster_bits > QCOW_MAX_CLUSTER_BITS) {
lderr(cct) << "invalid cluster bits: " << header.cluster_bits << dendl;
on_finish->complete(-EINVAL);
return;
}
if (header.crypt_method != QCOW_CRYPT_NONE) {
lderr(cct) << "invalid or unsupported encryption method" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_size = header.size;
if (p2roundup(m_size, static_cast<uint64_t>(512)) != m_size) {
lderr(cct) << "image size is not a multiple of block size" << dendl;
on_finish->complete(-EINVAL);
return;
}
if (header.header_length <= offsetof(QCowHeader, compression_type)) {
header.compression_type = 0;
}
if ((header.compression_type != 0) ||
((header.incompatible_features & QCOW2_INCOMPAT_COMPRESSION) != 0)) {
lderr(cct) << "invalid or unsupported compression type" << dendl;
on_finish->complete(-EINVAL);
return;
}
  if ((header.incompatible_features & QCOW2_INCOMPAT_DATA_FILE) != 0) {
    lderr(cct) << "external data file feature not supported" << dendl;
    on_finish->complete(-ENOTSUP);
    return;
  }
if ((header.incompatible_features & QCOW2_INCOMPAT_EXTL2) != 0) {
lderr(cct) << "extended L2 table feature not supported" << dendl;
on_finish->complete(-ENOTSUP);
return;
}
header.incompatible_features &= ~QCOW2_INCOMPAT_MASK;
if (header.incompatible_features != 0) {
lderr(cct) << "unknown incompatible feature enabled" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_backing_file_offset = header.backing_file_offset;
m_backing_file_size = header.backing_file_size;
m_cluster_bits = header.cluster_bits;
m_cluster_size = 1UL << header.cluster_bits;
m_cluster_offset_mask = (1ULL << (63 - header.cluster_bits)) - 1;
m_cluster_mask = ~(QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_COPIED);
  // the L2 table is fixed at one (1) cluster block holding 8-byte (2^3 byte)
  // offsets, hence the 3-bit adjustment
m_l2_bits = m_cluster_bits - 3;
m_l2_size = (1UL << m_l2_bits);
m_l1_shift = m_cluster_bits + m_l2_bits;
m_l1_table.size = (m_size + (1LL << m_l1_shift) - 1) >> m_l1_shift;
m_l1_table_offset = header.l1_table_offset;
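  // e.g. (hypothetical) a 10 GiB image with cluster_bits=16 yields a 64 KiB
  // cluster size, 8192-entry L2 tables (each covering 512 MiB) and an L1
  // table of 20 entries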
if (m_size > (std::numeric_limits<uint64_t>::max() - (1ULL << m_l1_shift)) ||
m_l1_table.size >
(std::numeric_limits<int32_t>::max() / sizeof(uint64_t))) {
lderr(cct) << "image size too big: " << m_size << dendl;
on_finish->complete(-EINVAL);
return;
} else if (m_l1_table.size > header.l1_size) {
lderr(cct) << "invalid L1 table size in header (" << header.l1_size
<< " < " << m_l1_table.size << ")" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_snapshot_count = header.nb_snapshots;
m_snapshots_offset = header.snapshots_offset;
ldout(cct, 15) << "size=" << m_size << ", "
<< "cluster_bits=" << m_cluster_bits << ", "
<< "l1_table_offset=" << m_l1_table_offset << ", "
<< "snapshot_count=" << m_snapshot_count << ", "
<< "snapshots_offset=" << m_snapshots_offset << dendl;
// allocate memory for L1 table and L2 + cluster caches
m_l2_table_cache = std::make_unique<L2TableCache>(this);
m_cluster_cache = std::make_unique<ClusterCache>(this);
read_snapshot(on_finish);
}
template <typename I>
void QCOWFormat<I>::read_snapshot(Context* on_finish) {
if (m_snapshots_offset == 0 || m_snapshots.size() == m_snapshot_count) {
read_l1_table(on_finish);
return;
}
  // snapshot headers are always aligned on an 8 byte boundary
m_snapshots_offset = p2roundup(m_snapshots_offset, static_cast<uint64_t>(8));
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "snap_id=" << (m_snapshots.size() + 1) << ", "
<< "offset=" << m_snapshots_offset << dendl;
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_read_snapshot(r, on_finish); });
m_bl.clear();
m_stream->read({{m_snapshots_offset, sizeof(QCowSnapshotHeader)}}, &m_bl,
ctx);
}
template <typename I>
void QCOWFormat<I>::handle_read_snapshot(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << ", "
<< "index=" << m_snapshots.size() << dendl;
if (r < 0) {
lderr(cct) << "failed to read QCOW2 snapshot header: " << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
m_snapshots_offset += m_bl.length();
auto header = *reinterpret_cast<QCowSnapshotHeader*>(m_bl.c_str());
auto& snapshot = m_snapshots[m_snapshots.size() + 1];
snapshot.id.resize(big_to_native(header.id_str_size));
snapshot.name.resize(big_to_native(header.name_size));
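  // only the id/name lengths live in the fixed-size header; the string bytes
  // (and any extra data) follow it and are read in read_snapshot_extra()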
snapshot.l1_table_offset = big_to_native(header.l1_table_offset);
snapshot.l1_table.size = big_to_native(header.l1_size);
snapshot.timestamp.sec_ref() = big_to_native(header.date_sec);
snapshot.timestamp.nsec_ref() = big_to_native(header.date_nsec);
snapshot.extra_data_size = big_to_native(header.extra_data_size);
ldout(cct, 10) << "snap_id=" << m_snapshots.size() << ", "
<< "id_str_len=" << snapshot.id.size() << ", "
<< "name_str_len=" << snapshot.name.size() << ", "
<< "l1_table_offset=" << snapshot.l1_table_offset << ", "
<< "l1_size=" << snapshot.l1_table.size << ", "
<< "extra_data_size=" << snapshot.extra_data_size << dendl;
read_snapshot_extra(on_finish);
}
template <typename I>
void QCOWFormat<I>::read_snapshot_extra(Context* on_finish) {
ceph_assert(!m_snapshots.empty());
auto& snapshot = m_snapshots.rbegin()->second;
uint32_t length = snapshot.extra_data_size +
snapshot.id.size() +
snapshot.name.size();
if (length == 0) {
uuid_d uuid_gen;
uuid_gen.generate_random();
snapshot.name = uuid_gen.to_string();
read_snapshot(on_finish);
return;
}
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "snap_id=" << m_snapshots.size() << ", "
<< "offset=" << m_snapshots_offset << ", "
<< "length=" << length << dendl;
auto offset = m_snapshots_offset;
m_snapshots_offset += length;
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_read_snapshot_extra(r, on_finish); });
m_bl.clear();
m_stream->read({{offset, length}}, &m_bl, ctx);
}
template <typename I>
void QCOWFormat<I>::handle_read_snapshot_extra(int r, Context* on_finish) {
ceph_assert(!m_snapshots.empty());
auto& snapshot = m_snapshots.rbegin()->second;
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << ", "
<< "snap_id=" << m_snapshots.size() << dendl;
if (r < 0) {
lderr(cct) << "failed to read QCOW2 snapshot header extra: "
<< cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
if (snapshot.extra_data_size >=
offsetof(QCowSnapshotExtraData, disk_size) + sizeof(uint64_t)) {
auto extra = reinterpret_cast<const QCowSnapshotExtraData*>(m_bl.c_str());
snapshot.size = big_to_native(extra->disk_size);
} else {
snapshot.size = m_size;
}
auto data = reinterpret_cast<const char*>(m_bl.c_str());
data += snapshot.extra_data_size;
if (!snapshot.id.empty()) {
snapshot.id = std::string(data, snapshot.id.size());
data += snapshot.id.size();
}
if (!snapshot.name.empty()) {
snapshot.name = std::string(data, snapshot.name.size());
data += snapshot.name.size();
} else {
uuid_d uuid_gen;
uuid_gen.generate_random();
snapshot.name = uuid_gen.to_string();
}
ldout(cct, 10) << "snap_id=" << m_snapshots.size() << ", "
<< "name=" << snapshot.name << ", "
<< "size=" << snapshot.size << dendl;
read_snapshot_l1_table(on_finish);
}
template <typename I>
void QCOWFormat<I>::read_snapshot_l1_table(Context* on_finish) {
ceph_assert(!m_snapshots.empty());
auto& snapshot = m_snapshots.rbegin()->second;
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "snap_id=" << m_snapshots.size() << ", "
<< "l1_table_offset=" << snapshot.l1_table_offset
<< dendl;
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_read_snapshot_l1_table(r, on_finish); });
m_stream->read({{snapshot.l1_table_offset,
snapshot.l1_table.size * sizeof(uint64_t)}},
&snapshot.l1_table.bl, ctx);
}
template <typename I>
void QCOWFormat<I>::handle_read_snapshot_l1_table(int r, Context* on_finish) {
ceph_assert(!m_snapshots.empty());
auto& snapshot = m_snapshots.rbegin()->second;
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << ", "
<< "snap_id=" << m_snapshots.size() << dendl;
if (r < 0) {
lderr(cct) << "failed to read snapshot L1 table: " << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
snapshot.l1_table.decode();
read_snapshot(on_finish);
}
template <typename I>
void QCOWFormat<I>::read_l1_table(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_read_l1_table(r, on_finish); });
m_stream->read({{m_l1_table_offset,
m_l1_table.size * sizeof(uint64_t)}},
&m_l1_table.bl, ctx);
}
template <typename I>
void QCOWFormat<I>::handle_read_l1_table(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to read L1 table: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
m_l1_table.decode();
read_backing_file(on_finish);
}
template <typename I>
void QCOWFormat<I>::read_backing_file(Context* on_finish) {
if (m_backing_file_offset == 0 || m_backing_file_size == 0) {
// all data is within the specified file
on_finish->complete(0);
return;
}
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
// TODO add support for backing files
on_finish->complete(-ENOTSUP);
}
template <typename I>
void QCOWFormat<I>::close(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
m_stream->close(on_finish);
}
template <typename I>
void QCOWFormat<I>::get_snapshots(SnapInfos* snap_infos, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
snap_infos->clear();
for (auto& [snap_id, snapshot] : m_snapshots) {
SnapInfo snap_info(snapshot.name, cls::rbd::UserSnapshotNamespace{},
snapshot.size, {}, 0, 0, snapshot.timestamp);
snap_infos->emplace(snap_id, snap_info);
}
on_finish->complete(0);
}
template <typename I>
void QCOWFormat<I>::get_image_size(uint64_t snap_id, uint64_t* size,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "snap_id=" << snap_id << dendl;
if (snap_id == CEPH_NOSNAP) {
*size = m_size;
} else {
auto snapshot_it = m_snapshots.find(snap_id);
if (snapshot_it == m_snapshots.end()) {
on_finish->complete(-ENOENT);
return;
}
auto& snapshot = snapshot_it->second;
*size = snapshot.size;
}
on_finish->complete(0);
}
template <typename I>
bool QCOWFormat<I>::read(
io::AioCompletion* aio_comp, uint64_t snap_id, io::Extents&& image_extents,
io::ReadResult&& read_result, int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "snap_id=" << snap_id << ", "
<< "image_extents=" << image_extents << dendl;
const LookupTable* l1_table = nullptr;
if (snap_id == CEPH_NOSNAP) {
l1_table = &m_l1_table;
} else {
auto snapshot_it = m_snapshots.find(snap_id);
if (snapshot_it == m_snapshots.end()) {
aio_comp->fail(-ENOENT);
return true;
}
auto& snapshot = snapshot_it->second;
l1_table = &snapshot.l1_table;
}
aio_comp->read_result = std::move(read_result);
aio_comp->read_result.set_image_extents(image_extents);
auto read_request = new ReadRequest(this, aio_comp, l1_table,
std::move(image_extents));
read_request->send();
return true;
}
template <typename I>
void QCOWFormat<I>::list_snaps(io::Extents&& image_extents,
io::SnapIds&& snap_ids, int list_snaps_flags,
io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
ClusterExtents cluster_extents;
populate_cluster_extents(cct, m_cluster_size, image_extents,
&cluster_extents);
// map L1 table indexes to cluster extents
std::map<uint64_t, ClusterExtents> l1_cluster_extents;
for (auto& cluster_extent : cluster_extents) {
uint32_t l1_table_index = cluster_extent.image_offset >> m_l1_shift;
auto& l1_cluster_extent = l1_cluster_extents[l1_table_index];
l1_cluster_extent.reserve(cluster_extents.size());
l1_cluster_extent.push_back(cluster_extent);
}
std::map<uint64_t, const LookupTable*> snap_id_to_l1_table;
for (auto& [snap_id, snapshot] : m_snapshots) {
snap_id_to_l1_table[snap_id] = &snapshot.l1_table;
}
snap_id_to_l1_table[CEPH_NOSNAP] = &m_l1_table;
on_finish = new LambdaContext([this, image_extents,
snap_ids=std::move(snap_ids),
snapshot_delta, on_finish](int r) mutable {
handle_list_snaps(r, std::move(image_extents), std::move(snap_ids),
snapshot_delta, on_finish);
});
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto& [l1_table_index, cluster_extents] : l1_cluster_extents) {
auto list_snaps_request = new ListSnapsRequest(
this, l1_table_index, std::move(cluster_extents), snap_id_to_l1_table,
snapshot_delta, gather_ctx->new_sub());
list_snaps_request->send();
}
gather_ctx->activate();
}
template <typename I>
void QCOWFormat<I>::handle_list_snaps(int r, io::Extents&& image_extents,
io::SnapIds&& snap_ids,
io::SnapshotDelta* snapshot_delta,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", "
<< "snapshot_delta=" << *snapshot_delta << dendl;
std::optional<uint64_t> previous_size = std::nullopt;
for (auto& [snap_id, snapshot] : m_snapshots) {
auto sparse_extents = &(*snapshot_delta)[{snap_id, snap_id}];
util::zero_shrunk_snapshot(cct, image_extents, snap_id, snapshot.size,
&previous_size, sparse_extents);
}
auto sparse_extents = &(*snapshot_delta)[{CEPH_NOSNAP, CEPH_NOSNAP}];
util::zero_shrunk_snapshot(cct, image_extents, CEPH_NOSNAP, m_size,
&previous_size, sparse_extents);
util::merge_snapshot_delta(snap_ids, snapshot_delta);
on_finish->complete(r);
}
} // namespace migration
} // namespace librbd
template class librbd::migration::QCOWFormat<librbd::ImageCtx>;
| 51,801 | 32.507115 | 80 | cc |
null | ceph-main/src/librbd/migration/QCOWFormat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_QCOW_FORMAT_H
#define CEPH_LIBRBD_MIGRATION_QCOW_FORMAT_H
#include "include/int_types.h"
#include "librbd/Types.h"
#include "librbd/migration/FormatInterface.h"
#include "librbd/migration/QCOW.h"
#include "acconfig.h"
#include "json_spirit/json_spirit.h"
#include <boost/asio/io_context_strand.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <deque>
#include <vector>
#include <memory>
struct Context;
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace migration {
template <typename> struct SourceSpecBuilder;
struct StreamInterface;
namespace qcow_format {
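// Holds a QCOW L1 or L2 lookup table backed by a bufferlist; entries remain
// in on-disk big-endian order until decode() byte-swaps them in place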
struct LookupTable {
LookupTable() {}
LookupTable(uint32_t size) : size(size) {}
bufferlist bl;
uint64_t* cluster_offsets = nullptr;
uint32_t size = 0;
bool decoded = false;
void init();
void decode();
};
} // namespace qcow_format
template <typename ImageCtxT>
class QCOWFormat : public FormatInterface {
public:
static QCOWFormat* create(
ImageCtxT* image_ctx, const json_spirit::mObject& json_object,
const SourceSpecBuilder<ImageCtxT>* source_spec_builder) {
return new QCOWFormat(image_ctx, json_object, source_spec_builder);
}
QCOWFormat(ImageCtxT* image_ctx, const json_spirit::mObject& json_object,
const SourceSpecBuilder<ImageCtxT>* source_spec_builder);
QCOWFormat(const QCOWFormat&) = delete;
QCOWFormat& operator=(const QCOWFormat&) = delete;
void open(Context* on_finish) override;
void close(Context* on_finish) override;
void get_snapshots(SnapInfos* snap_infos, Context* on_finish) override;
void get_image_size(uint64_t snap_id, uint64_t* size,
Context* on_finish) override;
bool read(io::AioCompletion* aio_comp, uint64_t snap_id,
io::Extents&& image_extents, io::ReadResult&& read_result,
int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) override;
void list_snaps(io::Extents&& image_extents, io::SnapIds&& snap_ids,
int list_snaps_flags, io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace,
Context* on_finish) override;
private:
/**
* @verbatim
*
* <start>
* |
* v
* OPEN
* |
* v
* PROBE
* |
* |\---> READ V1 HEADER ----------\
* | |
* \----> READ V2 HEADER |
* | |
* | /----------\ |
* | | | |
* v v | |
* READ SNAPSHOT | |
* | | |
* v | |
* READ SNAPSHOT EXTRA | |
* | | |
* v | |
* READ SNAPSHOT L1 TABLE |
* | |
* \--------------------\|
* |
* v
* READ L1 TABLE
* |
* v
* READ BACKING FILE
* |
* /-------------------------------/
* |
* v
* <opened>
*
* @endverbatim
*/
struct Cluster;
struct ClusterCache;
struct L2TableCache;
struct ReadRequest;
struct ListSnapsRequest;
struct Snapshot {
std::string id;
std::string name;
utime_t timestamp;
uint64_t size = 0;
uint64_t l1_table_offset = 0;
qcow_format::LookupTable l1_table;
uint32_t extra_data_size = 0;
};
ImageCtxT* m_image_ctx;
json_spirit::mObject m_json_object;
const SourceSpecBuilder<ImageCtxT>* m_source_spec_builder;
boost::asio::io_context::strand m_strand;
std::shared_ptr<StreamInterface> m_stream;
bufferlist m_bl;
uint64_t m_size = 0;
uint64_t m_backing_file_offset = 0;
uint32_t m_backing_file_size = 0;
uint32_t m_cluster_bits = 0;
uint32_t m_cluster_size = 0;
uint64_t m_cluster_offset_mask = 0;
uint64_t m_cluster_mask = 0;
uint32_t m_l1_shift = 0;
uint64_t m_l1_table_offset = 0;
qcow_format::LookupTable m_l1_table;
uint32_t m_l2_bits = 0;
uint32_t m_l2_size = 0;
uint32_t m_snapshot_count = 0;
uint64_t m_snapshots_offset = 0;
std::map<uint64_t, Snapshot> m_snapshots;
std::unique_ptr<L2TableCache> m_l2_table_cache;
std::unique_ptr<ClusterCache> m_cluster_cache;
void handle_open(int r, Context* on_finish);
void probe(Context* on_finish);
void handle_probe(int r, Context* on_finish);
#ifdef WITH_RBD_MIGRATION_FORMAT_QCOW_V1
void read_v1_header(Context* on_finish);
void handle_read_v1_header(int r, Context* on_finish);
#endif // WITH_RBD_MIGRATION_FORMAT_QCOW_V1
void read_v2_header(Context* on_finish);
void handle_read_v2_header(int r, Context* on_finish);
void read_snapshot(Context* on_finish);
void handle_read_snapshot(int r, Context* on_finish);
void read_snapshot_extra(Context* on_finish);
void handle_read_snapshot_extra(int r, Context* on_finish);
void read_snapshot_l1_table(Context* on_finish);
void handle_read_snapshot_l1_table(int r, Context* on_finish);
void read_l1_table(Context* on_finish);
void handle_read_l1_table(int r, Context* on_finish);
void read_backing_file(Context* on_finish);
void handle_list_snaps(int r, io::Extents&& image_extents,
io::SnapIds&& snap_ids,
io::SnapshotDelta* snapshot_delta, Context* on_finish);
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::QCOWFormat<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_QCOW_FORMAT_H
| 5,918 | 26.919811 | 80 | h |
null | ceph-main/src/librbd/migration/RawFormat.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/RawFormat.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include "librbd/migration/SnapshotInterface.h"
#include "librbd/migration/SourceSpecBuilder.h"
#include "librbd/migration/Utils.h"
namespace librbd {
namespace migration {
namespace {
static const std::string SNAPSHOTS_KEY {"snapshots"};
} // anonymous namespace
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::RawFormat: " << this \
<< " " << __func__ << ": "
template <typename I>
RawFormat<I>::RawFormat(
I* image_ctx, const json_spirit::mObject& json_object,
const SourceSpecBuilder<I>* source_spec_builder)
: m_image_ctx(image_ctx), m_json_object(json_object),
m_source_spec_builder(source_spec_builder) {
}
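// Illustrative source-spec shape parsed below (the exact keys inside "stream"
// and each snapshot object depend on the configured stream/snapshot handlers):
// {
//   "type": "raw",
//   "stream": {"type": "file", "file_path": "/mnt/image.raw"},
//   "snapshots": [{"type": "raw", "name": "snap1", "stream": {...}}]
// }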
template <typename I>
void RawFormat<I>::open(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
on_finish = new LambdaContext([this, on_finish](int r) {
handle_open(r, on_finish); });
// treat the base image as a HEAD-revision snapshot
Snapshots snapshots;
int r = m_source_spec_builder->build_snapshot(m_json_object, CEPH_NOSNAP,
&snapshots[CEPH_NOSNAP]);
if (r < 0) {
lderr(cct) << "failed to build HEAD revision handler: " << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
auto& snapshots_val = m_json_object[SNAPSHOTS_KEY];
if (snapshots_val.type() == json_spirit::array_type) {
auto& snapshots_arr = snapshots_val.get_array();
for (auto& snapshot_val : snapshots_arr) {
uint64_t index = snapshots.size();
if (snapshot_val.type() != json_spirit::obj_type) {
lderr(cct) << "invalid snapshot " << index << " JSON: "
<< cpp_strerror(r) << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& snapshot_obj = snapshot_val.get_obj();
r = m_source_spec_builder->build_snapshot(snapshot_obj, index,
&snapshots[index]);
if (r < 0) {
lderr(cct) << "failed to build snapshot " << index << " handler: "
<< cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
}
} else if (snapshots_val.type() != json_spirit::null_type) {
lderr(cct) << "invalid snapshots array" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_snapshots = std::move(snapshots);
auto gather_ctx = new C_Gather(cct, on_finish);
SnapshotInterface* previous_snapshot = nullptr;
for (auto& [_, snapshot] : m_snapshots) {
snapshot->open(previous_snapshot, gather_ctx->new_sub());
previous_snapshot = snapshot.get();
}
gather_ctx->activate();
}
template <typename I>
void RawFormat<I>::handle_open(int r, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to open raw image: " << cpp_strerror(r)
<< dendl;
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto& [_, snapshot] : m_snapshots) {
snapshot->close(gather_ctx->new_sub());
}
m_image_ctx->state->close(new LambdaContext(
[r, on_finish=gather_ctx->new_sub()](int _) { on_finish->complete(r); }));
gather_ctx->activate();
return;
}
on_finish->complete(0);
}
template <typename I>
void RawFormat<I>::close(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto& [snap_id, snapshot] : m_snapshots) {
snapshot->close(gather_ctx->new_sub());
}
gather_ctx->activate();
}
template <typename I>
void RawFormat<I>::get_snapshots(SnapInfos* snap_infos, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
snap_infos->clear();
for (auto& [snap_id, snapshot] : m_snapshots) {
if (snap_id == CEPH_NOSNAP) {
continue;
}
snap_infos->emplace(snap_id, snapshot->get_snap_info());
}
on_finish->complete(0);
}
template <typename I>
void RawFormat<I>::get_image_size(uint64_t snap_id, uint64_t* size,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto snapshot_it = m_snapshots.find(snap_id);
if (snapshot_it == m_snapshots.end()) {
on_finish->complete(-ENOENT);
return;
}
*size = snapshot_it->second->get_snap_info().size;
on_finish->complete(0);
}
template <typename I>
bool RawFormat<I>::read(
io::AioCompletion* aio_comp, uint64_t snap_id, io::Extents&& image_extents,
io::ReadResult&& read_result, int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "snap_id=" << snap_id << ", "
<< "image_extents=" << image_extents << dendl;
auto snapshot_it = m_snapshots.find(snap_id);
if (snapshot_it == m_snapshots.end()) {
aio_comp->fail(-ENOENT);
return true;
}
snapshot_it->second->read(aio_comp, std::move(image_extents),
std::move(read_result), op_flags, read_flags,
parent_trace);
return true;
}
template <typename I>
void RawFormat<I>::list_snaps(io::Extents&& image_extents,
io::SnapIds&& snap_ids, int list_snaps_flags,
io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
on_finish = new LambdaContext([this, snap_ids=std::move(snap_ids),
snapshot_delta, on_finish](int r) mutable {
handle_list_snaps(r, std::move(snap_ids), snapshot_delta, on_finish);
});
auto gather_ctx = new C_Gather(cct, on_finish);
std::optional<uint64_t> previous_size = std::nullopt;
for (auto& [snap_id, snapshot] : m_snapshots) {
auto& sparse_extents = (*snapshot_delta)[{snap_id, snap_id}];
// zero out any space between the previous snapshot end and this
// snapshot's end
auto& snap_info = snapshot->get_snap_info();
util::zero_shrunk_snapshot(cct, image_extents, snap_id, snap_info.size,
&previous_size, &sparse_extents);
// build set of data/zeroed extents for the current snapshot
snapshot->list_snap(io::Extents{image_extents}, list_snaps_flags,
&sparse_extents, parent_trace, gather_ctx->new_sub());
}
gather_ctx->activate();
}
template <typename I>
void RawFormat<I>::handle_list_snaps(int r, io::SnapIds&& snap_ids,
io::SnapshotDelta* snapshot_delta,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", "
<< "snapshot_delta=" << snapshot_delta << dendl;
util::merge_snapshot_delta(snap_ids, snapshot_delta);
on_finish->complete(r);
}
} // namespace migration
} // namespace librbd
template class librbd::migration::RawFormat<librbd::ImageCtx>;
| 7,465 | 30.635593 | 80 | cc |
null | ceph-main/src/librbd/migration/RawFormat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_RAW_FORMAT_H
#define CEPH_LIBRBD_MIGRATION_RAW_FORMAT_H
#include "include/int_types.h"
#include "librbd/Types.h"
#include "librbd/migration/FormatInterface.h"
#include "json_spirit/json_spirit.h"
#include <map>
#include <memory>
struct Context;
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace migration {
template <typename> struct SourceSpecBuilder;
struct SnapshotInterface;
template <typename ImageCtxT>
class RawFormat : public FormatInterface {
public:
static RawFormat* create(
ImageCtxT* image_ctx, const json_spirit::mObject& json_object,
const SourceSpecBuilder<ImageCtxT>* source_spec_builder) {
return new RawFormat(image_ctx, json_object, source_spec_builder);
}
RawFormat(ImageCtxT* image_ctx, const json_spirit::mObject& json_object,
const SourceSpecBuilder<ImageCtxT>* source_spec_builder);
RawFormat(const RawFormat&) = delete;
RawFormat& operator=(const RawFormat&) = delete;
void open(Context* on_finish) override;
void close(Context* on_finish) override;
void get_snapshots(SnapInfos* snap_infos, Context* on_finish) override;
void get_image_size(uint64_t snap_id, uint64_t* size,
Context* on_finish) override;
bool read(io::AioCompletion* aio_comp, uint64_t snap_id,
io::Extents&& image_extents, io::ReadResult&& read_result,
int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) override;
void list_snaps(io::Extents&& image_extents, io::SnapIds&& snap_ids,
int list_snaps_flags, io::SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace,
Context* on_finish) override;
private:
typedef std::shared_ptr<SnapshotInterface> Snapshot;
typedef std::map<uint64_t, Snapshot> Snapshots;
ImageCtxT* m_image_ctx;
json_spirit::mObject m_json_object;
const SourceSpecBuilder<ImageCtxT>* m_source_spec_builder;
Snapshots m_snapshots;
void handle_open(int r, Context* on_finish);
void handle_list_snaps(int r, io::SnapIds&& snap_ids,
io::SnapshotDelta* snapshot_delta, Context* on_finish);
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::RawFormat<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_RAW_FORMAT_H
| 2,455 | 30.088608 | 80 | h |
null | ceph-main/src/librbd/migration/RawSnapshot.cc | // -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/RawSnapshot.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include "librbd/migration/SourceSpecBuilder.h"
#include "librbd/migration/StreamInterface.h"
namespace librbd {
namespace migration {
namespace {
const std::string NAME_KEY{"name"};
} // anonymous namespace
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::RawSnapshot::OpenRequest " \
<< this << " " << __func__ << ": "
template <typename I>
struct RawSnapshot<I>::OpenRequest {
RawSnapshot* raw_snapshot;
Context* on_finish;
OpenRequest(RawSnapshot* raw_snapshot, Context* on_finish)
: raw_snapshot(raw_snapshot), on_finish(on_finish) {
}
void send() {
open_stream();
}
void open_stream() {
auto cct = raw_snapshot->m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = util::create_context_callback<
OpenRequest, &OpenRequest::handle_open_stream>(this);
raw_snapshot->m_stream->open(ctx);
}
void handle_open_stream(int r) {
auto cct = raw_snapshot->m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to open stream: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
get_image_size();
}
void get_image_size() {
auto cct = raw_snapshot->m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = util::create_context_callback<
OpenRequest, &OpenRequest::handle_get_image_size>(this);
raw_snapshot->m_stream->get_size(&raw_snapshot->m_snap_info.size, ctx);
}
void handle_get_image_size(int r) {
auto cct = raw_snapshot->m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << ", "
<< "image_size=" << raw_snapshot->m_snap_info.size << dendl;
if (r < 0) {
lderr(cct) << "failed to open stream: " << cpp_strerror(r) << dendl;
close_stream(r);
return;
}
finish(0);
}
void close_stream(int r) {
auto cct = raw_snapshot->m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = new LambdaContext([this, r](int) {
handle_close_stream(r);
});
raw_snapshot->m_stream->close(ctx);
}
void handle_close_stream(int r) {
auto cct = raw_snapshot->m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
raw_snapshot->m_stream.reset();
finish(r);
}
void finish(int r) {
auto cct = raw_snapshot->m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
on_finish->complete(r);
delete this;
}
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::RawSnapshot: " << this \
<< " " << __func__ << ": "
template <typename I>
RawSnapshot<I>::RawSnapshot(I* image_ctx,
const json_spirit::mObject& json_object,
const SourceSpecBuilder<I>* source_spec_builder,
uint64_t index)
: m_image_ctx(image_ctx), m_json_object(json_object),
m_source_spec_builder(source_spec_builder), m_index(index),
m_snap_info({}, {}, 0, {}, 0, 0, {}) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
}
template <typename I>
void RawSnapshot<I>::open(SnapshotInterface* previous_snapshot,
Context* on_finish) {
auto cct = m_image_ctx->cct;
// special-case for treating the HEAD revision as a snapshot
if (m_index != CEPH_NOSNAP) {
auto& name_val = m_json_object[NAME_KEY];
if (name_val.type() == json_spirit::str_type) {
m_snap_info.name = name_val.get_str();
} else if (name_val.type() == json_spirit::null_type) {
uuid_d uuid_gen;
uuid_gen.generate_random();
m_snap_info.name = uuid_gen.to_string();
} else {
lderr(cct) << "invalid snapshot name" << dendl;
on_finish->complete(-EINVAL);
return;
}
}
ldout(cct, 10) << "name=" << m_snap_info.name << dendl;
int r = m_source_spec_builder->build_stream(m_json_object, &m_stream);
if (r < 0) {
lderr(cct) << "failed to build migration stream handler" << cpp_strerror(r)
<< dendl;
on_finish->complete(r);
return;
}
auto req = new OpenRequest(this, on_finish);
req->send();
}
template <typename I>
void RawSnapshot<I>::close(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
if (!m_stream) {
on_finish->complete(0);
return;
}
m_stream->close(on_finish);
}
template <typename I>
void RawSnapshot<I>::read(io::AioCompletion* aio_comp,
io::Extents&& image_extents,
io::ReadResult&& read_result, int op_flags,
int read_flags,
const ZTracer::Trace &parent_trace) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
aio_comp->read_result = std::move(read_result);
aio_comp->read_result.set_image_extents(image_extents);
aio_comp->set_request_count(1);
auto ctx = new io::ReadResult::C_ImageReadRequest(aio_comp,
0, image_extents);
// raw directly maps the image-extent IO down to a byte IO extent
m_stream->read(std::move(image_extents), &ctx->bl, ctx);
}
template <typename I>
void RawSnapshot<I>::list_snap(io::Extents&& image_extents,
int list_snaps_flags,
io::SparseExtents* sparse_extents,
const ZTracer::Trace &parent_trace,
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "image_extents=" << image_extents << dendl;
  // raw doesn't support sparse extents so list the full IO extent as data
for (auto& [image_offset, image_length] : image_extents) {
sparse_extents->insert(image_offset, image_length,
{io::SPARSE_EXTENT_STATE_DATA, image_length});
}
on_finish->complete(0);
}
} // namespace migration
} // namespace librbd
template class librbd::migration::RawSnapshot<librbd::ImageCtx>;
| 6,330 | 27.647059 | 79 | cc |
null | ceph-main/src/librbd/migration/RawSnapshot.h | // -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_RAW_SNAPSHOT_H
#define CEPH_LIBRBD_MIGRATION_RAW_SNAPSHOT_H
#include "include/buffer_fwd.h"
#include "include/int_types.h"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
#include "librbd/migration/SnapshotInterface.h"
#include "json_spirit/json_spirit.h"
#include <memory>
namespace librbd {
struct ImageCtx;
namespace migration {
template <typename> struct SourceSpecBuilder;
struct StreamInterface;
template <typename ImageCtxT>
class RawSnapshot : public SnapshotInterface {
public:
static RawSnapshot* create(
    ImageCtxT* image_ctx, const json_spirit::mObject& json_object,
const SourceSpecBuilder<ImageCtxT>* source_spec_builder, uint64_t index) {
return new RawSnapshot(image_ctx, json_object, source_spec_builder, index);
}
RawSnapshot(ImageCtxT* image_ctx, const json_spirit::mObject& json_object,
const SourceSpecBuilder<ImageCtxT>* source_spec_builder,
uint64_t index);
RawSnapshot(const RawSnapshot&) = delete;
RawSnapshot& operator=(const RawSnapshot&) = delete;
void open(SnapshotInterface* previous_snapshot, Context* on_finish) override;
void close(Context* on_finish) override;
const SnapInfo& get_snap_info() const override {
return m_snap_info;
}
void read(io::AioCompletion* aio_comp, io::Extents&& image_extents,
io::ReadResult&& read_result, int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) override;
void list_snap(io::Extents&& image_extents, int list_snaps_flags,
io::SparseExtents* sparse_extents,
const ZTracer::Trace &parent_trace,
Context* on_finish) override;
private:
struct OpenRequest;
ImageCtxT* m_image_ctx;
json_spirit::mObject m_json_object;
const SourceSpecBuilder<ImageCtxT>* m_source_spec_builder;
uint64_t m_index = 0;
SnapInfo m_snap_info;
std::shared_ptr<StreamInterface> m_stream;
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::RawSnapshot<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_RAW_SNAPSHOT_H
| 2,256 | 28.697368 | 80 | h |
null | ceph-main/src/librbd/migration/S3Stream.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/S3Stream.h"
#include "common/armor.h"
#include "common/ceph_crypto.h"
#include "common/ceph_time.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include "librbd/migration/HttpClient.h"
#include "librbd/migration/HttpProcessorInterface.h"
#include <boost/beast/http.hpp>
#include <fmt/chrono.h>
#include <fmt/format.h>
#include <time.h>
namespace librbd {
namespace migration {
using HttpRequest = boost::beast::http::request<boost::beast::http::empty_body>;
namespace {
const std::string URL_KEY {"url"};
const std::string ACCESS_KEY {"access_key"};
const std::string SECRET_KEY {"secret_key"};
} // anonymous namespace
template <typename I>
struct S3Stream<I>::HttpProcessor : public HttpProcessorInterface {
S3Stream* s3stream;
HttpProcessor(S3Stream* s3stream) : s3stream(s3stream) {
}
void process_request(EmptyRequest& request) override {
s3stream->process_request(request);
}
};
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::S3Stream: " << this \
<< " " << __func__ << ": "
template <typename I>
S3Stream<I>::S3Stream(I* image_ctx, const json_spirit::mObject& json_object)
: m_image_ctx(image_ctx), m_cct(image_ctx->cct),
m_asio_engine(image_ctx->asio_engine), m_json_object(json_object),
m_http_processor(std::make_unique<HttpProcessor>(this)) {
}
template <typename I>
S3Stream<I>::~S3Stream() {
}
template <typename I>
void S3Stream<I>::open(Context* on_finish) {
auto& url_value = m_json_object[URL_KEY];
if (url_value.type() != json_spirit::str_type) {
lderr(m_cct) << "failed to locate '" << URL_KEY << "' key" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& access_key = m_json_object[ACCESS_KEY];
if (access_key.type() != json_spirit::str_type) {
lderr(m_cct) << "failed to locate '" << ACCESS_KEY << "' key" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto& secret_key = m_json_object[SECRET_KEY];
if (secret_key.type() != json_spirit::str_type) {
lderr(m_cct) << "failed to locate '" << SECRET_KEY << "' key" << dendl;
on_finish->complete(-EINVAL);
return;
}
m_url = url_value.get_str();
librados::Rados rados(m_image_ctx->md_ctx);
int r = 0;
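  // the access/secret keys may be config-key URIs, in which case the actual
  // credentials are fetched from the cluster's config-key store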
m_access_key = access_key.get_str();
if (util::is_config_key_uri(m_access_key)) {
r = util::get_config_key(rados, m_access_key, &m_access_key);
if (r < 0) {
lderr(m_cct) << "failed to retrieve access key from config: "
<< cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
}
m_secret_key = secret_key.get_str();
if (util::is_config_key_uri(m_secret_key)) {
r = util::get_config_key(rados, m_secret_key, &m_secret_key);
if (r < 0) {
lderr(m_cct) << "failed to retrieve secret key from config: "
<< cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
}
ldout(m_cct, 10) << "url=" << m_url << ", "
<< "access_key=" << m_access_key << dendl;
m_http_client.reset(HttpClient<I>::create(m_image_ctx, m_url));
m_http_client->set_http_processor(m_http_processor.get());
m_http_client->open(on_finish);
}
template <typename I>
void S3Stream<I>::close(Context* on_finish) {
ldout(m_cct, 10) << dendl;
if (!m_http_client) {
on_finish->complete(0);
return;
}
m_http_client->close(on_finish);
}
template <typename I>
void S3Stream<I>::get_size(uint64_t* size, Context* on_finish) {
ldout(m_cct, 10) << dendl;
m_http_client->get_size(size, on_finish);
}
template <typename I>
void S3Stream<I>::read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) {
ldout(m_cct, 20) << "byte_extents=" << byte_extents << dendl;
m_http_client->read(std::move(byte_extents), data, on_finish);
}
template <typename I>
void S3Stream<I>::process_request(HttpRequest& http_request) {
ldout(m_cct, 20) << dendl;
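  // sign the request using S3's AWS signature version 2 scheme: an HMAC-SHA1
  // signature over the canonical string-to-sign, carried in the
  // Authorization header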
// format RFC 1123 date/time
auto time = ceph::real_clock::to_time_t(ceph::real_clock::now());
struct tm timeInfo;
gmtime_r(&time, &timeInfo);
std::string date = fmt::format("{:%a, %d %b %Y %H:%M:%S %z}", timeInfo);
http_request.set(boost::beast::http::field::date, date);
// note: we don't support S3 subresources
std::string canonicalized_resource = std::string(http_request.target());
std::string string_to_sign = fmt::format(
"{}\n\n\n{}\n{}",
std::string(boost::beast::http::to_string(http_request.method())),
date, canonicalized_resource);
// create HMAC-SHA1 signature from secret key + string-to-sign
sha1_digest_t digest;
ceph::crypto::HMACSHA1 hmac(
reinterpret_cast<const unsigned char*>(m_secret_key.data()),
m_secret_key.size());
hmac.Update(reinterpret_cast<const unsigned char*>(string_to_sign.data()),
string_to_sign.size());
hmac.Final(reinterpret_cast<unsigned char*>(digest.v));
// base64 encode the result
char buf[64];
int r = ceph_armor(std::begin(buf), std::begin(buf) + sizeof(buf),
reinterpret_cast<const char *>(digest.v),
reinterpret_cast<const char *>(digest.v + digest.SIZE));
if (r < 0) {
ceph_abort("ceph_armor failed");
}
// store the access-key + signature in the HTTP authorization header
std::string signature = std::string(std::begin(buf), std::begin(buf) + r);
std::string authorization = fmt::format("AWS {}:{}", m_access_key, signature);
http_request.set(boost::beast::http::field::authorization, authorization);
ldout(m_cct, 20) << "string_to_sign=" << string_to_sign << ", "
<< "authorization=" << authorization << dendl;
}
} // namespace migration
} // namespace librbd
template class librbd::migration::S3Stream<librbd::ImageCtx>;
| 6,118 | 29.442786 | 80 | cc |
null | ceph-main/src/librbd/migration/S3Stream.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_S3_STREAM_H
#define CEPH_LIBRBD_MIGRATION_S3_STREAM_H
#include "include/int_types.h"
#include "librbd/migration/StreamInterface.h"
#include <boost/beast/http/empty_body.hpp>
#include <boost/beast/http/message.hpp>
#include <boost/beast/http/string_body.hpp>
#include <json_spirit/json_spirit.h>
#include <memory>
#include <string>
struct Context;
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace migration {
template <typename> class HttpClient;
template <typename ImageCtxT>
class S3Stream : public StreamInterface {
public:
static S3Stream* create(ImageCtxT* image_ctx,
const json_spirit::mObject& json_object) {
return new S3Stream(image_ctx, json_object);
}
S3Stream(ImageCtxT* image_ctx, const json_spirit::mObject& json_object);
~S3Stream() override;
S3Stream(const S3Stream&) = delete;
S3Stream& operator=(const S3Stream&) = delete;
void open(Context* on_finish) override;
void close(Context* on_finish) override;
void get_size(uint64_t* size, Context* on_finish) override;
void read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) override;
private:
using HttpRequest = boost::beast::http::request<
boost::beast::http::empty_body>;
using HttpResponse = boost::beast::http::response<
boost::beast::http::string_body>;
struct HttpProcessor;
ImageCtxT* m_image_ctx;
CephContext* m_cct;
std::shared_ptr<AsioEngine> m_asio_engine;
json_spirit::mObject m_json_object;
std::string m_url;
std::string m_access_key;
std::string m_secret_key;
std::unique_ptr<HttpProcessor> m_http_processor;
std::unique_ptr<HttpClient<ImageCtxT>> m_http_client;
void process_request(HttpRequest& http_request);
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::S3Stream<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_S3_STREAM_H
| 2,039 | 24.822785 | 74 | h |
null | ceph-main/src/librbd/migration/SnapshotInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_SNAPSHOT_INTERFACE_H
#define CEPH_LIBRBD_MIGRATION_SNAPSHOT_INTERFACE_H
#include "include/buffer_fwd.h"
#include "include/int_types.h"
#include "common/zipkin_trace.h"
#include "librbd/Types.h"
#include "librbd/io/Types.h"
#include <string>
struct Context;
namespace librbd {
namespace io {
struct AioCompletion;
struct ReadResult;
} // namespace io
namespace migration {
struct SnapshotInterface {
virtual ~SnapshotInterface() {
}
virtual void open(SnapshotInterface* previous_snapshot,
Context* on_finish) = 0;
virtual void close(Context* on_finish) = 0;
virtual const SnapInfo& get_snap_info() const = 0;
virtual void read(io::AioCompletion* aio_comp, io::Extents&& image_extents,
io::ReadResult&& read_result, int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) = 0;
virtual void list_snap(io::Extents&& image_extents, int list_snaps_flags,
io::SparseExtents* sparse_extents,
const ZTracer::Trace &parent_trace,
Context* on_finish) = 0;
};
} // namespace migration
} // namespace librbd
#endif // CEPH_LIBRBD_MIGRATION_SNAPSHOT_INTERFACE_H
| 1,348 | 26.530612 | 79 | h |
null | ceph-main/src/librbd/migration/SourceSpecBuilder.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/SourceSpecBuilder.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/migration/FileStream.h"
#include "librbd/migration/HttpStream.h"
#include "librbd/migration/S3Stream.h"
#include "librbd/migration/NativeFormat.h"
#include "librbd/migration/QCOWFormat.h"
#include "librbd/migration/RawFormat.h"
#include "librbd/migration/RawSnapshot.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::SourceSpecBuilder: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace migration {
namespace {
const std::string STREAM_KEY{"stream"};
const std::string TYPE_KEY{"type"};
} // anonymous namespace
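// a source-spec is a small JSON document, for example (illustrative sketch
// only, other format/stream combinations are supported):
//   {"type": "raw", "stream": {"type": "http", "url": "http://example/disk.raw"}}
// where "type" selects the format handler and the nested "stream" object
// selects the underlying transport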
template <typename I>
int SourceSpecBuilder<I>::parse_source_spec(
const std::string& source_spec,
json_spirit::mObject* source_spec_object) const {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
json_spirit::mValue json_root;
  if (json_spirit::read(source_spec, json_root)) {
try {
*source_spec_object = json_root.get_obj();
return 0;
} catch (std::runtime_error&) {
}
}
lderr(cct) << "invalid source-spec JSON" << dendl;
return -EBADMSG;
}
template <typename I>
int SourceSpecBuilder<I>::build_format(
const json_spirit::mObject& source_spec_object, bool import_only,
std::unique_ptr<FormatInterface>* format) const {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto type_value_it = source_spec_object.find(TYPE_KEY);
if (type_value_it == source_spec_object.end() ||
type_value_it->second.type() != json_spirit::str_type) {
lderr(cct) << "failed to locate format type value" << dendl;
return -EINVAL;
}
auto& type = type_value_it->second.get_str();
if (type == "native") {
format->reset(NativeFormat<I>::create(m_image_ctx, source_spec_object,
import_only));
} else if (type == "qcow") {
format->reset(QCOWFormat<I>::create(m_image_ctx, source_spec_object, this));
} else if (type == "raw") {
format->reset(RawFormat<I>::create(m_image_ctx, source_spec_object, this));
} else {
lderr(cct) << "unknown or unsupported format type '" << type << "'"
<< dendl;
return -ENOSYS;
}
return 0;
}
template <typename I>
int SourceSpecBuilder<I>::build_snapshot(
const json_spirit::mObject& source_spec_object, uint64_t index,
std::shared_ptr<SnapshotInterface>* snapshot) const {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto type_value_it = source_spec_object.find(TYPE_KEY);
if (type_value_it == source_spec_object.end() ||
type_value_it->second.type() != json_spirit::str_type) {
lderr(cct) << "failed to locate snapshot type value" << dendl;
return -EINVAL;
}
auto& type = type_value_it->second.get_str();
if (type == "raw") {
snapshot->reset(RawSnapshot<I>::create(m_image_ctx, source_spec_object,
this, index));
} else {
lderr(cct) << "unknown or unsupported format type '" << type << "'"
<< dendl;
return -ENOSYS;
}
return 0;
}
template <typename I>
int SourceSpecBuilder<I>::build_stream(
const json_spirit::mObject& source_spec_object,
std::shared_ptr<StreamInterface>* stream) const {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto stream_value_it = source_spec_object.find(STREAM_KEY);
if (stream_value_it == source_spec_object.end() ||
stream_value_it->second.type() != json_spirit::obj_type) {
lderr(cct) << "failed to locate stream object" << dendl;
return -EINVAL;
}
auto& stream_obj = stream_value_it->second.get_obj();
auto type_value_it = stream_obj.find(TYPE_KEY);
if (type_value_it == stream_obj.end() ||
type_value_it->second.type() != json_spirit::str_type) {
lderr(cct) << "failed to locate stream type value" << dendl;
return -EINVAL;
}
auto& type = type_value_it->second.get_str();
if (type == "file") {
stream->reset(FileStream<I>::create(m_image_ctx, stream_obj));
} else if (type == "http") {
stream->reset(HttpStream<I>::create(m_image_ctx, stream_obj));
} else if (type == "s3") {
stream->reset(S3Stream<I>::create(m_image_ctx, stream_obj));
} else {
lderr(cct) << "unknown or unsupported stream type '" << type << "'"
<< dendl;
return -ENOSYS;
}
return 0;
}
} // namespace migration
} // namespace librbd
template class librbd::migration::SourceSpecBuilder<librbd::ImageCtx>;
| 4,674 | 30.587838 | 80 | cc |
null | ceph-main/src/librbd/migration/SourceSpecBuilder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_SOURCE_SPEC_BUILDER_H
#define CEPH_LIBRBD_MIGRATION_SOURCE_SPEC_BUILDER_H
#include "include/int_types.h"
#include <json_spirit/json_spirit.h>
#include <memory>
#include <optional>
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
namespace migration {
struct FormatInterface;
struct SnapshotInterface;
struct StreamInterface;
template <typename ImageCtxT>
class SourceSpecBuilder {
public:
SourceSpecBuilder(ImageCtxT* image_ctx) : m_image_ctx(image_ctx) {
}
int parse_source_spec(const std::string& source_spec,
json_spirit::mObject* source_spec_object) const;
int build_format(const json_spirit::mObject& format_object, bool import_only,
std::unique_ptr<FormatInterface>* format) const;
int build_snapshot(const json_spirit::mObject& source_spec_object,
uint64_t index,
std::shared_ptr<SnapshotInterface>* snapshot) const;
int build_stream(const json_spirit::mObject& source_spec_object,
std::shared_ptr<StreamInterface>* stream) const;
private:
ImageCtxT* m_image_ctx;
};
} // namespace migration
} // namespace librbd
extern template class librbd::migration::SourceSpecBuilder<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIGRATION_SOURCE_SPEC_BUILDER_H
| 1,437 | 25.145455 | 79 | h |
null | ceph-main/src/librbd/migration/StreamInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_STREAM_INTERFACE_H
#define CEPH_LIBRBD_MIGRATION_STREAM_INTERFACE_H
#include "include/buffer_fwd.h"
#include "include/int_types.h"
#include "librbd/io/Types.h"
struct Context;
namespace librbd {
namespace migration {
struct StreamInterface {
virtual ~StreamInterface() {
}
virtual void open(Context* on_finish) = 0;
virtual void close(Context* on_finish) = 0;
virtual void get_size(uint64_t* size, Context* on_finish) = 0;
virtual void read(io::Extents&& byte_extents, bufferlist* data,
Context* on_finish) = 0;
};
} // namespace migration
} // namespace librbd
#endif // CEPH_LIBRBD_MIGRATION_STREAM_INTERFACE_H
| 783 | 22.757576 | 70 | h |
null | ceph-main/src/librbd/migration/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_TYPES_H
#define CEPH_LIBRBD_MIGRATION_TYPES_H
#include <string>
#include <utility>
namespace librbd {
namespace migration {
enum UrlScheme {
URL_SCHEME_HTTP,
URL_SCHEME_HTTPS,
};
struct UrlSpec {
UrlSpec() {}
UrlSpec(UrlScheme scheme, const std::string& host, const std::string& port,
const std::string& path)
: scheme(scheme), host(host), port(port), path(path) {
}
UrlScheme scheme = URL_SCHEME_HTTP;
std::string host;
std::string port = "80";
std::string path = "/";
};
inline bool operator==(const UrlSpec& lhs, const UrlSpec& rhs) {
return (lhs.scheme == rhs.scheme &&
lhs.host == rhs.host &&
lhs.port == rhs.port &&
lhs.path == rhs.path);
}
} // namespace migration
} // namespace librbd
#endif // CEPH_LIBRBD_MIGRATION_TYPES_H
| 936 | 20.790698 | 77 | h |
null | ceph-main/src/librbd/migration/Utils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/migration/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include <boost/lexical_cast.hpp>
#include <regex>
namespace librbd {
namespace migration {
namespace util {
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::migration::util::" << __func__ << ": "
int parse_url(CephContext* cct, const std::string& url, UrlSpec* url_spec) {
ldout(cct, 10) << "url=" << url << dendl;
*url_spec = UrlSpec{};
// parse the provided URL (scheme, user, password, host, port, path,
// parameters, query, and fragment)
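  // e.g. "https://ceph.io:8443/images/disk.raw" yields scheme "https",
  // host "ceph.io", port "8443" and path "/images/disk.raw"; any user,
  // password, parameter, query or fragment components are ignored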
std::regex url_regex(
R"(^(?:([^:/]*)://)?(?:(\w+)(?::(\w+))?@)?([^/;\?:#]+)(?::([^/;\?#]+))?)"
R"((?:/([^;\?#]*))?(?:;([^\?#]+))?(?:\?([^#]+))?(?:#(\w+))?$)");
std::smatch match;
  if (!std::regex_match(url, match, url_regex)) {
lderr(cct) << "invalid url: '" << url << "'" << dendl;
return -EINVAL;
}
auto& scheme = match[1];
if (scheme == "http" || scheme == "") {
url_spec->scheme = URL_SCHEME_HTTP;
} else if (scheme == "https") {
url_spec->scheme = URL_SCHEME_HTTPS;
url_spec->port = "443";
} else {
lderr(cct) << "invalid url scheme: '" << url << "'" << dendl;
return -EINVAL;
}
url_spec->host = match[4];
auto& port = match[5];
if (port.matched) {
try {
boost::lexical_cast<uint16_t>(port);
} catch (boost::bad_lexical_cast&) {
lderr(cct) << "invalid url port: '" << url << "'" << dendl;
return -EINVAL;
}
url_spec->port = port;
}
auto& path = match[6];
if (path.matched) {
url_spec->path += path;
}
return 0;
}
void zero_shrunk_snapshot(CephContext* cct, const io::Extents& image_extents,
uint64_t snap_id, uint64_t new_size,
std::optional<uint64_t> *previous_size,
io::SparseExtents* sparse_extents) {
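  // if the image shrank relative to the previous snapshot, report the
  // truncated region as zeroed at this snapshot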
if (*previous_size && **previous_size > new_size) {
ldout(cct, 20) << "snapshot resize " << **previous_size << " -> "
<< new_size << dendl;
interval_set<uint64_t> zero_interval;
zero_interval.insert(new_size, **previous_size - new_size);
for (auto& image_extent : image_extents) {
interval_set<uint64_t> image_interval;
image_interval.insert(image_extent.first, image_extent.second);
image_interval.intersection_of(zero_interval);
for (auto [image_offset, image_length] : image_interval) {
ldout(cct, 20) << "zeroing extent " << image_offset << "~"
<< image_length << " at snapshot " << snap_id << dendl;
sparse_extents->insert(image_offset, image_length,
{io::SPARSE_EXTENT_STATE_ZEROED, image_length});
}
}
}
*previous_size = new_size;
}
void merge_snapshot_delta(const io::SnapIds& snap_ids,
io::SnapshotDelta* snapshot_delta) {
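  // squash the raw per-snapshot deltas down to the snap ids that were
  // actually requested, folding intervals from unrequested snapshots into
  // the next requested snapshot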
io::SnapshotDelta orig_snapshot_delta = std::move(*snapshot_delta);
snapshot_delta->clear();
auto snap_id_it = snap_ids.begin();
ceph_assert(snap_id_it != snap_ids.end());
// merge any snapshot intervals that were not requested
std::list<io::SparseExtents*> pending_sparse_extents;
for (auto& [snap_key, sparse_extents] : orig_snapshot_delta) {
// advance to next valid requested snap id
while (snap_id_it != snap_ids.end() && *snap_id_it < snap_key.first) {
++snap_id_it;
}
if (snap_id_it == snap_ids.end()) {
break;
}
// loop through older write/read snapshot sparse extents to remove any
// overlaps with the current sparse extent
for (auto prev_sparse_extents : pending_sparse_extents) {
for (auto& sparse_extent : sparse_extents) {
prev_sparse_extents->erase(sparse_extent.get_off(),
sparse_extent.get_len());
}
}
auto write_read_snap_ids = std::make_pair(*snap_id_it, snap_key.second);
(*snapshot_delta)[write_read_snap_ids] = std::move(sparse_extents);
if (write_read_snap_ids.first > snap_key.first) {
// the current snapshot wasn't requested so it might need to get
// merged with a later snapshot
pending_sparse_extents.push_back(&(*snapshot_delta)[write_read_snap_ids]);
} else {
      // we don't merge results past a valid requested snapshot
pending_sparse_extents.clear();
}
}
}
} // namespace util
} // namespace migration
} // namespace librbd
| 4,523 | 32.761194 | 80 | cc |
null | ceph-main/src/librbd/migration/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIGRATION_UTILS_H
#define CEPH_LIBRBD_MIGRATION_UTILS_H
#include "include/common_fwd.h"
#include "librbd/io/Types.h"
#include "librbd/migration/Types.h"
#include <optional>
#include <string>
namespace librbd {
namespace migration {
namespace util {
int parse_url(CephContext* cct, const std::string& url, UrlSpec* url_spec);
void zero_shrunk_snapshot(CephContext* cct, const io::Extents& image_extents,
uint64_t snap_id, uint64_t new_size,
std::optional<uint64_t> *previous_size,
io::SparseExtents* sparse_extents);
void merge_snapshot_delta(const io::SnapIds& snap_ids,
io::SnapshotDelta* snapshot_delta);
} // namespace util
} // namespace migration
} // namespace librbd
#endif // CEPH_LIBRBD_MIGRATION_UTILS_H
| 943 | 29.451613 | 77 | h |
null | ceph-main/src/librbd/mirror/DemoteRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/DemoteRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/mirror/GetInfoRequest.h"
#include "librbd/mirror/snapshot/DemoteRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::DemoteRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace mirror {
using librbd::util::create_context_callback;
template <typename I>
void DemoteRequest<I>::send() {
get_info();
}
template <typename I>
void DemoteRequest<I>::get_info() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
auto ctx = create_context_callback<
DemoteRequest<I>, &DemoteRequest<I>::handle_get_info>(this);
auto req = GetInfoRequest<I>::create(m_image_ctx, &m_mirror_image,
&m_promotion_state,
&m_primary_mirror_uuid, ctx);
req->send();
}
template <typename I>
void DemoteRequest<I>::handle_get_info(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring state: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
} else if (m_mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
lderr(cct) << "mirroring is not currently enabled" << dendl;
finish(-EINVAL);
return;
} else if (m_promotion_state != PROMOTION_STATE_PRIMARY) {
lderr(cct) << "image is not primary" << dendl;
finish(-EINVAL);
return;
}
acquire_lock();
}
template <typename I>
void DemoteRequest<I>::acquire_lock() {
CephContext *cct = m_image_ctx.cct;
m_image_ctx.owner_lock.lock_shared();
if (m_image_ctx.exclusive_lock == nullptr) {
m_image_ctx.owner_lock.unlock_shared();
if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
lderr(cct) << "exclusive lock is not active" << dendl;
finish(-EINVAL);
} else {
demote();
}
return;
}
// avoid accepting new requests from peers while we demote
// the image
m_image_ctx.exclusive_lock->block_requests(0);
m_blocked_requests = true;
if (m_image_ctx.exclusive_lock->is_lock_owner()) {
m_image_ctx.owner_lock.unlock_shared();
demote();
return;
}
ldout(cct, 20) << dendl;
auto ctx = create_context_callback<
DemoteRequest<I>,
&DemoteRequest<I>::handle_acquire_lock>(this, m_image_ctx.exclusive_lock);
m_image_ctx.exclusive_lock->acquire_lock(ctx);
m_image_ctx.owner_lock.unlock_shared();
}
template <typename I>
void DemoteRequest<I>::handle_acquire_lock(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to lock image: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
m_image_ctx.owner_lock.lock_shared();
if (m_image_ctx.exclusive_lock != nullptr &&
!m_image_ctx.exclusive_lock->is_lock_owner()) {
r = m_image_ctx.exclusive_lock->get_unlocked_op_error();
m_image_ctx.owner_lock.unlock_shared();
lderr(cct) << "failed to acquire exclusive lock" << dendl;
finish(r);
return;
}
m_image_ctx.owner_lock.unlock_shared();
demote();
}
template <typename I>
void DemoteRequest<I>::demote() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
auto ctx = create_context_callback<
DemoteRequest<I>, &DemoteRequest<I>::handle_demote>(this);
if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
Journal<I>::demote(&m_image_ctx, ctx);
} else if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
auto req = mirror::snapshot::DemoteRequest<I>::create(
&m_image_ctx, m_mirror_image.global_image_id, ctx);
req->send();
} else {
lderr(cct) << "unknown image mirror mode: " << m_mirror_image.mode << dendl;
m_ret_val = -EOPNOTSUPP;
release_lock();
}
}
template <typename I>
void DemoteRequest<I>::handle_demote(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0) {
m_ret_val = r;
lderr(cct) << "failed to demote image: " << cpp_strerror(r) << dendl;
}
release_lock();
}
template <typename I>
void DemoteRequest<I>::release_lock() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
m_image_ctx.owner_lock.lock_shared();
if (m_image_ctx.exclusive_lock == nullptr) {
m_image_ctx.owner_lock.unlock_shared();
finish(0);
return;
}
auto ctx = create_context_callback<
DemoteRequest<I>,
&DemoteRequest<I>::handle_release_lock>(this, m_image_ctx.exclusive_lock);
m_image_ctx.exclusive_lock->release_lock(ctx);
m_image_ctx.owner_lock.unlock_shared();
}
template <typename I>
void DemoteRequest<I>::handle_release_lock(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to release exclusive lock: " << cpp_strerror(r)
<< dendl;
}
finish(r);
}
template <typename I>
void DemoteRequest<I>::finish(int r) {
if (m_ret_val < 0) {
r = m_ret_val;
}
{
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_blocked_requests && m_image_ctx.exclusive_lock != nullptr) {
m_image_ctx.exclusive_lock->unblock_requests();
}
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::DemoteRequest<librbd::ImageCtx>;
| 5,882 | 26.110599 | 80 | cc |
null | ceph-main/src/librbd/mirror/DemoteRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_DEMOTE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_DEMOTE_REQUEST_H
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/mirror/Types.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
template <typename ImageCtxT = librbd::ImageCtx>
class DemoteRequest {
public:
static DemoteRequest *create(ImageCtxT &image_ctx, Context *on_finish) {
return new DemoteRequest(image_ctx, on_finish);
}
DemoteRequest(ImageCtxT &image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_INFO
* |
* v
* ACQUIRE_LOCK * * * *
* | *
* v *
* DEMOTE *
* | *
* v *
* RELEASE_LOCK *
* | *
* v *
* <finish> < * * * * *
*
* @endverbatim
*/
ImageCtxT &m_image_ctx;
Context *m_on_finish;
int m_ret_val = 0;
bool m_blocked_requests = false;
cls::rbd::MirrorImage m_mirror_image;
PromotionState m_promotion_state = PROMOTION_STATE_PRIMARY;
std::string m_primary_mirror_uuid;
void get_info();
void handle_get_info(int r);
void acquire_lock();
void handle_acquire_lock(int r);
void demote();
void handle_demote(int r);
void release_lock();
void handle_release_lock(int r);
void finish(int r);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::DemoteRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_DEMOTE_REQUEST_H
| 1,723 | 18.816092 | 74 | h |
null | ceph-main/src/librbd/mirror/DisableRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/DisableRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/journal/cls_journal_client.h"
#include "journal/Journaler.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/PromoteRequest.h"
#include "librbd/mirror/GetInfoRequest.h"
#include "librbd/mirror/ImageRemoveRequest.h"
#include "librbd/mirror/ImageStateUpdateRequest.h"
#include "librbd/mirror/snapshot/PromoteRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::DisableRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
using util::create_rados_callback;
template <typename I>
DisableRequest<I>::DisableRequest(I *image_ctx, bool force, bool remove,
Context *on_finish)
: m_image_ctx(image_ctx), m_force(force), m_remove(remove),
m_on_finish(on_finish) {
}
template <typename I>
void DisableRequest<I>::send() {
send_get_mirror_info();
}
template <typename I>
void DisableRequest<I>::send_get_mirror_info() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
using klass = DisableRequest<I>;
Context *ctx = util::create_context_callback<
klass, &klass::handle_get_mirror_info>(this);
auto req = GetInfoRequest<I>::create(*m_image_ctx, &m_mirror_image,
&m_promotion_state,
&m_primary_mirror_uuid, ctx);
req->send();
}
template <typename I>
Context *DisableRequest<I>::handle_get_mirror_info(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << *result << dendl;
if (*result < 0) {
if (*result == -ENOENT) {
ldout(cct, 20) << "mirroring is not enabled for this image" << dendl;
*result = 0;
} else {
lderr(cct) << "failed to get mirroring info: " << cpp_strerror(*result)
<< dendl;
}
return m_on_finish;
}
m_is_primary = (m_promotion_state == PROMOTION_STATE_PRIMARY ||
m_promotion_state == PROMOTION_STATE_UNKNOWN);
if (!m_is_primary && !m_force) {
lderr(cct) << "mirrored image is not primary, "
<< "add force option to disable mirroring" << dendl;
*result = -EINVAL;
return m_on_finish;
}
send_image_state_update();
return nullptr;
}
template <typename I>
void DisableRequest<I>::send_image_state_update() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = util::create_context_callback<
DisableRequest<I>,
&DisableRequest<I>::handle_image_state_update>(this);
auto req = ImageStateUpdateRequest<I>::create(
m_image_ctx->md_ctx, m_image_ctx->id,
cls::rbd::MIRROR_IMAGE_STATE_DISABLING, m_mirror_image, ctx);
req->send();
}
template <typename I>
Context *DisableRequest<I>::handle_image_state_update(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << *result << dendl;
if (*result < 0) {
lderr(cct) << "failed to disable mirroring: " << cpp_strerror(*result)
<< dendl;
return m_on_finish;
}
send_promote_image();
return nullptr;
}
template <typename I>
void DisableRequest<I>::send_promote_image() {
if (m_is_primary) {
clean_mirror_state();
return;
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = util::create_context_callback<
DisableRequest<I>, &DisableRequest<I>::handle_promote_image>(this);
if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
// Not primary -- shouldn't have the journal open
ceph_assert(m_image_ctx->journal == nullptr);
auto req = journal::PromoteRequest<I>::create(m_image_ctx, true, ctx);
req->send();
} else if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
auto req = mirror::snapshot::PromoteRequest<I>::create(
m_image_ctx, m_mirror_image.global_image_id, ctx);
req->send();
} else {
lderr(cct) << "unknown image mirror mode: " << m_mirror_image.mode << dendl;
ctx->complete(-EOPNOTSUPP);
}
}
template <typename I>
Context *DisableRequest<I>::handle_promote_image(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << *result << dendl;
if (*result < 0) {
lderr(cct) << "failed to promote image: " << cpp_strerror(*result) << dendl;
return m_on_finish;
}
send_refresh_image();
return nullptr;
}
template <typename I>
void DisableRequest<I>::send_refresh_image() {
if (!m_image_ctx->state->is_refresh_required()) {
clean_mirror_state();
return;
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = util::create_context_callback<
DisableRequest<I>,
&DisableRequest<I>::handle_refresh_image>(this);
m_image_ctx->state->refresh(ctx);
}
template <typename I>
Context *DisableRequest<I>::handle_refresh_image(int* result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << *result << dendl;
if (*result < 0) {
lderr(cct) << "failed to refresh image: " << cpp_strerror(*result) << dendl;
return m_on_finish;
}
clean_mirror_state();
return nullptr;
}
template <typename I>
void DisableRequest<I>::clean_mirror_state() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
remove_mirror_snapshots();
} else {
send_get_clients();
}
}
template <typename I>
void DisableRequest<I>::send_get_clients() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
using klass = DisableRequest<I>;
Context *ctx = util::create_context_callback<
klass, &klass::handle_get_clients>(this);
std::string header_oid = ::journal::Journaler::header_oid(m_image_ctx->id);
m_clients.clear();
cls::journal::client::client_list(m_image_ctx->md_ctx, header_oid, &m_clients,
ctx);
}
template <typename I>
Context *DisableRequest<I>::handle_get_clients(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << *result << dendl;
std::unique_lock locker{m_lock};
ceph_assert(m_current_ops.empty());
if (*result < 0) {
lderr(cct) << "failed to get registered clients: " << cpp_strerror(*result)
<< dendl;
return m_on_finish;
}
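  // walk the registered journal clients and schedule cleanup for every
  // rbd-mirror peer client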
for (auto client : m_clients) {
journal::ClientData client_data;
auto bl_it = client.data.cbegin();
try {
using ceph::decode;
decode(client_data, bl_it);
} catch (const buffer::error &err) {
lderr(cct) << "failed to decode client data" << dendl;
m_error_result = -EBADMSG;
continue;
}
journal::ClientMetaType type = client_data.get_client_meta_type();
if (type != journal::ClientMetaType::MIRROR_PEER_CLIENT_META_TYPE) {
continue;
}
if (m_current_ops.find(client.id) != m_current_ops.end()) {
// Should not happen.
lderr(cct) << "clients with the same id "
<< client.id << dendl;
continue;
}
m_current_ops[client.id] = 0;
m_ret[client.id] = 0;
journal::MirrorPeerClientMeta client_meta =
boost::get<journal::MirrorPeerClientMeta>(client_data.client_meta);
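    // remove any image sync snapshots recorded in this peer's sync points
    // before unregistering the client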
for (const auto& sync : client_meta.sync_points) {
send_remove_snap(client.id, sync.snap_namespace, sync.snap_name);
}
if (m_current_ops[client.id] == 0) {
// no snaps to remove
send_unregister_client(client.id);
}
}
if (m_current_ops.empty()) {
if (m_error_result < 0) {
*result = m_error_result;
return m_on_finish;
} else if (!m_remove) {
return m_on_finish;
}
locker.unlock();
// no mirror clients to unregister
send_remove_mirror_image();
}
return nullptr;
}
template <typename I>
void DisableRequest<I>::remove_mirror_snapshots() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
// remove snapshot-based mirroring snapshots
bool removing_snapshots = false;
{
std::lock_guard locker{m_lock};
std::shared_lock image_locker{m_image_ctx->image_lock};
for (auto &it : m_image_ctx->snap_info) {
auto &snap_info = it.second;
auto type = cls::rbd::get_snap_namespace_type(
snap_info.snap_namespace);
if (type == cls::rbd::SNAPSHOT_NAMESPACE_TYPE_MIRROR) {
send_remove_snap("", snap_info.snap_namespace, snap_info.name);
removing_snapshots = true;
}
}
}
if (!removing_snapshots) {
send_remove_mirror_image();
}
}
template <typename I>
void DisableRequest<I>::send_remove_snap(
const std::string &client_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "client_id=" << client_id
<< ", snap_name=" << snap_name << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
m_current_ops[client_id]++;
Context *ctx = create_context_callback(
&DisableRequest<I>::handle_remove_snap, client_id);
ctx = new LambdaContext([this, snap_namespace, snap_name, ctx](int r) {
m_image_ctx->operations->snap_remove(snap_namespace,
snap_name.c_str(),
ctx);
});
m_image_ctx->op_work_queue->queue(ctx, 0);
}
template <typename I>
Context *DisableRequest<I>::handle_remove_snap(int *result,
const std::string &client_id) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << *result << dendl;
std::unique_lock locker{m_lock};
ceph_assert(m_current_ops[client_id] > 0);
m_current_ops[client_id]--;
if (*result < 0 && *result != -ENOENT) {
lderr(cct) << "failed to remove mirroring snapshot: "
<< cpp_strerror(*result) << dendl;
m_ret[client_id] = *result;
}
if (m_current_ops[client_id] == 0) {
if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
ceph_assert(client_id.empty());
m_current_ops.erase(client_id);
if (m_ret[client_id] < 0) {
return m_on_finish;
}
locker.unlock();
send_remove_mirror_image();
return nullptr;
}
send_unregister_client(client_id);
}
return nullptr;
}
template <typename I>
void DisableRequest<I>::send_unregister_client(
const std::string &client_id) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_current_ops[client_id] == 0);
Context *ctx = create_context_callback(
&DisableRequest<I>::handle_unregister_client, client_id);
if (m_ret[client_id] < 0) {
m_image_ctx->op_work_queue->queue(ctx, m_ret[client_id]);
return;
}
librados::ObjectWriteOperation op;
cls::journal::client::client_unregister(&op, client_id);
std::string header_oid = ::journal::Journaler::header_oid(m_image_ctx->id);
librados::AioCompletion *comp = create_rados_callback(ctx);
int r = m_image_ctx->md_ctx.aio_operate(header_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
Context *DisableRequest<I>::handle_unregister_client(
int *result, const std::string &client_id) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << *result << dendl;
std::unique_lock locker{m_lock};
ceph_assert(m_current_ops[client_id] == 0);
m_current_ops.erase(client_id);
if (*result < 0 && *result != -ENOENT) {
lderr(cct) << "failed to unregister remote journal client: "
<< cpp_strerror(*result) << dendl;
m_error_result = *result;
}
if (!m_current_ops.empty()) {
return nullptr;
}
if (m_error_result < 0) {
*result = m_error_result;
return m_on_finish;
}
locker.unlock();
send_get_clients();
return nullptr;
}
template <typename I>
void DisableRequest<I>::send_remove_mirror_image() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
auto ctx = util::create_context_callback<
DisableRequest<I>,
&DisableRequest<I>::handle_remove_mirror_image>(this);
auto req = ImageRemoveRequest<I>::create(
m_image_ctx->md_ctx, m_mirror_image.global_image_id, m_image_ctx->id,
ctx);
req->send();
}
template <typename I>
Context *DisableRequest<I>::handle_remove_mirror_image(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << *result << dendl;
if (*result < 0) {
lderr(cct) << "failed to remove mirror image: " << cpp_strerror(*result)
<< dendl;
return m_on_finish;
}
ldout(cct, 20) << "removed image state from rbd_mirroring object" << dendl;
return m_on_finish;
}
template <typename I>
Context *DisableRequest<I>::create_context_callback(
Context*(DisableRequest<I>::*handle)(int*, const std::string &client_id),
const std::string &client_id) {
return new LambdaContext([this, handle, client_id](int r) {
Context *on_finish = (this->*handle)(&r, client_id);
if (on_finish != nullptr) {
on_finish->complete(r);
delete this;
}
});
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::DisableRequest<librbd::ImageCtx>;
| 13,445 | 27.0125 | 80 | cc |
null | ceph-main/src/librbd/mirror/DisableRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_DISABLE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_DISABLE_REQUEST_H
#include "include/buffer.h"
#include "common/ceph_mutex.h"
#include "cls/journal/cls_journal_types.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/mirror/Types.h"
#include <map>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace mirror {
template <typename ImageCtxT = ImageCtx>
class DisableRequest {
public:
static DisableRequest *create(ImageCtxT *image_ctx, bool force,
bool remove, Context *on_finish) {
return new DisableRequest(image_ctx, force, remove, on_finish);
}
DisableRequest(ImageCtxT *image_ctx, bool force, bool remove,
Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_MIRROR_INFO * * * * * * * * * * * * * * * * * * * * * * *
* | *
* v *
* IMAGE_STATE_UPDATE * * * * * * * * * * * * * * * * * * * * * *
* | *
* v *
* PROMOTE_IMAGE (skip if primary) *
* | *
* v *
* REFRESH_IMAGE (skip if necessary) *
* | *
* v *
* GET_CLIENTS <----------------------------------------\ * * * *
* | | (unregister clients) | * (on error)
* | |/----------------------------\ | *
* | | | | *
* | | /-----------\ (repeat | (repeat | (repeat
* | | | | as needed) | as needed) | as needed)
* | v v | | | *
* | REMOVE_SYNC_SNAP --/ * * * * * * | * * * * * * | * * * *
* | | | | *
* | v | | *
* | UNREGISTER_CLIENT ---------------/-------------/ * * * *
* | *
* | (no more clients *
* | to unregister) *
* v *
* REMOVE_MIRROR_IMAGE * * * * * * * * * * * * * * * * * * * * *
* | (skip if no remove) *
* v *
* <finish> < * * * * * * * * * * * * * * * * * * * * * * * * * *
*
* @endverbatim
*/
ImageCtxT *m_image_ctx;
bool m_force;
bool m_remove;
Context *m_on_finish;
bool m_is_primary = false;
cls::rbd::MirrorImage m_mirror_image;
PromotionState m_promotion_state = PROMOTION_STATE_NON_PRIMARY;
std::string m_primary_mirror_uuid;
std::set<cls::journal::Client> m_clients;
std::map<std::string, int> m_ret;
std::map<std::string, int> m_current_ops;
int m_error_result = 0;
mutable ceph::mutex m_lock =
ceph::make_mutex("mirror::DisableRequest::m_lock");
void send_get_mirror_info();
Context *handle_get_mirror_info(int *result);
void send_image_state_update();
Context *handle_image_state_update(int *result);
void send_notify_mirroring_watcher();
Context *handle_notify_mirroring_watcher(int *result);
void send_promote_image();
Context *handle_promote_image(int *result);
void send_refresh_image();
Context* handle_refresh_image(int* result);
void clean_mirror_state();
void send_get_clients();
Context *handle_get_clients(int *result);
void remove_mirror_snapshots();
void send_remove_snap(const std::string &client_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name);
Context *handle_remove_snap(int *result, const std::string &client_id);
void send_unregister_client(const std::string &client_id);
Context *handle_unregister_client(int *result, const std::string &client_id);
void send_remove_mirror_image();
Context *handle_remove_mirror_image(int *result);
void send_notify_mirroring_watcher_removed();
Context *handle_notify_mirroring_watcher_removed(int *result);
Context *create_context_callback(
Context*(DisableRequest<ImageCtxT>::*handle)(
int*, const std::string &client_id),
const std::string &client_id);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::DisableRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_DISABLE_REQUEST_H
| 5,050 | 34.076389 | 79 | h |
null | ceph-main/src/librbd/mirror/EnableRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/EnableRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/mirror/ImageStateUpdateRequest.h"
#include "librbd/mirror/snapshot/CreatePrimaryRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::EnableRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
using util::create_context_callback;
using util::create_rados_callback;
template <typename I>
EnableRequest<I>::EnableRequest(librados::IoCtx &io_ctx,
const std::string &image_id,
I* image_ctx,
cls::rbd::MirrorImageMode mode,
const std::string &non_primary_global_image_id,
bool image_clean,
asio::ContextWQ *op_work_queue,
Context *on_finish)
: m_io_ctx(io_ctx), m_image_id(image_id), m_image_ctx(image_ctx),
m_mode(mode), m_non_primary_global_image_id(non_primary_global_image_id),
m_image_clean(image_clean), m_op_work_queue(op_work_queue),
m_on_finish(on_finish),
m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())) {
}
template <typename I>
void EnableRequest<I>::send() {
get_mirror_image();
}
template <typename I>
void EnableRequest<I>::get_mirror_image() {
ldout(m_cct, 10) << dendl;
librados::ObjectReadOperation op;
cls_client::mirror_image_get_start(&op, m_image_id);
using klass = EnableRequest<I>;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_get_mirror_image>(this);
m_out_bl.clear();
int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void EnableRequest<I>::handle_get_mirror_image(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r == 0) {
auto iter = m_out_bl.cbegin();
r = cls_client::mirror_image_get_finish(&iter, &m_mirror_image);
}
if (r == 0 && m_mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_CREATING &&
!m_non_primary_global_image_id.empty()) {
// special case where rbd-mirror injects a disabled record to record the
    // local image id prior to creating the image
ldout(m_cct, 10) << "enabling mirroring on in-progress image replication"
<< dendl;
} else if (r == 0) {
if (m_mirror_image.mode != m_mode) {
lderr(m_cct) << "invalid current image mirror mode" << dendl;
r = -EINVAL;
} else if (m_mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
ldout(m_cct, 10) << "mirroring is already enabled" << dendl;
} else {
lderr(m_cct) << "currently disabling" << dendl;
r = -EINVAL;
}
finish(r);
return;
} else if (r != -ENOENT) {
lderr(m_cct) << "failed to retrieve mirror image: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
r = 0;
m_mirror_image.mode = m_mode;
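  // a new global image id is generated locally unless rbd-mirror supplied
  // the peer's id for a non-primary image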
if (m_non_primary_global_image_id.empty()) {
uuid_d uuid_gen;
uuid_gen.generate_random();
m_mirror_image.global_image_id = uuid_gen.to_string();
} else {
m_mirror_image.global_image_id = m_non_primary_global_image_id;
}
get_tag_owner();
}
template <typename I>
void EnableRequest<I>::get_tag_owner() {
if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
open_image();
return;
} else if (!m_non_primary_global_image_id.empty()) {
image_state_update();
return;
}
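  // journal-based mirroring requires the local image to own the most recent
  // journal tag (i.e. be primary) before it can be enabled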
ldout(m_cct, 10) << dendl;
using klass = EnableRequest<I>;
Context *ctx = create_context_callback<
klass, &klass::handle_get_tag_owner>(this);
librbd::Journal<>::is_tag_owner(m_io_ctx, m_image_id, &m_is_primary,
m_op_work_queue, ctx);
}
template <typename I>
void EnableRequest<I>::handle_get_tag_owner(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to check tag ownership: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
if (!m_is_primary) {
lderr(m_cct) << "last journal tag not owned by local cluster" << dendl;
finish(-EINVAL);
return;
}
image_state_update();
}
template <typename I>
void EnableRequest<I>::open_image() {
if (!m_non_primary_global_image_id.empty()) {
// special case for rbd-mirror creating a non-primary image
enable_non_primary_feature();
return;
} else if (m_image_ctx != nullptr) {
create_primary_snapshot();
return;
}
ldout(m_cct, 10) << dendl;
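  // no image context was supplied, so open the image here (and close it once
  // the primary snapshot has been created)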
m_close_image = true;
m_image_ctx = I::create("", m_image_id, CEPH_NOSNAP, m_io_ctx, false);
auto ctx = create_context_callback<
EnableRequest<I>, &EnableRequest<I>::handle_open_image>(this);
m_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT |
OPEN_FLAG_IGNORE_MIGRATING, ctx);
}
template <typename I>
void EnableRequest<I>::handle_open_image(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to open image: " << cpp_strerror(r) << dendl;
m_image_ctx = nullptr;
finish(r);
return;
}
create_primary_snapshot();
}
template <typename I>
void EnableRequest<I>::create_primary_snapshot() {
ldout(m_cct, 10) << dendl;
ceph_assert(m_image_ctx != nullptr);
uint64_t snap_create_flags;
int r = util::snap_create_flags_api_to_internal(
m_cct, util::get_default_snap_create_flags(m_image_ctx),
&snap_create_flags);
ceph_assert(r == 0);
auto ctx = create_context_callback<
EnableRequest<I>,
&EnableRequest<I>::handle_create_primary_snapshot>(this);
auto req = snapshot::CreatePrimaryRequest<I>::create(
m_image_ctx, m_mirror_image.global_image_id,
(m_image_clean ? 0 : CEPH_NOSNAP), snap_create_flags,
snapshot::CREATE_PRIMARY_FLAG_IGNORE_EMPTY_PEERS, &m_snap_id, ctx);
req->send();
}
template <typename I>
void EnableRequest<I>::handle_create_primary_snapshot(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to create initial primary snapshot: "
<< cpp_strerror(r) << dendl;
m_ret_val = r;
}
close_image();
}
template <typename I>
void EnableRequest<I>::close_image() {
if (!m_close_image) {
if (m_ret_val < 0) {
finish(m_ret_val);
} else {
image_state_update();
}
return;
}
ldout(m_cct, 10) << dendl;
auto ctx = create_context_callback<
EnableRequest<I>, &EnableRequest<I>::handle_close_image>(this);
m_image_ctx->state->close(ctx);
}
template <typename I>
void EnableRequest<I>::handle_close_image(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
m_image_ctx = nullptr;
if (r < 0) {
lderr(m_cct) << "failed to close image: " << cpp_strerror(r) << dendl;
if (m_ret_val == 0) {
m_ret_val = r;
}
}
if (m_ret_val < 0) {
finish(m_ret_val);
return;
}
image_state_update();
}
template <typename I>
void EnableRequest<I>::enable_non_primary_feature() {
if (m_mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
image_state_update();
return;
}
ldout(m_cct, 10) << dendl;
// ensure image is flagged with non-primary feature so that
// standard RBD clients cannot write to it.
librados::ObjectWriteOperation op;
cls_client::set_features(&op, RBD_FEATURE_NON_PRIMARY,
RBD_FEATURE_NON_PRIMARY);
auto aio_comp = create_rados_callback<
EnableRequest<I>,
&EnableRequest<I>::handle_enable_non_primary_feature>(this);
int r = m_io_ctx.aio_operate(util::header_name(m_image_id), aio_comp, &op);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void EnableRequest<I>::handle_enable_non_primary_feature(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to enable non-primary feature: "
<< cpp_strerror(r) << dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
void EnableRequest<I>::image_state_update() {
ldout(m_cct, 10) << dendl;
auto ctx = create_context_callback<
EnableRequest<I>, &EnableRequest<I>::handle_image_state_update>(this);
auto req = ImageStateUpdateRequest<I>::create(
m_io_ctx, m_image_id, cls::rbd::MIRROR_IMAGE_STATE_ENABLED,
m_mirror_image, ctx);
req->send();
}
template <typename I>
void EnableRequest<I>::handle_image_state_update(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to enable mirroring: " << cpp_strerror(r)
<< dendl;
}
finish(r);
}
template <typename I>
void EnableRequest<I>::finish(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::EnableRequest<librbd::ImageCtx>;
| 9,157 | 26.751515 | 80 | cc |
null | ceph-main/src/librbd/mirror/EnableRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_ENABLE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_ENABLE_REQUEST_H
#include "include/buffer_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/mirror/Types.h"
#include <map>
#include <string>
class Context;
namespace librbd {
namespace asio { struct ContextWQ; }
namespace mirror {
template <typename ImageCtxT = ImageCtx>
class EnableRequest {
public:
static EnableRequest *create(ImageCtxT *image_ctx,
cls::rbd::MirrorImageMode mode,
const std::string &non_primary_global_image_id,
bool image_clean, Context *on_finish) {
return new EnableRequest(image_ctx->md_ctx, image_ctx->id, image_ctx, mode,
non_primary_global_image_id, image_clean,
image_ctx->op_work_queue, on_finish);
}
static EnableRequest *create(librados::IoCtx &io_ctx,
const std::string &image_id,
cls::rbd::MirrorImageMode mode,
const std::string &non_primary_global_image_id,
bool image_clean, asio::ContextWQ *op_work_queue,
Context *on_finish) {
return new EnableRequest(io_ctx, image_id, nullptr, mode,
non_primary_global_image_id, image_clean,
op_work_queue, on_finish);
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_MIRROR_IMAGE * * * * * * *
* | * (on error)
* v (skip if not needed) *
* GET_TAG_OWNER * * * * * * * *
* | *
* v (skip if not needed) *
* OPEN_IMAGE *
* | *
* v (skip if not needed) *
* CREATE_PRIMARY_SNAPSHOT * * *
* | *
 *         v (skip if not opened)              *
* CLOSE_IMAGE *
* | *
* v (skip if not needed) *
* ENABLE_NON_PRIMARY_FEATURE *
* | *
* v (skip if not needed) *
* IMAGE_STATE_UPDATE * * * * * *
* | *
* v *
* <finish> < * * * * * * * * *
*
* @endverbatim
*/
EnableRequest(librados::IoCtx &io_ctx, const std::string &image_id,
ImageCtxT* image_ctx, cls::rbd::MirrorImageMode mode,
const std::string &non_primary_global_image_id,
bool image_clean, asio::ContextWQ *op_work_queue,
Context *on_finish);
librados::IoCtx &m_io_ctx;
std::string m_image_id;
ImageCtxT* m_image_ctx;
cls::rbd::MirrorImageMode m_mode;
std::string m_non_primary_global_image_id;
bool m_image_clean;
asio::ContextWQ *m_op_work_queue;
Context *m_on_finish;
CephContext *m_cct = nullptr;
bufferlist m_out_bl;
cls::rbd::MirrorImage m_mirror_image;
int m_ret_val = 0;
bool m_close_image = false;
bool m_is_primary = false;
uint64_t m_snap_id = CEPH_NOSNAP;
void get_mirror_image();
void handle_get_mirror_image(int r);
void get_tag_owner();
void handle_get_tag_owner(int r);
void open_image();
void handle_open_image(int r);
void create_primary_snapshot();
void handle_create_primary_snapshot(int r);
void close_image();
void handle_close_image(int r);
void enable_non_primary_feature();
void handle_enable_non_primary_feature(int r);
void image_state_update();
void handle_image_state_update(int r);
void finish(int r);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::EnableRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_ENABLE_REQUEST_H
| 4,018 | 28.551471 | 80 | h |
null | ceph-main/src/librbd/mirror/GetInfoRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/GetInfoRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::GetInfoRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace mirror {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
GetInfoRequest<I>::GetInfoRequest(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state,
std::string* primary_mirror_uuid,
Context *on_finish)
: m_io_ctx(io_ctx), m_op_work_queue(op_work_queue), m_image_id(image_id),
m_mirror_image(mirror_image), m_promotion_state(promotion_state),
m_primary_mirror_uuid(primary_mirror_uuid), m_on_finish(on_finish),
m_cct(reinterpret_cast<CephContext *>(io_ctx.cct())) {
}
template <typename I>
GetInfoRequest<I>::GetInfoRequest(I &image_ctx,
cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state,
std::string* primary_mirror_uuid,
Context *on_finish)
: m_image_ctx(&image_ctx), m_io_ctx(image_ctx.md_ctx),
m_op_work_queue(image_ctx.op_work_queue), m_image_id(image_ctx.id),
m_mirror_image(mirror_image), m_promotion_state(promotion_state),
m_primary_mirror_uuid(primary_mirror_uuid), m_on_finish(on_finish),
m_cct(image_ctx.cct) {
}
template <typename I>
void GetInfoRequest<I>::send() {
get_mirror_image();
}
template <typename I>
void GetInfoRequest<I>::get_mirror_image() {
ldout(m_cct, 20) << dendl;
librados::ObjectReadOperation op;
cls_client::mirror_image_get_start(&op, m_image_id);
librados::AioCompletion *comp = create_rados_callback<
GetInfoRequest<I>, &GetInfoRequest<I>::handle_get_mirror_image>(this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void GetInfoRequest<I>::handle_get_mirror_image(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
m_mirror_image->state = cls::rbd::MIRROR_IMAGE_STATE_DISABLED;
*m_promotion_state = PROMOTION_STATE_NON_PRIMARY;
if (r == 0) {
auto iter = m_out_bl.cbegin();
r = cls_client::mirror_image_get_finish(&iter, m_mirror_image);
}
if (r == -ENOENT) {
ldout(m_cct, 20) << "mirroring is disabled" << dendl;
finish(r);
return;
} else if (r < 0) {
lderr(m_cct) << "failed to retrieve mirroring state: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
if (m_mirror_image->mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
get_journal_tag_owner();
} else if (m_mirror_image->mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
get_snapcontext();
} else {
ldout(m_cct, 20) << "unknown mirror image mode: " << m_mirror_image->mode
<< dendl;
finish(-EOPNOTSUPP);
}
}
template <typename I>
void GetInfoRequest<I>::get_journal_tag_owner() {
ldout(m_cct, 20) << dendl;
auto ctx = create_context_callback<
GetInfoRequest<I>, &GetInfoRequest<I>::handle_get_journal_tag_owner>(this);
Journal<I>::get_tag_owner(m_io_ctx, m_image_id, &m_mirror_uuid,
m_op_work_queue, ctx);
}
template <typename I>
void GetInfoRequest<I>::handle_get_journal_tag_owner(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to determine tag ownership: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
if (m_mirror_uuid == Journal<>::LOCAL_MIRROR_UUID) {
*m_promotion_state = PROMOTION_STATE_PRIMARY;
*m_primary_mirror_uuid = "";
} else if (m_mirror_uuid == Journal<>::ORPHAN_MIRROR_UUID) {
*m_promotion_state = PROMOTION_STATE_ORPHAN;
*m_primary_mirror_uuid = "";
} else {
*m_primary_mirror_uuid = m_mirror_uuid;
}
finish(0);
}
template <typename I>
void GetInfoRequest<I>::get_snapcontext() {
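  // an open image context already caches the snapshot list; otherwise read
  // the snap context from the image header object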
if (m_image_ctx != nullptr) {
{
std::shared_lock image_locker{m_image_ctx->image_lock};
calc_promotion_state(m_image_ctx->snap_info);
}
finish(0);
return;
}
ldout(m_cct, 20) << dendl;
librados::ObjectReadOperation op;
cls_client::get_snapcontext_start(&op);
librados::AioCompletion *comp = create_rados_callback<
GetInfoRequest<I>, &GetInfoRequest<I>::handle_get_snapcontext>(this);
m_out_bl.clear();
int r = m_io_ctx.aio_operate(util::header_name(m_image_id), comp, &op,
&m_out_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void GetInfoRequest<I>::handle_get_snapcontext(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
if (r >= 0) {
auto it = m_out_bl.cbegin();
r = cls_client::get_snapcontext_finish(&it, &m_snapc);
}
if (r == -ENOENT &&
m_mirror_image->state == cls::rbd::MIRROR_IMAGE_STATE_CREATING) {
// image doesn't exist but we have a mirror image record for it
ldout(m_cct, 10) << "image does not exist for mirror image id "
<< m_image_id << dendl;
*m_promotion_state = PROMOTION_STATE_UNKNOWN;
*m_primary_mirror_uuid = "";
finish(0);
return;
} else if (r < 0) {
lderr(m_cct) << "failed to get snapcontext: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
get_snapshots();
}
template <typename I>
void GetInfoRequest<I>::get_snapshots() {
ldout(m_cct, 20) << dendl;
if (m_snapc.snaps.empty()) {
handle_get_snapshots(0);
return;
}
librados::ObjectReadOperation op;
for (auto snap_id : m_snapc.snaps) {
cls_client::snapshot_get_start(&op, snap_id);
}
librados::AioCompletion *comp = create_rados_callback<
GetInfoRequest<I>, &GetInfoRequest<I>::handle_get_snapshots>(this);
m_out_bl.clear();
int r = m_io_ctx.aio_operate(util::header_name(m_image_id), comp, &op,
&m_out_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void GetInfoRequest<I>::handle_get_snapshots(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
std::map<librados::snap_t, SnapInfo> snap_info;
auto it = m_out_bl.cbegin();
for (auto snap_id : m_snapc.snaps) {
cls::rbd::SnapshotInfo snap;
if (r >= 0) {
r = cls_client::snapshot_get_finish(&it, &snap);
}
snap_info.emplace(
snap_id, SnapInfo(snap.name, snap.snapshot_namespace, 0, {}, 0, 0, {}));
}
if (r == -ENOENT) {
    // a snapshot was removed while fetching; re-read the snap context and
    // retry
get_snapcontext();
return;
}
if (r < 0) {
lderr(m_cct) << "failed to get snapshots: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
calc_promotion_state(snap_info);
finish(0);
}
template <typename I>
void GetInfoRequest<I>::finish(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
template <typename I>
void GetInfoRequest<I>::calc_promotion_state(
const std::map<librados::snap_t, SnapInfo> &snap_info) {
*m_promotion_state = PROMOTION_STATE_UNKNOWN;
*m_primary_mirror_uuid = "";
for (auto it = snap_info.rbegin(); it != snap_info.rend(); it++) {
auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&it->second.snap_namespace);
if (mirror_ns != nullptr) {
switch (mirror_ns->state) {
case cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY:
*m_promotion_state = PROMOTION_STATE_PRIMARY;
break;
case cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY:
*m_promotion_state = PROMOTION_STATE_NON_PRIMARY;
*m_primary_mirror_uuid = mirror_ns->primary_mirror_uuid;
break;
case cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED:
case cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED:
*m_promotion_state = PROMOTION_STATE_ORPHAN;
break;
}
break;
}
}
ldout(m_cct, 10) << "promotion_state=" << *m_promotion_state << ", "
<< "primary_mirror_uuid=" << *m_primary_mirror_uuid << dendl;
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::GetInfoRequest<librbd::ImageCtx>;
| 8,718 | 28.962199 | 80 | cc |
null | ceph-main/src/librbd/mirror/GetInfoRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_GET_INFO_REQUEST_H
#define CEPH_LIBRBD_MIRROR_GET_INFO_REQUEST_H
#include "common/snap_types.h"
#include "include/buffer.h"
#include "include/common_fwd.h"
#include "include/rados/librados.hpp"
#include "librbd/Types.h"
#include "librbd/mirror/Types.h"
#include <string>
struct Context;
namespace cls { namespace rbd { struct MirrorImage; } }
namespace librbd {
struct ImageCtx;
namespace asio { struct ContextWQ; }
namespace mirror {
template <typename ImageCtxT = librbd::ImageCtx>
class GetInfoRequest {
public:
static GetInfoRequest *create(librados::IoCtx &io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state,
std::string* primary_mirror_uuid,
Context *on_finish) {
return new GetInfoRequest(io_ctx, op_work_queue, image_id, mirror_image,
promotion_state, primary_mirror_uuid, on_finish);
}
static GetInfoRequest *create(ImageCtxT &image_ctx,
cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state,
std::string* primary_mirror_uuid,
Context *on_finish) {
return new GetInfoRequest(image_ctx, mirror_image, promotion_state,
primary_mirror_uuid, on_finish);
}
GetInfoRequest(librados::IoCtx& io_ctx, asio::ContextWQ *op_work_queue,
const std::string &image_id,
cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state,
std::string* primary_mirror_uuid, Context *on_finish);
GetInfoRequest(ImageCtxT &image_ctx, cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state,
std::string* primary_mirror_uuid, Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_MIRROR_IMAGE
* |
* (journal /--------/ \--------\ (snapshot
* mode) | | mode)
* v v
* GET_JOURNAL_TAG_OWNER GET_SNAPCONTEXT (skip if
* | | cached)
* | v
* | GET_SNAPSHOTS (skip if
* | | cached)
* \--------\ /--------/
* |
* v
* <finish>
*
* @endverbatim
*/
ImageCtxT *m_image_ctx = nullptr;
librados::IoCtx &m_io_ctx;
asio::ContextWQ *m_op_work_queue;
std::string m_image_id;
cls::rbd::MirrorImage *m_mirror_image;
PromotionState *m_promotion_state;
std::string* m_primary_mirror_uuid;
Context *m_on_finish;
CephContext *m_cct;
bufferlist m_out_bl;
std::string m_mirror_uuid;
::SnapContext m_snapc;
void get_mirror_image();
void handle_get_mirror_image(int r);
void get_journal_tag_owner();
void handle_get_journal_tag_owner(int r);
void get_snapcontext();
void handle_get_snapcontext(int r);
void get_snapshots();
void handle_get_snapshots(int r);
void finish(int r);
void calc_promotion_state(
const std::map<librados::snap_t, SnapInfo> &snap_info);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::GetInfoRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_GET_INFO_REQUEST_H
| 3,844 | 30.008065 | 79 | h |
null | ceph-main/src/librbd/mirror/GetStatusRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/GetStatusRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/mirror/GetInfoRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::GetStatusRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace mirror {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
void GetStatusRequest<I>::send() {
*m_mirror_image_status = cls::rbd::MirrorImageStatus(
{{cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID,
cls::rbd::MIRROR_IMAGE_STATUS_STATE_UNKNOWN, "status not found"}});
get_info();
}
template <typename I>
void GetStatusRequest<I>::get_info() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
auto ctx = create_context_callback<
GetStatusRequest<I>, &GetStatusRequest<I>::handle_get_info>(this);
auto req = GetInfoRequest<I>::create(m_image_ctx, m_mirror_image,
m_promotion_state,
&m_primary_mirror_uuid, ctx);
req->send();
}
template <typename I>
void GetStatusRequest<I>::handle_get_info(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring state: " << cpp_strerror(r)
<< dendl;
}
finish(r);
return;
} else if (m_mirror_image->state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
finish(0);
return;
}
get_status();
}
template <typename I>
void GetStatusRequest<I>::get_status() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
librados::ObjectReadOperation op;
cls_client::mirror_image_status_get_start(
&op, m_mirror_image->global_image_id);
librados::AioCompletion *comp = create_rados_callback<
GetStatusRequest<I>, &GetStatusRequest<I>::handle_get_status>(this);
int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void GetStatusRequest<I>::handle_get_status(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r == 0) {
auto iter = m_out_bl.cbegin();
r = cls_client::mirror_image_status_get_finish(&iter,
m_mirror_image_status);
}
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirror image status: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
void GetStatusRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::GetStatusRequest<librbd::ImageCtx>;
| 3,208 | 26.42735 | 79 | cc |
null | ceph-main/src/librbd/mirror/GetStatusRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_GET_STATUS_REQUEST_H
#define CEPH_LIBRBD_MIRROR_GET_STATUS_REQUEST_H
#include "include/buffer.h"
#include "librbd/mirror/Types.h"
#include <string>
struct Context;
namespace cls { namespace rbd { struct MirrorImage; } }
namespace cls { namespace rbd { struct MirrorImageStatus; } }
namespace librbd {
struct ImageCtx;
namespace mirror {
template <typename ImageCtxT = librbd::ImageCtx>
class GetStatusRequest {
public:
static GetStatusRequest *create(ImageCtxT &image_ctx,
cls::rbd::MirrorImageStatus *status,
cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state,
Context *on_finish) {
return new GetStatusRequest(image_ctx, status, mirror_image,
promotion_state, on_finish);
}
GetStatusRequest(ImageCtxT &image_ctx, cls::rbd::MirrorImageStatus *status,
cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state, Context *on_finish)
: m_image_ctx(image_ctx), m_mirror_image_status(status),
m_mirror_image(mirror_image), m_promotion_state(promotion_state),
m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_INFO
* |
* v
* GET_STATUS
* |
* v
* <finish>
*
* @endverbatim
*/
ImageCtxT &m_image_ctx;
cls::rbd::MirrorImageStatus *m_mirror_image_status;
cls::rbd::MirrorImage *m_mirror_image;
PromotionState *m_promotion_state;
Context *m_on_finish;
bufferlist m_out_bl;
std::string m_primary_mirror_uuid;
void get_info();
void handle_get_info(int r);
void get_status();
void handle_get_status(int r);
void finish(int r);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::GetStatusRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_GET_STATUS_REQUEST_H
| 2,124 | 23.425287 | 77 | h |
null | ceph-main/src/librbd/mirror/GetUuidRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/GetUuidRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::GetUuidRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
using librbd::util::create_rados_callback;
template <typename I>
GetUuidRequest<I>::GetUuidRequest(
librados::IoCtx& io_ctx, std::string* mirror_uuid, Context* on_finish)
: m_mirror_uuid(mirror_uuid), m_on_finish(on_finish),
m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())) {
m_io_ctx.dup(io_ctx);
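  // the pool-level mirror uuid lives in the default namespace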
m_io_ctx.set_namespace("");
}
template <typename I>
void GetUuidRequest<I>::send() {
get_mirror_uuid();
}
template <typename I>
void GetUuidRequest<I>::get_mirror_uuid() {
ldout(m_cct, 20) << dendl;
librados::ObjectReadOperation op;
librbd::cls_client::mirror_uuid_get_start(&op);
auto aio_comp = create_rados_callback<
GetUuidRequest<I>, &GetUuidRequest<I>::handle_get_mirror_uuid>(this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void GetUuidRequest<I>::handle_get_mirror_uuid(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
if (r >= 0) {
auto it = m_out_bl.cbegin();
r = librbd::cls_client::mirror_uuid_get_finish(&it, m_mirror_uuid);
if (r >= 0 && m_mirror_uuid->empty()) {
r = -ENOENT;
}
}
if (r < 0) {
if (r == -ENOENT) {
ldout(m_cct, 5) << "mirror uuid missing" << dendl;
} else {
lderr(m_cct) << "failed to retrieve mirror uuid: " << cpp_strerror(r)
<< dendl;
}
*m_mirror_uuid = "";
}
finish(r);
}
template <typename I>
void GetUuidRequest<I>::finish(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::GetUuidRequest<librbd::ImageCtx>;
| 2,198 | 24.275862 | 75 | cc |
null | ceph-main/src/librbd/mirror/GetUuidRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_GET_UUID_REQUEST_H
#define CEPH_LIBRBD_MIRROR_GET_UUID_REQUEST_H
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
#include <set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
template <typename ImageCtxT = librbd::ImageCtx>
class GetUuidRequest {
public:
static GetUuidRequest *create(librados::IoCtx& io_ctx,
std::string* mirror_uuid, Context* on_finish) {
return new GetUuidRequest(io_ctx, mirror_uuid, on_finish);
}
GetUuidRequest(librados::IoCtx& io_ctx, std::string* mirror_uuid,
Context* on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_MIRROR_UUID
* |
* v
* <finish>
*
* @endverbatim
*/
librados::IoCtx m_io_ctx;
std::string* m_mirror_uuid;
Context* m_on_finish;
CephContext* m_cct;
bufferlist m_out_bl;
void get_mirror_uuid();
void handle_get_mirror_uuid(int r);
void finish(int r);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::GetUuidRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_GET_UUID_REQUEST_H
| 1,351 | 18.314286 | 79 | h |
null | ceph-main/src/librbd/mirror/ImageRemoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/ImageRemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/MirroringWatcher.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::ImageRemoveRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
using util::create_rados_callback;
template <typename I>
ImageRemoveRequest<I>::ImageRemoveRequest(
librados::IoCtx& io_ctx, const std::string& global_image_id,
const std::string& image_id, Context* on_finish)
: m_io_ctx(io_ctx), m_global_image_id(global_image_id), m_image_id(image_id),
m_on_finish(on_finish), m_cct(static_cast<CephContext*>(m_io_ctx.cct())) {
}
template <typename I>
void ImageRemoveRequest<I>::send() {
remove_mirror_image();
}
template <typename I>
void ImageRemoveRequest<I>::remove_mirror_image() {
ldout(m_cct, 10) << dendl;
librados::ObjectWriteOperation op;
cls_client::mirror_image_remove(&op, m_image_id);
auto comp = create_rados_callback<
ImageRemoveRequest<I>,
&ImageRemoveRequest<I>::handle_remove_mirror_image>(this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void ImageRemoveRequest<I>::handle_remove_mirror_image(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(m_cct) << "failed to remove mirroring image: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
notify_mirroring_watcher();
}
template <typename I>
void ImageRemoveRequest<I>::notify_mirroring_watcher() {
ldout(m_cct, 10) << dendl;
auto ctx = util::create_context_callback<
ImageRemoveRequest<I>,
&ImageRemoveRequest<I>::handle_notify_mirroring_watcher>(this);
MirroringWatcher<I>::notify_image_updated(
m_io_ctx, cls::rbd::MIRROR_IMAGE_STATE_DISABLED,
m_image_id, m_global_image_id, ctx);
}
template <typename I>
void ImageRemoveRequest<I>::handle_notify_mirroring_watcher(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to notify mirror image update: " << cpp_strerror(r)
<< dendl;
}
finish(0);
}
template <typename I>
void ImageRemoveRequest<I>::finish(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::ImageRemoveRequest<librbd::ImageCtx>;
| 2,680 | 26.080808 | 79 | cc |
null | ceph-main/src/librbd/mirror/ImageRemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_IMAGE_REMOVE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_IMAGE_REMOVE_REQUEST_H
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace mirror {
template <typename ImageCtxT = ImageCtx>
class ImageRemoveRequest {
public:
static ImageRemoveRequest *create(librados::IoCtx& io_ctx,
const std::string& global_image_id,
const std::string& image_id,
Context* on_finish) {
return new ImageRemoveRequest(io_ctx, global_image_id, image_id, on_finish);
}
ImageRemoveRequest(librados::IoCtx& io_ctx,
const std::string& global_image_id,
const std::string& image_id,
Context* on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
 *    |
 *    v
* REMOVE_MIRROR_IMAGE
* |
* v
* NOTIFY_MIRRORING_WATCHER
* |
* v
* <finish>
*
* @endverbatim
*/
librados::IoCtx& m_io_ctx;
std::string m_global_image_id;
std::string m_image_id;
Context* m_on_finish;
CephContext* m_cct;
void remove_mirror_image();
void handle_remove_mirror_image(int r);
void notify_mirroring_watcher();
void handle_notify_mirroring_watcher(int r);
void finish(int r);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::ImageRemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_IMAGE_REMOVE_REQUEST_H
| 1,717 | 21.025641 | 80 | h |
null | ceph-main/src/librbd/mirror/ImageStateUpdateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/ImageStateUpdateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/MirroringWatcher.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::ImageStateUpdateRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
using util::create_rados_callback;
template <typename I>
ImageStateUpdateRequest<I>::ImageStateUpdateRequest(
librados::IoCtx& io_ctx,
const std::string& image_id,
cls::rbd::MirrorImageState mirror_image_state,
const cls::rbd::MirrorImage& mirror_image,
Context* on_finish)
: m_io_ctx(io_ctx), m_image_id(image_id),
m_mirror_image_state(mirror_image_state), m_mirror_image(mirror_image),
m_on_finish(on_finish), m_cct(static_cast<CephContext*>(m_io_ctx.cct())) {
ceph_assert(m_mirror_image_state != cls::rbd::MIRROR_IMAGE_STATE_DISABLED);
}
template <typename I>
void ImageStateUpdateRequest<I>::send() {
get_mirror_image();
}
template <typename I>
void ImageStateUpdateRequest<I>::get_mirror_image() {
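  // skip the lookup if the caller already supplied the mirror image record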
if (!m_mirror_image.global_image_id.empty()) {
set_mirror_image();
return;
}
ldout(m_cct, 10) << dendl;
librados::ObjectReadOperation op;
cls_client::mirror_image_get_start(&op, m_image_id);
auto comp = create_rados_callback<
ImageStateUpdateRequest<I>,
&ImageStateUpdateRequest<I>::handle_get_mirror_image>(this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void ImageStateUpdateRequest<I>::handle_get_mirror_image(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r == 0) {
auto iter = m_out_bl.cbegin();
r = cls_client::mirror_image_get_finish(&iter, &m_mirror_image);
}
if (r == -ENOENT) {
ldout(m_cct, 20) << "mirroring is disabled" << dendl;
finish(0);
return;
} else if (r < 0) {
lderr(m_cct) << "failed to retrieve mirroring state: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
set_mirror_image();
}
template <typename I>
void ImageStateUpdateRequest<I>::set_mirror_image() {
if (m_mirror_image.state == m_mirror_image_state) {
finish(0);
return;
}
ldout(m_cct, 10) << dendl;
m_mirror_image.state = m_mirror_image_state;
librados::ObjectWriteOperation op;
cls_client::mirror_image_set(&op, m_image_id, m_mirror_image);
auto comp = create_rados_callback<
ImageStateUpdateRequest<I>,
&ImageStateUpdateRequest<I>::handle_set_mirror_image>(this);
int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void ImageStateUpdateRequest<I>::handle_set_mirror_image(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to disable mirroring image: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
notify_mirroring_watcher();
}
template <typename I>
void ImageStateUpdateRequest<I>::notify_mirroring_watcher() {
ldout(m_cct, 10) << dendl;
auto ctx = util::create_context_callback<
ImageStateUpdateRequest<I>,
&ImageStateUpdateRequest<I>::handle_notify_mirroring_watcher>(this);
MirroringWatcher<I>::notify_image_updated(
m_io_ctx, m_mirror_image_state, m_image_id, m_mirror_image.global_image_id,
ctx);
}
template <typename I>
void ImageStateUpdateRequest<I>::handle_notify_mirroring_watcher(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to notify mirror image update: " << cpp_strerror(r)
<< dendl;
}
finish(0);
}
template <typename I>
void ImageStateUpdateRequest<I>::finish(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::ImageStateUpdateRequest<librbd::ImageCtx>;
| 4,136 | 26.217105 | 79 | cc |
null | ceph-main/src/librbd/mirror/ImageStateUpdateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_IMAGE_STATE_UPDATE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_IMAGE_STATE_UPDATE_REQUEST_H
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/mirror/Types.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace mirror {
template <typename ImageCtxT = ImageCtx>
class ImageStateUpdateRequest {
public:
static ImageStateUpdateRequest *create(
librados::IoCtx& io_ctx,
const std::string& image_id,
cls::rbd::MirrorImageState mirror_image_state,
const cls::rbd::MirrorImage& mirror_image,
Context* on_finish) {
return new ImageStateUpdateRequest(
io_ctx, image_id, mirror_image_state, mirror_image, on_finish);
}
ImageStateUpdateRequest(
librados::IoCtx& io_ctx,
const std::string& image_id,
cls::rbd::MirrorImageState mirror_image_state,
const cls::rbd::MirrorImage& mirror_image,
Context* on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v (skip if provided)
* GET_MIRROR_IMAGE
* |
* v
* SET_MIRROR_IMAGE
* |
* v
* NOTIFY_MIRRORING_WATCHER
* |
* v
* <finish>
*
* @endverbatim
*/
librados::IoCtx& m_io_ctx;
std::string m_image_id;
cls::rbd::MirrorImageState m_mirror_image_state;
cls::rbd::MirrorImage m_mirror_image;
Context* m_on_finish;
CephContext* m_cct;
bufferlist m_out_bl;
void get_mirror_image();
void handle_get_mirror_image(int r);
void set_mirror_image();
void handle_set_mirror_image(int r);
void notify_mirroring_watcher();
void handle_notify_mirroring_watcher(int r);
void finish(int r);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::ImageStateUpdateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_IMAGE_STATE_UPDATE_REQUEST_H
| 2,026 | 20.795699 | 80 | h |
null | ceph-main/src/librbd/mirror/PromoteRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/PromoteRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/mirror/GetInfoRequest.h"
#include "librbd/mirror/snapshot/PromoteRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::PromoteRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace mirror {
using librbd::util::create_context_callback;
template <typename I>
void PromoteRequest<I>::send() {
get_info();
}
template <typename I>
void PromoteRequest<I>::get_info() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
auto ctx = create_context_callback<
PromoteRequest<I>, &PromoteRequest<I>::handle_get_info>(this);
auto req = GetInfoRequest<I>::create(m_image_ctx, &m_mirror_image,
&m_promotion_state,
&m_primary_mirror_uuid, ctx);
req->send();
}
template <typename I>
void PromoteRequest<I>::handle_get_info(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to retrieve mirroring state: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
} else if (m_mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
lderr(cct) << "mirroring is not currently enabled" << dendl;
finish(-EINVAL);
return;
} else if (m_promotion_state == PROMOTION_STATE_PRIMARY) {
lderr(cct) << "image is already primary" << dendl;
finish(-EINVAL);
return;
} else if (m_promotion_state == PROMOTION_STATE_NON_PRIMARY && !m_force) {
lderr(cct) << "image is primary within a remote cluster or demotion is not propagated yet"
<< dendl;
finish(-EBUSY);
return;
}
promote();
}
template <typename I>
void PromoteRequest<I>::promote() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
auto ctx = create_context_callback<
PromoteRequest<I>, &PromoteRequest<I>::handle_promote>(this);
if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
Journal<I>::promote(&m_image_ctx, ctx);
} else if (m_mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
auto req = mirror::snapshot::PromoteRequest<I>::create(
&m_image_ctx, m_mirror_image.global_image_id, ctx);
req->send();
} else {
lderr(cct) << "unknown image mirror mode: " << m_mirror_image.mode << dendl;
finish(-EOPNOTSUPP);
}
}
template <typename I>
void PromoteRequest<I>::handle_promote(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to promote image: " << cpp_strerror(r)
<< dendl;
}
finish(r);
}
template <typename I>
void PromoteRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace mirror
} // namespace librbd
template class librbd::mirror::PromoteRequest<librbd::ImageCtx>;
| 3,337 | 27.775862 | 94 | cc |
null | ceph-main/src/librbd/mirror/PromoteRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_PROMOTE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_PROMOTE_REQUEST_H
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/mirror/Types.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
template <typename ImageCtxT = librbd::ImageCtx>
class PromoteRequest {
public:
static PromoteRequest *create(ImageCtxT &image_ctx, bool force,
Context *on_finish) {
return new PromoteRequest(image_ctx, force, on_finish);
}
PromoteRequest(ImageCtxT &image_ctx, bool force, Context *on_finish)
: m_image_ctx(image_ctx), m_force(force), m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_INFO
* |
* v
* PROMOTE
* |
* v
* <finish>
*
* @endverbatim
*/
ImageCtxT &m_image_ctx;
bool m_force;
Context *m_on_finish;
cls::rbd::MirrorImage m_mirror_image;
PromotionState m_promotion_state = PROMOTION_STATE_PRIMARY;
std::string m_primary_mirror_uuid;
void get_info();
void handle_get_info(int r);
void promote();
void handle_promote(int r);
void finish(int r);
};
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::PromoteRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_PROMOTE_REQUEST_H
| 1,479 | 18.220779 | 71 | h |
null | ceph-main/src/librbd/mirror/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_TYPES_H
#define CEPH_LIBRBD_MIRROR_TYPES_H
namespace librbd {
namespace mirror {
enum PromotionState {
PROMOTION_STATE_UNKNOWN,
PROMOTION_STATE_PRIMARY,
PROMOTION_STATE_NON_PRIMARY,
PROMOTION_STATE_ORPHAN
};
} // namespace mirror
} // namespace librbd
#endif // CEPH_LIBRBD_MIRROR_TYPES_H
| 428 | 18.5 | 70 | h |
null | ceph-main/src/librbd/mirror/snapshot/CreateNonPrimaryRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/CreateNonPrimaryRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/mirror/snapshot/Utils.h"
#include "librbd/mirror/snapshot/WriteImageStateRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::CreateNonPrimaryRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
CreateNonPrimaryRequest<I>::CreateNonPrimaryRequest(
I* image_ctx, bool demoted, const std::string &primary_mirror_uuid,
uint64_t primary_snap_id, const SnapSeqs& snap_seqs,
const ImageState &image_state, uint64_t *snap_id, Context *on_finish)
: m_image_ctx(image_ctx), m_demoted(demoted),
m_primary_mirror_uuid(primary_mirror_uuid),
m_primary_snap_id(primary_snap_id), m_snap_seqs(snap_seqs),
m_image_state(image_state), m_snap_id(snap_id), m_on_finish(on_finish) {
m_default_ns_ctx.dup(m_image_ctx->md_ctx);
m_default_ns_ctx.set_namespace("");
}
template <typename I>
void CreateNonPrimaryRequest<I>::send() {
refresh_image();
}
template <typename I>
void CreateNonPrimaryRequest<I>::refresh_image() {
if (!m_image_ctx->state->is_refresh_required()) {
get_mirror_image();
return;
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
CreateNonPrimaryRequest<I>,
&CreateNonPrimaryRequest<I>::handle_refresh_image>(this);
m_image_ctx->state->refresh(ctx);
}
template <typename I>
void CreateNonPrimaryRequest<I>::handle_refresh_image(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
get_mirror_image();
}
template <typename I>
void CreateNonPrimaryRequest<I>::get_mirror_image() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
librados::ObjectReadOperation op;
cls_client::mirror_image_get_start(&op, m_image_ctx->id);
librados::AioCompletion *comp = create_rados_callback<
CreateNonPrimaryRequest<I>,
&CreateNonPrimaryRequest<I>::handle_get_mirror_image>(this);
int r = m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void CreateNonPrimaryRequest<I>::handle_get_mirror_image(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
cls::rbd::MirrorImage mirror_image;
if (r == 0) {
auto iter = m_out_bl.cbegin();
r = cls_client::mirror_image_get_finish(&iter, &mirror_image);
}
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring state: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
if (mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
lderr(cct) << "snapshot based mirroring is not enabled" << dendl;
finish(-EINVAL);
return;
}
if (!is_orphan() && !util::can_create_non_primary_snapshot(m_image_ctx)) {
finish(-EINVAL);
return;
}
uuid_d uuid_gen;
uuid_gen.generate_random();
m_snap_name = ".mirror.non_primary." + mirror_image.global_image_id + "." +
uuid_gen.to_string();
get_mirror_peers();
}
template <typename I>
void CreateNonPrimaryRequest<I>::get_mirror_peers() {
if (!m_demoted) {
create_snapshot();
return;
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
librados::ObjectReadOperation op;
cls_client::mirror_peer_list_start(&op);
auto aio_comp = create_rados_callback<
CreateNonPrimaryRequest<I>,
&CreateNonPrimaryRequest<I>::handle_get_mirror_peers>(this);
m_out_bl.clear();
int r = m_default_ns_ctx.aio_operate(RBD_MIRRORING, aio_comp, &op, &m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void CreateNonPrimaryRequest<I>::handle_get_mirror_peers(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
std::vector<cls::rbd::MirrorPeer> peers;
if (r == 0) {
auto iter = m_out_bl.cbegin();
r = cls_client::mirror_peer_list_finish(&iter, &peers);
}
if (r < 0) {
lderr(cct) << "failed to retrieve mirror peers: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
for (auto &peer : peers) {
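    // skip rx-only peers; only tx-capable peer uuids are linked to the snapshot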
if (peer.mirror_peer_direction == cls::rbd::MIRROR_PEER_DIRECTION_RX) {
continue;
}
m_mirror_peer_uuids.insert(peer.uuid);
}
create_snapshot();
}
template <typename I>
void CreateNonPrimaryRequest<I>::create_snapshot() {
CephContext *cct = m_image_ctx->cct;
cls::rbd::MirrorSnapshotNamespace ns{
(m_demoted ? cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED :
cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY), {},
m_primary_mirror_uuid, m_primary_snap_id};
if (m_demoted) {
ns.mirror_peer_uuids = m_mirror_peer_uuids;
}
ns.snap_seqs = m_snap_seqs;
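  // an orphan snapshot (no primary mirror uuid) is flagged complete up front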
ns.complete = is_orphan();
ldout(cct, 15) << "ns=" << ns << dendl;
auto ctx = create_context_callback<
CreateNonPrimaryRequest<I>,
&CreateNonPrimaryRequest<I>::handle_create_snapshot>(this);
m_image_ctx->operations->snap_create(ns, m_snap_name, 0, m_prog_ctx, ctx);
}
template <typename I>
void CreateNonPrimaryRequest<I>::handle_create_snapshot(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to create mirror snapshot: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
write_image_state();
}
template <typename I>
void CreateNonPrimaryRequest<I>::write_image_state() {
uint64_t snap_id;
{
std::shared_lock image_locker{m_image_ctx->image_lock};
snap_id = m_image_ctx->get_snap_id(
cls::rbd::MirrorSnapshotNamespace{}, m_snap_name);
}
if (m_snap_id != nullptr) {
*m_snap_id = snap_id;
}
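  // no image state is written for orphan snapshots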
if (is_orphan()) {
finish(0);
return;
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
CreateNonPrimaryRequest<I>,
&CreateNonPrimaryRequest<I>::handle_write_image_state>(this);
auto req = WriteImageStateRequest<I>::create(m_image_ctx, snap_id,
m_image_state, ctx);
req->send();
}
template <typename I>
void CreateNonPrimaryRequest<I>::handle_write_image_state(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to write image state: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
void CreateNonPrimaryRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::CreateNonPrimaryRequest<librbd::ImageCtx>;
| 7,423 | 26.094891 | 85 | cc |
null | ceph-main/src/librbd/mirror/snapshot/CreateNonPrimaryRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_CREATE_NON_PRIMARY_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_CREATE_NON_PRIMARY_REQUEST_H
#include "include/buffer.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/Types.h"
#include "librbd/internal.h"
#include "librbd/mirror/snapshot/Types.h"
#include <string>
#include <set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT = librbd::ImageCtx>
class CreateNonPrimaryRequest {
public:
static CreateNonPrimaryRequest *create(ImageCtxT *image_ctx,
bool demoted,
const std::string &primary_mirror_uuid,
uint64_t primary_snap_id,
const SnapSeqs& snap_seqs,
const ImageState &image_state,
uint64_t *snap_id,
Context *on_finish) {
return new CreateNonPrimaryRequest(image_ctx, demoted, primary_mirror_uuid,
primary_snap_id, snap_seqs, image_state,
snap_id, on_finish);
}
CreateNonPrimaryRequest(ImageCtxT *image_ctx,
bool demoted,
const std::string &primary_mirror_uuid,
uint64_t primary_snap_id,
const SnapSeqs& snap_seqs,
const ImageState &image_state, uint64_t *snap_id,
Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* REFRESH_IMAGE
* |
* v
* GET_MIRROR_IMAGE
* |
* v (skip if not needed)
* GET_MIRROR_PEERS
* |
* v
* CREATE_SNAPSHOT
* |
* v
* WRITE_IMAGE_STATE
* |
* v
* <finish>
*
* @endverbatim
*/
ImageCtxT *m_image_ctx;
bool m_demoted;
std::string m_primary_mirror_uuid;
uint64_t m_primary_snap_id;
SnapSeqs m_snap_seqs;
ImageState m_image_state;
uint64_t *m_snap_id;
Context *m_on_finish;
librados::IoCtx m_default_ns_ctx;
std::set<std::string> m_mirror_peer_uuids;
std::string m_snap_name;
bufferlist m_out_bl;
NoOpProgressContext m_prog_ctx;
bool is_orphan() const {
return m_primary_mirror_uuid.empty();
}
void refresh_image();
void handle_refresh_image(int r);
void get_mirror_image();
void handle_get_mirror_image(int r);
void get_mirror_peers();
void handle_get_mirror_peers(int r);
void create_snapshot();
void handle_create_snapshot(int r);
void write_image_state();
void handle_write_image_state(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::CreateNonPrimaryRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_CREATE_NON_PRIMARY_REQUEST_H
| 3,139 | 24.322581 | 90 | h |
null | ceph-main/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/CreatePrimaryRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/mirror/snapshot/UnlinkPeerRequest.h"
#include "librbd/mirror/snapshot/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::CreatePrimaryRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
CreatePrimaryRequest<I>::CreatePrimaryRequest(
I *image_ctx, const std::string& global_image_id,
uint64_t clean_since_snap_id, uint64_t snap_create_flags, uint32_t flags,
uint64_t *snap_id, Context *on_finish)
: m_image_ctx(image_ctx), m_global_image_id(global_image_id),
m_clean_since_snap_id(clean_since_snap_id),
m_snap_create_flags(snap_create_flags), m_flags(flags), m_snap_id(snap_id),
m_on_finish(on_finish) {
m_default_ns_ctx.dup(m_image_ctx->md_ctx);
m_default_ns_ctx.set_namespace("");
}
template <typename I>
void CreatePrimaryRequest<I>::send() {
if (!util::can_create_primary_snapshot(
m_image_ctx,
((m_flags & CREATE_PRIMARY_FLAG_DEMOTED) != 0),
((m_flags & CREATE_PRIMARY_FLAG_FORCE) != 0), nullptr, nullptr)) {
finish(-EINVAL);
return;
}
uuid_d uuid_gen;
uuid_gen.generate_random();
m_snap_name = ".mirror.primary." + m_global_image_id + "." +
uuid_gen.to_string();
get_mirror_peers();
}
template <typename I>
void CreatePrimaryRequest<I>::get_mirror_peers() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
librados::ObjectReadOperation op;
cls_client::mirror_peer_list_start(&op);
librados::AioCompletion *comp = create_rados_callback<
CreatePrimaryRequest<I>,
&CreatePrimaryRequest<I>::handle_get_mirror_peers>(this);
m_out_bl.clear();
int r = m_default_ns_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void CreatePrimaryRequest<I>::handle_get_mirror_peers(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
std::vector<cls::rbd::MirrorPeer> peers;
if (r == 0) {
auto iter = m_out_bl.cbegin();
r = cls_client::mirror_peer_list_finish(&iter, &peers);
}
if (r < 0) {
lderr(cct) << "failed to retrieve mirror peers: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
for (auto &peer : peers) {
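    // skip rx-only peers; only tx-capable peers receive the new snapshot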
if (peer.mirror_peer_direction == cls::rbd::MIRROR_PEER_DIRECTION_RX) {
continue;
}
m_mirror_peer_uuids.insert(peer.uuid);
}
if (m_mirror_peer_uuids.empty() &&
((m_flags & CREATE_PRIMARY_FLAG_IGNORE_EMPTY_PEERS) == 0)) {
lderr(cct) << "no mirror tx peers configured for the pool" << dendl;
finish(-EINVAL);
return;
}
create_snapshot();
}
template <typename I>
void CreatePrimaryRequest<I>::create_snapshot() {
cls::rbd::MirrorSnapshotNamespace ns{
((m_flags & CREATE_PRIMARY_FLAG_DEMOTED) != 0 ?
cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED :
cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY),
m_mirror_peer_uuids, "", m_clean_since_snap_id};
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "name=" << m_snap_name << ", "
<< "ns=" << ns << dendl;
auto ctx = create_context_callback<
CreatePrimaryRequest<I>,
&CreatePrimaryRequest<I>::handle_create_snapshot>(this);
m_image_ctx->operations->snap_create(ns, m_snap_name, m_snap_create_flags,
m_prog_ctx, ctx);
}
template <typename I>
void CreatePrimaryRequest<I>::handle_create_snapshot(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to create mirror snapshot: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
refresh_image();
}
template <typename I>
void CreatePrimaryRequest<I>::refresh_image() {
// refresh is required to retrieve the snapshot id (if snapshot
// created via remote RPC) and complete flag (regardless)
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
CreatePrimaryRequest<I>,
&CreatePrimaryRequest<I>::handle_refresh_image>(this);
m_image_ctx->state->refresh(ctx);
}
template <typename I>
void CreatePrimaryRequest<I>::handle_refresh_image(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
if (m_snap_id != nullptr) {
std::shared_lock image_locker{m_image_ctx->image_lock};
*m_snap_id = m_image_ctx->get_snap_id(
cls::rbd::MirrorSnapshotNamespace{}, m_snap_name);
ldout(cct, 15) << "snap_id=" << *m_snap_id << dendl;
}
unlink_peer();
}
template <typename I>
void CreatePrimaryRequest<I>::unlink_peer() {
uint64_t max_snapshots = m_image_ctx->config.template get_val<uint64_t>(
"rbd_mirroring_max_mirroring_snapshots");
ceph_assert(max_snapshots >= 3);
std::string peer_uuid;
uint64_t snap_id = CEPH_NOSNAP;
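  // for each tx peer, walk the primary snapshots taken since the last
  // promotion (oldest first) and pick one to unlink once the count exceeds
  // rbd_mirroring_max_mirroring_snapshots; incomplete or peer-less snapshots
  // are unlinked immediately; repeats via handle_unlink_peer() until there is
  // nothing left to unlink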
for (auto &peer : m_mirror_peer_uuids) {
std::shared_lock image_locker{m_image_ctx->image_lock};
size_t count = 0;
uint64_t unlink_snap_id = 0;
for (auto &snap_it : m_image_ctx->snap_info) {
auto info = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&snap_it.second.snap_namespace);
if (info == nullptr) {
continue;
}
if (info->state != cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY) {
// reset counters -- we count primary snapshots after the last promotion
count = 0;
unlink_snap_id = 0;
continue;
}
// call UnlinkPeerRequest only if the snapshot is linked with this peer
// or if it's not linked with any peer (happens if mirroring is enabled
// on a pool with no peers configured or if UnlinkPeerRequest gets
// interrupted)
if (!info->mirror_peer_uuids.empty() &&
info->mirror_peer_uuids.count(peer) == 0) {
continue;
}
if (info->mirror_peer_uuids.empty() || !info->complete) {
peer_uuid = peer;
snap_id = snap_it.first;
break;
}
count++;
if (count == max_snapshots) {
unlink_snap_id = snap_it.first;
}
if (count > max_snapshots) {
peer_uuid = peer;
snap_id = unlink_snap_id;
break;
}
}
if (snap_id != CEPH_NOSNAP) {
break;
}
}
if (snap_id == CEPH_NOSNAP) {
finish(0);
return;
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "peer=" << peer_uuid << ", snap_id=" << snap_id << dendl;
auto ctx = create_context_callback<
CreatePrimaryRequest<I>,
&CreatePrimaryRequest<I>::handle_unlink_peer>(this);
auto req = UnlinkPeerRequest<I>::create(m_image_ctx, snap_id, peer_uuid, true,
ctx);
req->send();
}
template <typename I>
void CreatePrimaryRequest<I>::handle_unlink_peer(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to unlink peer: " << cpp_strerror(r) << dendl;
finish(0); // not fatal
return;
}
unlink_peer();
}
template <typename I>
void CreatePrimaryRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::CreatePrimaryRequest<librbd::ImageCtx>;
| 8,036 | 28.225455 | 82 | cc |
null | ceph-main/src/librbd/mirror/snapshot/CreatePrimaryRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_CREATE_PRIMARY_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_CREATE_PRIMARY_REQUEST_H
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/internal.h"
#include "librbd/mirror/snapshot/Types.h"
#include <string>
#include <set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT = librbd::ImageCtx>
class CreatePrimaryRequest {
public:
static CreatePrimaryRequest *create(ImageCtxT *image_ctx,
const std::string& global_image_id,
uint64_t clean_since_snap_id,
uint64_t snap_create_flags,
uint32_t flags, uint64_t *snap_id,
Context *on_finish) {
return new CreatePrimaryRequest(image_ctx, global_image_id,
clean_since_snap_id, snap_create_flags, flags,
snap_id, on_finish);
}
CreatePrimaryRequest(ImageCtxT *image_ctx,
const std::string& global_image_id,
uint64_t clean_since_snap_id, uint64_t snap_create_flags,
uint32_t flags, uint64_t *snap_id, Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_MIRROR_PEERS
* |
* v
* CREATE_SNAPSHOT
* |
* v
* REFRESH_IMAGE
* |
* v
* UNLINK_PEER (skip if not needed,
* | repeat if needed)
* v
* <finish>
*
* @endverbatim
*/
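  /**
   * Illustrative usage sketch (a hedged example, not taken from the
   * surrounding code): "image_ctx" is assumed to be an already-open
   * librbd::ImageCtx*, the global image id string is a placeholder, and
   * C_SaferCond is used only as a convenient synchronous completion Context.
   *
   *   C_SaferCond cond;
   *   uint64_t snap_id = CEPH_NOSNAP;
   *   auto req = CreatePrimaryRequest<>::create(
   *     image_ctx, "global-image-id", CEPH_NOSNAP, 0U,
   *     CREATE_PRIMARY_FLAG_IGNORE_EMPTY_PEERS, &snap_id, &cond);
   *   req->send();
   *   int r = cond.wait();  // 0 on success; snap_id holds the new snap id
   */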
ImageCtxT *m_image_ctx;
std::string m_global_image_id;
uint64_t m_clean_since_snap_id;
const uint64_t m_snap_create_flags;
const uint32_t m_flags;
uint64_t *m_snap_id;
Context *m_on_finish;
librados::IoCtx m_default_ns_ctx;
std::set<std::string> m_mirror_peer_uuids;
std::string m_snap_name;
bufferlist m_out_bl;
NoOpProgressContext m_prog_ctx;
void get_mirror_peers();
void handle_get_mirror_peers(int r);
void create_snapshot();
void handle_create_snapshot(int r);
void refresh_image();
void handle_refresh_image(int r);
void unlink_peer();
void handle_unlink_peer(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::CreatePrimaryRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_CREATE_PRIMARY_REQUEST_H
| 2,689 | 24.140187 | 87 | h |
null | ceph-main/src/librbd/mirror/snapshot/DemoteRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/DemoteRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/mirror/snapshot/CreatePrimaryRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::DemoteRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
void DemoteRequest<I>::send() {
enable_non_primary_feature();
}
template <typename I>
void DemoteRequest<I>::enable_non_primary_feature() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
// ensure image is flagged with non-primary feature so that
// standard RBD clients cannot write to it.
librados::ObjectWriteOperation op;
cls_client::set_features(&op, RBD_FEATURE_NON_PRIMARY,
RBD_FEATURE_NON_PRIMARY);
auto aio_comp = create_rados_callback<
DemoteRequest<I>,
&DemoteRequest<I>::handle_enable_non_primary_feature>(this);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, aio_comp,
&op);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void DemoteRequest<I>::handle_enable_non_primary_feature(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to enable non-primary feature: "
<< cpp_strerror(r) << dendl;
finish(r);
return;
}
create_snapshot();
}
template <typename I>
void DemoteRequest<I>::create_snapshot() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
DemoteRequest<I>, &DemoteRequest<I>::handle_create_snapshot>(this);
auto req = CreatePrimaryRequest<I>::create(
m_image_ctx, m_global_image_id, CEPH_NOSNAP,
SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE,
(snapshot::CREATE_PRIMARY_FLAG_IGNORE_EMPTY_PEERS |
snapshot::CREATE_PRIMARY_FLAG_DEMOTED), nullptr, ctx);
req->send();
}
template <typename I>
void DemoteRequest<I>::handle_create_snapshot(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to create mirror snapshot: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
void DemoteRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::DemoteRequest<librbd::ImageCtx>;
| 3,049 | 26.477477 | 76 | cc |
null | ceph-main/src/librbd/mirror/snapshot/DemoteRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_DEMOTE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_DEMOTE_REQUEST_H
#include "include/buffer.h"
#include <string>
#include <set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT = librbd::ImageCtx>
class DemoteRequest {
public:
static DemoteRequest *create(ImageCtxT *image_ctx,
const std::string& global_image_id,
Context *on_finish) {
return new DemoteRequest(image_ctx, global_image_id, on_finish);
}
DemoteRequest(ImageCtxT *image_ctx, const std::string& global_image_id,
Context *on_finish)
: m_image_ctx(image_ctx), m_global_image_id(global_image_id),
m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* ENABLE_NON_PRIMARY_FEATURE
* |
* v
* CREATE_SNAPSHOT
* |
* v
* <finish>
*
* @endverbatim
*/
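  /**
   * Illustrative usage sketch (a hedged example, not taken from the
   * surrounding code): assumes an already-open librbd::ImageCtx* named
   * image_ctx, the image's mirror global image id, and a C_SaferCond used
   * as the completion Context.
   *
   *   C_SaferCond cond;
   *   auto req = DemoteRequest<>::create(image_ctx, global_image_id, &cond);
   *   req->send();
   *   int r = cond.wait();  // 0 once the demotion snapshot exists
   */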
ImageCtxT *m_image_ctx;
std::string m_global_image_id;
Context *m_on_finish;
void enable_non_primary_feature();
void handle_enable_non_primary_feature(int r);
void create_snapshot();
void handle_create_snapshot(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::DemoteRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_DEMOTE_REQUEST_H
| 1,593 | 19.701299 | 80 | h |
null | ceph-main/src/librbd/mirror/snapshot/GetImageStateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/GetImageStateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/mirror/snapshot/Types.h"
#include "librbd/mirror/snapshot/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::GetImageStateRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_rados_callback;
template <typename I>
void GetImageStateRequest<I>::send() {
read_object();
}
template <typename I>
void GetImageStateRequest<I>::read_object() {
CephContext *cct = m_image_ctx->cct;
auto oid = util::image_state_object_name(m_image_ctx, m_snap_id,
m_object_index);
ldout(cct, 15) << oid << dendl;
librados::ObjectReadOperation op;
m_bl.clear();
op.read(0, 0, &m_bl, nullptr);
librados::AioCompletion *comp = create_rados_callback<
GetImageStateRequest<I>,
&GetImageStateRequest<I>::handle_read_object>(this);
int r = m_image_ctx->md_ctx.aio_operate(oid, comp, &op, nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void GetImageStateRequest<I>::handle_read_object(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to read image state object: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
auto iter = m_bl.cbegin();
if (m_object_index == 0) {
ImageStateHeader header;
try {
using ceph::decode;
decode(header, iter);
} catch (const buffer::error &err) {
lderr(cct) << "failed to decode image state object header" << dendl;
finish(-EBADMSG);
return;
}
m_object_count = header.object_count;
}
bufferlist bl;
bl.substr_of(m_bl, iter.get_off(), m_bl.length() - iter.get_off());
m_state_bl.claim_append(bl);
m_object_index++;
if (m_object_index >= m_object_count) {
finish(0);
return;
}
read_object();
}
template <typename I>
void GetImageStateRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r == 0) {
try {
using ceph::decode;
decode(*m_image_state, m_state_bl);
} catch (const buffer::error &err) {
lderr(cct) << "failed to decode image state" << dendl;
r = -EBADMSG;
}
}
m_on_finish->complete(r);
delete this;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::GetImageStateRequest<librbd::ImageCtx>;
| 2,818 | 23.513043 | 82 | cc |
null | ceph-main/src/librbd/mirror/snapshot/GetImageStateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_GET_IMAGE_STATE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_GET_IMAGE_STATE_REQUEST_H
#include "include/buffer.h"
#include "include/types.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
struct ImageState;
template <typename ImageCtxT = librbd::ImageCtx>
class GetImageStateRequest {
public:
static GetImageStateRequest *create(ImageCtxT *image_ctx, uint64_t snap_id,
ImageState *image_state,
Context *on_finish) {
return new GetImageStateRequest(image_ctx, snap_id, image_state, on_finish);
}
GetImageStateRequest(ImageCtxT *image_ctx, uint64_t snap_id,
ImageState *image_state, Context *on_finish)
: m_image_ctx(image_ctx), m_snap_id(snap_id), m_image_state(image_state),
m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* READ_OBJECT (repeat for
* | every object)
* v
* <finish>
*
* @endverbatim
*/
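  /**
   * Illustrative usage sketch (a hedged example, not taken from the
   * surrounding code): assumes an already-open librbd::ImageCtx* named
   * image_ctx, the id of an existing mirror snapshot, and a C_SaferCond
   * used as the completion Context.
   *
   *   ImageState image_state;
   *   C_SaferCond cond;
   *   auto req = GetImageStateRequest<>::create(image_ctx, snap_id,
   *                                             &image_state, &cond);
   *   req->send();
   *   int r = cond.wait();  // on success image_state holds the decoded state
   */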
ImageCtxT *m_image_ctx;
uint64_t m_snap_id;
ImageState *m_image_state;
Context *m_on_finish;
bufferlist m_bl;
bufferlist m_state_bl;
size_t m_object_count = 0;
size_t m_object_index = 0;
void read_object();
void handle_read_object(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::GetImageStateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_GET_IMAGE_STATE_REQUEST_H
| 1,724 | 21.402597 | 87 | h |
null | ceph-main/src/librbd/mirror/snapshot/ImageMeta.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/ImageMeta.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "json_spirit/json_spirit.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/WatchNotifyTypes.h"
#include "librbd/mirror/snapshot/Utils.h"
#include "librbd/watcher/Notifier.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::ImageMeta: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_rados_callback;
using librbd::mirror::snapshot::util::get_image_meta_key;
template <typename I>
ImageMeta<I>::ImageMeta(I* image_ctx, const std::string& mirror_uuid)
: m_image_ctx(image_ctx), m_mirror_uuid(mirror_uuid) {
}
template <typename I>
void ImageMeta<I>::load(Context* on_finish) {
ldout(m_image_ctx->cct, 15) << "oid=" << m_image_ctx->header_oid << ", "
<< "key=" << get_image_meta_key(m_mirror_uuid)
<< dendl;
librados::ObjectReadOperation op;
cls_client::metadata_get_start(&op, get_image_meta_key(m_mirror_uuid));
m_out_bl.clear();
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_load(on_finish, r);
});
auto aio_comp = create_rados_callback(ctx);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, aio_comp,
&op, &m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void ImageMeta<I>::handle_load(Context* on_finish, int r) {
ldout(m_image_ctx->cct, 15) << "r=" << r << dendl;
std::string data;
if (r == 0) {
auto it = m_out_bl.cbegin();
r = cls_client::metadata_get_finish(&it, &data);
}
if (r == -ENOENT) {
ldout(m_image_ctx->cct, 15) << "no snapshot-based mirroring image-meta: "
<< cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
} else if (r < 0) {
lderr(m_image_ctx->cct) << "failed to load snapshot-based mirroring "
<< "image-meta: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
bool json_valid = false;
json_spirit::mValue json_root;
if (json_spirit::read(data, json_root)) {
try {
auto& json_obj = json_root.get_obj();
resync_requested = json_obj["resync_requested"].get_bool();
json_valid = true;
} catch (std::runtime_error&) {
}
}
if (!json_valid) {
lderr(m_image_ctx->cct) << "invalid image-meta JSON received" << dendl;
on_finish->complete(-EBADMSG);
return;
}
on_finish->complete(0);
}
template <typename I>
void ImageMeta<I>::save(Context* on_finish) {
ldout(m_image_ctx->cct, 15) << "oid=" << m_image_ctx->header_oid << ", "
<< "key=" << get_image_meta_key(m_mirror_uuid)
<< dendl;
// simple implementation for now
std::string json = "{\"resync_requested\": " +
std::string(resync_requested ? "true" : "false") + "}";
bufferlist bl;
bl.append(json);
// avoid using built-in metadata_set operation since that would require
// opening the non-primary image in read/write mode which isn't supported
librados::ObjectWriteOperation op;
cls_client::metadata_set(&op, {{get_image_meta_key(m_mirror_uuid), bl}});
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_save(on_finish, r);
});
auto aio_comp = create_rados_callback(ctx);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, aio_comp,
&op);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void ImageMeta<I>::handle_save(Context* on_finish, int r) {
ldout(m_image_ctx->cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(m_image_ctx->cct) << "failed to save snapshot-based mirroring "
<< "image-meta: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
notify_update(on_finish);
}
template <typename I>
void ImageMeta<I>::notify_update(Context* on_finish) {
ldout(m_image_ctx->cct, 15) << dendl;
// directly send header notification on image since you cannot
// open a non-primary image read/write and therefore cannot re-use
// the ImageWatcher to send the notification
bufferlist bl;
encode(watch_notify::NotifyMessage(new watch_notify::HeaderUpdatePayload()),
bl);
m_out_bl.clear();
auto ctx = new LambdaContext([this, on_finish](int r) {
handle_notify_update(on_finish, r);
});
auto aio_comp = create_rados_callback(ctx);
int r = m_image_ctx->md_ctx.aio_notify(
m_image_ctx->header_oid, aio_comp, bl, watcher::Notifier::NOTIFY_TIMEOUT,
&m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void ImageMeta<I>::handle_notify_update(Context* on_finish, int r) {
ldout(m_image_ctx->cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(m_image_ctx->cct) << "failed to notify image update: "
<< cpp_strerror(r) << dendl;
}
on_finish->complete(r);
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::ImageMeta<librbd::ImageCtx>;
| 5,464 | 30.051136 | 78 | cc |
null | ceph-main/src/librbd/mirror/snapshot/ImageMeta.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_IMAGE_META_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_IMAGE_META_H
#include "include/rados/librados.hpp"
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT>
class ImageMeta {
public:
static ImageMeta* create(ImageCtxT* image_ctx,
const std::string& mirror_uuid) {
return new ImageMeta(image_ctx, mirror_uuid);
}
ImageMeta(ImageCtxT* image_ctx, const std::string& mirror_uuid);
void load(Context* on_finish);
void save(Context* on_finish);
bool resync_requested = false;
private:
/**
* @verbatim
*
* <start>
* |
* v
* METADATA_GET
* |
* v
* <idle>
* |
* v
* METADATA_SET
* |
* v
* NOTIFY_UPDATE
* |
* v
* <finish>
*
* @endverbatim
*/
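  /**
   * Illustrative usage sketch (a hedged example, not taken from the
   * surrounding code): assumes an already-open librbd::ImageCtx* named
   * image_ctx, a mirror uuid string, and a C_SaferCond used as the
   * completion Context; the caller owns and deletes the returned object.
   *
   *   auto image_meta = ImageMeta<librbd::ImageCtx>::create(image_ctx,
   *                                                         mirror_uuid);
   *   C_SaferCond cond;
   *   image_meta->load(&cond);
   *   if (cond.wait() == 0 && image_meta->resync_requested) {
   *     // a resync was requested via the image-meta key
   *   }
   *   delete image_meta;
   */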
ImageCtxT* m_image_ctx;
std::string m_mirror_uuid;
bufferlist m_out_bl;
void handle_load(Context* on_finish, int r);
void handle_save(Context* on_finish, int r);
void notify_update(Context* on_finish);
void handle_notify_update(Context* on_finish, int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::ImageMeta<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_IMAGE_META_H
| 1,467 | 17.582278 | 76 | h |
null | ceph-main/src/librbd/mirror/snapshot/PromoteRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/PromoteRequest.h"
#include "common/Timer.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/image/ListWatchersRequest.h"
#include "librbd/mirror/snapshot/CreateNonPrimaryRequest.h"
#include "librbd/mirror/snapshot/CreatePrimaryRequest.h"
#include "librbd/mirror/snapshot/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::PromoteRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
void PromoteRequest<I>::send() {
CephContext *cct = m_image_ctx->cct;
bool requires_orphan = false;
if (!util::can_create_primary_snapshot(m_image_ctx, false, true,
&requires_orphan,
&m_rollback_snap_id)) {
lderr(cct) << "cannot promote" << dendl;
finish(-EINVAL);
return;
} else if (m_rollback_snap_id == CEPH_NOSNAP && !requires_orphan) {
create_promote_snapshot();
return;
}
ldout(cct, 15) << "requires_orphan=" << requires_orphan << ", "
<< "rollback_snap_id=" << m_rollback_snap_id << dendl;
create_orphan_snapshot();
}
template <typename I>
void PromoteRequest<I>::create_orphan_snapshot() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
PromoteRequest<I>,
&PromoteRequest<I>::handle_create_orphan_snapshot>(this);
auto req = CreateNonPrimaryRequest<I>::create(
m_image_ctx, false, "", CEPH_NOSNAP, {}, {}, nullptr, ctx);
req->send();
}
template <typename I>
void PromoteRequest<I>::handle_create_orphan_snapshot(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to create orphan snapshot: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
list_watchers();
}
template <typename I>
void PromoteRequest<I>::list_watchers() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
PromoteRequest<I>,
&PromoteRequest<I>::handle_list_watchers>(this);
m_watchers.clear();
auto flags = librbd::image::LIST_WATCHERS_FILTER_OUT_MY_INSTANCE |
librbd::image::LIST_WATCHERS_MIRROR_INSTANCES_ONLY;
auto req = librbd::image::ListWatchersRequest<I>::create(
*m_image_ctx, flags, &m_watchers, ctx);
req->send();
}
template <typename I>
void PromoteRequest<I>::handle_list_watchers(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to list watchers: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
if (m_watchers.empty()) {
acquire_exclusive_lock();
return;
}
wait_update_notify();
}
template <typename I>
void PromoteRequest<I>::wait_update_notify() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
ImageCtx::get_timer_instance(cct, &m_timer, &m_timer_lock);
std::lock_guard timer_lock{*m_timer_lock};
m_scheduler_ticks = 5;
int r = m_image_ctx->state->register_update_watcher(&m_update_watch_ctx,
&m_update_watcher_handle);
if (r < 0) {
lderr(cct) << "failed to register update watcher: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
scheduler_unregister_update_watcher();
}
template <typename I>
void PromoteRequest<I>::handle_update_notify() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
std::lock_guard timer_lock{*m_timer_lock};
m_scheduler_ticks = 0;
}
template <typename I>
void PromoteRequest<I>::scheduler_unregister_update_watcher() {
ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "scheduler_ticks=" << m_scheduler_ticks << dendl;
if (m_scheduler_ticks > 0) {
m_scheduler_ticks--;
m_timer->add_event_after(1, new LambdaContext([this](int) {
scheduler_unregister_update_watcher();
}));
return;
}
m_image_ctx->op_work_queue->queue(new LambdaContext([this](int) {
unregister_update_watcher();
}), 0);
}
template <typename I>
void PromoteRequest<I>::unregister_update_watcher() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
PromoteRequest<I>,
&PromoteRequest<I>::handle_unregister_update_watcher>(this);
m_image_ctx->state->unregister_update_watcher(m_update_watcher_handle, ctx);
}
template <typename I>
void PromoteRequest<I>::handle_unregister_update_watcher(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to unregister update watcher: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
list_watchers();
}
template <typename I>
void PromoteRequest<I>::acquire_exclusive_lock() {
{
std::unique_lock locker{m_image_ctx->owner_lock};
if (m_image_ctx->exclusive_lock != nullptr &&
!m_image_ctx->exclusive_lock->is_lock_owner()) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
m_lock_acquired = true;
m_image_ctx->exclusive_lock->block_requests(0);
auto ctx = create_context_callback<
PromoteRequest<I>,
&PromoteRequest<I>::handle_acquire_exclusive_lock>(this);
m_image_ctx->exclusive_lock->acquire_lock(ctx);
return;
}
}
rollback();
}
template <typename I>
void PromoteRequest<I>::handle_acquire_exclusive_lock(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
} else {
std::unique_lock locker{m_image_ctx->owner_lock};
if (m_image_ctx->exclusive_lock != nullptr &&
!m_image_ctx->exclusive_lock->is_lock_owner()) {
lderr(cct) << "failed to acquire exclusive lock" << dendl;
r = m_image_ctx->exclusive_lock->get_unlocked_op_error();
locker.unlock();
finish(r);
return;
}
}
rollback();
}
template <typename I>
void PromoteRequest<I>::rollback() {
if (m_rollback_snap_id == CEPH_NOSNAP) {
create_promote_snapshot();
return;
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
std::shared_lock owner_locker{m_image_ctx->owner_lock};
std::shared_lock image_locker{m_image_ctx->image_lock};
auto info = m_image_ctx->get_snap_info(m_rollback_snap_id);
ceph_assert(info != nullptr);
auto snap_namespace = info->snap_namespace;
auto snap_name = info->name;
image_locker.unlock();
auto ctx = create_async_context_callback(
*m_image_ctx, create_context_callback<
PromoteRequest<I>, &PromoteRequest<I>::handle_rollback>(this));
m_image_ctx->operations->execute_snap_rollback(snap_namespace, snap_name,
m_progress_ctx, ctx);
}
template <typename I>
void PromoteRequest<I>::handle_rollback(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to rollback: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
create_promote_snapshot();
}
template <typename I>
void PromoteRequest<I>::create_promote_snapshot() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
PromoteRequest<I>,
&PromoteRequest<I>::handle_create_promote_snapshot>(this);
auto req = CreatePrimaryRequest<I>::create(
m_image_ctx, m_global_image_id, CEPH_NOSNAP,
SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE,
(snapshot::CREATE_PRIMARY_FLAG_IGNORE_EMPTY_PEERS |
snapshot::CREATE_PRIMARY_FLAG_FORCE), nullptr, ctx);
req->send();
}
template <typename I>
void PromoteRequest<I>::handle_create_promote_snapshot(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to create promote snapshot: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
disable_non_primary_feature();
}
template <typename I>
void PromoteRequest<I>::disable_non_primary_feature() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << dendl;
// remove the non-primary feature flag so that the image can be
// R/W by standard RBD clients
librados::ObjectWriteOperation op;
cls_client::set_features(&op, 0U, RBD_FEATURE_NON_PRIMARY);
auto aio_comp = create_rados_callback<
PromoteRequest<I>,
&PromoteRequest<I>::handle_disable_non_primary_feature>(this);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, aio_comp,
&op);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void PromoteRequest<I>::handle_disable_non_primary_feature(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to disable non-primary feature: "
<< cpp_strerror(r) << dendl;
finish(r);
return;
}
release_exclusive_lock();
}
template <typename I>
void PromoteRequest<I>::release_exclusive_lock() {
if (m_lock_acquired) {
std::unique_lock locker{m_image_ctx->owner_lock};
if (m_image_ctx->exclusive_lock != nullptr) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
m_image_ctx->exclusive_lock->unblock_requests();
auto ctx = create_context_callback<
PromoteRequest<I>,
&PromoteRequest<I>::handle_release_exclusive_lock>(this);
m_image_ctx->exclusive_lock->release_lock(ctx);
return;
}
}
finish(0);
}
template <typename I>
void PromoteRequest<I>::handle_release_exclusive_lock(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to release exclusive lock: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
void PromoteRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::PromoteRequest<librbd::ImageCtx>;
| 11,030 | 26.169951 | 80 | cc |
null | ceph-main/src/librbd/mirror/snapshot/PromoteRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_PROMOTE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_PROMOTE_REQUEST_H
#include "include/buffer.h"
#include "include/rbd/librbd.hpp"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "librbd/internal.h"
#include <string>
#include <set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT = librbd::ImageCtx>
class PromoteRequest {
public:
static PromoteRequest *create(ImageCtxT *image_ctx,
const std::string& global_image_id,
Context *on_finish) {
return new PromoteRequest(image_ctx, global_image_id, on_finish);
}
PromoteRequest(ImageCtxT *image_ctx, const std::string& global_image_id,
Context *on_finish)
: m_image_ctx(image_ctx), m_global_image_id(global_image_id),
m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* | (can promote)
* |\----------------------------------------\
* | |
* | |
* v (skip if not needed) |
* CREATE_ORPHAN_SNAPSHOT |
* | |
* | /-- UNREGISTER_UPDATE_WATCHER <-\ |
* v v | |
* LIST_WATCHERS ----> WAIT_UPDATE_NOTIFY --/ |
* | |
* | (no watchers) |
* v |
* ACQUIRE_EXCLUSIVE_LOCK |
* | (skip if not needed) |
* v |
* ROLLBACK |
* | |
* v |
* CREATE_PROMOTE_SNAPSHOT <--------------------/
* |
* v
* DISABLE_NON_PRIMARY_FEATURE
* |
* v
* RELEASE_EXCLUSIVE_LOCK (skip if not needed)
* |
* v
* <finish>
*
* @endverbatim
*/
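  /**
   * Illustrative usage sketch (a hedged example, not taken from the
   * surrounding code): assumes an already-open librbd::ImageCtx* named
   * image_ctx, the image's mirror global image id, and a C_SaferCond used
   * as the completion Context.
   *
   *   C_SaferCond cond;
   *   auto req = PromoteRequest<>::create(image_ctx, global_image_id, &cond);
   *   req->send();
   *   int r = cond.wait();  // 0 once the promote snapshot has been created
   */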
ImageCtxT *m_image_ctx;
std::string m_global_image_id;
Context *m_on_finish;
uint64_t m_rollback_snap_id = CEPH_NOSNAP;
bool m_lock_acquired = false;
NoOpProgressContext m_progress_ctx;
class UpdateWatchCtx : public librbd::UpdateWatchCtx {
public:
UpdateWatchCtx(PromoteRequest *promote_request)
: promote_request(promote_request) {
}
void handle_notify() {
promote_request->handle_update_notify();
}
private:
PromoteRequest *promote_request;
} m_update_watch_ctx = {this};
std::list<obj_watch_t> m_watchers;
uint64_t m_update_watcher_handle = 0;
uint64_t m_scheduler_ticks = 0;
SafeTimer *m_timer = nullptr;
ceph::mutex *m_timer_lock = nullptr;
void refresh_image();
void handle_refresh_image(int r);
void create_orphan_snapshot();
void handle_create_orphan_snapshot(int r);
void list_watchers();
void handle_list_watchers(int r);
void wait_update_notify();
void handle_update_notify();
void scheduler_unregister_update_watcher();
void unregister_update_watcher();
void handle_unregister_update_watcher(int r);
void acquire_exclusive_lock();
void handle_acquire_exclusive_lock(int r);
void rollback();
void handle_rollback(int r);
void create_promote_snapshot();
void handle_create_promote_snapshot(int r);
void disable_non_primary_feature();
void handle_disable_non_primary_feature(int r);
void release_exclusive_lock();
void handle_release_exclusive_lock(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::PromoteRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_PROMOTE_REQUEST_H
| 4,036 | 25.559211 | 81 | h |
null | ceph-main/src/librbd/mirror/snapshot/RemoveImageStateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/RemoveImageStateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/mirror/snapshot/Types.h"
#include "librbd/mirror/snapshot/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::RemoveImageStateRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_rados_callback;
template <typename I>
void RemoveImageStateRequest<I>::send() {
get_object_count();
}
template <typename I>
void RemoveImageStateRequest<I>::get_object_count() {
CephContext *cct = m_image_ctx->cct;
auto oid = util::image_state_object_name(m_image_ctx, m_snap_id, 0);
ldout(cct, 15) << oid << dendl;
librados::ObjectReadOperation op;
op.read(0, 0, &m_bl, nullptr);
librados::AioCompletion *comp = create_rados_callback<
RemoveImageStateRequest<I>,
&RemoveImageStateRequest<I>::handle_get_object_count>(this);
int r = m_image_ctx->md_ctx.aio_operate(oid, comp, &op, nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void RemoveImageStateRequest<I>::handle_get_object_count(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to read image state object: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
ImageStateHeader header(1);
auto iter = m_bl.cbegin();
try {
using ceph::decode;
decode(header, iter);
} catch (const buffer::error &err) {
lderr(cct) << "failed to decode image state object header" << dendl;
// still try to remove it
}
m_object_count = header.object_count > 0 ? header.object_count : 1;
remove_object();
}
template <typename I>
void RemoveImageStateRequest<I>::remove_object() {
CephContext *cct = m_image_ctx->cct;
ceph_assert(m_object_count > 0);
m_object_count--;
auto oid = util::image_state_object_name(m_image_ctx, m_snap_id,
m_object_count);
ldout(cct, 15) << oid << dendl;
librados::ObjectWriteOperation op;
op.remove();
librados::AioCompletion *comp = create_rados_callback<
RemoveImageStateRequest<I>,
&RemoveImageStateRequest<I>::handle_remove_object>(this);
int r = m_image_ctx->md_ctx.aio_operate(oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void RemoveImageStateRequest<I>::handle_remove_object(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to remove image state object: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
if (m_object_count == 0) {
finish(0);
return;
}
remove_object();
}
template <typename I>
void RemoveImageStateRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::RemoveImageStateRequest<librbd::ImageCtx>;
| 3,387 | 24.666667 | 85 | cc |
null | ceph-main/src/librbd/mirror/snapshot/RemoveImageStateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_REMOVE_IMAGE_STATE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_REMOVE_IMAGE_STATE_REQUEST_H
#include "include/buffer.h"
#include "include/types.h"
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT = librbd::ImageCtx>
class RemoveImageStateRequest {
public:
static RemoveImageStateRequest *create(ImageCtxT *image_ctx, uint64_t snap_id,
Context *on_finish) {
return new RemoveImageStateRequest(image_ctx, snap_id, on_finish);
}
RemoveImageStateRequest(ImageCtxT *image_ctx, uint64_t snap_id,
Context *on_finish)
: m_image_ctx(image_ctx), m_snap_id(snap_id), m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_OBJECT_COUNT
* |
* v
* REMOVE_OBJECT (repeat for
* | every object)
* v
* <finish>
*
* @endverbatim
*/
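  /**
   * Illustrative usage sketch (a hedged example, not taken from the
   * surrounding code): assumes an already-open librbd::ImageCtx* named
   * image_ctx, the id of the mirror snapshot whose state objects should be
   * removed, and a C_SaferCond used as the completion Context.
   *
   *   C_SaferCond cond;
   *   auto req = RemoveImageStateRequest<>::create(image_ctx, snap_id, &cond);
   *   req->send();
   *   int r = cond.wait();
   */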
ImageCtxT *m_image_ctx;
uint64_t m_snap_id;
Context *m_on_finish;
bufferlist m_bl;
size_t m_object_count = 0;
void get_object_count();
void handle_get_object_count(int r);
void remove_object();
void handle_remove_object(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::RemoveImageStateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_REMOVE_IMAGE_STATE_REQUEST_H
| 1,635 | 20.526316 | 90 | h |
null | ceph-main/src/librbd/mirror/snapshot/SetImageStateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/SetImageStateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/image/GetMetadataRequest.h"
#include "librbd/mirror/snapshot/WriteImageStateRequest.h"
#include <boost/algorithm/string/predicate.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::SetImageStateRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
void SetImageStateRequest<I>::send() {
get_name();
}
template <typename I>
void SetImageStateRequest<I>::get_name() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
librados::ObjectReadOperation op;
cls_client::dir_get_name_start(&op, m_image_ctx->id);
librados::AioCompletion *comp = create_rados_callback<
SetImageStateRequest<I>,
&SetImageStateRequest<I>::handle_get_name>(this);
m_bl.clear();
int r = m_image_ctx->md_ctx.aio_operate(RBD_DIRECTORY, comp, &op, &m_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void SetImageStateRequest<I>::handle_get_name(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r == 0) {
auto it = m_bl.cbegin();
r = cls_client::dir_get_name_finish(&it, &m_image_state.name);
}
if (r < 0) {
lderr(cct) << "failed to retrieve image name: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
ldout(cct, 15) << "name=" << m_image_state.name << dendl;
get_snap_limit();
}
template <typename I>
void SetImageStateRequest<I>::get_snap_limit() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
librados::ObjectReadOperation op;
cls_client::snapshot_get_limit_start(&op);
librados::AioCompletion *comp = create_rados_callback<
SetImageStateRequest<I>,
&SetImageStateRequest<I>::handle_get_snap_limit>(this);
m_bl.clear();
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op,
&m_bl);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void SetImageStateRequest<I>::handle_get_snap_limit(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r == 0) {
auto it = m_bl.cbegin();
r = cls_client::snapshot_get_limit_finish(&it, &m_image_state.snap_limit);
}
if (r < 0) {
lderr(cct) << "failed to retrieve snapshot limit: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
ldout(cct, 15) << "snap_limit=" << m_image_state.snap_limit << dendl;
get_metadata();
}
template <typename I>
void SetImageStateRequest<I>::get_metadata() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
SetImageStateRequest<I>,
&SetImageStateRequest<I>::handle_get_metadata>(this);
auto req = image::GetMetadataRequest<I>::create(
m_image_ctx->md_ctx, m_image_ctx->header_oid, true, "", "", 0,
&m_image_state.metadata, ctx);
req->send();
}
template <typename I>
void SetImageStateRequest<I>::handle_get_metadata(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to retrieve metadata: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
{
std::shared_lock image_locker{m_image_ctx->image_lock};
m_image_state.features =
m_image_ctx->features & ~RBD_FEATURES_IMPLICIT_ENABLE;
for (auto &[snap_id, snap_info] : m_image_ctx->snap_info) {
auto type = cls::rbd::get_snap_namespace_type(snap_info.snap_namespace);
if (type != cls::rbd::SNAPSHOT_NAMESPACE_TYPE_USER) {
// only replicate user snapshots -- trash snapshots will be
// replicated by an implicit delete if required
continue;
}
m_image_state.snapshots[snap_id] = {snap_info.snap_namespace,
snap_info.name,
snap_info.protection_status};
}
}
write_image_state();
}
template <typename I>
void SetImageStateRequest<I>::write_image_state() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
SetImageStateRequest<I>,
&SetImageStateRequest<I>::handle_write_image_state>(this);
auto req = WriteImageStateRequest<I>::create(m_image_ctx, m_snap_id,
m_image_state, ctx);
req->send();
}
template <typename I>
void SetImageStateRequest<I>::handle_write_image_state(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to write image state: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
update_primary_snapshot();
}
template <typename I>
void SetImageStateRequest<I>::update_primary_snapshot() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
librados::ObjectWriteOperation op;
librbd::cls_client::mirror_image_snapshot_set_copy_progress(
&op, m_snap_id, true, 0);
auto aio_comp = create_rados_callback<
SetImageStateRequest<I>,
&SetImageStateRequest<I>::handle_update_primary_snapshot>(this);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, aio_comp,
&op);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void SetImageStateRequest<I>::handle_update_primary_snapshot(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to update primary snapshot: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
void SetImageStateRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::SetImageStateRequest<librbd::ImageCtx>;
| 6,489 | 26.5 | 81 | cc |
null | ceph-main/src/librbd/mirror/snapshot/SetImageStateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_SET_IMAGE_STATE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_SET_IMAGE_STATE_REQUEST_H
#include "librbd/mirror/snapshot/Types.h"
#include <map>
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT = librbd::ImageCtx>
class SetImageStateRequest {
public:
static SetImageStateRequest *create(ImageCtxT *image_ctx, uint64_t snap_id,
Context *on_finish) {
return new SetImageStateRequest(image_ctx, snap_id, on_finish);
}
SetImageStateRequest(ImageCtxT *image_ctx, uint64_t snap_id,
Context *on_finish)
: m_image_ctx(image_ctx), m_snap_id(snap_id), m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* GET_NAME
* |
* v
* GET_SNAP_LIMIT
* |
* v
* GET_METADATA
* |
* v
* WRITE_IMAGE_STATE
* |
* v
* UPDATE_PRIMARY_SNAPSHOT
* |
* v
* <finish>
*
* @endverbatim
*/
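  /**
   * Illustrative usage sketch (a hedged example, not taken from the
   * surrounding code): assumes an already-open librbd::ImageCtx* named
   * image_ctx, the id of the just-created primary mirror snapshot, and a
   * C_SaferCond used as the completion Context.
   *
   *   C_SaferCond cond;
   *   auto req = SetImageStateRequest<>::create(image_ctx, snap_id, &cond);
   *   req->send();
   *   int r = cond.wait();  // image state written and copy progress updated
   */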
ImageCtxT *m_image_ctx;
uint64_t m_snap_id;
Context *m_on_finish;
ImageState m_image_state;
bufferlist m_bl;
bufferlist m_state_bl;
void get_name();
void handle_get_name(int r);
void get_snap_limit();
void handle_get_snap_limit(int r);
void get_metadata();
void handle_get_metadata(int r);
void write_image_state();
void handle_write_image_state(int r);
void update_primary_snapshot();
void handle_update_primary_snapshot(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::SetImageStateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_SET_IMAGE_STATE_REQUEST_H
| 1,930 | 18.907216 | 87 | h |
null | ceph-main/src/librbd/mirror/snapshot/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Formatter.h"
#include "include/encoding.h"
#include "include/stringify.h"
#include "librbd/mirror/snapshot/Types.h"
namespace librbd {
namespace mirror {
namespace snapshot {
void ImageStateHeader::encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(object_count, bl);
ENCODE_FINISH(bl);
}
void ImageStateHeader::decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(object_count, bl);
DECODE_FINISH(bl);
}
void SnapState::encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(snap_namespace, bl);
encode(name, bl);
encode(protection_status, bl);
ENCODE_FINISH(bl);
}
void SnapState::decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(snap_namespace, bl);
decode(name, bl);
decode(protection_status, bl);
DECODE_FINISH(bl);
}
void SnapState::dump(Formatter *f) const {
f->open_object_section("namespace");
snap_namespace.dump(f);
f->close_section();
f->dump_string("name", name);
f->dump_unsigned("protection_status", protection_status);
}
std::ostream& operator<<(std::ostream& os, const SnapState& snap_state) {
os << "["
<< "namespace=" << snap_state.snap_namespace << ", "
<< "name=" << snap_state.name << ", "
<< "protection=" << static_cast<int>(snap_state.protection_status)
<< "]";
return os;
}
void ImageState::encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(name, bl);
encode(features, bl);
encode(snap_limit, bl);
encode(snapshots, bl);
encode(metadata, bl);
ENCODE_FINISH(bl);
}
void ImageState::decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(name, bl);
decode(features, bl);
decode(snap_limit, bl);
decode(snapshots, bl);
decode(metadata, bl);
DECODE_FINISH(bl);
}
void ImageState::dump(Formatter *f) const {
f->dump_string("name", name);
f->dump_unsigned("features", features);
f->dump_unsigned("snap_limit", snap_limit);
f->open_array_section("snapshots");
for (auto &[id, snap_state] : snapshots) {
f->open_object_section(stringify(id).c_str());
snap_state.dump(f);
f->close_section(); // snap_state
}
f->close_section(); // snapshots
f->open_object_section("metadata");
for (auto &it : metadata) {
f->dump_stream(it.first.c_str()) << it.second;
}
f->close_section(); // metadata
}
std::ostream& operator<<(std::ostream& os, const ImageState& image_state) {
os << "["
<< "name=" << image_state.name << ", "
<< "features=" << image_state.features << ", "
<< "snap_limit=" << image_state.snap_limit << ", "
<< "snaps=" << image_state.snapshots << ", "
<< "metadata_count=" << image_state.metadata.size()
<< "]";
return os;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
| 2,888 | 25.263636 | 75 | cc |
null | ceph-main/src/librbd/mirror/snapshot/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_TYPES_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_TYPES_H
#include "cls/rbd/cls_rbd_types.h"
#include "include/buffer.h"
#include "include/types.h"
#include <map>
#include <string>
namespace librbd {
namespace mirror {
namespace snapshot {
enum CreatePrimaryFlags {
CREATE_PRIMARY_FLAG_IGNORE_EMPTY_PEERS = (1 << 0),
CREATE_PRIMARY_FLAG_DEMOTED = (1 << 1),
CREATE_PRIMARY_FLAG_FORCE = (1 << 2)
};
struct ImageStateHeader {
uint32_t object_count = 0;
ImageStateHeader() {
}
ImageStateHeader(uint32_t object_count) : object_count(object_count) {
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &it);
};
WRITE_CLASS_ENCODER(ImageStateHeader);
struct SnapState {
cls::rbd::SnapshotNamespace snap_namespace;
std::string name;
uint8_t protection_status = 0;
SnapState() {
}
SnapState(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &name, uint8_t protection_status)
: snap_namespace(snap_namespace), name(name),
protection_status(protection_status) {
}
bool operator==(const SnapState& rhs) const {
return snap_namespace == rhs.snap_namespace &&
name == rhs.name && protection_status == rhs.protection_status;
}
bool operator<(const SnapState& rhs) const {
if (snap_namespace != rhs.snap_namespace) {
return snap_namespace < rhs.snap_namespace;
}
if (name != rhs.name) {
return name < rhs.name;
}
return protection_status < rhs.protection_status;
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &it);
void dump(Formatter *f) const;
};
std::ostream& operator<<(std::ostream& os, const SnapState& snap_state);
WRITE_CLASS_ENCODER(SnapState);
struct ImageState {
std::string name;
uint64_t features = 0;
uint64_t snap_limit = 0;
std::map<uint64_t, SnapState> snapshots;
std::map<std::string, bufferlist> metadata;
ImageState() {
}
ImageState(const std::string &name, uint64_t features, uint64_t snap_limit,
const std::map<uint64_t, SnapState> &snapshots,
const std::map<std::string, bufferlist> &metadata)
: name(name), features(features), snap_limit(snap_limit),
snapshots(snapshots), metadata(metadata) {
}
bool operator==(const ImageState& rhs) const {
return name == rhs.name && features == rhs.features &&
snap_limit == rhs.snap_limit && snapshots == rhs.snapshots;
}
bool operator<(const ImageState& rhs) const {
if (name != rhs.name) {
return name < rhs.name;
}
if (features != rhs.features) {
return features < rhs.features;
}
if (snap_limit != rhs.snap_limit) {
return snap_limit < rhs.snap_limit;
}
return snapshots < rhs.snapshots;
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &it);
void dump(Formatter *f) const;
};
std::ostream& operator<<(std::ostream& os, const ImageState& image_state);
WRITE_CLASS_ENCODER(ImageState);
} // namespace snapshot
} // namespace mirror
} // namespace librbd
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_TYPES_H
| 3,272 | 25.609756 | 77 | h |
null | ceph-main/src/librbd/mirror/snapshot/UnlinkPeerRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/UnlinkPeerRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::UnlinkPeerRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
void UnlinkPeerRequest<I>::send() {
if (!m_image_ctx->state->is_refresh_required()) {
unlink_peer();
return;
}
refresh_image();
}
template <typename I>
void UnlinkPeerRequest<I>::refresh_image() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
UnlinkPeerRequest<I>, &UnlinkPeerRequest<I>::handle_refresh_image>(this);
m_image_ctx->state->refresh(ctx);
}
template <typename I>
void UnlinkPeerRequest<I>::handle_refresh_image(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
unlink_peer();
}
template <typename I>
void UnlinkPeerRequest<I>::unlink_peer() {
CephContext *cct = m_image_ctx->cct;
m_image_ctx->image_lock.lock_shared();
int r = -ENOENT;
cls::rbd::SnapshotNamespace snap_namespace;
std::string snap_name;
bool have_newer_mirror_snapshot = false;
for (auto snap_it = m_image_ctx->snap_info.find(m_snap_id);
snap_it != m_image_ctx->snap_info.end(); ++snap_it) {
if (snap_it->first == m_snap_id) {
r = 0;
snap_namespace = snap_it->second.snap_namespace;
snap_name = snap_it->second.name;
} else if (std::holds_alternative<cls::rbd::MirrorSnapshotNamespace>(
snap_it->second.snap_namespace)) {
ldout(cct, 15) << "located newer mirror snapshot" << dendl;
have_newer_mirror_snapshot = true;
break;
}
}
if (r == -ENOENT) {
ldout(cct, 15) << "missing snapshot: snap_id=" << m_snap_id << dendl;
m_image_ctx->image_lock.unlock_shared();
finish(r);
return;
}
auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&snap_namespace);
if (mirror_ns == nullptr) {
lderr(cct) << "not mirror snapshot (snap_id=" << m_snap_id << ")" << dendl;
m_image_ctx->image_lock.unlock_shared();
finish(-EINVAL);
return;
}
  // if there are or will be no more peers in the mirror snapshot and we have
// a more recent mirror snapshot, remove the older one
if ((mirror_ns->mirror_peer_uuids.empty() ||
(mirror_ns->mirror_peer_uuids.size() == 1 &&
mirror_ns->mirror_peer_uuids.count(m_mirror_peer_uuid) != 0)) &&
have_newer_mirror_snapshot) {
if (m_allow_remove) {
m_image_ctx->image_lock.unlock_shared();
remove_snapshot(snap_namespace, snap_name);
return;
} else {
ldout(cct, 15) << "skipping removal of snapshot: snap_id=" << m_snap_id
<< ", mirror_peer_uuid=" << m_mirror_peer_uuid
<< ", mirror_peer_uuids=" << mirror_ns->mirror_peer_uuids
<< dendl;
}
}
if (mirror_ns->mirror_peer_uuids.count(m_mirror_peer_uuid) == 0) {
ldout(cct, 15) << "no peer to unlink: snap_id=" << m_snap_id
<< ", mirror_peer_uuid=" << m_mirror_peer_uuid
<< ", mirror_peer_uuids=" << mirror_ns->mirror_peer_uuids
<< dendl;
m_image_ctx->image_lock.unlock_shared();
finish(0);
return;
}
m_image_ctx->image_lock.unlock_shared();
ldout(cct, 15) << "snap_id=" << m_snap_id << ", "
<< "mirror_peer_uuid=" << m_mirror_peer_uuid << dendl;
librados::ObjectWriteOperation op;
librbd::cls_client::mirror_image_snapshot_unlink_peer(&op, m_snap_id,
m_mirror_peer_uuid);
auto aio_comp = create_rados_callback<
UnlinkPeerRequest<I>, &UnlinkPeerRequest<I>::handle_unlink_peer>(this);
r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, aio_comp, &op);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void UnlinkPeerRequest<I>::handle_unlink_peer(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r == -ERESTART || r == -ENOENT) {
if (r == -ERESTART) {
ldout(cct, 15) << "unlinking last peer not supported" << dendl;
m_allow_remove = true;
}
refresh_image();
return;
}
if (r < 0) {
lderr(cct) << "failed to unlink peer: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
notify_update();
}
template <typename I>
void UnlinkPeerRequest<I>::notify_update() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
UnlinkPeerRequest<I>, &UnlinkPeerRequest<I>::handle_notify_update>(this);
m_image_ctx->notify_update(ctx);
}
template <typename I>
void UnlinkPeerRequest<I>::handle_notify_update(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r == -ENOENT || r == -ETIMEDOUT) {
    // non-fatal errors
lderr(cct) << "failed to notify update: " << cpp_strerror(r) << dendl;
} else if (r < 0) {
lderr(cct) << "failed to notify update: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
refresh_image();
}
template <typename I>
void UnlinkPeerRequest<I>::remove_snapshot(
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << dendl;
auto ctx = create_context_callback<
UnlinkPeerRequest<I>, &UnlinkPeerRequest<I>::handle_remove_snapshot>(this);
m_image_ctx->operations->snap_remove(snap_namespace, snap_name, ctx);
}
template <typename I>
void UnlinkPeerRequest<I>::handle_remove_snapshot(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to remove snapshot: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
void UnlinkPeerRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
auto on_finish = m_on_finish;
delete this;
on_finish->complete(r);
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::UnlinkPeerRequest<librbd::ImageCtx>;
| 6,813 | 28.497835 | 79 | cc |
null | ceph-main/src/librbd/mirror/snapshot/UnlinkPeerRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_UNLINK_PEER_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_UNLINK_PEER_REQUEST_H
#include "include/buffer.h"
#include "cls/rbd/cls_rbd_client.h"
#include <string>
#include <set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT = librbd::ImageCtx>
class UnlinkPeerRequest {
public:
static UnlinkPeerRequest *create(ImageCtxT *image_ctx, uint64_t snap_id,
const std::string &mirror_peer_uuid,
bool allow_remove, Context *on_finish) {
return new UnlinkPeerRequest(image_ctx, snap_id, mirror_peer_uuid,
allow_remove, on_finish);
}
UnlinkPeerRequest(ImageCtxT *image_ctx, uint64_t snap_id,
const std::string &mirror_peer_uuid, bool allow_remove,
Context *on_finish)
: m_image_ctx(image_ctx), m_snap_id(snap_id),
m_mirror_peer_uuid(mirror_peer_uuid), m_allow_remove(allow_remove),
m_on_finish(on_finish) {
}
void send();
private:
/*
* @verbatim
*
* <start>
* |
* v
* REFRESH_IMAGE <--------------------------\
* | ^ (not found |
* | * or last) |
* | * |
* |\---------------> UNLINK_PEER --> NOTIFY_UPDATE
* | (not last peer or
* | no newer mirror
* | snap exists)
* |
* |\---------------> REMOVE_SNAPSHOT
* | (last peer and |
* | newer mirror |
* | snap exists) |
* | |
* |(peer not found) |
* v |
* <finish> <---------------/
*
* @endverbatim
*/
ImageCtxT *m_image_ctx;
uint64_t m_snap_id;
std::string m_mirror_peer_uuid;
bool m_allow_remove;
Context *m_on_finish;
void refresh_image();
void handle_refresh_image(int r);
void unlink_peer();
void handle_unlink_peer(int r);
void notify_update();
void handle_notify_update(int r);
void remove_snapshot(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name);
void handle_remove_snapshot(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::UnlinkPeerRequest<librbd::ImageCtx>;
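// Illustrative usage sketch (added comment; not part of the original source).
// A caller holding a valid ImageCtx could drive the request synchronously
// with a C_SaferCond:
//
//   C_SaferCond ctx;
//   auto req = librbd::mirror::snapshot::UnlinkPeerRequest<librbd::ImageCtx>::create(
//       image_ctx, snap_id, mirror_peer_uuid, true /* allow_remove */, &ctx);
//   req->send();
//   int r = ctx.wait();
//
// The request deletes itself in finish(), so it must not be touched after
// send() has been called.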
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_UNLINK_PEER_REQUEST_H
| 2,629 | 25.565657 | 84 | h |
null | ceph-main/src/librbd/mirror/snapshot/Utils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/dout.h"
#include "common/errno.h"
#include "include/stringify.h"
#include "librbd/ImageCtx.h"
#include "librbd/mirror/snapshot/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::util: " \
<< " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
namespace util {
namespace {
const std::string IMAGE_STATE_OBJECT_PREFIX = "rbd_mirror_snapshot.";
bool get_rollback_snap_id(
std::map<librados::snap_t, SnapInfo>::reverse_iterator it,
std::map<librados::snap_t, SnapInfo>::reverse_iterator end,
uint64_t *rollback_snap_id) {
for (; it != end; it++) {
auto mirror_ns = std::get<cls::rbd::MirrorSnapshotNamespace>(
it->second.snap_namespace);
if (mirror_ns.state != cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY) {
break;
}
if (mirror_ns.complete) {
break;
}
}
if (it != end) {
*rollback_snap_id = it->first;
return true;
}
return false;
}
} // anonymous namespace
std::string get_image_meta_key(const std::string& mirror_uuid) {
return ".rbd_mirror." + mirror_uuid;
}
template <typename I>
bool can_create_primary_snapshot(I *image_ctx, bool demoted, bool force,
bool* requires_orphan,
uint64_t *rollback_snap_id) {
CephContext *cct = image_ctx->cct;
if (requires_orphan != nullptr) {
*requires_orphan = false;
}
if (rollback_snap_id) {
*rollback_snap_id = CEPH_NOSNAP;
}
std::shared_lock image_locker{image_ctx->image_lock};
for (auto it = image_ctx->snap_info.rbegin();
it != image_ctx->snap_info.rend(); it++) {
auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&it->second.snap_namespace);
if (mirror_ns == nullptr) {
continue;
}
ldout(cct, 20) << "previous snapshot snap_id=" << it->first << " "
<< *mirror_ns << dendl;
if (mirror_ns->is_demoted() && !force) {
lderr(cct) << "trying to create primary snapshot without force "
<< "when previous primary snapshot is demoted"
<< dendl;
return false;
}
if (mirror_ns->state == cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY) {
if (!force) {
lderr(cct) << "trying to create primary snapshot without force "
<< "when previous snapshot is non-primary"
<< dendl;
return false;
}
if (demoted) {
lderr(cct) << "trying to create primary demoted snapshot "
<< "when previous snapshot is non-primary"
<< dendl;
return false;
}
if (requires_orphan != nullptr) {
*requires_orphan = !mirror_ns->is_demoted();
}
if (!mirror_ns->complete) {
ldout(cct, 20) << "needs rollback" << dendl;
if (!rollback_snap_id) {
lderr(cct) << "trying to create primary snapshot "
<< "when previous non-primary snapshot is not copied yet"
<< dendl;
return false;
}
if (!get_rollback_snap_id(++it, image_ctx->snap_info.rend(),
rollback_snap_id)) {
lderr(cct) << "cannot rollback" << dendl;
return false;
}
ldout(cct, 20) << "rollback_snap_id=" << *rollback_snap_id << dendl;
}
return true;
}
return true;
}
ldout(cct, 20) << "no previous mirror snapshots found" << dendl;
return true;
}
template <typename I>
bool can_create_non_primary_snapshot(I *image_ctx) {
CephContext *cct = image_ctx->cct;
std::shared_lock image_locker{image_ctx->image_lock};
for (auto it = image_ctx->snap_info.rbegin();
it != image_ctx->snap_info.rend(); it++) {
auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&it->second.snap_namespace);
if (mirror_ns != nullptr) {
ldout(cct, 20) << "previous mirror snapshot snap_id=" << it->first << " "
<< *mirror_ns << dendl;
if (mirror_ns->state == cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY) {
if (!mirror_ns->complete) {
lderr(cct) << "trying to create non-primary snapshot "
<< "when previous non-primary snapshot is not copied yet"
<< dendl;
return false;
}
return true;
}
if (mirror_ns->state == cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY) {
lderr(cct) << "trying to create non-primary snapshot "
<< "when previous primary snapshot is not in demoted state"
<< dendl;
return false;
}
return true;
}
}
ldout(cct, 20) << "no previous mirror snapshots found" << dendl;
return true;
}
template <typename I>
std::string image_state_object_name(I *image_ctx, uint64_t snap_id,
uint64_t index) {
return IMAGE_STATE_OBJECT_PREFIX + image_ctx->id + "." +
stringify(snap_id) + "." + stringify(index);
}
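// Added example for clarity: with a hypothetical image id "abc123", the image
// state for snap id 5 is stored in objects named
// "rbd_mirror_snapshot.abc123.5.0", "rbd_mirror_snapshot.abc123.5.1", etc.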
} // namespace util
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template bool librbd::mirror::snapshot::util::can_create_primary_snapshot(
librbd::ImageCtx *image_ctx, bool demoted, bool force,
bool* requires_orphan, uint64_t *rollback_snap_id);
template bool librbd::mirror::snapshot::util::can_create_non_primary_snapshot(
librbd::ImageCtx *image_ctx);
template std::string librbd::mirror::snapshot::util::image_state_object_name(
librbd::ImageCtx *image_ctx, uint64_t snap_id, uint64_t index);
| 5,746 | 29.73262 | 79 | cc |
null | ceph-main/src/librbd/mirror/snapshot/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_UTILS_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_UTILS_H
#include "include/int_types.h"
#include "include/stringify.h"
#include <string>
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
namespace util {
std::string get_image_meta_key(const std::string& mirror_uuid);
template <typename ImageCtxT = librbd::ImageCtx>
bool can_create_primary_snapshot(ImageCtxT *image_ctx, bool demoted, bool force,
bool* requires_orphan,
uint64_t *rollback_snap_id);
template <typename ImageCtxT = librbd::ImageCtx>
bool can_create_non_primary_snapshot(ImageCtxT *image_ctx);
template <typename ImageCtxT = librbd::ImageCtx>
std::string image_state_object_name(ImageCtxT *image_ctx, uint64_t snap_id,
uint64_t index);
} // namespace util
} // namespace snapshot
} // namespace mirror
} // namespace librbd
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_UTILS_H
| 1,103 | 27.307692 | 80 | h |
null | ceph-main/src/librbd/mirror/snapshot/WriteImageStateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/mirror/snapshot/WriteImageStateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/mirror/snapshot/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::mirror::snapshot::WriteImageStateRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace mirror {
namespace snapshot {
namespace {
static size_t header_length() {
bufferlist bl;
ImageStateHeader header;
using ceph::encode;
encode(header, bl);
return bl.length();
}
} // anonymous namespace
using librbd::util::create_rados_callback;
template <typename I>
WriteImageStateRequest<I>::WriteImageStateRequest(I *image_ctx,
uint64_t snap_id,
const ImageState &image_state,
Context *on_finish)
: m_image_ctx(image_ctx), m_snap_id(snap_id), m_image_state(image_state),
m_on_finish(on_finish), m_object_size(
1 << image_ctx->config.template get_val<uint64_t>("rbd_default_order")) {
bufferlist bl;
encode(m_image_state, bl);
m_object_count = 1 + (header_length() + bl.length()) / m_object_size;
ImageStateHeader header(m_object_count);
encode(header, m_bl);
m_bl.claim_append(bl);
}
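// Added note: m_bl now contains an ImageStateHeader followed by the encoded
// ImageState. write_object() walks this buffer backwards in m_object_size
// chunks, issuing one write_full() per backing RADOS object until index 0 is
// written.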
template <typename I>
void WriteImageStateRequest<I>::send() {
write_object();
}
template <typename I>
void WriteImageStateRequest<I>::write_object() {
CephContext *cct = m_image_ctx->cct;
ceph_assert(m_object_count > 0);
m_object_count--;
auto oid = util::image_state_object_name(m_image_ctx, m_snap_id,
m_object_count);
ldout(cct, 15) << oid << dendl;
size_t off = m_object_count * m_object_size;
size_t len = std::min(m_bl.length() - off, m_object_size);
bufferlist bl;
bl.substr_of(m_bl, off, len);
librados::ObjectWriteOperation op;
op.write_full(bl);
librados::AioCompletion *comp = create_rados_callback<
WriteImageStateRequest<I>,
&WriteImageStateRequest<I>::handle_write_object>(this);
int r = m_image_ctx->md_ctx.aio_operate(oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void WriteImageStateRequest<I>::handle_write_object(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to write object: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
if (m_object_count == 0) {
finish(0);
return;
}
write_object();
}
template <typename I>
void WriteImageStateRequest<I>::finish(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 15) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace snapshot
} // namespace mirror
} // namespace librbd
template class librbd::mirror::snapshot::WriteImageStateRequest<librbd::ImageCtx>;
| 3,143 | 24.983471 | 84 | cc |
null | ceph-main/src/librbd/mirror/snapshot/WriteImageStateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRROR_SNAPSHOT_WRITE_IMAGE_STATE_REQUEST_H
#define CEPH_LIBRBD_MIRROR_SNAPSHOT_WRITE_IMAGE_STATE_REQUEST_H
#include "librbd/mirror/snapshot/Types.h"
#include <map>
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
namespace mirror {
namespace snapshot {
template <typename ImageCtxT = librbd::ImageCtx>
class WriteImageStateRequest {
public:
static WriteImageStateRequest *create(ImageCtxT *image_ctx, uint64_t snap_id,
const ImageState &image_state,
Context *on_finish) {
return new WriteImageStateRequest(image_ctx, snap_id, image_state,
on_finish);
}
WriteImageStateRequest(ImageCtxT *image_ctx, uint64_t snap_id,
const ImageState &image_state, Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start>
* |
* v
* WRITE_OBJECT (repeat for
* | every object)
* v
* <finish>
*
* @endverbatim
*/
ImageCtxT *m_image_ctx;
uint64_t m_snap_id;
ImageState m_image_state;
Context *m_on_finish;
bufferlist m_bl;
const size_t m_object_size;
size_t m_object_count = 0;
void write_object();
void handle_write_object(int r);
void finish(int r);
};
} // namespace snapshot
} // namespace mirror
} // namespace librbd
extern template class librbd::mirror::snapshot::WriteImageStateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRROR_SNAPSHOT_WRITE_IMAGE_STATE_REQUEST_H
| 1,664 | 21.5 | 89 | h |
null | ceph-main/src/librbd/mirroring_watcher/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Formatter.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "librbd/mirroring_watcher/Types.h"
#include "librbd/watcher/Utils.h"
namespace librbd {
namespace mirroring_watcher {
namespace {
class DumpPayloadVisitor : public boost::static_visitor<void> {
public:
explicit DumpPayloadVisitor(Formatter *formatter) : m_formatter(formatter) {}
template <typename Payload>
inline void operator()(const Payload &payload) const {
NotifyOp notify_op = Payload::NOTIFY_OP;
m_formatter->dump_string("notify_op", stringify(notify_op));
payload.dump(m_formatter);
}
private:
ceph::Formatter *m_formatter;
};
} // anonymous namespace
void ModeUpdatedPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(static_cast<uint32_t>(mirror_mode), bl);
}
void ModeUpdatedPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
uint32_t mirror_mode_decode;
decode(mirror_mode_decode, iter);
mirror_mode = static_cast<cls::rbd::MirrorMode>(mirror_mode_decode);
}
void ModeUpdatedPayload::dump(Formatter *f) const {
f->dump_stream("mirror_mode") << mirror_mode;
}
void ImageUpdatedPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(static_cast<uint32_t>(mirror_image_state), bl);
encode(image_id, bl);
encode(global_image_id, bl);
}
void ImageUpdatedPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
uint32_t mirror_image_state_decode;
decode(mirror_image_state_decode, iter);
mirror_image_state = static_cast<cls::rbd::MirrorImageState>(
mirror_image_state_decode);
decode(image_id, iter);
decode(global_image_id, iter);
}
void ImageUpdatedPayload::dump(Formatter *f) const {
f->dump_stream("mirror_image_state") << mirror_image_state;
f->dump_string("image_id", image_id);
f->dump_string("global_image_id", global_image_id);
}
void UnknownPayload::encode(bufferlist &bl) const {
ceph_abort();
}
void UnknownPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void UnknownPayload::dump(Formatter *f) const {
}
void NotifyMessage::encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
boost::apply_visitor(watcher::util::EncodePayloadVisitor(bl), payload);
ENCODE_FINISH(bl);
}
void NotifyMessage::decode(bufferlist::const_iterator& iter) {
DECODE_START(1, iter);
uint32_t notify_op;
decode(notify_op, iter);
// select the correct payload variant based upon the encoded op
switch (notify_op) {
case NOTIFY_OP_MODE_UPDATED:
payload = ModeUpdatedPayload();
break;
case NOTIFY_OP_IMAGE_UPDATED:
payload = ImageUpdatedPayload();
break;
default:
payload = UnknownPayload();
break;
}
apply_visitor(watcher::util::DecodePayloadVisitor(struct_v, iter), payload);
DECODE_FINISH(iter);
}
void NotifyMessage::dump(Formatter *f) const {
apply_visitor(DumpPayloadVisitor(f), payload);
}
void NotifyMessage::generate_test_instances(std::list<NotifyMessage *> &o) {
o.push_back(new NotifyMessage(ModeUpdatedPayload(cls::rbd::MIRROR_MODE_DISABLED)));
o.push_back(new NotifyMessage(ImageUpdatedPayload(cls::rbd::MIRROR_IMAGE_STATE_DISABLING,
"image id", "global image id")));
}
std::ostream &operator<<(std::ostream &out, const NotifyOp &op) {
switch (op) {
case NOTIFY_OP_MODE_UPDATED:
out << "ModeUpdated";
break;
case NOTIFY_OP_IMAGE_UPDATED:
out << "ImageUpdated";
break;
default:
out << "Unknown (" << static_cast<uint32_t>(op) << ")";
break;
}
return out;
}
} // namespace mirroring_watcher
} // namespace librbd
| 3,770 | 26.525547 | 91 | cc |
null | ceph-main/src/librbd/mirroring_watcher/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRRORING_WATCHER_TYPES_H
#define CEPH_LIBRBD_MIRRORING_WATCHER_TYPES_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "cls/rbd/cls_rbd_types.h"
#include <iosfwd>
#include <list>
#include <string>
#include <boost/variant.hpp>
namespace ceph { class Formatter; }
namespace librbd {
namespace mirroring_watcher {
enum NotifyOp {
NOTIFY_OP_MODE_UPDATED = 0,
NOTIFY_OP_IMAGE_UPDATED = 1
};
struct ModeUpdatedPayload {
static const NotifyOp NOTIFY_OP = NOTIFY_OP_MODE_UPDATED;
cls::rbd::MirrorMode mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
ModeUpdatedPayload() {
}
ModeUpdatedPayload(cls::rbd::MirrorMode mirror_mode)
: mirror_mode(mirror_mode) {
}
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &iter);
void dump(Formatter *f) const;
};
struct ImageUpdatedPayload {
static const NotifyOp NOTIFY_OP = NOTIFY_OP_IMAGE_UPDATED;
cls::rbd::MirrorImageState mirror_image_state =
cls::rbd::MIRROR_IMAGE_STATE_ENABLED;
std::string image_id;
std::string global_image_id;
ImageUpdatedPayload() {
}
ImageUpdatedPayload(cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id,
const std::string &global_image_id)
: mirror_image_state(mirror_image_state), image_id(image_id),
global_image_id(global_image_id) {
}
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &iter);
void dump(Formatter *f) const;
};
struct UnknownPayload {
static const NotifyOp NOTIFY_OP = static_cast<NotifyOp>(-1);
UnknownPayload() {
}
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &iter);
void dump(Formatter *f) const;
};
typedef boost::variant<ModeUpdatedPayload,
ImageUpdatedPayload,
UnknownPayload> Payload;
struct NotifyMessage {
NotifyMessage(const Payload &payload = UnknownPayload()) : payload(payload) {
}
Payload payload;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& it);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<NotifyMessage *> &o);
};
WRITE_CLASS_ENCODER(NotifyMessage);
std::ostream &operator<<(std::ostream &out, const NotifyOp &op);
} // namespace mirroring_watcher
} // namespace librbd
using librbd::mirroring_watcher::encode;
using librbd::mirroring_watcher::decode;
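// Illustrative round trip (added sketch; not part of the original source):
//
//   using namespace librbd::mirroring_watcher;
//   NotifyMessage in{ModeUpdatedPayload{cls::rbd::MIRROR_MODE_DISABLED}};
//   bufferlist bl;
//   encode(in, bl);
//
//   NotifyMessage out;
//   auto it = bl.cbegin();
//   decode(out, it);  // out.payload now holds a ModeUpdatedPayload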
#endif // CEPH_LIBRBD_MIRRORING_WATCHER_TYPES_H
| 2,681 | 25.038835 | 79 | h |
null | ceph-main/src/librbd/object_map/CreateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/CreateRequest.h"
#include "include/ceph_assert.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "osdc/Striper.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::CreateRequest: "
namespace librbd {
namespace object_map {
using util::create_context_callback;
using util::create_rados_callback;
template <typename I>
CreateRequest<I>::CreateRequest(I *image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish) {
}
template <typename I>
void CreateRequest<I>::send() {
CephContext *cct = m_image_ctx->cct;
uint64_t max_size = m_image_ctx->size;
{
std::unique_lock image_locker{m_image_ctx->image_lock};
m_snap_ids.push_back(CEPH_NOSNAP);
for (auto it : m_image_ctx->snap_info) {
max_size = std::max(max_size, it.second.size);
m_snap_ids.push_back(it.first);
}
if (ObjectMap<>::is_compatible(m_image_ctx->layout, max_size)) {
send_object_map_resize();
return;
}
}
lderr(cct) << "image size not compatible with object map" << dendl;
m_on_finish->complete(-EINVAL);
}
template <typename I>
void CreateRequest<I>::send_object_map_resize() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = create_context_callback<
CreateRequest<I>, &CreateRequest<I>::handle_object_map_resize>(this);
C_Gather *gather_ctx = new C_Gather(cct, ctx);
for (auto snap_id : m_snap_ids) {
librados::ObjectWriteOperation op;
uint64_t snap_size = m_image_ctx->get_image_size(snap_id);
cls_client::object_map_resize(&op, Striper::get_num_objects(
m_image_ctx->layout, snap_size),
OBJECT_NONEXISTENT);
std::string oid(ObjectMap<>::object_map_name(m_image_ctx->id, snap_id));
librados::AioCompletion *comp = create_rados_callback(gather_ctx->new_sub());
int r = m_image_ctx->md_ctx.aio_operate(oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
gather_ctx->activate();
}
template <typename I>
Context *CreateRequest<I>::handle_object_map_resize(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": r=" << *result << dendl;
if (*result < 0) {
lderr(cct) << "object map resize failed: " << cpp_strerror(*result)
<< dendl;
}
return m_on_finish;
}
} // namespace object_map
} // namespace librbd
template class librbd::object_map::CreateRequest<librbd::ImageCtx>;
| 2,710 | 27.536842 | 81 | cc |
null | ceph-main/src/librbd/object_map/CreateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_CREATE_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_CREATE_REQUEST_H
#include "include/buffer.h"
#include <map>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
template <typename ImageCtxT = ImageCtx>
class CreateRequest {
public:
static CreateRequest *create(ImageCtxT *image_ctx, Context *on_finish) {
return new CreateRequest(image_ctx, on_finish);
}
void send();
private:
/**
* @verbatim
*
* <start>
* | . . .
* v v .
* OBJECT_MAP_RESIZE . (for every snapshot)
* | . .
* v . . .
 * <finish>
*
* @endverbatim
*/
CreateRequest(ImageCtxT *image_ctx, Context *on_finish);
ImageCtxT *m_image_ctx;
Context *m_on_finish;
std::vector<uint64_t> m_snap_ids;
void send_object_map_resize();
Context *handle_object_map_resize(int *result);
};
} // namespace object_map
} // namespace librbd
extern template class librbd::object_map::CreateRequest<librbd::ImageCtx>;
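// Illustrative usage sketch (added comment; not part of the original source).
// The request creates/resizes the object map objects for the image head and
// every snapshot; the caller only supplies the image context and a completion:
//
//   C_SaferCond ctx;
//   auto req = librbd::object_map::CreateRequest<librbd::ImageCtx>::create(
//       image_ctx, &ctx);
//   req->send();
//   int r = ctx.wait();  // -EINVAL if the image size is not object-map compatible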
#endif // CEPH_LIBRBD_OBJECT_MAP_CREATE_REQUEST_H
| 1,206 | 19.116667 | 74 | h |
null | ceph-main/src/librbd/object_map/DiffRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/DiffRequest.h"
#include "common/debug.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "osdc/Striper.h"
#include <string>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::DiffRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace object_map {
using util::create_rados_callback;
template <typename I>
void DiffRequest<I>::send() {
auto cct = m_image_ctx->cct;
if (m_snap_id_start == CEPH_NOSNAP || m_snap_id_start > m_snap_id_end) {
lderr(cct) << "invalid start/end snap ids: "
<< "snap_id_start=" << m_snap_id_start << ", "
<< "snap_id_end=" << m_snap_id_end << dendl;
finish(-EINVAL);
return;
} else if (m_snap_id_start == m_snap_id_end) {
// no delta between the same snapshot
finish(0);
return;
}
m_object_diff_state->clear();
// collect all the snap ids in the provided range (inclusive)
if (m_snap_id_start != 0) {
m_snap_ids.insert(m_snap_id_start);
}
std::shared_lock image_locker{m_image_ctx->image_lock};
auto snap_info_it = m_image_ctx->snap_info.upper_bound(m_snap_id_start);
auto snap_info_it_end = m_image_ctx->snap_info.lower_bound(m_snap_id_end);
for (; snap_info_it != snap_info_it_end; ++snap_info_it) {
m_snap_ids.insert(snap_info_it->first);
}
m_snap_ids.insert(m_snap_id_end);
load_object_map(&image_locker);
}
template <typename I>
void DiffRequest<I>::load_object_map(
std::shared_lock<ceph::shared_mutex>* image_locker) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
if (m_snap_ids.empty()) {
image_locker->unlock();
finish(0);
return;
}
m_current_snap_id = *m_snap_ids.begin();
m_snap_ids.erase(m_current_snap_id);
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "snap_id=" << m_current_snap_id << dendl;
if ((m_image_ctx->features & RBD_FEATURE_FAST_DIFF) == 0) {
image_locker->unlock();
ldout(cct, 10) << "fast-diff feature not enabled" << dendl;
finish(-EINVAL);
return;
}
// ignore ENOENT with intermediate snapshots since deleted
// snaps will get merged with later snapshots
m_ignore_enoent = (m_current_snap_id != m_snap_id_start &&
m_current_snap_id != m_snap_id_end);
if (m_current_snap_id == CEPH_NOSNAP) {
m_current_size = m_image_ctx->size;
} else {
auto snap_it = m_image_ctx->snap_info.find(m_current_snap_id);
if (snap_it == m_image_ctx->snap_info.end()) {
ldout(cct, 10) << "snapshot " << m_current_snap_id << " does not exist"
<< dendl;
if (!m_ignore_enoent) {
image_locker->unlock();
finish(-ENOENT);
return;
}
load_object_map(image_locker);
return;
}
m_current_size = snap_it->second.size;
}
uint64_t flags = 0;
int r = m_image_ctx->get_flags(m_current_snap_id, &flags);
if (r < 0) {
image_locker->unlock();
lderr(cct) << "failed to retrieve image flags: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
image_locker->unlock();
if ((flags & RBD_FLAG_FAST_DIFF_INVALID) != 0) {
ldout(cct, 1) << "cannot perform fast diff on invalid object map"
<< dendl;
finish(-EINVAL);
return;
}
std::string oid(ObjectMap<>::object_map_name(m_image_ctx->id,
m_current_snap_id));
librados::ObjectReadOperation op;
cls_client::object_map_load_start(&op);
m_out_bl.clear();
auto aio_comp = create_rados_callback<
DiffRequest<I>, &DiffRequest<I>::handle_load_object_map>(this);
r = m_image_ctx->md_ctx.aio_operate(oid, aio_comp, &op, &m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
void DiffRequest<I>::handle_load_object_map(int r) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
if (r == 0) {
auto bl_it = m_out_bl.cbegin();
r = cls_client::object_map_load_finish(&bl_it, &m_object_map);
}
std::string oid(ObjectMap<>::object_map_name(m_image_ctx->id,
m_current_snap_id));
if (r == -ENOENT && m_ignore_enoent) {
ldout(cct, 10) << "object map " << oid << " does not exist" << dendl;
std::shared_lock image_locker{m_image_ctx->image_lock};
load_object_map(&image_locker);
return;
} else if (r < 0) {
lderr(cct) << "failed to load object map: " << oid << dendl;
finish(r);
return;
}
ldout(cct, 20) << "loaded object map " << oid << dendl;
uint64_t num_objs = Striper::get_num_objects(m_image_ctx->layout,
m_current_size);
if (m_object_map.size() < num_objs) {
ldout(cct, 1) << "object map too small: "
<< m_object_map.size() << " < " << num_objs << dendl;
finish(-EINVAL);
return;
} else {
m_object_map.resize(num_objs);
}
uint64_t prev_object_diff_state_size = m_object_diff_state->size();
if (prev_object_diff_state_size < num_objs) {
// the diff state should be the largest of all snapshots in the set
m_object_diff_state->resize(num_objs);
}
if (m_object_map.size() < m_object_diff_state->size()) {
// the image was shrunk so expanding the object map will flag end objects
// as non-existent and they will be compared against the previous object
// diff state
m_object_map.resize(m_object_diff_state->size());
}
uint64_t overlap = std::min(m_object_map.size(), prev_object_diff_state_size);
auto it = m_object_map.begin();
auto overlap_end_it = it + overlap;
auto diff_it = m_object_diff_state->begin();
uint64_t i = 0;
for (; it != overlap_end_it; ++it, ++diff_it, ++i) {
uint8_t object_map_state = *it;
uint8_t prev_object_diff_state = *diff_it;
if (object_map_state == OBJECT_EXISTS ||
object_map_state == OBJECT_PENDING ||
(object_map_state == OBJECT_EXISTS_CLEAN &&
prev_object_diff_state != DIFF_STATE_DATA &&
prev_object_diff_state != DIFF_STATE_DATA_UPDATED)) {
*diff_it = DIFF_STATE_DATA_UPDATED;
} else if (object_map_state == OBJECT_NONEXISTENT &&
prev_object_diff_state != DIFF_STATE_HOLE &&
prev_object_diff_state != DIFF_STATE_HOLE_UPDATED) {
*diff_it = DIFF_STATE_HOLE_UPDATED;
}
ldout(cct, 20) << "object state: " << i << " "
<< static_cast<uint32_t>(prev_object_diff_state)
<< "->" << static_cast<uint32_t>(*diff_it) << " ("
<< static_cast<uint32_t>(object_map_state) << ")"
<< dendl;
}
ldout(cct, 20) << "computed overlap diffs" << dendl;
bool diff_from_start = (m_snap_id_start == 0);
auto end_it = m_object_map.end();
if (m_object_map.size() > prev_object_diff_state_size) {
    for (; it != end_it; ++it, ++diff_it, ++i) {
uint8_t object_map_state = *it;
if (object_map_state == OBJECT_NONEXISTENT) {
*diff_it = DIFF_STATE_HOLE;
} else if (diff_from_start ||
(m_object_diff_state_valid &&
object_map_state != OBJECT_EXISTS_CLEAN)) {
*diff_it = DIFF_STATE_DATA_UPDATED;
} else {
*diff_it = DIFF_STATE_DATA;
}
ldout(cct, 20) << "object state: " << i << " "
<< "->" << static_cast<uint32_t>(*diff_it) << " ("
<< static_cast<uint32_t>(*it) << ")" << dendl;
}
}
ldout(cct, 20) << "computed resize diffs" << dendl;
m_object_diff_state_valid = true;
std::shared_lock image_locker{m_image_ctx->image_lock};
load_object_map(&image_locker);
}
template <typename I>
void DiffRequest<I>::finish(int r) {
auto cct = m_image_ctx->cct;
ldout(cct, 10) << "r=" << r << dendl;
m_on_finish->complete(r);
delete this;
}
} // namespace object_map
} // namespace librbd
template class librbd::object_map::DiffRequest<librbd::ImageCtx>;
| 8,171 | 30.552124 | 80 | cc |
null | ceph-main/src/librbd/object_map/DiffRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_DIFF_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_DIFF_REQUEST_H
#include "include/int_types.h"
#include "common/bit_vector.hpp"
#include "common/ceph_mutex.h"
#include "librbd/object_map/Types.h"
#include <set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace object_map {
template <typename ImageCtxT>
class DiffRequest {
public:
static DiffRequest* create(ImageCtxT* image_ctx, uint64_t snap_id_start,
uint64_t snap_id_end,
BitVector<2>* object_diff_state,
Context* on_finish) {
return new DiffRequest(image_ctx, snap_id_start, snap_id_end,
object_diff_state, on_finish);
}
DiffRequest(ImageCtxT* image_ctx, uint64_t snap_id_start,
uint64_t snap_id_end, BitVector<2>* object_diff_state,
Context* on_finish)
: m_image_ctx(image_ctx), m_snap_id_start(snap_id_start),
m_snap_id_end(snap_id_end), m_object_diff_state(object_diff_state),
m_on_finish(on_finish) {
}
void send();
private:
/**
* @verbatim
*
* <start>
* |
* | /---------\
* | | |
* v v |
* LOAD_OBJECT_MAP ---/
* |
* v
* <finish>
*
* @endverbatim
*/
ImageCtxT* m_image_ctx;
uint64_t m_snap_id_start;
uint64_t m_snap_id_end;
BitVector<2>* m_object_diff_state;
Context* m_on_finish;
std::set<uint64_t> m_snap_ids;
uint64_t m_current_snap_id = 0;
bool m_ignore_enoent = false;
uint64_t m_current_size = 0;
BitVector<2> m_object_map;
bool m_object_diff_state_valid = false;
bufferlist m_out_bl;
void load_object_map(std::shared_lock<ceph::shared_mutex>* image_locker);
void handle_load_object_map(int r);
void finish(int r);
};
} // namespace object_map
} // namespace librbd
extern template class librbd::object_map::DiffRequest<librbd::ImageCtx>;
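// Illustrative usage sketch (added comment; not part of the original source).
// Computes which backing objects changed between two snapshots of an image
// with the fast-diff feature enabled; snap_id_end may be CEPH_NOSNAP to diff
// up to the image head:
//
//   BitVector<2> diff_state;
//   C_SaferCond ctx;
//   auto req = librbd::object_map::DiffRequest<librbd::ImageCtx>::create(
//       image_ctx, from_snap_id, to_snap_id, &diff_state, &ctx);
//   req->send();
//   if (ctx.wait() == 0) {
//     // diff_state[i] holds a DIFF_STATE_* value for object index i
//   }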
#endif // CEPH_LIBRBD_OBJECT_MAP_DIFF_REQUEST_H
| 2,104 | 22.920455 | 75 | h |
null | ceph-main/src/librbd/object_map/InvalidateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/InvalidateRequest.h"
#include "common/dout.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::InvalidateRequest: "
namespace librbd {
namespace object_map {
template <typename I>
InvalidateRequest<I>* InvalidateRequest<I>::create(I &image_ctx,
uint64_t snap_id, bool force,
Context *on_finish) {
return new InvalidateRequest<I>(image_ctx, snap_id, force, on_finish);
}
template <typename I>
void InvalidateRequest<I>::send() {
I &image_ctx = this->m_image_ctx;
ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_wlocked(image_ctx.image_lock));
uint64_t snap_flags;
int r = image_ctx.get_flags(m_snap_id, &snap_flags);
if (r < 0 || ((snap_flags & RBD_FLAG_OBJECT_MAP_INVALID) != 0)) {
this->async_complete(r);
return;
}
CephContext *cct = image_ctx.cct;
lderr(cct) << this << " invalidating object map in-memory" << dendl;
// update in-memory flags
uint64_t flags = RBD_FLAG_OBJECT_MAP_INVALID;
if ((image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
flags |= RBD_FLAG_FAST_DIFF_INVALID;
}
r = image_ctx.update_flags(m_snap_id, flags, true);
if (r < 0) {
this->async_complete(r);
return;
}
// do not update on-disk flags if not image owner
if (image_ctx.image_watcher == nullptr ||
(!m_force && m_snap_id == CEPH_NOSNAP &&
image_ctx.exclusive_lock != nullptr &&
!image_ctx.exclusive_lock->is_lock_owner())) {
this->async_complete(-EROFS);
return;
}
lderr(cct) << this << " invalidating object map on-disk" << dendl;
librados::ObjectWriteOperation op;
cls_client::set_flags(&op, m_snap_id, flags, flags);
librados::AioCompletion *rados_completion =
this->create_callback_completion();
r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion,
&op);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
bool InvalidateRequest<I>::should_complete(int r) {
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
lderr(cct) << this << " " << __func__ << ": r=" << r << dendl;
return true;
}
} // namespace object_map
} // namespace librbd
template class librbd::object_map::InvalidateRequest<librbd::ImageCtx>;
| 2,600 | 29.964286 | 80 | cc |
null | ceph-main/src/librbd/object_map/InvalidateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_INVALIDATE_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_INVALIDATE_REQUEST_H
#include "include/int_types.h"
#include "librbd/AsyncRequest.h"
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
template <typename ImageCtxT = ImageCtx>
class InvalidateRequest : public AsyncRequest<ImageCtxT> {
public:
static InvalidateRequest* create(ImageCtxT &image_ctx, uint64_t snap_id,
bool force, Context *on_finish);
InvalidateRequest(ImageCtxT &image_ctx, uint64_t snap_id, bool force,
Context *on_finish)
: AsyncRequest<ImageCtxT>(image_ctx, on_finish),
m_snap_id(snap_id), m_force(force) {
}
void send() override;
protected:
bool should_complete(int r) override;
private:
uint64_t m_snap_id;
bool m_force;
};
} // namespace object_map
} // namespace librbd
extern template class librbd::object_map::InvalidateRequest<librbd::ImageCtx>;
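// Added note: as asserted in InvalidateRequest<I>::send(), the caller must
// hold image_ctx.owner_lock and must hold image_ctx.image_lock for write when
// sending this request. The in-memory flags are updated immediately and
// RBD_FLAG_OBJECT_MAP_INVALID (plus RBD_FLAG_FAST_DIFF_INVALID when fast-diff
// is enabled) is then persisted to the image header asynchronously.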
#endif // CEPH_LIBRBD_OBJECT_MAP_INVALIDATE_REQUEST_H
| 1,122 | 23.413043 | 78 | h |
null | ceph-main/src/librbd/object_map/LockRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/LockRequest.h"
#include "cls/lock/cls_lock_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::LockRequest: "
namespace librbd {
namespace object_map {
using util::create_rados_callback;
template <typename I>
LockRequest<I>::LockRequest(I &image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish), m_broke_lock(false) {
}
template <typename I>
void LockRequest<I>::send() {
send_lock();
}
template <typename I>
void LockRequest<I>::send_lock() {
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
librados::ObjectWriteOperation op;
rados::cls::lock::lock(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "", "",
utime_t(), 0);
using klass = LockRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_lock>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
Context *LockRequest<I>::handle_lock(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
if (*ret_val == 0) {
return m_on_finish;
} else if (*ret_val == -EEXIST) {
// already locked by myself
*ret_val = 0;
return m_on_finish;
} else if (m_broke_lock || *ret_val != -EBUSY) {
lderr(cct) << "failed to lock object map: " << cpp_strerror(*ret_val)
<< dendl;
*ret_val = 0;
return m_on_finish;
}
send_get_lock_info();
return nullptr;
}
template <typename I>
void LockRequest<I>::send_get_lock_info() {
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
librados::ObjectReadOperation op;
rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
using klass = LockRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_get_lock_info>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op, &m_out_bl);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
Context *LockRequest<I>::handle_get_lock_info(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
if (*ret_val == -ENOENT) {
send_lock();
return nullptr;
}
ClsLockType lock_type;
std::string lock_tag;
if (*ret_val == 0) {
auto it = m_out_bl.cbegin();
*ret_val = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
&lock_type, &lock_tag);
}
if (*ret_val < 0) {
lderr(cct) << "failed to list object map locks: " << cpp_strerror(*ret_val)
<< dendl;
*ret_val = 0;
return m_on_finish;
}
send_break_locks();
return nullptr;
}
template <typename I>
void LockRequest<I>::send_break_locks() {
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << ", "
<< "num_lockers=" << m_lockers.size() << dendl;
librados::ObjectWriteOperation op;
for (auto &locker : m_lockers) {
rados::cls::lock::break_lock(&op, RBD_LOCK_NAME, locker.first.cookie,
locker.first.locker);
}
using klass = LockRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_break_locks>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
Context *LockRequest<I>::handle_break_locks(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
m_broke_lock = true;
if (*ret_val == 0 || *ret_val == -ENOENT) {
send_lock();
return nullptr;
}
lderr(cct) << "failed to break object map lock: " << cpp_strerror(*ret_val)
<< dendl;
*ret_val = 0;
return m_on_finish;
}
} // namespace object_map
} // namespace librbd
template class librbd::object_map::LockRequest<librbd::ImageCtx>;
| 4,758 | 29.120253 | 80 | cc |
null | ceph-main/src/librbd/object_map/LockRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_LOCK_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_LOCK_REQUEST_H
#include "include/buffer.h"
#include "cls/lock/cls_lock_types.h"
#include <map>
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
template <typename ImageCtxT = ImageCtx>
class LockRequest {
public:
static LockRequest* create(ImageCtxT &image_ctx, Context *on_finish) {
return new LockRequest(image_ctx, on_finish);
}
LockRequest(ImageCtxT &image_ctx, Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start> /------------------------------------- BREAK_LOCKS * * *
* | | ^ *
* | | | *
* | | | *
* | v (EBUSY && !broke_lock) | *
* \---------> LOCK_OBJECT_MAP * * * * * * * * * * * > GET_LOCK_INFO * *
* | * ^ * *
* | * * * *
* | * * (ENOENT) * *
* | * * * * * * * * * * * * * * * * * *
* | * *
* | * (other errors) *
* | * *
* v v (other errors) *
* <finish> < * * * * * * * * * * * * * * * * * * * * * * * *
*
* @endverbatim
*/
ImageCtxT &m_image_ctx;
Context *m_on_finish;
bool m_broke_lock;
std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> m_lockers;
bufferlist m_out_bl;
void send_lock();
Context *handle_lock(int *ret_val);
void send_get_lock_info();
Context *handle_get_lock_info(int *ret_val);
void send_break_locks();
Context *handle_break_locks(int *ret_val);
};
} // namespace object_map
} // namespace librbd
extern template class librbd::object_map::LockRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OBJECT_MAP_LOCK_REQUEST_H
| 2,431 | 31 | 77 | h |
null | ceph-main/src/librbd/object_map/RefreshRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/RefreshRequest.h"
#include "cls/lock/cls_lock_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/object_map/InvalidateRequest.h"
#include "librbd/object_map/LockRequest.h"
#include "librbd/object_map/ResizeRequest.h"
#include "librbd/Utils.h"
#include "osdc/Striper.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::RefreshRequest: "
namespace librbd {
using util::create_context_callback;
using util::create_rados_callback;
namespace object_map {
template <typename I>
RefreshRequest<I>::RefreshRequest(I &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map,
uint64_t snap_id, Context *on_finish)
: m_image_ctx(image_ctx), m_object_map_lock(object_map_lock),
m_object_map(object_map), m_snap_id(snap_id), m_on_finish(on_finish),
m_object_count(0), m_truncate_on_disk_object_map(false) {
}
template <typename I>
void RefreshRequest<I>::send() {
{
std::shared_lock image_locker{m_image_ctx.image_lock};
m_object_count = Striper::get_num_objects(
m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id));
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": "
<< "object_count=" << m_object_count << dendl;
send_lock();
}
template <typename I>
void RefreshRequest<I>::apply() {
uint64_t num_objs;
{
std::shared_lock image_locker{m_image_ctx.image_lock};
num_objs = Striper::get_num_objects(
m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id));
}
ceph_assert(m_on_disk_object_map.size() >= num_objs);
std::unique_lock object_map_locker{*m_object_map_lock};
*m_object_map = m_on_disk_object_map;
}
template <typename I>
void RefreshRequest<I>::send_lock() {
CephContext *cct = m_image_ctx.cct;
if (m_object_count > cls::rbd::MAX_OBJECT_MAP_OBJECT_COUNT) {
send_invalidate_and_close();
return;
} else if (m_snap_id != CEPH_NOSNAP) {
send_load();
return;
}
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
using klass = RefreshRequest<I>;
Context *ctx = create_context_callback<
klass, &klass::handle_lock>(this);
LockRequest<I> *req = LockRequest<I>::create(m_image_ctx, ctx);
req->send();
}
template <typename I>
Context *RefreshRequest<I>::handle_lock(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
ceph_assert(*ret_val == 0);
send_load();
return nullptr;
}
template <typename I>
void RefreshRequest<I>::send_load() {
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
librados::ObjectReadOperation op;
cls_client::object_map_load_start(&op);
using klass = RefreshRequest<I>;
m_out_bl.clear();
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_load>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op, &m_out_bl);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
Context *RefreshRequest<I>::handle_load(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
if (*ret_val == 0) {
auto bl_it = m_out_bl.cbegin();
*ret_val = cls_client::object_map_load_finish(&bl_it,
&m_on_disk_object_map);
}
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
if (*ret_val == -EINVAL) {
// object map is corrupt on-disk -- clear it and properly size it
// so future IO can keep the object map in sync
lderr(cct) << "object map corrupt on-disk: " << oid << dendl;
m_truncate_on_disk_object_map = true;
send_resize_invalidate();
return nullptr;
} else if (*ret_val < 0) {
lderr(cct) << "failed to load object map: " << oid << dendl;
if (*ret_val == -ETIMEDOUT &&
!cct->_conf.get_val<bool>("rbd_invalidate_object_map_on_timeout")) {
return m_on_finish;
}
send_invalidate();
return nullptr;
}
if (m_on_disk_object_map.size() < m_object_count) {
lderr(cct) << "object map smaller than current object count: "
<< m_on_disk_object_map.size() << " != "
<< m_object_count << dendl;
send_resize_invalidate();
return nullptr;
}
ldout(cct, 20) << "refreshed object map: num_objs="
<< m_on_disk_object_map.size() << dendl;
if (m_on_disk_object_map.size() > m_object_count) {
// resize op might have been interrupted
ldout(cct, 1) << "object map larger than current object count: "
<< m_on_disk_object_map.size() << " != "
<< m_object_count << dendl;
}
apply();
return m_on_finish;
}
template <typename I>
void RefreshRequest<I>::send_invalidate() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_on_disk_object_map.clear();
object_map::ResizeRequest::resize(&m_on_disk_object_map, m_object_count,
OBJECT_EXISTS);
using klass = RefreshRequest<I>;
Context *ctx = create_context_callback<
klass, &klass::handle_invalidate>(this);
InvalidateRequest<I> *req = InvalidateRequest<I>::create(
m_image_ctx, m_snap_id, true, ctx);
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::unique_lock image_locker{m_image_ctx.image_lock};
req->send();
}
template <typename I>
Context *RefreshRequest<I>::handle_invalidate(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
if (*ret_val < 0) {
lderr(cct) << "failed to invalidate object map: " << cpp_strerror(*ret_val)
<< dendl;
}
apply();
return m_on_finish;
}
template <typename I>
void RefreshRequest<I>::send_resize_invalidate() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_on_disk_object_map.clear();
object_map::ResizeRequest::resize(&m_on_disk_object_map, m_object_count,
OBJECT_EXISTS);
using klass = RefreshRequest<I>;
Context *ctx = create_context_callback<
klass, &klass::handle_resize_invalidate>(this);
InvalidateRequest<I> *req = InvalidateRequest<I>::create(
m_image_ctx, m_snap_id, true, ctx);
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::unique_lock image_locker{m_image_ctx.image_lock};
req->send();
}
template <typename I>
Context *RefreshRequest<I>::handle_resize_invalidate(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
if (*ret_val < 0) {
lderr(cct) << "failed to invalidate object map: " << cpp_strerror(*ret_val)
<< dendl;
apply();
return m_on_finish;
}
send_resize();
return nullptr;
}
template <typename I>
void RefreshRequest<I>::send_resize() {
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
librados::ObjectWriteOperation op;
if (m_snap_id == CEPH_NOSNAP) {
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "");
}
if (m_truncate_on_disk_object_map) {
op.truncate(0);
}
cls_client::object_map_resize(&op, m_object_count, OBJECT_NONEXISTENT);
using klass = RefreshRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_resize>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
Context *RefreshRequest<I>::handle_resize(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
if (*ret_val < 0) {
lderr(cct) << "failed to adjust object map size: " << cpp_strerror(*ret_val)
<< dendl;
*ret_val = 0;
}
apply();
return m_on_finish;
}
template <typename I>
void RefreshRequest<I>::send_invalidate_and_close() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
using klass = RefreshRequest<I>;
Context *ctx = create_context_callback<
klass, &klass::handle_invalidate_and_close>(this);
InvalidateRequest<I> *req = InvalidateRequest<I>::create(
m_image_ctx, m_snap_id, false, ctx);
lderr(cct) << "object map too large: " << m_object_count << dendl;
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::unique_lock image_locker{m_image_ctx.image_lock};
req->send();
}
template <typename I>
Context *RefreshRequest<I>::handle_invalidate_and_close(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
if (*ret_val < 0) {
lderr(cct) << "failed to invalidate object map: " << cpp_strerror(*ret_val)
<< dendl;
} else {
*ret_val = -EFBIG;
}
std::unique_lock object_map_locker{*m_object_map_lock};
m_object_map->clear();
return m_on_finish;
}
} // namespace object_map
} // namespace librbd
template class librbd::object_map::RefreshRequest<librbd::ImageCtx>;
| 9,804 | 30.426282 | 88 | cc |
null | ceph-main/src/librbd/object_map/RefreshRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_REFRESH_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_REFRESH_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/bit_vector.hpp"
#include "common/ceph_mutex.h"
class Context;
class RWLock;
namespace librbd {
class ImageCtx;
namespace object_map {
template <typename ImageCtxT = ImageCtx>
class RefreshRequest {
public:
static RefreshRequest *create(ImageCtxT &image_ctx,
ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map,
uint64_t snap_id, Context *on_finish) {
return new RefreshRequest(image_ctx, object_map_lock, object_map, snap_id,
on_finish);
}
RefreshRequest(ImageCtxT &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start> -----> LOCK (skip if snapshot)
* * |
* * v (other errors)
* * LOAD * * * * * * * > INVALIDATE ------------\
* * | * |
* * | * (-EINVAL or too small) |
* * | * * * * * * > INVALIDATE_AND_RESIZE |
* * | | * |
* * | | * |
* * | v * |
* * | RESIZE * |
* * | | * |
* * | | * * * * * * * |
* * | | * |
* * | v v |
* * \--------------------> LOCK <-------------/
* * |
* v v
* INVALIDATE_AND_CLOSE ---------------> <finish>
*
* @endverbatim
*/
ImageCtxT &m_image_ctx;
ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> *m_object_map;
uint64_t m_snap_id;
Context *m_on_finish;
uint64_t m_object_count;
ceph::BitVector<2> m_on_disk_object_map;
bool m_truncate_on_disk_object_map;
bufferlist m_out_bl;
void send_lock();
Context *handle_lock(int *ret_val);
void send_load();
Context *handle_load(int *ret_val);
void send_invalidate();
Context *handle_invalidate(int *ret_val);
void send_resize_invalidate();
Context *handle_resize_invalidate(int *ret_val);
void send_resize();
Context *handle_resize(int *ret_val);
void send_invalidate_and_close();
Context *handle_invalidate_and_close(int *ret_val);
void apply();
};
} // namespace object_map
} // namespace librbd
extern template class librbd::object_map::RefreshRequest<librbd::ImageCtx>;
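// Added note: on success the refreshed on-disk map is copied into *object_map
// while holding *object_map_lock. A corrupt or undersized on-disk map is
// rebuilt and flagged invalid instead of failing the refresh, whereas an image
// exceeding cls::rbd::MAX_OBJECT_MAP_OBJECT_COUNT objects completes with
// -EFBIG and an empty in-memory map.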
#endif // CEPH_LIBRBD_OBJECT_MAP_REFRESH_REQUEST_H
| 3,126 | 29.359223 | 78 | h |
null | ceph-main/src/librbd/object_map/RemoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/RemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::RemoveRequest: "
namespace librbd {
namespace object_map {
using util::create_rados_callback;
template <typename I>
RemoveRequest<I>::RemoveRequest(I *image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish) {
}
template <typename I>
void RemoveRequest<I>::send() {
send_remove_object_map();
}
template <typename I>
void RemoveRequest<I>::send_remove_object_map() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
std::unique_lock image_locker{m_image_ctx->image_lock};
std::vector<uint64_t> snap_ids;
snap_ids.push_back(CEPH_NOSNAP);
for (auto it : m_image_ctx->snap_info) {
snap_ids.push_back(it.first);
}
std::lock_guard locker{m_lock};
ceph_assert(m_ref_counter == 0);
for (auto snap_id : snap_ids) {
m_ref_counter++;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx->id, snap_id));
using klass = RemoveRequest<I>;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_remove_object_map>(this);
int r = m_image_ctx->md_ctx.aio_remove(oid, comp);
ceph_assert(r == 0);
comp->release();
}
}
template <typename I>
Context *RemoveRequest<I>::handle_remove_object_map(int *result) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": r=" << *result << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_ref_counter > 0);
m_ref_counter--;
if (*result < 0 && *result != -ENOENT) {
lderr(cct) << "failed to remove object map: " << cpp_strerror(*result)
<< dendl;
m_error_result = *result;
}
if (m_ref_counter > 0) {
return nullptr;
}
}
if (m_error_result < 0) {
*result = m_error_result;
}
return m_on_finish;
}
} // namespace object_map
} // namespace librbd
template class librbd::object_map::RemoveRequest<librbd::ImageCtx>;
| 2,326 | 25.146067 | 76 | cc |
null | ceph-main/src/librbd/object_map/RemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_REMOVE_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_REMOVE_REQUEST_H
#include "include/buffer.h"
#include "common/ceph_mutex.h"
#include <map>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
template <typename ImageCtxT = ImageCtx>
class RemoveRequest {
public:
static RemoveRequest *create(ImageCtxT *image_ctx, Context *on_finish) {
return new RemoveRequest(image_ctx, on_finish);
}
void send();
private:
/**
* @verbatim
*
* <start>
* | . . .
* v v .
* REMOVE_OBJECT_MAP . (for every snapshot)
* | . .
* v . . .
 * <finish>
*
* @endverbatim
*/
RemoveRequest(ImageCtxT *image_ctx, Context *on_finish);
ImageCtxT *m_image_ctx;
Context *m_on_finish;
int m_error_result = 0;
int m_ref_counter = 0;
mutable ceph::mutex m_lock =
ceph::make_mutex("object_map::RemoveRequest::m_lock");
void send_remove_object_map();
Context *handle_remove_object_map(int *result);
};
} // namespace object_map
} // namespace librbd
extern template class librbd::object_map::RemoveRequest<librbd::ImageCtx>;
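// Illustrative usage sketch (added comment; not part of the original source).
// The request removes the object map objects for the image head and all
// snapshots in parallel and completes once every removal has finished
// (-ENOENT from already-missing maps is ignored):
//
//   C_SaferCond ctx;
//   auto req = librbd::object_map::RemoveRequest<librbd::ImageCtx>::create(
//       image_ctx, &ctx);
//   req->send();
//   int r = ctx.wait();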
#endif // CEPH_LIBRBD_OBJECT_MAP_REMOVE_REQUEST_H
| 1,347 | 20.0625 | 74 | h |
null | ceph-main/src/librbd/object_map/Request.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/Request.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/RWLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/object_map/InvalidateRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::Request: "
namespace librbd {
namespace object_map {
bool Request::should_complete(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " should_complete: r=" << r << dendl;
switch (m_state)
{
case STATE_REQUEST:
if (r == -ETIMEDOUT &&
!cct->_conf.get_val<bool>("rbd_invalidate_object_map_on_timeout")) {
m_state = STATE_TIMEOUT;
return true;
} else if (r < 0) {
lderr(cct) << "failed to update object map: " << cpp_strerror(r)
<< dendl;
return invalidate();
}
finish_request();
return true;
case STATE_INVALIDATE:
ldout(cct, 20) << "INVALIDATE" << dendl;
if (r < 0) {
lderr(cct) << "failed to invalidate object map: " << cpp_strerror(r)
<< dendl;
}
return true;
default:
lderr(cct) << "invalid state: " << m_state << dendl;
ceph_abort();
break;
}
return false;
}
bool Request::invalidate() {
bool flags_set;
int r = m_image_ctx.test_flags(m_snap_id, RBD_FLAG_OBJECT_MAP_INVALID,
&flags_set);
if (r < 0 || flags_set) {
return true;
}
m_state = STATE_INVALIDATE;
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::unique_lock image_locker{m_image_ctx.image_lock};
InvalidateRequest<> *req = new InvalidateRequest<>(m_image_ctx, m_snap_id,
true,
create_callback_context());
req->send();
return false;
}
} // namespace object_map
} // namespace librbd
| 1,960 | 25.146667 | 80 | cc |