repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null | ceph-main/src/librbd/object_map/Request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_REQUEST_H
#include "include/int_types.h"
#include "librbd/AsyncRequest.h"
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
// Abstract base class for asynchronous object map mutation requests.
// Concrete subclasses implement send() to issue the on-disk update; on
// failure the base state machine invalidates the object map rather than
// propagating the error to the caller (see filter_return_code below).
class Request : public AsyncRequest<> {
public:
Request(ImageCtx &image_ctx, uint64_t snap_id, Context *on_finish)
: AsyncRequest(image_ctx, on_finish), m_snap_id(snap_id),
m_state(STATE_REQUEST)
{
}
// issue the asynchronous on-disk object map update
void send() override = 0;
protected:
// snapshot whose object map is being mutated (CEPH_NOSNAP == HEAD revision)
const uint64_t m_snap_id;
bool should_complete(int r) override;
int filter_return_code(int r) const override {
if (m_state == STATE_REQUEST) {
// never propagate an error back to the caller
return 0;
}
return r;
}
// hook invoked after the request succeeds (no-op by default)
virtual void finish_request() {
}
private:
/**
* STATE_TIMEOUT --------\
* ^ |
* | v
* <start> ---> STATE_REQUEST ---> <finish>
* | ^
* v |
* STATE_INVALIDATE -------/
*/
enum State {
STATE_REQUEST,
STATE_TIMEOUT,
STATE_INVALIDATE
};
State m_state;
bool invalidate();
};
} // namespace object_map
} // namespace librbd
#endif // CEPH_LIBRBD_OBJECT_MAP_REQUEST_H
| 1,401 | 19.925373 | 70 | h |
null | ceph-main/src/librbd/object_map/ResizeRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/ResizeRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "osdc/Striper.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "cls/lock/cls_lock_client.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::ResizeRequest: "
namespace librbd {
namespace object_map {
// Resize the in-memory object map to num_objs entries, initializing any
// newly appended entries to default_state. Shrinking leaves the surviving
// entries untouched.
void ResizeRequest::resize(ceph::BitVector<2> *object_map, uint64_t num_objs,
                           uint8_t default_state) {
  size_t old_size = object_map->size();
  object_map->resize(num_objs);

  if (num_objs <= old_size) {
    // map shrank (or stayed the same size) -- nothing to initialize
    return;
  }

  auto pos = object_map->begin() + old_size;
  auto stop = object_map->begin() + num_objs;
  while (pos != stop) {
    *pos = default_state;
    ++pos;
  }
}
// Issue the asynchronous on-disk object map resize via the
// object_map_resize cls method against the object map RADOS object.
void ResizeRequest::send() {
CephContext *cct = m_image_ctx.cct;
std::unique_lock l{*m_object_map_lock};
m_num_objs = Striper::get_num_objects(m_image_ctx.layout, m_new_size);
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 5) << this << " resizing on-disk object map: "
<< "ictx=" << &m_image_ctx << ", "
<< "oid=" << oid << ", num_objs=" << m_num_objs << dendl;
librados::ObjectWriteOperation op;
if (m_snap_id == CEPH_NOSNAP) {
// HEAD revision updates must be guarded by the exclusive lock
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "");
}
cls_client::object_map_resize(&op, m_num_objs, m_default_object_state);
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
// On successful completion of the on-disk resize, apply the identical
// resize to the caller-owned in-memory object map under its lock.
void ResizeRequest::finish_request() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " resizing in-memory object map: "
<< m_num_objs << dendl;
std::unique_lock object_map_locker{*m_object_map_lock};
resize(m_object_map, m_num_objs, m_default_object_state);
}
} // namespace object_map
} // namespace librbd
| 2,144 | 31.5 | 88 | cc |
null | ceph-main/src/librbd/object_map/ResizeRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_RESIZE_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_RESIZE_REQUEST_H
#include "include/int_types.h"
#include "librbd/object_map/Request.h"
#include "common/bit_vector.hpp"
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
// Resizes the on-disk object map for a given snapshot (or HEAD) and, on
// success, the caller-supplied in-memory object map.
class ResizeRequest : public Request {
public:
ResizeRequest(ImageCtx &image_ctx, ceph::shared_mutex *object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
uint64_t new_size, uint8_t default_object_state,
Context *on_finish)
: Request(image_ctx, snap_id, on_finish),
m_object_map_lock(object_map_lock), m_object_map(object_map),
m_num_objs(0), m_new_size(new_size),
m_default_object_state(default_object_state)
{
}
// helper: resize *object_map to num_objs entries, filling new entries
// with default_state
static void resize(ceph::BitVector<2> *object_map, uint64_t num_objs,
uint8_t default_state);
void send() override;
protected:
void finish_request() override;
private:
// guards m_object_map (both owned by the caller)
ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> *m_object_map;
// object count computed from m_new_size during send()
uint64_t m_num_objs;
// new image size in bytes
uint64_t m_new_size;
uint8_t m_default_object_state;
};
} // namespace object_map
} // namespace librbd
#endif // CEPH_LIBRBD_OBJECT_MAP_RESIZE_REQUEST_H
| 1,340 | 24.788462 | 73 | h |
null | ceph-main/src/librbd/object_map/SnapshotCreateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/SnapshotCreateRequest.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "cls/lock/cls_lock_client.h"
#include <iostream>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::SnapshotCreateRequest: "
namespace librbd {
namespace object_map {
namespace {
// Render a SnapshotCreateRequest state as its log label.
std::ostream& operator<<(std::ostream& os,
                         const SnapshotCreateRequest::State& state) {
  const char* label = nullptr;
  switch (state) {
  case SnapshotCreateRequest::STATE_READ_MAP:
    label = "READ_MAP";
    break;
  case SnapshotCreateRequest::STATE_WRITE_MAP:
    label = "WRITE_MAP";
    break;
  case SnapshotCreateRequest::STATE_ADD_SNAPSHOT:
    label = "ADD_SNAPSHOT";
    break;
  default:
    break;
  }

  if (label != nullptr) {
    os << label;
  } else {
    os << "UNKNOWN (" << static_cast<uint32_t>(state) << ")";
  }
  return os;
}
} // anonymous namespace
// Start the state machine by reading the HEAD object map object.
void SnapshotCreateRequest::send() {
send_read_map();
}
// State-machine callback: advance to the next step on each AIO completion.
// Returns true once the request has finished (success or error path).
bool SnapshotCreateRequest::should_complete(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": state=" << m_state << ", "
<< "r=" << r << dendl;
// latch the first error encountered
if (r < 0 && m_ret_val == 0) {
m_ret_val = r;
}
if (m_ret_val < 0) {
// pass errors down to base class to invalidate the object map
return Request::should_complete(r);
}
std::shared_lock owner_locker{m_image_ctx.owner_lock};
bool finished = false;
switch (m_state) {
case STATE_READ_MAP:
send_write_map();
break;
case STATE_WRITE_MAP:
// returns true (finished) when fast-diff is disabled and the
// ADD_SNAPSHOT step is skipped
finished = send_add_snapshot();
break;
case STATE_ADD_SNAPSHOT:
update_object_map();
finished = true;
break;
default:
ceph_abort();
break;
}
return finished;
}
// Read the full HEAD object map object into m_read_bl.
void SnapshotCreateRequest::send_read_map() {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
ldout(cct, 5) << this << " " << __func__ << ": oid=" << oid << dendl;
m_state = STATE_READ_MAP;
// IO is blocked due to the snapshot creation -- consistent to read from disk
librados::ObjectReadOperation op;
// len == 0 reads the entire object
op.read(0, 0, NULL, NULL);
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op,
&m_read_bl);
ceph_assert(r == 0);
rados_completion->release();
}
// Write the HEAD object map contents verbatim to the snapshot's object
// map object, creating the snapshot copy.
void SnapshotCreateRequest::send_write_map() {
CephContext *cct = m_image_ctx.cct;
std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 5) << this << " " << __func__ << ": snap_oid=" << snap_oid
<< dendl;
m_state = STATE_WRITE_MAP;
librados::ObjectWriteOperation op;
op.write_full(m_read_bl);
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
// With fast-diff enabled, mark the snapshot on the HEAD object map via the
// object_map_snap_add cls call. Returns true (request finished) when
// fast-diff is disabled and this step is skipped.
bool SnapshotCreateRequest::send_add_snapshot() {
std::shared_lock image_locker{m_image_ctx.image_lock};
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) == 0) {
return true;
}
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
ldout(cct, 5) << this << " " << __func__ << ": oid=" << oid << dendl;
m_state = STATE_ADD_SNAPSHOT;
librados::ObjectWriteOperation op;
// HEAD updates require that the exclusive lock still be held
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "");
cls_client::object_map_snap_add(&op);
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
return false;
}
// Mirror the on-disk snap_add in memory: demote every EXISTS entry in the
// in-memory HEAD object map to EXISTS_CLEAN.
void SnapshotCreateRequest::update_object_map() {
std::unique_lock object_map_locker{*m_object_map_lock};
auto it = m_object_map.begin();
auto end_it = m_object_map.end();
for (; it != end_it; ++it) {
if (*it == OBJECT_EXISTS) {
*it = OBJECT_EXISTS_CLEAN;
}
}
}
} // namespace object_map
} // namespace librbd
| 4,253 | 27.743243 | 86 | cc |
null | ceph-main/src/librbd/object_map/SnapshotCreateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_CREATE_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_CREATE_REQUEST_H
#include "include/int_types.h"
#include "common/bit_vector.hpp"
#include "librbd/object_map/Request.h"
class Context;
class RWLock;
namespace librbd {
class ImageCtx;
namespace object_map {
// Creates a snapshot copy of the HEAD object map and, with fast-diff
// enabled, marks all HEAD entries clean via object_map_snap_add.
class SnapshotCreateRequest : public Request {
public:
/**
* Snapshot create goes through the following state machine:
*
* @verbatim
*
* <start>
* |
* v
* STATE_READ_MAP
* |
* v (skip)
* STATE_WRITE_MAP . . . . . . .
* | .
* v v
* STATE_ADD_SNAPSHOT ---> <finish>
*
* @endverbatim
*
* The _ADD_SNAPSHOT state is skipped if the FAST_DIFF feature isn't enabled.
*/
enum State {
STATE_READ_MAP,
STATE_WRITE_MAP,
STATE_ADD_SNAPSHOT
};
SnapshotCreateRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
Context *on_finish)
: Request(image_ctx, snap_id, on_finish),
m_object_map_lock(object_map_lock), m_object_map(*object_map),
m_ret_val(0) {
}
void send() override;
protected:
bool should_complete(int r) override;
private:
// guards m_object_map (caller-owned in-memory HEAD object map)
ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
State m_state = STATE_READ_MAP;
// raw contents of the HEAD object map object, replayed to the snap oid
bufferlist m_read_bl;
// first error encountered; non-zero routes completion to invalidation
int m_ret_val;
void send_read_map();
void send_write_map();
bool send_add_snapshot();
void update_object_map();
};
} // namespace object_map
} // namespace librbd
#endif // CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_CREATE_REQUEST_H
| 1,791 | 21.123457 | 81 | h |
null | ceph-main/src/librbd/object_map/SnapshotRemoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/SnapshotRemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/object_map/InvalidateRequest.h"
#include "cls/lock/cls_lock_client.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::SnapshotRemoveRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace object_map {
// Entry point: with fast-diff enabled, first load the snapshot's object
// map so its state can be folded into the next snapshot / HEAD; otherwise
// just remove the on-disk object map object.
void SnapshotRemoveRequest::send() {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
int r = m_image_ctx.get_flags(m_snap_id, &m_flags);
ceph_assert(r == 0);
compute_next_snap_id();
load_map();
} else {
remove_map();
}
}
// Begin loading the doomed snapshot's object map into m_out_bl.
void SnapshotRemoveRequest::load_map() {
CephContext *cct = m_image_ctx.cct;
std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 5) << "snap_oid=" << snap_oid << dendl;
librados::ObjectReadOperation op;
cls_client::object_map_load_start(&op);
auto rados_completion = librbd::util::create_rados_callback<
SnapshotRemoveRequest, &SnapshotRemoveRequest::handle_load_map>(this);
int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op,
&m_out_bl);
ceph_assert(r == 0);
rados_completion->release();
}
// Completion of the object map load: decode it into m_snap_object_map.
// ENOENT means the snapshot was already cleaned up; other errors force
// invalidation of the next map since its fast-diff state can't be fixed up.
void SnapshotRemoveRequest::handle_load_map(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << "r=" << r << dendl;
if (r == 0) {
auto it = m_out_bl.cbegin();
r = cls_client::object_map_load_finish(&it, &m_snap_object_map);
}
if (r == -ENOENT) {
// implies we have already deleted this snapshot and handled the
// necessary fast-diff cleanup
complete(0);
return;
} else if (r < 0) {
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
lderr(cct) << "failed to load object map " << oid << ": "
<< cpp_strerror(r) << dendl;
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::unique_lock image_locker{m_image_ctx.image_lock};
invalidate_next_map();
return;
}
remove_snapshot();
}
// Fold the doomed snapshot's object map into the next snapshot (or HEAD)
// on disk via object_map_snap_remove; invalid snapshot maps instead force
// invalidation of the next map.
void SnapshotRemoveRequest::remove_snapshot() {
if ((m_flags & RBD_FLAG_OBJECT_MAP_INVALID) != 0) {
// snapshot object map exists on disk but is invalid. cannot clean fast-diff
// on next snapshot if current snapshot was invalid.
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::unique_lock image_locker{m_image_ctx.image_lock};
invalidate_next_map();
return;
}
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_next_snap_id));
ldout(cct, 5) << "oid=" << oid << dendl;
librados::ObjectWriteOperation op;
if (m_next_snap_id == CEPH_NOSNAP) {
// HEAD updates must be guarded by the exclusive lock
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "");
}
cls_client::object_map_snap_remove(&op, m_snap_object_map);
auto rados_completion = librbd::util::create_rados_callback<
SnapshotRemoveRequest,
&SnapshotRemoveRequest::handle_remove_snapshot>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
// Completion of snap_remove: on failure (other than ENOENT) invalidate the
// next map; otherwise update the in-memory map and delete the snapshot's
// on-disk object map object.
void SnapshotRemoveRequest::handle_remove_snapshot(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << "r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id,
m_next_snap_id));
lderr(cct) << "failed to remove object map snapshot " << oid << ": "
<< cpp_strerror(r) << dendl;
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::unique_lock image_locker{m_image_ctx.image_lock};
invalidate_next_map();
return;
}
std::shared_lock image_locker{m_image_ctx.image_lock};
update_object_map();
remove_map();
}
// Flag the next snapshot / HEAD object map as invalid since its fast-diff
// state can no longer be reconciled with the removed snapshot.
void SnapshotRemoveRequest::invalidate_next_map() {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << dendl;
auto ctx = librbd::util::create_context_callback<
SnapshotRemoveRequest,
&SnapshotRemoveRequest::handle_invalidate_next_map>(this);
InvalidateRequest<> *req = new InvalidateRequest<>(m_image_ctx,
m_next_snap_id, true, ctx);
req->send();
}
// Completion of the invalidation: proceed to remove the snapshot's object
// map object, or fail the request if the invalidation itself failed.
void SnapshotRemoveRequest::handle_invalidate_next_map(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << "r=" << r << dendl;
if (r < 0) {
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id,
m_next_snap_id));
lderr(cct) << "failed to invalidate object map " << oid << ": "
<< cpp_strerror(r) << dendl;
complete(r);
return;
}
remove_map();
}
// Delete the snapshot's object map object from the pool.
void SnapshotRemoveRequest::remove_map() {
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 5) << "oid=" << oid << dendl;
librados::ObjectWriteOperation op;
op.remove();
auto rados_completion = librbd::util::create_rados_callback<
SnapshotRemoveRequest, &SnapshotRemoveRequest::handle_remove_map>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
// Completion of the object removal: an already-missing object (ENOENT) is
// treated as success.
void SnapshotRemoveRequest::handle_remove_map(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << "r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
lderr(cct) << "failed to remove object map " << oid << ": "
<< cpp_strerror(r) << dendl;
complete(r);
return;
}
complete(0);
}
void SnapshotRemoveRequest::compute_next_snap_id() {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
m_next_snap_id = CEPH_NOSNAP;
std::map<librados::snap_t, SnapInfo>::const_iterator it =
m_image_ctx.snap_info.find(m_snap_id);
ceph_assert(it != m_image_ctx.snap_info.end());
++it;
if (it != m_image_ctx.snap_info.end()) {
m_next_snap_id = it->first;
}
}
void SnapshotRemoveRequest::update_object_map() {
assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
std::unique_lock object_map_locker{*m_object_map_lock};
if (m_next_snap_id == m_image_ctx.snap_id && m_next_snap_id == CEPH_NOSNAP) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << dendl;
auto it = m_object_map.begin();
auto end_it = m_object_map.end();
auto snap_it = m_snap_object_map.begin();
uint64_t i = 0;
for (; it != end_it; ++it) {
if (*it == OBJECT_EXISTS_CLEAN &&
(i >= m_snap_object_map.size() ||
*snap_it == OBJECT_EXISTS)) {
*it = OBJECT_EXISTS;
}
if (i < m_snap_object_map.size()) {
++snap_it;
}
++i;
}
}
}
} // namespace object_map
} // namespace librbd
| 7,246 | 30.785088 | 88 | cc |
null | ceph-main/src/librbd/object_map/SnapshotRemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_REMOVE_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_REMOVE_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/bit_vector.hpp"
#include "librbd/AsyncRequest.h"
namespace librbd {
namespace object_map {
// Removes a snapshot's on-disk object map, first folding its fast-diff
// state into the following snapshot / HEAD object map when possible.
class SnapshotRemoveRequest : public AsyncRequest<> {
public:
/**
* Snapshot rollback goes through the following state machine:
*
* @verbatim
*
* <start> -----------> STATE_LOAD_MAP ----\
* . * |
* . * (error) |
* . (invalid object map) v |
* . . . > STATE_INVALIDATE_NEXT_MAP |
* . | |
* . | |
* . (fast diff disabled) v v
* . . . . . . . . . . > STATE_REMOVE_MAP
* |
* v
* <finish>
*
* @endverbatim
*
* The _LOAD_MAP state is skipped if the fast diff feature is disabled.
* If the fast diff feature is enabled and the snapshot is flagged as
* invalid, the next snapshot / HEAD object map is flagged as invalid;
* otherwise, the state machine proceeds to remove the object map.
*/
SnapshotRemoveRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
Context *on_finish)
: AsyncRequest(image_ctx, on_finish),
m_object_map_lock(object_map_lock), m_object_map(*object_map),
m_snap_id(snap_id), m_next_snap_id(CEPH_NOSNAP) {
}
void send() override;
protected:
// completion is driven explicitly via complete(); nothing left to do here
bool should_complete(int r) override {
return true;
}
private:
// guards m_object_map (caller-owned in-memory HEAD object map)
ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
// snapshot being removed
uint64_t m_snap_id;
// snapshot following m_snap_id, or CEPH_NOSNAP for HEAD
uint64_t m_next_snap_id;
// RBD flags of the doomed snapshot (e.g. RBD_FLAG_OBJECT_MAP_INVALID)
uint64_t m_flags = 0;
// decoded object map of the doomed snapshot
ceph::BitVector<2> m_snap_object_map;
bufferlist m_out_bl;
void load_map();
void handle_load_map(int r);
void remove_snapshot();
void handle_remove_snapshot(int r);
void invalidate_next_map();
void handle_invalidate_next_map(int r);
void remove_map();
void handle_remove_map(int r);
void compute_next_snap_id();
void update_object_map();
};
} // namespace object_map
} // namespace librbd
#endif // CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_REMOVE_REQUEST_H
| 2,517 | 27.292135 | 81 | h |
null | ceph-main/src/librbd/object_map/SnapshotRollbackRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/SnapshotRollbackRequest.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/object_map/InvalidateRequest.h"
#include "cls/lock/cls_lock_client.h"
#include <iostream>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::SnapshotRollbackRequest: "
namespace librbd {
namespace object_map {
namespace {
// Render a SnapshotRollbackRequest state as its log label.
std::ostream& operator<<(std::ostream& os,
                         const SnapshotRollbackRequest::State& state) {
  if (state == SnapshotRollbackRequest::STATE_READ_MAP) {
    return os << "READ_MAP";
  }
  if (state == SnapshotRollbackRequest::STATE_INVALIDATE_MAP) {
    return os << "INVALIDATE_MAP";
  }
  if (state == SnapshotRollbackRequest::STATE_WRITE_MAP) {
    return os << "WRITE_MAP";
  }
  return os << "UNKNOWN (" << static_cast<uint32_t>(state) << ")";
}
} // anonymous namespace
// Start the state machine by reading the snapshot's object map.
void SnapshotRollbackRequest::send() {
send_read_map();
}
// State-machine callback: advance on each AIO completion; a read failure
// invalidates the snapshot map, any other failure flows through the base
// class, which invalidates the HEAD map.
bool SnapshotRollbackRequest::should_complete(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": state=" << m_state << ", "
<< "r=" << r << dendl;
// latch the first error encountered
if (r < 0 && m_ret_val == 0) {
m_ret_val = r;
}
bool finished = false;
switch (m_state) {
case STATE_READ_MAP:
if (r < 0) {
// invalidate the snapshot object map
send_invalidate_map();
} else {
send_write_map();
}
break;
case STATE_INVALIDATE_MAP:
// invalidate the HEAD object map as well
finished = Request::should_complete(m_ret_val);
break;
case STATE_WRITE_MAP:
finished = Request::should_complete(r);
break;
default:
ceph_abort();
break;
}
return finished;
}
// Read the full snapshot object map object into m_read_bl.
void SnapshotRollbackRequest::send_read_map() {
std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_oid=" << snap_oid
<< dendl;
m_state = STATE_READ_MAP;
librados::ObjectReadOperation op;
// len == 0 reads the entire object
op.read(0, 0, NULL, NULL);
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op,
&m_read_bl);
ceph_assert(r == 0);
rados_completion->release();
}
// Overwrite the HEAD object map object with the snapshot's contents,
// completing the rollback.
void SnapshotRollbackRequest::send_write_map() {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
CephContext *cct = m_image_ctx.cct;
std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id,
CEPH_NOSNAP));
ldout(cct, 5) << this << " " << __func__ << ": snap_oid=" << snap_oid
<< dendl;
m_state = STATE_WRITE_MAP;
librados::ObjectWriteOperation op;
// HEAD updates must be guarded by the exclusive lock
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "");
op.write_full(m_read_bl);
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
// Flag the snapshot's object map as invalid after a failed read.
void SnapshotRollbackRequest::send_invalidate_map() {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::unique_lock image_locker{m_image_ctx.image_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
m_state = STATE_INVALIDATE_MAP;
InvalidateRequest<> *req = new InvalidateRequest<>(m_image_ctx, m_snap_id,
false,
create_callback_context());
req->send();
}
} // namespace object_map
} // namespace librbd
| 3,848 | 28.159091 | 86 | cc |
null | ceph-main/src/librbd/object_map/SnapshotRollbackRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_ROLLBACK_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_ROLLBACK_REQUEST_H
#include "include/int_types.h"
#include "librbd/object_map/Request.h"
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
// Rolls the HEAD object map back to a snapshot's object map contents.
class SnapshotRollbackRequest : public Request {
public:
/**
* Snapshot rollback goes through the following state machine:
*
* @verbatim
*
* <start>
* |
* v (error)
* STATE_READ_MAP * * * * > STATE_INVALIDATE_MAP
* | |
* v v
* STATE_WRITE_MAP -------> <finish>
*
* @endverbatim
*
* If an error occurs within the READ_MAP state, the associated snapshot's
* object map will be flagged as invalid. Otherwise, an error from any state
* will result in the HEAD object map being flagged as invalid via the base
* class.
*/
enum State {
STATE_READ_MAP,
STATE_INVALIDATE_MAP,
STATE_WRITE_MAP
};
// note: the base class operates on the HEAD revision (CEPH_NOSNAP) while
// m_snap_id identifies the snapshot rolled back to
SnapshotRollbackRequest(ImageCtx &image_ctx, uint64_t snap_id,
Context *on_finish)
: Request(image_ctx, CEPH_NOSNAP, on_finish),
m_snap_id(snap_id), m_ret_val(0) {
ceph_assert(snap_id != CEPH_NOSNAP);
}
void send() override;
protected:
bool should_complete(int r) override;
private:
State m_state = STATE_READ_MAP;
// snapshot whose object map is copied over HEAD
uint64_t m_snap_id;
// first error encountered during the rollback
int m_ret_val;
// raw snapshot object map contents, replayed onto the HEAD oid
bufferlist m_read_bl;
void send_read_map();
void send_invalidate_map();
void send_write_map();
};
} // namespace object_map
} // namespace librbd
#endif // CEPH_LIBRBD_OBJECT_MAP_SNAPSHOT_ROLLBACK_REQUEST_H
| 1,727 | 22.04 | 79 | h |
null | ceph-main/src/librbd/object_map/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_TYPES_H
#define CEPH_LIBRBD_OBJECT_MAP_TYPES_H
namespace librbd {
namespace object_map {
// Per-object diff state computed when comparing object maps between two
// image revisions (see existing inline notes for each value).
enum DiffState {
DIFF_STATE_HOLE = 0, /* unchanged hole */
DIFF_STATE_DATA = 1, /* unchanged data */
DIFF_STATE_HOLE_UPDATED = 2, /* new hole */
DIFF_STATE_DATA_UPDATED = 3 /* new data */
};
} // namespace object_map
} // namespace librbd
#endif // CEPH_LIBRBD_OBJECT_MAP_TYPES_H
| 528 | 24.190476 | 70 | h |
null | ceph-main/src/librbd/object_map/UnlockRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/UnlockRequest.h"
#include "cls/lock/cls_lock_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::UnlockRequest: "
namespace librbd {
namespace object_map {
using util::create_rados_callback;
// Construct an unlock request for the image's HEAD object map object.
template <typename I>
UnlockRequest<I>::UnlockRequest(I &image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish) {
}
// Entry point: issue the asynchronous unlock op.
template <typename I>
void UnlockRequest<I>::send() {
send_unlock();
}
// Release the cls_lock held on the HEAD object map object.
template <typename I>
void UnlockRequest<I>::send_unlock() {
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
ldout(cct, 10) << this << " " << __func__ << ": oid=" << oid << dendl;
librados::ObjectWriteOperation op;
rados::cls::lock::unlock(&op, RBD_LOCK_NAME, "");
using klass = UnlockRequest<I>;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_unlock>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
}
// Completion of the unlock op: log unexpected failures (a missing object
// is fine) but always swallow the error before invoking the user callback.
template <typename I>
Context *UnlockRequest<I>::handle_unlock(int *ret_val) {
  CephContext *cct = m_image_ctx.cct;
  int r = *ret_val;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  bool unexpected_error = (r < 0 && r != -ENOENT);
  if (unexpected_error) {
    lderr(cct) << "failed to release object map lock: "
               << cpp_strerror(r) << dendl;
  }

  // unlock failures are never propagated to the caller
  *ret_val = 0;
  return m_on_finish;
}
} // namespace object_map
} // namespace librbd
template class librbd::object_map::UnlockRequest<librbd::ImageCtx>;
| 1,885 | 27.149254 | 77 | cc |
null | ceph-main/src/librbd/object_map/UnlockRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_UNLOCK_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_UNLOCK_REQUEST_H
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
// Releases the cls lock held on the image's object map RADOS object.
// Unlock failures are logged and swallowed; on_finish always sees r == 0.
template <typename ImageCtxT = ImageCtx>
class UnlockRequest {
public:
static UnlockRequest *create(ImageCtxT &image_ctx, Context *on_finish) {
return new UnlockRequest(image_ctx, on_finish);
}
UnlockRequest(ImageCtxT &image_ctx, Context *on_finish);
void send();
private:
/**
* @verbatim
*
* <start> ----> UNLOCK ----> <finish>
*
* @endverbatim
*/
ImageCtxT &m_image_ctx;
Context *m_on_finish;
void send_unlock();
Context* handle_unlock(int *ret_val);
};
} // namespace object_map
} // namespace librbd
extern template class librbd::object_map::UnlockRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OBJECT_MAP_UNLOCK_REQUEST_H
| 950 | 18.8125 | 74 | h |
null | ceph-main/src/librbd/object_map/UpdateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/UpdateRequest.h"
#include "include/rbd/object_map_types.h"
#include "include/stringify.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "cls/lock/cls_lock_client.h"
#include <string>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::object_map::UpdateRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace object_map {
namespace {
// upper bound on objects touched per batched RADOS update op;
// keep aligned to bit_vector 4K block sizes
const uint64_t MAX_OBJECTS_PER_UPDATE = 256 * (1 << 10);
}
// Entry point: issue the first (possibly only) batched update.
template <typename I>
void UpdateRequest<I>::send() {
update_object_map();
}
// Issue one batched on-disk object map update covering at most
// MAX_OBJECTS_PER_UPDATE objects starting at m_update_start_object_no.
template <typename I>
void UpdateRequest<I>::update_object_map() {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(ceph_mutex_is_locked(*m_object_map_lock));
CephContext *cct = m_image_ctx.cct;
// break very large requests into manageable batches
m_update_end_object_no = std::min(
m_end_object_no, m_update_start_object_no + MAX_OBJECTS_PER_UPDATE);
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 20) << "ictx=" << &m_image_ctx << ", oid=" << oid << ", "
<< "[" << m_update_start_object_no << ","
<< m_update_end_object_no << ") = "
<< (m_current_state ?
stringify(static_cast<uint32_t>(*m_current_state)) : "")
<< "->" << static_cast<uint32_t>(m_new_state)
<< dendl;
librados::ObjectWriteOperation op;
if (m_snap_id == CEPH_NOSNAP) {
// HEAD updates must be guarded by the exclusive lock
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "");
}
cls_client::object_map_update(&op, m_update_start_object_no,
m_update_end_object_no, m_new_state,
m_current_state);
auto rados_completion = librbd::util::create_rados_callback<
UpdateRequest<I>, &UpdateRequest<I>::handle_update_object_map>(this);
std::vector<librados::snap_t> snaps;
int r = m_image_ctx.md_ctx.aio_operate(
oid, rados_completion, &op, 0, snaps,
(m_trace.valid() ? m_trace.get_info() : nullptr));
ceph_assert(r == 0);
rados_completion->release();
}
// Completion of one batch: sync the in-memory map, then either issue the
// next batch or complete with the first latched error.
template <typename I>
void UpdateRequest<I>::handle_update_object_map(int r) {
ldout(m_image_ctx.cct, 20) << "r=" << r << dendl;
if (r == -ENOENT && m_ignore_enoent) {
r = 0;
}
// latch the first error but keep processing remaining batches
if (r < 0 && m_ret_val == 0) {
m_ret_val = r;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
std::unique_lock object_map_locker{*m_object_map_lock};
update_in_memory_object_map();
if (m_update_end_object_no < m_end_object_no) {
// continue where the last batch ended
m_update_start_object_no = m_update_end_object_no;
update_object_map();
return;
}
}
// no more batch updates to send
complete(m_ret_val);
}
// Mirror the just-completed batch into the in-memory object map, applying
// the same current-state filter as the cls-side update.
template <typename I>
void UpdateRequest<I>::update_in_memory_object_map() {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(ceph_mutex_is_locked(*m_object_map_lock));
// rebuilding the object map might update on-disk only
if (m_snap_id == m_image_ctx.snap_id) {
ldout(m_image_ctx.cct, 20) << dendl;
// clamp the range to the in-memory map size -- it may be smaller than
// the on-disk range being updated
auto it = m_object_map.begin() +
std::min(m_update_start_object_no, m_object_map.size());
auto end_it = m_object_map.begin() +
std::min(m_update_end_object_no, m_object_map.size());
for (; it != end_it; ++it) {
auto state_ref = *it;
uint8_t state = state_ref;
// EXISTS_CLEAN is treated as matching a requested EXISTS current state
if (!m_current_state || state == *m_current_state ||
(*m_current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN)) {
state_ref = m_new_state;
}
}
}
}
// No post-success work: the in-memory map is synced per batch.
template <typename I>
void UpdateRequest<I>::finish_request() {
}
} // namespace object_map
} // namespace librbd
template class librbd::object_map::UpdateRequest<librbd::ImageCtx>;
| 3,980 | 29.623077 | 88 | cc |
null | ceph-main/src/librbd/object_map/UpdateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_UPDATE_REQUEST_H
#define CEPH_LIBRBD_OBJECT_MAP_UPDATE_REQUEST_H
#include "include/int_types.h"
#include "librbd/object_map/Request.h"
#include "common/bit_vector.hpp"
#include "common/zipkin_trace.h"
#include "librbd/Utils.h"
#include <boost/optional.hpp>
class Context;
namespace librbd {
class ImageCtx;
namespace object_map {
// Updates a contiguous range of object map entries to a new state, both
// on disk (in batches) and in the caller-owned in-memory map.
template <typename ImageCtxT = librbd::ImageCtx>
class UpdateRequest : public Request {
public:
static UpdateRequest *create(ImageCtx &image_ctx,
ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map,
uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> &current_state,
const ZTracer::Trace &parent_trace,
bool ignore_enoent, Context *on_finish) {
return new UpdateRequest(image_ctx, object_map_lock, object_map, snap_id,
start_object_no, end_object_no, new_state,
current_state, parent_trace, ignore_enoent,
on_finish);
}
UpdateRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
uint64_t start_object_no, uint64_t end_object_no,
uint8_t new_state,
const boost::optional<uint8_t> &current_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
Context *on_finish)
: Request(image_ctx, snap_id, on_finish),
m_object_map_lock(object_map_lock), m_object_map(*object_map),
m_start_object_no(start_object_no), m_end_object_no(end_object_no),
m_update_start_object_no(start_object_no), m_new_state(new_state),
m_current_state(current_state),
m_trace(util::create_trace(image_ctx, "update object map", parent_trace)),
m_ignore_enoent(ignore_enoent)
{
m_trace.event("start");
}
virtual ~UpdateRequest() {
m_trace.event("finish");
}
void send() override;
protected:
void finish_request() override;
private:
/**
* @verbatim
*
* <start>
* |
* |/------------------\
* v | (repeat in batches)
* UPDATE_OBJECT_MAP -----/
* |
* v
* <finish>
*
* @endverbatim
*/
// guards m_object_map (caller-owned in-memory object map)
ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
// full requested range [m_start_object_no, m_end_object_no)
uint64_t m_start_object_no;
uint64_t m_end_object_no;
// range covered by the in-flight batch
uint64_t m_update_start_object_no;
uint64_t m_update_end_object_no = 0;
uint8_t m_new_state;
// if set, only entries matching this state are updated
boost::optional<uint8_t> m_current_state;
ZTracer::Trace m_trace;
// treat a missing object map object as success
bool m_ignore_enoent;
// first error latched across batches
int m_ret_val = 0;
void update_object_map();
void handle_update_object_map(int r);
void update_in_memory_object_map();
};
} // namespace object_map
} // namespace librbd
extern template class librbd::object_map::UpdateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OBJECT_MAP_UPDATE_REQUEST_H
| 3,250 | 29.383178 | 80 | h |
null | ceph-main/src/librbd/operation/DisableFeaturesRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/DisableFeaturesRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/image/SetFlagsRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/journal/RemoveRequest.h"
#include "librbd/journal/TypeTraits.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/object_map/RemoveRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::DisableFeaturesRequest: "
namespace librbd {
namespace operation {
using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;
// Construct a request to disable the given feature bits; 'force' bypasses
// the journal-based-mirroring safety check.
template <typename I>
DisableFeaturesRequest<I>::DisableFeaturesRequest(I &image_ctx,
                                                  Context *on_finish,
                                                  uint64_t journal_op_tid,
                                                  uint64_t features,
                                                  bool force)
  : Request<I>(image_ctx, on_finish, journal_op_tid), m_features(features),
    m_force(force) {
}
// State machine entry point; caller must hold owner_lock.
template <typename I>
void DisableFeaturesRequest<I>::send_op() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features
                 << dendl;
  send_prepare_lock();
}
// The state machine drives itself to completion through context callbacks,
// so any notification arriving here signals the end of the request: log a
// failure (if any) and report completion unconditionally.
template <typename I>
bool DisableFeaturesRequest<I>::should_complete(int r) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << " r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
// Ask the image state machine to quiesce/prepare the managed lock before
// manipulating features.
template <typename I>
void DisableFeaturesRequest<I>::send_prepare_lock() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  image_ctx.state->prepare_lock(create_async_context_callback(
    image_ctx, create_context_callback<
      DisableFeaturesRequest<I>,
      &DisableFeaturesRequest<I>::handle_prepare_lock>(this)));
}
// Lock preparation finished: on success continue by quiescing writes,
// otherwise finish the request with the prepare error.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_prepare_lock(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    send_block_writes();
    return nullptr;
  }
  lderr(cct) << "failed to lock image: " << cpp_strerror(*result) << dendl;
  return this->create_context_finisher(*result);
}
// Block new writes through the image dispatcher while features change.
template <typename I>
void DisableFeaturesRequest<I>::send_block_writes() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  std::unique_lock locker{image_ctx.owner_lock};
  image_ctx.io_image_dispatcher->block_writes(create_context_callback<
    DisableFeaturesRequest<I>,
    &DisableFeaturesRequest<I>::handle_block_writes>(this));
}
// Writes are now blocked; also block incoming peer requests on the
// exclusive lock (unless journal replay still needs them) before acquiring
// the lock ourselves.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_block_writes(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to block writes: " << cpp_strerror(*result) << dendl;
    return handle_finish(*result);
  }
  // remember so handle_finish() knows to unblock on every exit path
  m_writes_blocked = true;
  {
    std::unique_lock locker{image_ctx.owner_lock};
    // avoid accepting new requests from peers while we manipulate
    // the image features
    if (image_ctx.exclusive_lock != nullptr &&
        (image_ctx.journal == nullptr ||
         !image_ctx.journal->is_journal_replaying())) {
      image_ctx.exclusive_lock->block_requests(0);
      m_requests_blocked = true;
    }
  }
  return send_acquire_exclusive_lock(result);
}
// Acquire the exclusive lock if the feature is enabled and we don't already
// own it; otherwise fall straight through to the handler.
template <typename I>
Context *DisableFeaturesRequest<I>::send_acquire_exclusive_lock(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  {
    std::unique_lock locker{image_ctx.owner_lock};
    // if disabling features w/ exclusive lock supported, we need to
    // acquire the lock to temporarily block IO against the image
    if (image_ctx.exclusive_lock != nullptr &&
        !image_ctx.exclusive_lock->is_lock_owner()) {
      m_acquired_lock = true;
      Context *ctx = create_context_callback<
        DisableFeaturesRequest<I>,
        &DisableFeaturesRequest<I>::handle_acquire_exclusive_lock>(
          this, image_ctx.exclusive_lock);
      image_ctx.exclusive_lock->acquire_lock(ctx);
      return nullptr;
    }
  }
  // lock not required (or already held): continue synchronously
  return handle_acquire_exclusive_lock(result);
}
// Lock acquired (or skipped): compute the new feature set, the CLS update
// mask and the flags to set, validating feature interdependencies.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_acquire_exclusive_lock(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  // manual shared lock: every branch below must unlock before returning
  image_ctx.owner_lock.lock_shared();
  if (*result < 0) {
    lderr(cct) << "failed to lock image: " << cpp_strerror(*result) << dendl;
    image_ctx.owner_lock.unlock_shared();
    return handle_finish(*result);
  } else if (image_ctx.exclusive_lock != nullptr &&
             !image_ctx.exclusive_lock->is_lock_owner()) {
    lderr(cct) << "failed to acquire exclusive lock" << dendl;
    *result = image_ctx.exclusive_lock->get_unlocked_op_error();
    image_ctx.owner_lock.unlock_shared();
    return handle_finish(*result);
  }
  // do/while(false) gives the validation code a structured early-exit
  do {
    // only disable features that are actually enabled
    m_features &= image_ctx.features;
    // interlock object-map and fast-diff together
    if (((m_features & RBD_FEATURE_OBJECT_MAP) != 0) ||
        ((m_features & RBD_FEATURE_FAST_DIFF) != 0)) {
      m_features |= (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF);
    }
    m_new_features = image_ctx.features & ~m_features;
    m_features_mask = m_features;
    if ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0) {
      if ((m_new_features & RBD_FEATURE_OBJECT_MAP) != 0 ||
          (m_new_features & RBD_FEATURE_JOURNALING) != 0) {
        lderr(cct) << "cannot disable exclusive-lock. object-map "
                      "or journaling must be disabled before "
                      "disabling exclusive-lock." << dendl;
        *result = -EINVAL;
        break;
      }
      m_features_mask |= (RBD_FEATURE_OBJECT_MAP |
                          RBD_FEATURE_FAST_DIFF |
                          RBD_FEATURE_JOURNALING);
    }
    // mark dependent metadata invalid so a re-enable forces a rebuild
    if ((m_features & RBD_FEATURE_FAST_DIFF) != 0) {
      m_disable_flags |= RBD_FLAG_FAST_DIFF_INVALID;
    }
    if ((m_features & RBD_FEATURE_OBJECT_MAP) != 0) {
      m_disable_flags |= RBD_FLAG_OBJECT_MAP_INVALID;
    }
  } while (false);
  image_ctx.owner_lock.unlock_shared();
  if (*result < 0) {
    return handle_finish(*result);
  }
  send_get_mirror_mode();
  return nullptr;
}
// Fetch the pool mirror mode -- only relevant when disabling journaling;
// otherwise skip ahead to recording the op event.
template <typename I>
void DisableFeaturesRequest<I>::send_get_mirror_mode() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  if ((m_features & RBD_FEATURE_JOURNALING) == 0) {
    send_append_op_event();
    return;
  }
  ldout(cct, 20) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  cls_client::mirror_mode_get_start(&op);
  using klass = DisableFeaturesRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_get_mirror_mode>(this);
  m_out_bl.clear();
  int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Decode the pool mirror mode; -ENOENT (no mirroring object) is tolerated
// and treated as mirroring disabled.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_get_mirror_mode(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result == 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::mirror_mode_get_finish(&it, &m_mirror_mode);
  }
  if (*result < 0 && *result != -ENOENT) {
    lderr(cct) << "failed to retrieve pool mirror mode: "
               << cpp_strerror(*result) << dendl;
    return handle_finish(*result);
  }
  ldout(cct, 20) << this << " " << __func__ << ": m_mirror_mode="
                 << m_mirror_mode << dendl;
  send_get_mirror_image();
  return nullptr;
}
// Fetch this image's mirroring record -- only needed in per-image mirror
// mode; in pool mode we proceed straight to disabling image mirroring.
template <typename I>
void DisableFeaturesRequest<I>::send_get_mirror_image() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  if (m_mirror_mode != cls::rbd::MIRROR_MODE_IMAGE) {
    send_disable_mirror_image();
    return;
  }
  ldout(cct, 20) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  cls_client::mirror_image_get_start(&op, image_ctx.id);
  using klass = DisableFeaturesRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_get_mirror_image>(this);
  m_out_bl.clear();
  int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Validate that journaling may be disabled: refuse (without --force) when
// journal-based mirroring is enabled on the image in image mirror mode.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_get_mirror_image(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  cls::rbd::MirrorImage mirror_image;
  if (*result == 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::mirror_image_get_finish(&it, &mirror_image);
  }
  // -ENOENT simply means the image has no mirroring record
  if (*result < 0 && *result != -ENOENT) {
    lderr(cct) << "failed to retrieve pool mirror image: "
               << cpp_strerror(*result) << dendl;
    return handle_finish(*result);
  }
  if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_ENABLED &&
      mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL && !m_force) {
    lderr(cct) << "cannot disable journaling: journal-based mirroring "
               << "enabled and mirror pool mode set to image"
               << dendl;
    *result = -EINVAL;
    return handle_finish(*result);
  }
  // only journal-based mirroring needs to be disabled first
  if (mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
    send_close_journal();
  } else {
    send_disable_mirror_image();
  }
  return nullptr;
}
// Disable mirroring on the image before tearing down the journal; continues
// via handle_disable_mirror_image.
template <typename I>
void DisableFeaturesRequest<I>::send_disable_mirror_image() {
  I &image_ctx = this->m_image_ctx;
  ldout(image_ctx.cct, 20) << this << " " << __func__ << dendl;
  auto ctx = create_context_callback<
    DisableFeaturesRequest<I>,
    &DisableFeaturesRequest<I>::handle_disable_mirror_image>(this);
  mirror::DisableRequest<I>::create(&image_ctx, m_force, true, ctx)->send();
}
// Mirroring-disable completed; a failure is logged but intentionally not
// fatal -- proceed with the journal shutdown either way.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_disable_mirror_image(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    // not fatal
    lderr(cct) << "failed to disable image mirroring: " << cpp_strerror(*result)
               << dendl;
  }
  send_close_journal();
  return nullptr;
}
// Detach the open journal from the image (taking ownership into m_journal)
// and close it; skips straight to removal if no journal is open.
template <typename I>
void DisableFeaturesRequest<I>::send_close_journal() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  {
    std::unique_lock locker{image_ctx.owner_lock};
    if (image_ctx.journal != nullptr) {
      ldout(cct, 20) << this << " " << __func__ << dendl;
      // swap so the image no longer references the journal being closed
      std::swap(m_journal, image_ctx.journal);
      Context *ctx = create_context_callback<
        DisableFeaturesRequest<I>,
        &DisableFeaturesRequest<I>::handle_close_journal>(this);
      m_journal->close(ctx);
      return;
    }
  }
  send_remove_journal();
}
// Journal closed (errors logged but not fatal); drop our reference and
// continue with on-disk journal removal.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_close_journal(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << "failed to close image journal: " << cpp_strerror(*result)
               << dendl;
  }
  ceph_assert(m_journal != nullptr);
  m_journal->put();
  m_journal = nullptr;
  send_remove_journal();
  return nullptr;
}
// Remove the image's journal objects and metadata from the pool.
template <typename I>
void DisableFeaturesRequest<I>::send_remove_journal() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  Context *ctx = create_context_callback<
    DisableFeaturesRequest<I>,
    &DisableFeaturesRequest<I>::handle_remove_journal>(this);
  typename journal::TypeTraits<I>::ContextWQ* context_wq;
  Journal<I>::get_work_queue(cct, &context_wq);
  journal::RemoveRequest<I> *req = journal::RemoveRequest<I>::create(
    image_ctx.md_ctx, image_ctx.id, librbd::Journal<>::IMAGE_CLIENT_ID,
    context_wq, ctx);
  req->send();
}
// Journal removal completed; abort on failure, otherwise record the
// update-features op event.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_remove_journal(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    send_append_op_event();
    return nullptr;
  }
  lderr(cct) << "failed to remove image journal: " << cpp_strerror(*result)
             << dendl;
  return handle_finish(*result);
}
// Append the update-features event to the journal; append_op_event()
// returns false when journaling is inactive, in which case we continue
// synchronously with object-map removal.
template <typename I>
void DisableFeaturesRequest<I>::send_append_op_event() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  if (!this->template append_op_event<
        DisableFeaturesRequest<I>,
        &DisableFeaturesRequest<I>::handle_append_op_event>(this)) {
    send_remove_object_map();
  }
  ldout(cct, 20) << this << " " << __func__ << dendl;
}
// Journal event committed; abort on failure, otherwise continue with
// object-map removal.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_append_op_event(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    send_remove_object_map();
    return nullptr;
  }
  lderr(cct) << "failed to commit journal entry: " << cpp_strerror(*result)
             << dendl;
  return handle_finish(*result);
}
// Remove the object-map objects if that feature is being disabled;
// otherwise skip directly to the feature-bit update.
template <typename I>
void DisableFeaturesRequest<I>::send_remove_object_map() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0) {
    send_set_features();
    return;
  }
  Context *ctx = create_context_callback<
    DisableFeaturesRequest<I>,
    &DisableFeaturesRequest<I>::handle_remove_object_map>(this);
  object_map::RemoveRequest<I> *req =
    object_map::RemoveRequest<I>::create(&image_ctx, ctx);
  req->send();
}
// Object-map removal completed; a missing object map (-ENOENT) is fine.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_remove_object_map(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0 || *result == -ENOENT) {
    send_set_features();
    return nullptr;
  }
  lderr(cct) << "failed to remove object map: " << cpp_strerror(*result) << dendl;
  return handle_finish(*result);
}
// Persist the new feature bits on the image header via the set_features
// class method (m_features_mask selects which bits may change).
template <typename I>
void DisableFeaturesRequest<I>::send_set_features() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": new_features="
                 << m_new_features << ", features_mask=" << m_features_mask
                 << dendl;
  librados::ObjectWriteOperation op;
  librbd::cls_client::set_features(&op, m_new_features, m_features_mask);
  using klass = DisableFeaturesRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_set_features>(this);
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Feature-bit update completed. An -EINVAL with journaling in the mask may
// come from a pre-jewel OSD that rejects unknown feature bits, in which case
// the update is retried with a reduced mask.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_set_features(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result == -EINVAL && (m_features_mask & RBD_FEATURE_JOURNALING) != 0) {
    // NOTE: infernalis OSDs will not accept a mask with new features, so
    // re-attempt with a reduced mask.
    ldout(cct, 5) << this << " " << __func__
                  << ": re-attempt with a reduced mask" << dendl;
    m_features_mask &= ~RBD_FEATURE_JOURNALING;
    send_set_features();
    // fix: wait for the re-attempted update to complete -- without this
    // return, the -EINVAL fell through into the error path below and the
    // request completed while the retried RADOS op was still in flight
    return nullptr;
  }
  if (*result < 0) {
    lderr(cct) << "failed to update features: " << cpp_strerror(*result)
               << dendl;
    return handle_finish(*result);
  }
  send_update_flags();
  return nullptr;
}
// Set the invalid-metadata flags computed earlier (if any); otherwise skip
// ahead to the header-update notification.
template <typename I>
void DisableFeaturesRequest<I>::send_update_flags() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  if (m_disable_flags == 0) {
    send_notify_update();
    return;
  }
  ldout(cct, 20) << this << " " << __func__ << ": disable_flags="
                 << m_disable_flags << dendl;
  Context *ctx = create_context_callback<
    DisableFeaturesRequest<I>,
    &DisableFeaturesRequest<I>::handle_update_flags>(this);
  image::SetFlagsRequest<I> *req =
    image::SetFlagsRequest<I>::create(&image_ctx, 0, m_disable_flags, ctx);
  req->send();
}
// Flag update completed; abort on failure, otherwise notify watchers.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_update_flags(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    send_notify_update();
    return nullptr;
  }
  lderr(cct) << "failed to update image flags: " << cpp_strerror(*result)
             << dendl;
  return handle_finish(*result);
}
// Notify image watchers that the header has changed.
template <typename I>
void DisableFeaturesRequest<I>::send_notify_update() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  Context *ctx = create_context_callback<
    DisableFeaturesRequest<I>,
    &DisableFeaturesRequest<I>::handle_notify_update>(this);
  image_ctx.notify_update(ctx);
}
// Watchers notified; release the exclusive lock only if this request was
// the one that acquired it and the lock still exists.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_notify_update(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (image_ctx.exclusive_lock == nullptr || !m_acquired_lock) {
    return handle_finish(*result);
  }
  send_release_exclusive_lock();
  return nullptr;
}
// Release the exclusive lock acquired earlier in the state machine.
template <typename I>
void DisableFeaturesRequest<I>::send_release_exclusive_lock() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  Context *ctx = create_context_callback<
    DisableFeaturesRequest<I>,
    &DisableFeaturesRequest<I>::handle_release_exclusive_lock>(
      this, image_ctx.exclusive_lock);
  image_ctx.exclusive_lock->release_lock(ctx);
}
// Lock released; finish the request with whatever result we were handed.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_release_exclusive_lock(int *result) {
  ldout(this->m_image_ctx.cct, 20) << this << " " << __func__ << ": r="
                                   << *result << dendl;
  return handle_finish(*result);
}
// Common exit path: undo the request blocking and write blocking installed
// earlier, release the prepared lock, and hand the result to the caller.
template <typename I>
Context *DisableFeaturesRequest<I>::handle_finish(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
  {
    std::unique_lock locker{image_ctx.owner_lock};
    if (image_ctx.exclusive_lock != nullptr && m_requests_blocked) {
      image_ctx.exclusive_lock->unblock_requests();
    }
    image_ctx.io_image_dispatcher->unblock_writes();
  }
  image_ctx.state->handle_prepare_lock_complete();
  return this->create_context_finisher(r);
}
} // namespace operation
} // namespace librbd
template class librbd::operation::DisableFeaturesRequest<librbd::ImageCtx>;
| 19,969 | 29.442073 | 84 | cc |
null | ceph-main/src/librbd/operation/DisableFeaturesRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_DISABLE_FEATURES_REQUEST_H
#define CEPH_LIBRBD_OPERATION_DISABLE_FEATURES_REQUEST_H
#include "librbd/ImageCtx.h"
#include "librbd/operation/Request.h"
#include "cls/rbd/cls_rbd_client.h"
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Async operation that disables a set of image features, coordinating
// write/request blocking, mirroring teardown, journal removal and
// object-map removal as required.
template <typename ImageCtxT = ImageCtx>
class DisableFeaturesRequest : public Request<ImageCtxT> {
public:
  // Factory helper; see the constructor for parameter semantics.
  static DisableFeaturesRequest *create(ImageCtxT &image_ctx, Context *on_finish,
                                        uint64_t journal_op_tid,
                                        uint64_t features, bool force) {
    return new DisableFeaturesRequest(image_ctx, on_finish, journal_op_tid,
                                      features, force);
  }

  // features: bitmask of RBD_FEATURE_* to disable
  // force: bypass the journal-based-mirroring safety check
  DisableFeaturesRequest(ImageCtxT &image_ctx, Context *on_finish,
                         uint64_t journal_op_tid, uint64_t features, bool force);

protected:
  void send_op() override;
  bool should_complete(int r) override;
  bool can_affect_io() const override {
    return true;
  }
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::UpdateFeaturesEvent(op_tid, m_features, false);
  }

private:
  /**
   * DisableFeatures goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_PREPARE_LOCK
   *    |
   *    v
   * STATE_BLOCK_WRITES
   *    |
   *    v
   * STATE_ACQUIRE_EXCLUSIVE_LOCK (skip if not
   *    |                          required)
   *    | (disabling journaling)
   *    \-------------------\
   *    |                   |
   *    |                   V
   *    |             STATE_GET_MIRROR_MODE
   *    |(not                |
   *    | disabling          v
   *    | journaling)  STATE_GET_MIRROR_IMAGE
   *    |                    |
   *    |                    v
   *    |             STATE_DISABLE_MIRROR_IMAGE (skip if not
   *    |                    |                    required)
   *    |                    v
   *    |             STATE_CLOSE_JOURNAL
   *    |                    |
   *    |                    v
   *    |             STATE_REMOVE_JOURNAL
   *    |                    |
   *    |/-------------------/
   *    |
   *    v
   * STATE_APPEND_OP_EVENT (skip if journaling
   *    |                   disabled)
   *    v
   * STATE_REMOVE_OBJECT_MAP (skip if not
   *    |                     disabling object map)
   *    v
   * STATE_SET_FEATURES
   *    |
   *    v
   * STATE_UPDATE_FLAGS
   *    |
   *    v
   * STATE_NOTIFY_UPDATE
   *    |
   *    v
   * STATE_RELEASE_EXCLUSIVE_LOCK (skip if not
   *    |                          required)
   *    | (unblock writes)
   *    v
   * <finish>
   *
   * @endverbatim
   *
   */

  // feature bits being disabled (narrowed to currently-enabled bits)
  uint64_t m_features;
  bool m_force;

  // bookkeeping so handle_finish() only undoes what this request did
  bool m_acquired_lock = false;
  bool m_writes_blocked = false;
  bool m_image_lock_acquired = false;
  bool m_requests_blocked = false;

  // resulting feature set, flags to set, and CLS update mask
  uint64_t m_new_features = 0;
  uint64_t m_disable_flags = 0;
  uint64_t m_features_mask = 0;

  // journal detached from the image while it is being closed
  decltype(ImageCtxT::journal) m_journal = nullptr;
  cls::rbd::MirrorMode m_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
  bufferlist m_out_bl;

  void send_prepare_lock();
  Context *handle_prepare_lock(int *result);

  void send_block_writes();
  Context *handle_block_writes(int *result);

  Context *send_acquire_exclusive_lock(int *result);
  Context *handle_acquire_exclusive_lock(int *result);

  void send_get_mirror_mode();
  Context *handle_get_mirror_mode(int *result);

  void send_get_mirror_image();
  Context *handle_get_mirror_image(int *result);

  void send_disable_mirror_image();
  Context *handle_disable_mirror_image(int *result);

  void send_close_journal();
  Context *handle_close_journal(int *result);

  void send_remove_journal();
  Context *handle_remove_journal(int *result);

  void send_append_op_event();
  Context *handle_append_op_event(int *result);

  void send_remove_object_map();
  Context *handle_remove_object_map(int *result);

  void send_set_features();
  Context *handle_set_features(int *result);

  void send_update_flags();
  Context *handle_update_flags(int *result);

  void send_notify_update();
  Context *handle_notify_update(int *result);

  void send_release_exclusive_lock();
  Context *handle_release_exclusive_lock(int *result);

  // common exit path: unblock requests/writes and complete the request
  Context *handle_finish(int r);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::DisableFeaturesRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_DISABLE_FEATURES_REQUEST_H
| 4,650 | 26.040698 | 82 | h |
null | ceph-main/src/librbd/operation/EnableFeaturesRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/EnableFeaturesRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/image/SetFlagsRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/journal/CreateRequest.h"
#include "librbd/journal/TypeTraits.h"
#include "librbd/mirror/EnableRequest.h"
#include "librbd/object_map/CreateRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::EnableFeaturesRequest: "
namespace librbd {
namespace operation {
using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;
// Construct a request to enable the given feature bits on the image.
template <typename I>
EnableFeaturesRequest<I>::EnableFeaturesRequest(I &image_ctx,
                                                Context *on_finish,
                                                uint64_t journal_op_tid,
                                                uint64_t features)
  : Request<I>(image_ctx, on_finish, journal_op_tid), m_features(features) {
}
// State machine entry point; caller must hold owner_lock.
template <typename I>
void EnableFeaturesRequest<I>::send_op() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features
                 << dendl;
  send_prepare_lock();
}
// The state machine drives itself to completion through context callbacks,
// so any notification arriving here signals the end of the request: log a
// failure (if any) and report completion unconditionally.
template <typename I>
bool EnableFeaturesRequest<I>::should_complete(int r) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << " r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
// Ask the image state machine to quiesce/prepare the managed lock before
// manipulating features.
template <typename I>
void EnableFeaturesRequest<I>::send_prepare_lock() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  image_ctx.state->prepare_lock(create_async_context_callback(
    image_ctx, create_context_callback<
      EnableFeaturesRequest<I>,
      &EnableFeaturesRequest<I>::handle_prepare_lock>(this)));
}
// Lock preparation finished: on success continue by quiescing writes,
// otherwise finish the request with the prepare error.
template <typename I>
Context *EnableFeaturesRequest<I>::handle_prepare_lock(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    send_block_writes();
    return nullptr;
  }
  lderr(cct) << "failed to lock image: " << cpp_strerror(*result) << dendl;
  return this->create_context_finisher(*result);
}
// Block new writes through the image dispatcher while features change.
template <typename I>
void EnableFeaturesRequest<I>::send_block_writes() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  std::unique_lock locker{image_ctx.owner_lock};
  image_ctx.io_image_dispatcher->block_writes(create_context_callback<
    EnableFeaturesRequest<I>,
    &EnableFeaturesRequest<I>::handle_block_writes>(this));
}
// Writes are now blocked; remember that for handle_finish() and continue by
// querying the pool mirror mode.
template <typename I>
Context *EnableFeaturesRequest<I>::handle_block_writes(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    m_writes_blocked = true;
    send_get_mirror_mode();
    return nullptr;
  }
  lderr(cct) << "failed to block writes: " << cpp_strerror(*result) << dendl;
  return handle_finish(*result);
}
// Fetch the pool mirror mode when enabling journaling; otherwise short-cut
// into the handler with -ENOENT (treated there as mirroring disabled).
template <typename I>
void EnableFeaturesRequest<I>::send_get_mirror_mode() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  if ((m_features & RBD_FEATURE_JOURNALING) == 0) {
    Context *ctx = create_context_callback<
      EnableFeaturesRequest<I>,
      &EnableFeaturesRequest<I>::handle_get_mirror_mode>(this);
    ctx->complete(-ENOENT);
    return;
  }
  ldout(cct, 20) << this << " " << __func__ << dendl;
  librados::ObjectReadOperation op;
  cls_client::mirror_mode_get_start(&op);
  using klass = EnableFeaturesRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_get_mirror_mode>(this);
  m_out_bl.clear();
  int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Decode the pool mirror mode, then (under owner_lock) block peer requests,
// compute the new feature set / update mask / flags and validate feature
// interdependencies; may branch to journal creation.
template <typename I>
Context *EnableFeaturesRequest<I>::handle_get_mirror_mode(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  cls::rbd::MirrorMode mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
  if (*result == 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::mirror_mode_get_finish(&it, &mirror_mode);
  } else if (*result == -ENOENT) {
    // no mirroring object (or journaling not requested) -- not an error
    *result = 0;
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve pool mirror mode: "
               << cpp_strerror(*result) << dendl;
    return handle_finish(*result);
  }
  m_enable_mirroring = (mirror_mode == cls::rbd::MIRROR_MODE_POOL);
  bool create_journal = false;
  // do/while(false) gives the validation code a structured early-exit
  do {
    std::unique_lock locker{image_ctx.owner_lock};
    // avoid accepting new requests from peers while we manipulate
    // the image features
    if (image_ctx.exclusive_lock != nullptr &&
        (image_ctx.journal == nullptr ||
         !image_ctx.journal->is_journal_replaying())) {
      image_ctx.exclusive_lock->block_requests(0);
      m_requests_blocked = true;
    }
    // only enable features that are not already enabled
    m_features &= ~image_ctx.features;
    // interlock object-map and fast-diff together
    if (((m_features & RBD_FEATURE_OBJECT_MAP) != 0) ||
        ((m_features & RBD_FEATURE_FAST_DIFF) != 0)) {
      m_features |= (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF);
    }
    m_new_features = image_ctx.features | m_features;
    m_features_mask = m_features;
    if ((m_features & RBD_FEATURE_OBJECT_MAP) != 0) {
      if ((m_new_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
        lderr(cct) << "cannot enable object-map. exclusive-lock must be "
                      "enabled before enabling object-map." << dendl;
        *result = -EINVAL;
        break;
      }
      // new object map must be rebuilt before it can be trusted
      m_enable_flags |= RBD_FLAG_OBJECT_MAP_INVALID;
      m_features_mask |= (RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_FAST_DIFF);
    }
    if ((m_features & RBD_FEATURE_FAST_DIFF) != 0) {
      m_enable_flags |= RBD_FLAG_FAST_DIFF_INVALID;
      m_features_mask |= (RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_OBJECT_MAP);
    }
    if ((m_features & RBD_FEATURE_JOURNALING) != 0) {
      if ((m_new_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
        lderr(cct) << "cannot enable journaling. exclusive-lock must be "
                      "enabled before enabling journaling." << dendl;
        *result = -EINVAL;
        break;
      }
      m_features_mask |= RBD_FEATURE_EXCLUSIVE_LOCK;
      create_journal = true;
    }
  } while (false);
  if (*result < 0) {
    return handle_finish(*result);
  }
  if (create_journal) {
    send_create_journal();
    return nullptr;
  }
  send_append_op_event();
  return nullptr;
}
// Create the image journal (required before the journaling feature bit may
// be set), using the image's configured journal parameters.
template <typename I>
void EnableFeaturesRequest<I>::send_create_journal() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  journal::TagData tag_data(librbd::Journal<>::LOCAL_MIRROR_UUID);
  Context *ctx = create_context_callback<
    EnableFeaturesRequest<I>,
    &EnableFeaturesRequest<I>::handle_create_journal>(this);
  typename journal::TypeTraits<I>::ContextWQ* context_wq;
  Journal<I>::get_work_queue(cct, &context_wq);
  journal::CreateRequest<I> *req = journal::CreateRequest<I>::create(
    image_ctx.md_ctx, image_ctx.id,
    image_ctx.config.template get_val<uint64_t>("rbd_journal_order"),
    image_ctx.config.template get_val<uint64_t>("rbd_journal_splay_width"),
    image_ctx.config.template get_val<std::string>("rbd_journal_pool"),
    cls::journal::Tag::TAG_CLASS_NEW, tag_data,
    librbd::Journal<>::IMAGE_CLIENT_ID, context_wq, ctx);
  req->send();
}
// Journal creation completed; abort on failure, otherwise record the
// update-features op event.
template <typename I>
Context *EnableFeaturesRequest<I>::handle_create_journal(int *result) {
  auto cct = this->m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result >= 0) {
    send_append_op_event();
    return nullptr;
  }
  lderr(cct) << "failed to create journal: " << cpp_strerror(*result)
             << dendl;
  return handle_finish(*result);
}
template <typename I>
void EnableFeaturesRequest<I>::send_append_op_event() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  // append_op_event() returns false when journaling is not active; in that
  // case skip directly to the flag-update step.
  if (!this->template append_op_event<
        EnableFeaturesRequest<I>,
        &EnableFeaturesRequest<I>::handle_append_op_event>(this)) {
    send_update_flags();
  }
  // NOTE(review): this log statement runs after the next step may already
  // have been dispatched; placement kept as-is to preserve behavior.
  ldout(cct, 20) << this << " " << __func__ << dendl;
}
template <typename I>
Context *EnableFeaturesRequest<I>::handle_append_op_event(int *result) {
  // Completion of the journal op-event append.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  const int r = *result;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
  if (r >= 0) {
    send_update_flags();
    return nullptr;
  }
  lderr(cct) << "failed to commit journal entry: " << cpp_strerror(r)
             << dendl;
  return handle_finish(r);
}
template <typename I>
void EnableFeaturesRequest<I>::send_update_flags() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  // No invalid-state flags to raise (m_enable_flags was computed earlier
  // from the requested features) -- go straight to setting feature bits.
  if (m_enable_flags == 0) {
    send_set_features();
    return;
  }
  ldout(cct, 20) << this << " " << __func__ << ": enable_flags="
                 << m_enable_flags << dendl;
  Context *ctx = create_context_callback<
    EnableFeaturesRequest<I>,
    &EnableFeaturesRequest<I>::handle_update_flags>(this);
  image::SetFlagsRequest<I> *req =
    image::SetFlagsRequest<I>::create(&image_ctx, m_enable_flags,
                                      m_enable_flags, ctx);
  req->send();
}
template <typename I>
Context *EnableFeaturesRequest<I>::handle_update_flags(int *result) {
  // Completion of the image-flags update.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  const int r = *result;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
  if (r >= 0) {
    send_set_features();
    return nullptr;
  }
  lderr(cct) << "failed to update image flags: " << cpp_strerror(r)
             << dendl;
  return handle_finish(r);
}
template <typename I>
void EnableFeaturesRequest<I>::send_set_features() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": new_features="
                 << m_new_features << ", features_mask=" << m_features_mask
                 << dendl;
  // Persist the new feature bits in the image header object; only the bits
  // covered by m_features_mask are modified by the cls call.
  librados::ObjectWriteOperation op;
  librbd::cls_client::set_features(&op, m_new_features, m_features_mask);
  using klass = EnableFeaturesRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_set_features>(this);
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
template <typename I>
Context *EnableFeaturesRequest<I>::handle_set_features(int *result) {
  // Completion of the header feature-bit update.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  const int r = *result;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
  if (r >= 0) {
    send_create_object_map();
    return nullptr;
  }
  lderr(cct) << "failed to update features: " << cpp_strerror(r)
             << dendl;
  return handle_finish(r);
}
template <typename I>
void EnableFeaturesRequest<I>::send_create_object_map() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  // Skip if the object map already exists on the image or was not part of
  // the requested feature set.
  if (((image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0) ||
      ((m_features & RBD_FEATURE_OBJECT_MAP) == 0)) {
    send_enable_mirror_image();
    return;
  }
  ldout(cct, 20) << this << " " << __func__ << dendl;
  Context *ctx = create_context_callback<
    EnableFeaturesRequest<I>,
    &EnableFeaturesRequest<I>::handle_create_object_map>(this);
  object_map::CreateRequest<I> *req =
    object_map::CreateRequest<I>::create(&image_ctx, ctx);
  req->send();
}
template <typename I>
Context *EnableFeaturesRequest<I>::handle_create_object_map(int *result) {
  // Completion of the object-map creation step.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  const int r = *result;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
  if (r >= 0) {
    send_enable_mirror_image();
    return nullptr;
  }
  lderr(cct) << "failed to create object map: " << cpp_strerror(r)
             << dendl;
  return handle_finish(r);
}
template <typename I>
void EnableFeaturesRequest<I>::send_enable_mirror_image() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  // Skip unless an earlier step in the state machine flagged that
  // journal-based mirroring should be (re-)enabled on this image.
  if (!m_enable_mirroring) {
    send_notify_update();
    return;
  }
  ldout(cct, 20) << this << " " << __func__ << dendl;
  Context *ctx = create_context_callback<
    EnableFeaturesRequest<I>,
    &EnableFeaturesRequest<I>::handle_enable_mirror_image>(this);
  auto req = mirror::EnableRequest<I>::create(
    &image_ctx, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "", false, ctx);
  req->send();
}
template <typename I>
Context *EnableFeaturesRequest<I>::handle_enable_mirror_image(int *result) {
  // Completion of the mirroring-enable step. A failure here is logged but
  // deliberately does not fail the feature-enable operation.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  const int r = *result;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
  if (r < 0) {
    // not fatal
    lderr(cct) << "failed to enable mirroring: " << cpp_strerror(r)
               << dendl;
  }
  send_notify_update();
  return nullptr;
}
template <typename I>
void EnableFeaturesRequest<I>::send_notify_update() {
  // Broadcast a header-update notification so other clients watching the
  // image refresh their cached state.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  auto ctx = create_context_callback<
    EnableFeaturesRequest<I>,
    &EnableFeaturesRequest<I>::handle_notify_update>(this);
  ictx.notify_update(ctx);
}
template <typename I>
Context *EnableFeaturesRequest<I>::handle_notify_update(int *result) {
  // Final async step: fold the notify result into common cleanup.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
  return handle_finish(*result);
}
template <typename I>
Context *EnableFeaturesRequest<I>::handle_finish(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
  {
    // Undo only the blocking this request actually performed during setup;
    // the scope limits how long owner_lock is held.
    std::unique_lock locker{image_ctx.owner_lock};
    if (image_ctx.exclusive_lock != nullptr && m_requests_blocked) {
      image_ctx.exclusive_lock->unblock_requests();
    }
    if (m_writes_blocked) {
      image_ctx.io_image_dispatcher->unblock_writes();
    }
  }
  // Release the "prepare lock" state taken at the start of the request.
  image_ctx.state->handle_prepare_lock_complete();
  return this->create_context_finisher(r);
}
} // namespace operation
} // namespace librbd
template class librbd::operation::EnableFeaturesRequest<librbd::ImageCtx>;
| 14,977 | 29.258586 | 79 | cc |
null | ceph-main/src/librbd/operation/EnableFeaturesRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_ENABLE_FEATURES_REQUEST_H
#define CEPH_LIBRBD_OPERATION_ENABLE_FEATURES_REQUEST_H
#include "librbd/operation/Request.h"
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Async operation that enables one or more RBD features (exclusive-lock,
// object-map, fast-diff, journaling) on an open image, creating any
// supporting structures (journal, object map) and optionally re-enabling
// mirroring.
template <typename ImageCtxT = ImageCtx>
class EnableFeaturesRequest : public Request<ImageCtxT> {
public:
  // Factory helper; ownership follows the usual Request lifecycle.
  static EnableFeaturesRequest *create(ImageCtxT &image_ctx, Context *on_finish,
                                       uint64_t journal_op_tid,
                                       uint64_t features) {
    return new EnableFeaturesRequest(image_ctx, on_finish, journal_op_tid,
                                     features);
  }
  EnableFeaturesRequest(ImageCtxT &image_ctx, Context *on_finish,
                        uint64_t journal_op_tid, uint64_t features);
protected:
  void send_op() override;
  bool should_complete(int r) override;
  // Enabling features can change how in-flight I/O is dispatched.
  bool can_affect_io() const override {
    return true;
  }
  // Journal event recorded for replay of this operation.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::UpdateFeaturesEvent(op_tid, m_features, true);
  }
private:
  /**
   * EnableFeatures goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_PREPARE_LOCK
   *    |
   *    v
   * STATE_BLOCK_WRITES
   *    |
   *    v
   * STATE_GET_MIRROR_MODE
   *    |
   *    v
   * STATE_CREATE_JOURNAL (skip if not
   *    |                  required)
   *    v
   * STATE_APPEND_OP_EVENT (skip if journaling
   *    |                   disabled)
   *    v
   * STATE_UPDATE_FLAGS
   *    |
   *    v
   * STATE_SET_FEATURES
   *    |
   *    v
   * STATE_CREATE_OBJECT_MAP (skip if not
   *    |                     required)
   *    v
   * STATE_ENABLE_MIRROR_IMAGE
   *    |
   *    V
   * STATE_NOTIFY_UPDATE
   *    |
   *    | (unblock writes)
   *    v
   * <finish>
   * @endverbatim
   *
   */
  uint64_t m_features;              // feature bits requested by the caller
  bool m_enable_mirroring = false;  // re-enable mirroring after journaling?
  bool m_requests_blocked = false;  // did we block exclusive-lock requests?
  bool m_writes_blocked = false;    // did we block image writes?
  uint64_t m_new_features = 0;      // resulting feature set to persist
  uint64_t m_enable_flags = 0;      // RBD_FLAG_* bits to raise alongside
  uint64_t m_features_mask = 0;     // which header feature bits to touch
  bufferlist m_out_bl;              // scratch buffer for read replies
  void send_prepare_lock();
  Context *handle_prepare_lock(int *result);
  void send_block_writes();
  Context *handle_block_writes(int *result);
  void send_get_mirror_mode();
  Context *handle_get_mirror_mode(int *result);
  void send_create_journal();
  Context *handle_create_journal(int *result);
  void send_append_op_event();
  Context *handle_append_op_event(int *result);
  void send_update_flags();
  Context *handle_update_flags(int *result);
  void send_set_features();
  Context *handle_set_features(int *result);
  void send_create_object_map();
  Context *handle_create_object_map(int *result);
  void send_enable_mirror_image();
  Context *handle_enable_mirror_image(int *result);
  void send_notify_update();
  Context *handle_notify_update(int *result);
  Context *handle_finish(int r);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::EnableFeaturesRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_ENABLE_FEATURES_REQUEST_H
| 3,231 | 22.764706 | 81 | h |
null | ceph-main/src/librbd/operation/FlattenRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/FlattenRequest.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "librbd/image/DetachChildRequest.h"
#include "librbd/image/DetachParentRequest.h"
#include "librbd/Types.h"
#include "librbd/io/ObjectRequest.h"
#include "librbd/io/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "osdc/Striper.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::operation::FlattenRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace operation {
using util::create_context_callback;
using util::create_rados_callback;
// Per-object unit of work for the flatten throttle: forces a copyup of a
// single object from the parent image so the child no longer depends on it.
template <typename I>
class C_FlattenObject : public C_AsyncObjectThrottle<I> {
public:
  C_FlattenObject(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                  IOContext io_context, uint64_t object_no)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_io_context(io_context),
      m_object_no(object_no) {
  }
  // Returns 0 when an async copyup was issued, 1 to skip this object, or a
  // negative errno to abort the whole throttle.
  int send() override {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    CephContext *cct = image_ctx.cct;
    // Flatten requires continuous lock ownership; bail out for a retry if
    // the exclusive lock was lost mid-operation.
    if (image_ctx.exclusive_lock != nullptr &&
        !image_ctx.exclusive_lock->is_lock_owner()) {
      ldout(cct, 1) << "lost exclusive lock during flatten" << dendl;
      return -ERESTART;
    }
    {
      std::shared_lock image_lock{image_ctx.image_lock};
      if (image_ctx.object_map != nullptr &&
          !image_ctx.object_map->object_may_not_exist(m_object_no)) {
        // can skip because the object already exists
        return 1;
      }
    }
    if (!io::util::trigger_copyup(
          &image_ctx, m_object_no, m_io_context, this)) {
      // stop early if the parent went away - it just means
      // another flatten finished first or the image was resized
      return 1;
    }
    return 0;
  }
private:
  IOContext m_io_context;  // data-pool IO context used for the copyup
  uint64_t m_object_no;    // object index within the image
};
template <typename I>
bool FlattenRequest<I>::should_complete(int r) {
  // The request always finishes once the state machine reports in;
  // failures are only logged here.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
template <typename I>
void FlattenRequest<I>::send_op() {
  // Entry point for the flatten state machine (see FlattenRequest.h).
  flatten_objects();
}
template <typename I>
void FlattenRequest<I>::flatten_objects() {
  // Fan out C_FlattenObject work items (one per object in the parent
  // overlap) through an AsyncObjectThrottle, bounded by the
  // rbd_concurrent_management_ops config value.
  I &image_ctx = this->m_image_ctx;
  // Fix: a second, plain assert() duplicating this check was removed --
  // ceph_assert is the project-wide assertion macro and, unlike assert(),
  // is not compiled out under NDEBUG.
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;
  auto ctx = create_context_callback<
    FlattenRequest<I>,
    &FlattenRequest<I>::handle_flatten_objects>(this);
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_FlattenObject<I> >(),
      boost::lambda::_1, &image_ctx, image_ctx.get_data_io_context(),
      boost::lambda::_2));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    this, image_ctx, context_factory, ctx, &m_prog_ctx, m_start_object_no,
    m_start_object_no + m_overlap_objects);
  throttle->start_ops(
    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
void FlattenRequest<I>::handle_flatten_objects(int r) {
  // Completion of the per-object flatten throttle.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0) {
    if (r == -ERESTART) {
      ldout(cct, 5) << "flatten operation interrupted" << dendl;
    } else {
      lderr(cct) << "flatten encountered an error: " << cpp_strerror(r) << dendl;
    }
    this->complete(r);
    return;
  }
  crypto_flatten();
}
template <typename I>
void FlattenRequest<I>::crypto_flatten() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  // Images without an encryption format have no crypto metadata to
  // flatten; move straight to detaching from the parent's child list.
  auto encryption_format = image_ctx.encryption_format.get();
  if (encryption_format == nullptr) {
    detach_child();
    return;
  }
  ldout(cct, 5) << dendl;
  auto ctx = create_context_callback<
    FlattenRequest<I>,
    &FlattenRequest<I>::handle_crypto_flatten>(this);
  encryption_format->flatten(&image_ctx, ctx);
}
template <typename I>
void FlattenRequest<I>::handle_crypto_flatten(int r) {
  // Completion of the encryption-format flatten step.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r >= 0) {
    detach_child();
    return;
  }
  lderr(cct) << "error flattening crypto: " << cpp_strerror(r) << dendl;
  this->complete(r);
}
template <typename I>
void FlattenRequest<I>::detach_child() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  // should have been canceled prior to releasing lock
  // NOTE: locks are taken/released manually here; every exit path below
  // must release both image_lock and owner_lock in order.
  image_ctx.owner_lock.lock_shared();
  ceph_assert(image_ctx.exclusive_lock == nullptr ||
              image_ctx.exclusive_lock->is_lock_owner());
  // if there are no snaps, remove from the children object as well
  // (if snapshots remain, they have their own parent info, and the child
  // will be removed when the last snap goes away)
  image_ctx.image_lock.lock_shared();
  if ((image_ctx.features & RBD_FEATURE_DEEP_FLATTEN) == 0 &&
      !image_ctx.snaps.empty()) {
    image_ctx.image_lock.unlock_shared();
    image_ctx.owner_lock.unlock_shared();
    detach_parent();
    return;
  }
  image_ctx.image_lock.unlock_shared();
  ldout(cct, 5) << dendl;
  auto ctx = create_context_callback<
    FlattenRequest<I>,
    &FlattenRequest<I>::handle_detach_child>(this);
  auto req = image::DetachChildRequest<I>::create(image_ctx, ctx);
  // the detach request is issued under owner_lock; the lock is released
  // immediately after dispatch
  req->send();
  image_ctx.owner_lock.unlock_shared();
}
template <typename I>
void FlattenRequest<I>::handle_detach_child(int r) {
  // Completion of the child-detach step; a missing child record (-ENOENT)
  // is treated as success.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  const bool fatal = (r < 0 && r != -ENOENT);
  if (fatal) {
    lderr(cct) << "detach encountered an error: " << cpp_strerror(r) << dendl;
    this->complete(r);
    return;
  }
  detach_parent();
}
template <typename I>
void FlattenRequest<I>::detach_parent() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;
  // should have been canceled prior to releasing lock
  // NOTE: locks are taken/released manually; each exit path releases both
  // image_lock and owner_lock in order.
  image_ctx.owner_lock.lock_shared();
  ceph_assert(image_ctx.exclusive_lock == nullptr ||
              image_ctx.exclusive_lock->is_lock_owner());
  // stop early if the parent went away - it just means
  // another flatten finished first, so this one is useless.
  image_ctx.image_lock.lock_shared();
  if (!image_ctx.parent) {
    ldout(cct, 5) << "image already flattened" << dendl;
    image_ctx.image_lock.unlock_shared();
    image_ctx.owner_lock.unlock_shared();
    this->complete(0);
    return;
  }
  image_ctx.image_lock.unlock_shared();
  // remove parent from this (base) image
  auto ctx = create_context_callback<
    FlattenRequest<I>,
    &FlattenRequest<I>::handle_detach_parent>(this);
  auto req = image::DetachParentRequest<I>::create(image_ctx, ctx);
  req->send();
  image_ctx.owner_lock.unlock_shared();
}
template <typename I>
void FlattenRequest<I>::handle_detach_parent(int r) {
  // Final step of the flatten state machine.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "remove parent encountered an error: " << cpp_strerror(r)
               << dendl;
  }
  this->complete(r);
}
} // namespace operation
} // namespace librbd
template class librbd::operation::FlattenRequest<librbd::ImageCtx>;
| 7,802 | 28.334586 | 82 | cc |
null | ceph-main/src/librbd/operation/FlattenRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_FLATTEN_REQUEST_H
#define CEPH_LIBRBD_OPERATION_FLATTEN_REQUEST_H
#include "librbd/operation/Request.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Async operation that flattens a cloned image: copies up every object in
// the parent overlap, flattens crypto metadata, then detaches the image
// from its parent.
template <typename ImageCtxT = ImageCtx>
class FlattenRequest : public Request<ImageCtxT>
{
public:
  // start_object_no/overlap_objects select the range of objects to copy
  // up; progress is reported through prog_ctx.
  FlattenRequest(ImageCtxT &image_ctx, Context *on_finish,
                 uint64_t start_object_no, uint64_t overlap_objects,
                 ProgressContext& prog_ctx)
    : Request<ImageCtxT>(image_ctx, on_finish),
      m_start_object_no(start_object_no),
      m_overlap_objects(overlap_objects),
      m_prog_ctx(prog_ctx) {}
protected:
  void send_op() override;
  bool should_complete(int r) override;
  // Journal event recorded for replay of this operation.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::FlattenEvent(op_tid);
  }
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * FLATTEN_OBJECTS
   *    |
   *    v
   * CRYPTO_FLATTEN
   *    |
   *    v
   * DETACH_CHILD
   *    |
   *    v
   * DETACH_PARENT
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  uint64_t m_start_object_no;   // first object to copy up
  uint64_t m_overlap_objects;   // number of objects in the parent overlap
  ProgressContext &m_prog_ctx;  // caller-supplied progress reporting
  void flatten_objects();
  void handle_flatten_objects(int r);
  void crypto_flatten();
  void handle_crypto_flatten(int r);
  void detach_child();
  void handle_detach_child(int r);
  void detach_parent();
  void handle_detach_parent(int r);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::FlattenRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_FLATTEN_REQUEST_H
| 1,739 | 19.714286 | 74 | h |
null | ceph-main/src/librbd/operation/MetadataRemoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/MetadataRemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::MetadataRemoveRequest: "
namespace librbd {
namespace operation {
// Construct a request to remove the image-metadata entry named `key`.
template <typename I>
MetadataRemoveRequest<I>::MetadataRemoveRequest(I &image_ctx,
                                                Context *on_finish,
                                                const std::string &key)
  : Request<I>(image_ctx, on_finish), m_key(key) {
}
template <typename I>
void MetadataRemoveRequest<I>::send_op() {
  // Entry point: issue the single cls metadata_remove RADOS op.
  send_metadata_remove();
}
template <typename I>
bool MetadataRemoveRequest<I>::should_complete(int r) {
  // Single-step request: always complete; failures are only logged.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 20) << this << " " << __func__ << " r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
template <typename I>
void MetadataRemoveRequest<I>::send_metadata_remove() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  // Remove the key from the image header object via the rbd cls method.
  librados::ObjectWriteOperation op;
  cls_client::metadata_remove(&op, m_key);
  librados::AioCompletion *comp = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
} // namespace operation
} // namespace librbd
template class librbd::operation::MetadataRemoveRequest<librbd::ImageCtx>;
| 1,748 | 27.672131 | 74 | cc |
null | ceph-main/src/librbd/operation/MetadataRemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offremove:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_METADATA_REMOVE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_METADATA_REMOVE_REQUEST_H
#include "librbd/operation/Request.h"
#include <iosfwd>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Async operation that removes a single key/value metadata entry from an
// image's header object.
template <typename ImageCtxT = ImageCtx>
class MetadataRemoveRequest : public Request<ImageCtxT> {
public:
  MetadataRemoveRequest(ImageCtxT &image_ctx, Context *on_finish,
                        const std::string &key);
protected:
  void send_op() override;
  bool should_complete(int r) override;
  // Journal event recorded for replay of this operation.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::MetadataRemoveEvent(op_tid, m_key);
  }
private:
  std::string m_key;  // metadata key to remove
  void send_metadata_remove();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::MetadataRemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_METADATA_REMOVE_REQUEST_H
| 1,048 | 22.311111 | 81 | h |
null | ceph-main/src/librbd/operation/MetadataSetRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/MetadataSetRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::MetadataSetRequest: "
namespace librbd {
namespace operation {
// Construct a request to set the image-metadata entry `key` to `value`.
template <typename I>
MetadataSetRequest<I>::MetadataSetRequest(I &image_ctx,
                                          Context *on_finish,
                                          const std::string &key,
                                          const std::string &value)
  : Request<I>(image_ctx, on_finish), m_key(key), m_value(value) {
}
template <typename I>
void MetadataSetRequest<I>::send_op() {
  // Entry point: issue the single cls metadata_set RADOS op.
  send_metadata_set();
}
template <typename I>
bool MetadataSetRequest<I>::should_complete(int r) {
  // Single-step request: always complete; failures are only logged.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 20) << this << " " << __func__ << " r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
template <typename I>
void MetadataSetRequest<I>::send_metadata_set() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;
  // Stage the key/value pair for the cls call.  NOTE(review): append()
  // would accumulate the value on a second invocation; assumes send_op is
  // dispatched exactly once per request.
  m_data[m_key].append(m_value);
  librados::ObjectWriteOperation op;
  cls_client::metadata_set(&op, m_data);
  librados::AioCompletion *comp = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
} // namespace operation
} // namespace librbd
template class librbd::operation::MetadataSetRequest<librbd::ImageCtx>;
| 1,821 | 27.920635 | 72 | cc |
null | ceph-main/src/librbd/operation/MetadataSetRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_METADATA_SET_REQUEST_H
#define CEPH_LIBRBD_OPERATION_METADATA_SET_REQUEST_H
#include "librbd/operation/Request.h"
#include "include/buffer.h"
#include <string>
#include <map>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Async operation that stores a single key/value metadata entry in an
// image's header object.
template <typename ImageCtxT = ImageCtx>
class MetadataSetRequest : public Request<ImageCtxT> {
public:
  MetadataSetRequest(ImageCtxT &image_ctx, Context *on_finish,
                     const std::string &key, const std::string &value);
protected:
  void send_op() override;
  bool should_complete(int r) override;
  // Journal event recorded for replay of this operation.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::MetadataSetEvent(op_tid, m_key, m_value);
  }
private:
  std::string m_key;                         // metadata key to set
  std::string m_value;                       // value to store
  std::map<std::string, bufferlist> m_data;  // staged payload for the cls call
  void send_metadata_set();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::MetadataSetRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_METADATA_SET_REQUEST_H
| 1,145 | 22.875 | 78 | h |
null | ceph-main/src/librbd/operation/MigrateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/MigrateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/deep_copy/ObjectCopyRequest.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectRequest.h"
#include "osdc/Striper.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::MigrateRequest: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace operation {
using util::create_context_callback;
using util::create_async_context_callback;
namespace {
// Per-object unit of work for the migration throttle: either triggers a
// copyup (via an empty write) for objects inside the migration overlap, or
// deep-copies the object from the migration source otherwise.
template <typename I>
class C_MigrateObject : public C_AsyncObjectThrottle<I> {
public:
  C_MigrateObject(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                  IOContext io_context, uint64_t object_no)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_io_context(io_context),
      m_object_no(object_no) {
  }
  // Returns 0 when async work was started, or a negative errno to abort
  // the throttle.
  int send() override {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock))
;
    CephContext *cct = image_ctx.cct;
    // Migration requires continuous lock ownership; restart if it is lost.
    if (image_ctx.exclusive_lock != nullptr &&
        !image_ctx.exclusive_lock->is_lock_owner()) {
      ldout(cct, 1) << "lost exclusive lock during migrate" << dendl;
      return -ERESTART;
    }
    start_async_op();
    return 0;
  }
private:
  IOContext m_io_context;                    // data-pool IO context
  uint64_t m_object_no;                      // object index within the image
  io::AsyncOperation *m_async_op = nullptr;  // tracks the in-flight op
  // Registers an AsyncOperation; if writes are currently blocked, tears it
  // back down and waits for writes to be unblocked before retrying.
  void start_async_op() {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    CephContext *cct = image_ctx.cct;
    ldout(cct, 10) << dendl;
    ceph_assert(m_async_op == nullptr);
    m_async_op = new io::AsyncOperation();
    m_async_op->start_op(image_ctx);
    if (!image_ctx.io_image_dispatcher->writes_blocked()) {
      migrate_object();
      return;
    }
    auto ctx = create_async_context_callback(
      image_ctx, create_context_callback<
        C_MigrateObject<I>, &C_MigrateObject<I>::handle_start_async_op>(this));
    // release the op so blocked writes can drain, then retry once unblocked
    m_async_op->finish_op();
    delete m_async_op;
    m_async_op = nullptr;
    image_ctx.io_image_dispatcher->wait_on_writes_unblocked(ctx);
  }
  void handle_start_async_op(int r) {
    I &image_ctx = this->m_image_ctx;
    CephContext *cct = image_ctx.cct;
    ldout(cct, 10) << "r=" << r << dendl;
    if (r < 0) {
      lderr(cct) << "failed to start async op: " << cpp_strerror(r) << dendl;
      this->complete(r);
      return;
    }
    std::shared_lock owner_locker{image_ctx.owner_lock};
    start_async_op();
  }
  // True if this object lies inside the (possibly shrunken) migration
  // overlap, i.e. a copyup from the source is sufficient.
  bool is_within_overlap_bounds() {
    I &image_ctx = this->m_image_ctx;
    std::shared_lock image_locker{image_ctx.image_lock};
    auto overlap = std::min(image_ctx.size, image_ctx.migration_info.overlap);
    return overlap > 0 &&
      Striper::get_num_objects(image_ctx.layout, overlap) > m_object_no;
  }
  void migrate_object() {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    CephContext *cct = image_ctx.cct;
    auto ctx = create_context_callback<
      C_MigrateObject<I>, &C_MigrateObject<I>::handle_migrate_object>(this);
    if (is_within_overlap_bounds()) {
      // a zero-length write forces a copyup of the object from the source
      bufferlist bl;
      auto req = new io::ObjectWriteRequest<I>(&image_ctx, m_object_no, 0,
                                               std::move(bl), m_io_context, 0,
                                               0, std::nullopt, {}, ctx);
      ldout(cct, 20) << "copyup object req " << req << ", object_no "
                     << m_object_no << dendl;
      req->send();
    } else {
      // outside the overlap: deep-copy object data (including snapshots)
      // from the migration parent
      ceph_assert(image_ctx.parent != nullptr);
      uint32_t flags = deep_copy::OBJECT_COPY_REQUEST_FLAG_MIGRATION;
      if (image_ctx.migration_info.flatten) {
        flags |= deep_copy::OBJECT_COPY_REQUEST_FLAG_FLATTEN;
      }
      auto req = deep_copy::ObjectCopyRequest<I>::create(
        image_ctx.parent, &image_ctx, 0, 0, image_ctx.migration_info.snap_map,
        m_object_no, flags, nullptr, ctx);
      ldout(cct, 20) << "deep copy object req " << req << ", object_no "
                     << m_object_no << dendl;
      req->send();
    }
  }
  void handle_migrate_object(int r) {
    CephContext *cct = this->m_image_ctx.cct;
    ldout(cct, 10) << "r=" << r << dendl;
    // a missing source object simply means there is nothing to copy
    if (r == -ENOENT) {
      r = 0;
    }
    m_async_op->finish_op();
    delete m_async_op;
    this->complete(r);
  }
};
} // anonymous namespace
template <typename I>
void MigrateRequest<I>::send_op() {
  // Entry point: kick off the per-object migration throttle.
  auto &ictx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(ictx.owner_lock));
  auto *cct = ictx.cct;
  ldout(cct, 10) << dendl;
  migrate_objects();
}
template <typename I>
bool MigrateRequest<I>::should_complete(int r) {
  // The request finishes once the throttle reports in; log failures only.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 10) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
template <typename I>
void MigrateRequest<I>::migrate_objects() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  uint64_t overlap_objects = get_num_overlap_objects();
  ldout(cct, 10) << "from 0 to " << overlap_objects << dendl;
  auto ctx = create_context_callback<
    MigrateRequest<I>, &MigrateRequest<I>::handle_migrate_objects>(this);
  // Fan out C_MigrateObject work items over [0, overlap_objects), bounded
  // by the rbd_concurrent_management_ops config value.
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_MigrateObject<I> >(),
      boost::lambda::_1, &image_ctx, image_ctx.get_data_io_context(),
      boost::lambda::_2));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, overlap_objects);
  throttle->start_ops(
    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
void MigrateRequest<I>::handle_migrate_objects(int r) {
  // Completion of the per-object migration throttle.
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to migrate objects: " << cpp_strerror(r) << dendl;
  }
  this->complete(r);
}
template <typename I>
uint64_t MigrateRequest<I>::get_num_overlap_objects() {
  // Number of objects covered by the migration overlap (0 when there is
  // no overlap).
  auto &ictx = this->m_image_ctx;
  auto *cct = ictx.cct;
  ldout(cct, 10) << dendl;
  std::shared_lock image_locker{ictx.image_lock};
  const auto overlap = ictx.migration_info.overlap;
  if (overlap == 0) {
    return 0;
  }
  return Striper::get_num_objects(ictx.layout, overlap);
}
} // namespace operation
} // namespace librbd
template class librbd::operation::MigrateRequest<librbd::ImageCtx>;
| 6,993 | 28.263598 | 82 | cc |
null | ceph-main/src/librbd/operation/MigrateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_MIGRATE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_MIGRATE_REQUEST_H
#include "librbd/operation/Request.h"
#include "librbd/Types.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Async operation that copies object data from a migration source (the
// "parent") into this image, one object at a time.
template <typename ImageCtxT = ImageCtx>
class MigrateRequest : public Request<ImageCtxT>
{
public:
  MigrateRequest(ImageCtxT &image_ctx, Context *on_finish,
                 ProgressContext &prog_ctx)
    : Request<ImageCtxT>(image_ctx, on_finish), m_prog_ctx(prog_ctx) {
  }
protected:
  void send_op() override;
  bool should_complete(int r) override;
  // Migration interacts with in-flight I/O dispatch.
  bool can_affect_io() const override {
    return true;
  }
  // Migration is never journaled; reaching this is a bug (hence the abort).
  journal::Event create_event(uint64_t op_tid) const override {
    ceph_abort();
    return journal::UnknownEvent();
  }
private:
  /**
   * Migrate goes through the following state machine to copy objects
   * from the parent (migrating source) image:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * MIGRATE_OBJECTS
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   *
   */
  ProgressContext &m_prog_ctx;  // caller-supplied progress reporting
  void migrate_objects();
  void handle_migrate_objects(int r);
  uint64_t get_num_overlap_objects();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::MigrateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_MIGRATE_REQUEST_H
| 1,477 | 20.42029 | 74 | h |
null | ceph-main/src/librbd/operation/ObjectMapIterate.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/ObjectMapIterate.h"
#include "common/dout.h"
#include "common/errno.h"
#include "osdc/Striper.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
#include "librbd/ObjectMap.h"
#include "librbd/operation/ResizeRequest.h"
#include "librbd/object_map/InvalidateRequest.h"
#include "librbd/Utils.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ObjectMapIterateRequest: "
namespace librbd {
namespace operation {
namespace {
// Per-object throttled callback: lists the object's snapshots via RADOS,
// derives the expected object-map state for m_snap_id, and invokes the
// caller-supplied mismatch handler if the in-memory object map disagrees.
template <typename I>
class C_VerifyObjectCallback : public C_AsyncObjectThrottle<I> {
public:
  // @param invalidate shared flag set when a mismatch handler reports an
  //        error; the parent request later invalidates the object map
  C_VerifyObjectCallback(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                         uint64_t snap_id, uint64_t object_no,
                         ObjectIterateWork<I> handle_mismatch,
                         std::atomic_flag *invalidate)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx),
      m_snap_id(snap_id), m_object_no(object_no),
      m_oid(image_ctx->get_object_name(m_object_no)),
      m_handle_mismatch(handle_mismatch),
      m_invalidate(invalidate)
  {
    // private IoCtx so the snapshot-read context doesn't disturb the
    // image's shared data context; SNAPDIR exposes all clones
    m_io_ctx.dup(image_ctx->data_ctx);
    m_io_ctx.snap_set_read(CEPH_SNAPDIR);
  }
  void complete(int r) override {
    I &image_ctx = this->m_image_ctx;
    if (should_complete(r)) {
      ldout(image_ctx.cct, 20) << m_oid << " C_VerifyObjectCallback completed "
                               << dendl;
      m_io_ctx.close();
      this->finish(r);
      delete this;
    }
  }
  int send() override {
    send_list_snaps();
    return 0;
  }
private:
  librados::IoCtx m_io_ctx;
  const uint64_t m_snap_id;
  uint64_t m_object_no;
  std::string m_oid;
  ObjectIterateWork<I> m_handle_mismatch;
  std::atomic_flag *m_invalidate;
  librados::snap_set_t m_snap_set;       // result of list_snaps
  int m_snap_list_ret = 0;               // per-op return code of list_snaps
  // Returns true when the callback is finished (error or verify done).
  // -ENOENT is not an error: a missing object simply maps to
  // OBJECT_NONEXISTENT below.
  bool should_complete(int r) {
    I &image_ctx = this->m_image_ctx;
    CephContext *cct = image_ctx.cct;
    if (r == 0) {
      r = m_snap_list_ret;
    }
    if (r < 0 && r != -ENOENT) {
      lderr(cct) << m_oid << " C_VerifyObjectCallback::should_complete: "
                 << "encountered an error: " << cpp_strerror(r) << dendl;
      return true;
    }
    ldout(cct, 20) << m_oid << " C_VerifyObjectCallback::should_complete: "
                   << " r="
                   << r << dendl;
    return object_map_action(get_object_state());
  }
  // Issue the async list_snaps read; completion re-enters complete().
  void send_list_snaps() {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    ldout(image_ctx.cct, 5) << m_oid
                            << " C_VerifyObjectCallback::send_list_snaps"
                            << dendl;
    librados::ObjectReadOperation op;
    op.list_snaps(&m_snap_set, &m_snap_list_ret);
    librados::AioCompletion *comp = util::create_rados_callback(this);
    int r = m_io_ctx.aio_operate(m_oid, comp, &op, NULL);
    ceph_assert(r == 0);
    comp->release();
  }
  // Walk the clone list (ordered oldest -> newest) to find the clone
  // covering m_snap_id and translate it to an object-map state.
  uint8_t get_object_state() {
    I &image_ctx = this->m_image_ctx;
    std::shared_lock image_locker{image_ctx.image_lock};
    for (std::vector<librados::clone_info_t>::const_iterator r =
           m_snap_set.clones.begin(); r != m_snap_set.clones.end(); ++r) {
      librados::snap_t from_snap_id;
      librados::snap_t to_snap_id;
      if (r->cloneid == librados::SNAP_HEAD) {
        // HEAD clone: starts after the newest snapshot in the snap set
        from_snap_id = next_valid_snap_id(m_snap_set.seq + 1);
        to_snap_id = librados::SNAP_HEAD;
      } else {
        from_snap_id = next_valid_snap_id(r->snaps[0]);
        to_snap_id = r->snaps[r->snaps.size()-1];
      }
      if (to_snap_id < m_snap_id) {
        continue;
      } else if (m_snap_id < from_snap_id) {
        // clones are ordered; no later clone can cover m_snap_id
        break;
      }
      // fast-diff: object unchanged since an earlier snapshot => CLEAN
      if ((image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0 &&
          from_snap_id != m_snap_id) {
        return OBJECT_EXISTS_CLEAN;
      }
      return OBJECT_EXISTS;
    }
    return OBJECT_NONEXISTENT;
  }
  // Round snap_id up to the next snapshot the image actually knows about
  // (CEPH_NOSNAP if none).
  uint64_t next_valid_snap_id(uint64_t snap_id) {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
    std::map<librados::snap_t, SnapInfo>::iterator it =
      image_ctx.snap_info.lower_bound(snap_id);
    if (it == image_ctx.snap_info.end()) {
      return CEPH_NOSNAP;
    }
    return it->first;
  }
  // Compare the derived state with the in-memory object map and delegate
  // mismatches to m_handle_mismatch; a nonzero handler result flags the
  // map for invalidation. Always returns true (callback done).
  bool object_map_action(uint8_t new_state) {
    I &image_ctx = this->m_image_ctx;
    CephContext *cct = image_ctx.cct;
    std::shared_lock owner_locker{image_ctx.owner_lock};
    // should have been canceled prior to releasing lock
    ceph_assert(image_ctx.exclusive_lock == nullptr ||
                image_ctx.exclusive_lock->is_lock_owner());
    std::shared_lock image_locker{image_ctx.image_lock};
    ceph_assert(image_ctx.object_map != nullptr);
    uint8_t state = (*image_ctx.object_map)[m_object_no];
    ldout(cct, 10) << "C_VerifyObjectCallback::object_map_action"
                   << " object " << image_ctx.get_object_name(m_object_no)
                   << " state " << (int)state
                   << " new_state " << (int)new_state << dendl;
    if (state != new_state) {
      int r = 0;
      ceph_assert(m_handle_mismatch);
      r = m_handle_mismatch(image_ctx, m_object_no, state, new_state);
      if (r) {
        lderr(cct) << "object map error: object "
                   << image_ctx.get_object_name(m_object_no)
                   << " marked as " << (int)state << ", but should be "
                   << (int)new_state << dendl;
        m_invalidate->test_and_set();
      } else {
        ldout(cct, 1) << "object map inconsistent: object "
                   << image_ctx.get_object_name(m_object_no)
                   << " marked as " << (int)state << ", but should be "
                   << (int)new_state << dendl;
      }
    }
    return true;
  }
};
} // anonymous namespace
// Kick off the iteration; fails fast with -ENODEV when the image's data
// pool context is unusable.
template <typename I>
void ObjectMapIterateRequest<I>::send() {
  if (m_image_ctx.data_ctx.is_valid()) {
    send_verify_objects();
  } else {
    this->async_complete(-ENODEV);
  }
}
// Advance the VERIFY_OBJECTS -> INVALIDATE_OBJECT_MAP state machine.
// Returns true when the overall request is finished.
template <typename I>
bool ObjectMapIterateRequest<I>::should_complete(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " should_complete: " << " r=" << r << dendl;
  if (r == -ENODEV) {
    lderr(cct) << "missing data pool" << dendl;
    return true;
  }
  if (r < 0) {
    lderr(cct) << "object map operation encountered an error: "
               << cpp_strerror(r) << dendl;
  }
  std::shared_lock owner_lock{m_image_ctx.owner_lock};
  switch (m_state) {
  case STATE_VERIFY_OBJECTS:
    // test_and_set() returns the previous value: it is true only when a
    // verify callback already flagged a mismatch, in which case the
    // object map must be invalidated before finishing
    if (m_invalidate.test_and_set()) {
      send_invalidate_object_map();
      return false;
    } else if (r == 0) {
      return true;
    }
    break;
  case STATE_INVALIDATE_OBJECT_MAP:
    if (r == 0) {
      return true;
    }
    break;
  default:
    ceph_abort();
    break;
  }
  // any remaining error terminates the request
  if (r < 0) {
    return true;
  }
  return false;
}
// Spawn one throttled C_VerifyObjectCallback per object in the image
// (at the currently-set snapshot), bounded by
// rbd_concurrent_management_ops.
template <typename I>
void ObjectMapIterateRequest<I>::send_verify_objects() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  CephContext *cct = m_image_ctx.cct;
  uint64_t snap_id;
  uint64_t num_objects;
  {
    std::shared_lock l{m_image_ctx.image_lock};
    snap_id = m_image_ctx.snap_id;
    num_objects = Striper::get_num_objects(m_image_ctx.layout,
                                           m_image_ctx.get_image_size(snap_id));
  }
  ldout(cct, 5) << this << " send_verify_objects" << dendl;
  m_state = STATE_VERIFY_OBJECTS;
  // factory builds a callback per object number (_1 = throttle,
  // _2 = object number)
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_VerifyObjectCallback<I> >(),
                        boost::lambda::_1, &m_image_ctx, snap_id,
                        boost::lambda::_2, m_handle_mismatch, &m_invalidate));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    this, m_image_ctx, context_factory, this->create_callback_context(),
    &m_prog_ctx, 0, num_objects);
  throttle->start_ops(
    m_image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
// Image size to iterate over: for a snapshot, the snapshot's size; for
// HEAD, the size of any in-flight resize (so iteration covers the
// post-resize extent) or the current size.
template <typename I>
uint64_t ObjectMapIterateRequest<I>::get_image_size() const {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
  if (m_image_ctx.snap_id != CEPH_NOSNAP) {
    return m_image_ctx.get_image_size(m_image_ctx.snap_id);
  }
  if (m_image_ctx.resize_reqs.empty()) {
    return m_image_ctx.size;
  }
  return m_image_ctx.resize_reqs.front()->get_image_size();
}
// Mark the object map invalid (both in memory and on disk) after a
// verify mismatch; the InvalidateRequest is dispatched while holding
// image_lock exclusively, as it requires.
template <typename I>
void ObjectMapIterateRequest<I>::send_invalidate_object_map() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " send_invalidate_object_map" << dendl;
  m_state = STATE_INVALIDATE_OBJECT_MAP;
  object_map::InvalidateRequest<I>*req =
    object_map::InvalidateRequest<I>::create(m_image_ctx, m_image_ctx.snap_id,
                                             true,
                                             this->create_callback_context());
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  std::unique_lock image_locker{m_image_ctx.image_lock};
  req->send();
}
} // namespace operation
} // namespace librbd
template class librbd::operation::ObjectMapIterateRequest<librbd::ImageCtx>;
| 8,962 | 28.006472 | 84 | cc |
null | ceph-main/src/librbd/operation/ObjectMapIterate.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_OBJECT_MAP_ITERATE_H
#define CEPH_LIBRBD_OPERATION_OBJECT_MAP_ITERATE_H
#include <iostream>
#include <atomic>
#include "include/int_types.h"
#include "include/rbd/object_map_types.h"
#include "librbd/AsyncRequest.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Callback invoked for every object whose actual on-disk state differs
// from the in-memory object map. Returning nonzero flags the map for
// invalidation.
template <typename ImageCtxT = ImageCtx>
using ObjectIterateWork = bool(*)(ImageCtxT &image_ctx,
				  uint64_t object_no,
				  uint8_t current_state,
				  uint8_t new_state);
// Async request that walks every object of the image, compares its
// real RADOS state against the object map, and hands mismatches to the
// supplied ObjectIterateWork handler. Invalidates the object map if
// the handler reports an error.
template <typename ImageCtxT = ImageCtx>
class ObjectMapIterateRequest : public AsyncRequest<ImageCtxT> {
public:
  ObjectMapIterateRequest(ImageCtxT &image_ctx, Context *on_finish,
			  ProgressContext &prog_ctx,
			  ObjectIterateWork<ImageCtxT> handle_mismatch)
    : AsyncRequest<ImageCtxT>(image_ctx, on_finish), m_image_ctx(image_ctx),
      m_prog_ctx(prog_ctx), m_handle_mismatch(handle_mismatch)
  {
  }
  void send() override;
protected:
  bool should_complete(int r) override;
private:
  enum State {
    STATE_VERIFY_OBJECTS,
    STATE_INVALIDATE_OBJECT_MAP
  };
  ImageCtxT &m_image_ctx;
  ProgressContext &m_prog_ctx;
  ObjectIterateWork<ImageCtxT> m_handle_mismatch;
  // set (once) by any verify callback that hits a mismatch error
  std::atomic_flag m_invalidate = ATOMIC_FLAG_INIT;
  State m_state = STATE_VERIFY_OBJECTS;
  void send_verify_objects();
  void send_invalidate_object_map();
  uint64_t get_image_size() const;
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::ObjectMapIterateRequest<librbd::ImageCtx>;
#endif
| 1,633 | 23.757576 | 83 | h |
null | ceph-main/src/librbd/operation/RebuildObjectMapRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/RebuildObjectMapRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "osdc/Striper.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/ObjectMap.h"
#include "librbd/operation/ResizeRequest.h"
#include "librbd/operation/TrimRequest.h"
#include "librbd/operation/ObjectMapIterate.h"
#include "librbd/Utils.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::RebuildObjectMapRequest: "
namespace librbd {
namespace operation {
using util::create_context_callback;
// Entry point: start by (possibly) resizing the object map to match the
// image; see the state diagram in the header.
template <typename I>
void RebuildObjectMapRequest<I>::send() {
  send_resize_object_map();
}
// Advance the rebuild state machine; returns true when the request is
// finished (success or terminal error).
template <typename I>
bool RebuildObjectMapRequest<I>::should_complete(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " should_complete: " << " r=" << r << dendl;
  std::shared_lock owner_lock{m_image_ctx.owner_lock};
  switch (m_state) {
  case STATE_RESIZE_OBJECT_MAP:
    ldout(cct, 5) << "RESIZE_OBJECT_MAP" << dendl;
    if (r == -ESTALE && !m_attempted_trim) {
      // objects are still flagged as in-use -- delete them
      m_attempted_trim = true;
      send_trim_image();
      return false;
    } else if (r == 0) {
      send_verify_objects();
    }
    break;
  case STATE_TRIM_IMAGE:
    ldout(cct, 5) << "TRIM_IMAGE" << dendl;
    if (r == 0) {
      // retry the resize now that stale objects were removed
      send_resize_object_map();
    }
    break;
  case STATE_VERIFY_OBJECTS:
    ldout(cct, 5) << "VERIFY_OBJECTS" << dendl;
    if (r == 0) {
      send_save_object_map();
    }
    break;
  case STATE_SAVE_OBJECT_MAP:
    ldout(cct, 5) << "SAVE_OBJECT_MAP" << dendl;
    if (r == 0) {
      send_update_header();
    }
    break;
  case STATE_UPDATE_HEADER:
    ldout(cct, 5) << "UPDATE_HEADER" << dendl;
    if (r == 0) {
      return true;
    }
    break;
  default:
    ceph_abort();
    break;
  }
  if (r == -ERESTART) {
    // operation was interrupted (e.g. lock lost); finish without error spam
    ldout(cct, 5) << "rebuild object map operation interrupted" << dendl;
    return true;
  } else if (r < 0) {
    lderr(cct) << "rebuild object map encountered an error: " << cpp_strerror(r)
               << dendl;
    return true;
  }
  return false;
}
// Resize the in-memory/on-disk object map so it has one entry per object
// of the (possibly in-flight-resized) image; skips straight to object
// verification when it is already correctly sized.
//
// Fix: the original manually called image_lock.lock_shared() /
// unlock_shared() on two separate exit paths, which is not exception-safe
// and inconsistent with the RAII std::shared_lock used by every other
// method in this file; use a scoped lock instead. The lock is still held
// across aio_resize() and released before send_verify_objects(), exactly
// as before.
template <typename I>
void RebuildObjectMapRequest<I>::send_resize_object_map() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  CephContext *cct = m_image_ctx.cct;
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    ceph_assert(m_image_ctx.object_map != nullptr);
    uint64_t size = get_image_size();
    uint64_t num_objects = Striper::get_num_objects(m_image_ctx.layout, size);
    if (m_image_ctx.object_map->size() != num_objects) {
      ldout(cct, 5) << this << " send_resize_object_map" << dendl;
      m_state = STATE_RESIZE_OBJECT_MAP;
      // should have been canceled prior to releasing lock
      ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
                  m_image_ctx.exclusive_lock->is_lock_owner());
      m_image_ctx.object_map->aio_resize(size, OBJECT_NONEXISTENT,
                                         this->create_callback_context());
      return;
    }
  }
  // already correctly sized -- verify objects without holding image_lock
  send_verify_objects();
}
// Delete the objects beyond the target image size; run when the object
// map resize failed with -ESTALE because trailing objects still exist.
template <typename I>
void RebuildObjectMapRequest<I>::send_trim_image() {
  CephContext *cct = m_image_ctx.cct;

  std::shared_lock owner_locker{m_image_ctx.owner_lock};

  // should have been canceled prior to releasing lock
  ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
              m_image_ctx.exclusive_lock->is_lock_owner());
  ldout(cct, 5) << this << " send_trim_image" << dendl;

  m_state = STATE_TRIM_IMAGE;

  uint64_t orig_size;
  uint64_t new_size;
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    ceph_assert(m_image_ctx.object_map != nullptr);
    new_size = get_image_size();
    // trim from the extent implied by the current object map size down
    // to the target image size
    orig_size = m_image_ctx.get_object_size() *
                m_image_ctx.object_map->size();
  }
  TrimRequest<I> *trim_req =
    TrimRequest<I>::create(m_image_ctx, this->create_callback_context(),
                           orig_size, new_size, m_prog_ctx);
  trim_req->send();
}
// ObjectIterateWork handler used by the rebuild: updates the object map
// entry to match the verified on-disk state. Always returns false
// (mismatches are corrected, never treated as fatal).
template <typename I>
bool update_object_map(I& image_ctx, uint64_t object_no, uint8_t current_state,
		       uint8_t new_state) {
  CephContext *cct = image_ctx.cct;
  uint64_t snap_id = image_ctx.snap_id;
  // re-read the live entry rather than trusting the snapshot taken by
  // the caller -- it may have changed since the verify was issued
  current_state = (*image_ctx.object_map)[object_no];
  if (current_state == OBJECT_EXISTS && new_state == OBJECT_NONEXISTENT &&
      snap_id == CEPH_NOSNAP) {
    // might be writing object to OSD concurrently
    new_state = current_state;
  }
  if (new_state != current_state) {
    ldout(cct, 15) << image_ctx.get_object_name(object_no)
		   << " rebuild updating object map "
		   << static_cast<uint32_t>(current_state) << "->"
		   << static_cast<uint32_t>(new_state) << dendl;
    // compare-and-set: only updates if the entry still holds current_state
    image_ctx.object_map->set_state(object_no, new_state, current_state);
  }
  return false;
}
// Walk every object and reconcile the on-disk state into the object map
// via the update_object_map handler.
template <typename I>
void RebuildObjectMapRequest<I>::send_verify_objects() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ldout(m_image_ctx.cct, 5) << this << " send_verify_objects" << dendl;

  m_state = STATE_VERIFY_OBJECTS;

  auto *iterate_req =
    new ObjectMapIterateRequest<I>(m_image_ctx,
                                   this->create_callback_context(),
                                   m_prog_ctx, update_object_map);
  iterate_req->send();
}

// Persist the rebuilt object map back to its RADOS object.
template <typename I>
void RebuildObjectMapRequest<I>::send_save_object_map() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ldout(m_image_ctx.cct, 5) << this << " send_save_object_map" << dendl;

  m_state = STATE_SAVE_OBJECT_MAP;

  // should have been canceled prior to releasing lock
  ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
              m_image_ctx.exclusive_lock->is_lock_owner());

  std::shared_lock image_locker{m_image_ctx.image_lock};
  ceph_assert(m_image_ctx.object_map != nullptr);
  m_image_ctx.object_map->aio_save(this->create_callback_context());
}
// Clear the "object map invalid" / "fast diff invalid" flags in the
// image header now that the map has been rebuilt and saved.
template <typename I>
void RebuildObjectMapRequest<I>::send_update_header() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  // should have been canceled prior to releasing lock
  ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
              m_image_ctx.exclusive_lock->is_lock_owner());
  ldout(m_image_ctx.cct, 5) << this << " send_update_header" << dendl;
  m_state = STATE_UPDATE_HEADER;
  librados::ObjectWriteOperation op;
  uint64_t flags = RBD_FLAG_OBJECT_MAP_INVALID | RBD_FLAG_FAST_DIFF_INVALID;
  // enable_mask = flags, value = 0 => clear both flags on disk
  cls_client::set_flags(&op, m_image_ctx.snap_id, 0, flags);
  librados::AioCompletion *comp = this->create_callback_completion();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
  // mirror the flag change in memory immediately
  std::unique_lock image_locker{m_image_ctx.image_lock};
  m_image_ctx.update_flags(m_image_ctx.snap_id, flags, false);
}
// Image size the rebuilt map must cover: for a snapshot, the snapshot's
// size; for HEAD, the size of any in-flight resize or the current size.
template <typename I>
uint64_t RebuildObjectMapRequest<I>::get_image_size() const {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
  if (m_image_ctx.snap_id != CEPH_NOSNAP) {
    return m_image_ctx.get_image_size(m_image_ctx.snap_id);
  }
  if (m_image_ctx.resize_reqs.empty()) {
    return m_image_ctx.size;
  }
  return m_image_ctx.resize_reqs.front()->get_image_size();
}
} // namespace operation
} // namespace librbd
template class librbd::operation::RebuildObjectMapRequest<librbd::ImageCtx>;
| 7,683 | 29.613546 | 80 | cc |
null | ceph-main/src/librbd/operation/RebuildObjectMapRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_REBUILD_OBJECT_MAP_REQUEST_H
#define CEPH_LIBRBD_OPERATION_REBUILD_OBJECT_MAP_REQUEST_H
#include "include/int_types.h"
#include "librbd/AsyncRequest.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Async request that reconstructs an image's object map from the actual
// per-object state stored in RADOS, then clears the invalid-flags in
// the image header.
template <typename ImageCtxT = ImageCtx>
class RebuildObjectMapRequest : public AsyncRequest<ImageCtxT> {
public:
  RebuildObjectMapRequest(ImageCtxT &image_ctx, Context *on_finish,
                          ProgressContext &prog_ctx)
    : AsyncRequest<ImageCtxT>(image_ctx, on_finish), m_image_ctx(image_ctx),
      m_prog_ctx(prog_ctx), m_attempted_trim(false)
  {
  }
  void send() override;
protected:
  bool should_complete(int r) override;
private:
  /**
   * Rebuild object map goes through the following state machine to
   * verify per-object state:
   *
   * <start>
   *  .   |   . . . . . . . . . .
   *  .   |   .                 .
   *  .   v   v                 .
   *  . STATE_RESIZE_OBJECT_MAP . . . > STATE_TRIM_IMAGE
   *  .       |
   *  .       v
   *  . . > STATE_VERIFY_OBJECTS
   *          |
   *          v
   *    STATE_SAVE_OBJECT_MAP
   *          |
   *          v
   *    STATE_UPDATE_HEADER
   *
   * The _RESIZE_OBJECT_MAP state will be skipped if the object map
   * is appropriately sized for the image. The _TRIM_IMAGE state will
   * only be hit if the resize failed due to an in-use object.
   */
  enum State {
    STATE_RESIZE_OBJECT_MAP,
    STATE_TRIM_IMAGE,
    STATE_VERIFY_OBJECTS,
    STATE_SAVE_OBJECT_MAP,
    STATE_UPDATE_HEADER
  };
  ImageCtxT &m_image_ctx;
  ProgressContext &m_prog_ctx;
  State m_state = STATE_RESIZE_OBJECT_MAP;
  // true once a trim has been tried; prevents an -ESTALE retry loop
  bool m_attempted_trim;
  void send_resize_object_map();
  void send_trim_image();
  void send_verify_objects();
  void send_save_object_map();
  void send_update_header();
  uint64_t get_image_size() const;
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::RebuildObjectMapRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_REBUILD_OBJECT_MAP_REQUEST_H
| 2,232 | 25.270588 | 83 | h |
null | ceph-main/src/librbd/operation/RenameRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/RenameRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/rados/librados.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::operation::RenameRequest: "
namespace librbd {
namespace operation {
namespace {
// Pretty-print a RenameRequest state for logging; unknown values are
// rendered numerically.
template <typename I>
std::ostream& operator<<(std::ostream& os,
                         const typename RenameRequest<I>::State& state) {
  using R = RenameRequest<I>;
  const char *name = nullptr;
  switch (state) {
  case R::STATE_READ_DIRECTORY:
    name = "READ_DIRECTORY";
    break;
  case R::STATE_READ_SOURCE_HEADER:
    name = "READ_SOURCE_HEADER";
    break;
  case R::STATE_WRITE_DEST_HEADER:
    name = "WRITE_DEST_HEADER";
    break;
  case R::STATE_UPDATE_DIRECTORY:
    name = "UPDATE_DIRECTORY";
    break;
  case R::STATE_REMOVE_SOURCE_HEADER:
    name = "REMOVE_SOURCE_HEADER";
    break;
  default:
    break;
  }
  if (name != nullptr) {
    os << name;
  } else {
    os << "UNKNOWN (" << static_cast<uint32_t>(state) << ")";
  }
  return os;
}
} // anonymous namespace
// @param dest_name new image name; source/destination metadata object
//        names are derived up front (old-format images use the legacy
//        "<name>.rbd" header object, new-format images use the id
//        object keyed by name)
template <typename I>
RenameRequest<I>::RenameRequest(I &image_ctx, Context *on_finish,
                                const std::string &dest_name)
  : Request<I>(image_ctx, on_finish), m_dest_name(dest_name),
    m_source_oid(image_ctx.old_format ? util::old_header_name(image_ctx.name) :
                                        util::id_obj_name(image_ctx.name)),
    m_dest_oid(image_ctx.old_format ? util::old_header_name(dest_name) :
                                      util::id_obj_name(dest_name)) {
}
// Old-format images have no id object / directory name lookup, so the
// rename starts directly at the source header; new-format images first
// re-read the directory to pick up any concurrently-applied rename.
template <typename I>
void RenameRequest<I>::send_op() {
  if (this->m_image_ctx.old_format) {
    send_read_source_header();
  } else {
    send_read_directory();
  }
}
// Drive the rename state machine; returns true when the request is done.
template <typename I>
bool RenameRequest<I>::should_complete(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": state=" << m_state << ", "
                << "r=" << r << dendl;
  r = filter_return_code(r);
  if (r < 0) {
    if (r == -EEXIST) {
      ldout(cct, 1) << "image already exists" << dendl;
    } else {
      lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
    }
    return true;
  }
  if (m_state == STATE_READ_DIRECTORY) {
    // decode the name currently registered for our image id; a racing
    // rename may have changed it since this request was created
    std::string name;
    auto it = m_source_name_bl.cbegin();
    r = cls_client::dir_get_name_finish(&it, &name);
    if (r < 0) {
      lderr(cct) << "could not read directory: " << cpp_strerror(r) << dendl;
      return true;
    }
    bool update = false;
    {
      std::shared_lock image_locker{image_ctx.image_lock};
      update = image_ctx.name != name;
    }
    if (update) {
      // resync the in-memory name and the derived source id-object name
      image_ctx.set_image_name(name);
      m_source_oid = util::id_obj_name(name);
    }
  } else if (m_state == STATE_UPDATE_DIRECTORY) {
    // update in-memory name before removing source header
    apply();
  } else if (m_state == STATE_REMOVE_SOURCE_HEADER) {
    return true;
  }
  std::shared_lock owner_lock{image_ctx.owner_lock};
  switch (m_state) {
  case STATE_READ_DIRECTORY:
    send_read_source_header();
    break;
  case STATE_READ_SOURCE_HEADER:
    send_write_destination_header();
    break;
  case STATE_WRITE_DEST_HEADER:
    send_update_directory();
    break;
  case STATE_UPDATE_DIRECTORY:
    send_remove_source_header();
    break;
  default:
    ceph_abort();
    break;
  }
  return false;
}
// Adjust raw return codes for expected races:
// * source header already gone and the image already carries the target
//   name => journal replay raced with itself, report -EEXIST
// * failure to remove the stale source header is non-fatal (logged only)
template <typename I>
int RenameRequest<I>::filter_return_code(int r) const {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  if (m_state == STATE_READ_SOURCE_HEADER && r == -ENOENT) {
    std::shared_lock image_locker{image_ctx.image_lock};
    if (image_ctx.name == m_dest_name) {
      // signal that replay raced with itself
      return -EEXIST;
    }
  } else if (m_state == STATE_REMOVE_SOURCE_HEADER && r < 0) {
    if (r != -ENOENT) {
      lderr(cct) << "warning: couldn't remove old source object ("
                 << m_source_oid << ")" << dendl;
    }
    return 0;
  }
  return r;
}
// Look up the image's currently-registered name (by image id) in the
// rbd_directory object.
template <typename I>
void RenameRequest<I>::send_read_directory() {
  I &image_ctx = this->m_image_ctx;
  ldout(image_ctx.cct, 5) << this << " " << __func__ << dendl;

  m_state = STATE_READ_DIRECTORY;

  librados::ObjectReadOperation read_op;
  cls_client::dir_get_name_start(&read_op, image_ctx.id);

  librados::AioCompletion *comp = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(RBD_DIRECTORY, comp, &read_op,
                                       &m_source_name_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Read the full contents of the source header/id object so it can be
// copied verbatim to the destination object.
template <typename I>
void RenameRequest<I>::send_read_source_header() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  m_state = STATE_READ_SOURCE_HEADER;
  librados::ObjectReadOperation op;
  // length 0 => read the whole object
  op.read(0, 0, NULL, NULL);
  // TODO: old code read omap values but there are no omap values on the
  // old format header nor the new format id object
  librados::AioCompletion *rados_completion = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(m_source_oid, rados_completion, &op,
                                       &m_header_bl);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Create the destination header/id object with the source's contents;
// exclusive create fails with -EEXIST if the destination name is taken.
template <typename I>
void RenameRequest<I>::send_write_destination_header() {
  I &image_ctx = this->m_image_ctx;
  ldout(image_ctx.cct, 5) << this << " " << __func__ << dendl;

  m_state = STATE_WRITE_DEST_HEADER;

  librados::ObjectWriteOperation write_op;
  write_op.create(true);
  write_op.write_full(m_header_bl);

  librados::AioCompletion *comp = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(m_dest_oid, comp, &write_op);
  ceph_assert(r == 0);
  comp->release();
}
// Swap the name registration in the rbd_directory object: old-format
// images use a raw tmap update (add dest key, remove source key); new
// format delegates to the cls dir_rename_image method.
template <typename I>
void RenameRequest<I>::send_update_directory() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  m_state = STATE_UPDATE_DIRECTORY;
  librados::ObjectWriteOperation op;
  if (image_ctx.old_format) {
    // encode a tmap command stream: SET <dest_name> -> empty value,
    // then RM <source_name>
    bufferlist cmd_bl;
    bufferlist empty_bl;
    encode(static_cast<__u8>(CEPH_OSD_TMAP_SET), cmd_bl);
    encode(m_dest_name, cmd_bl);
    encode(empty_bl, cmd_bl);
    encode(static_cast<__u8>(CEPH_OSD_TMAP_RM), cmd_bl);
    encode(image_ctx.name, cmd_bl);
    op.tmap_update(cmd_bl);
  } else {
    cls_client::dir_rename_image(&op, image_ctx.name, m_dest_name,
                                 image_ctx.id);
  }
  librados::AioCompletion *rados_completion = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(RBD_DIRECTORY, rados_completion, &op);
  ceph_assert(r == 0);
  rados_completion->release();
}
// Remove the now-stale source header/id object; failures here are
// downgraded to warnings by filter_return_code().
template <typename I>
void RenameRequest<I>::send_remove_source_header() {
  I &image_ctx = this->m_image_ctx;
  ldout(image_ctx.cct, 5) << this << " " << __func__ << dendl;

  m_state = STATE_REMOVE_SOURCE_HEADER;

  librados::ObjectWriteOperation remove_op;
  remove_op.remove();

  librados::AioCompletion *comp = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(m_source_oid, comp, &remove_op);
  ceph_assert(r == 0);
  comp->release();
}

// Bring the in-memory image name in line with the on-disk rename.
template <typename I>
void RenameRequest<I>::apply() {
  this->m_image_ctx.set_image_name(m_dest_name);
}
} // namespace operation
} // namespace librbd
template class librbd::operation::RenameRequest<librbd::ImageCtx>;
| 7,682 | 28.77907 | 81 | cc |
null | ceph-main/src/librbd/operation/RenameRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_RENAME_REQUEST_H
#define CEPH_LIBRBD_RENAME_REQUEST_H
#include "librbd/operation/Request.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Async request that renames an image: copies the header/id object to
// the destination name, re-registers the name in rbd_directory, and
// removes the stale source object.
template <typename ImageCtxT = ImageCtx>
class RenameRequest : public Request<ImageCtxT>
{
public:
  /**
   * Rename goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_READ_DIRECTORY
   *    |
   *    v
   * STATE_READ_SOURCE_HEADER
   *    |
   *    v
   * STATE_WRITE_DEST_HEADER
   *    |
   *    v
   * STATE_UPDATE_DIRECTORY
   *    |
   *    v
   * STATE_REMOVE_SOURCE_HEADER
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   *
   */
  enum State {
    STATE_READ_DIRECTORY,
    STATE_READ_SOURCE_HEADER,
    STATE_WRITE_DEST_HEADER,
    STATE_UPDATE_DIRECTORY,
    STATE_REMOVE_SOURCE_HEADER
  };
  RenameRequest(ImageCtxT &image_ctx, Context *on_finish,
                const std::string &dest_name);
protected:
  void send_op() override;
  bool should_complete(int r) override;
  // downgrades expected races (-ENOENT on replay, source cleanup
  // failures) -- see the .cc implementation
  int filter_return_code(int r) const override;
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::RenameEvent(op_tid, m_dest_name);
  }
private:
  std::string m_dest_name;
  std::string m_source_oid;   // source header (old fmt) or id object
  std::string m_dest_oid;     // destination header (old fmt) or id object
  State m_state = STATE_READ_DIRECTORY;
  bufferlist m_source_name_bl;  // dir_get_name result
  bufferlist m_header_bl;       // full source header contents
  void send_read_directory();
  void send_read_source_header();
  void send_write_destination_header();
  void send_update_directory();
  void send_remove_source_header();
  void apply();
};
} // namespace librbd
extern template class librbd::operation::RenameRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_RENAME_REQUEST_H
| 1,884 | 18.635417 | 73 | h |
null | ceph-main/src/librbd/operation/Request.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/Request.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/ContextWQ.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Request: "
namespace librbd {
namespace operation {
// @param journal_op_tid preallocated op tid (0 => allocate on demand)
template <typename I>
Request<I>::Request(I &image_ctx, Context *on_finish, uint64_t journal_op_tid)
  : AsyncRequest<I>(image_ctx, on_finish), m_op_tid(journal_op_tid) {
}
// Dispatch the op; journal-appending requests defer send_op() until the
// journal event is safe (see handle_op_event_safe).
template <typename I>
void Request<I>::send() {
  [[maybe_unused]] I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  // automatically create the event if we don't need to worry
  // about affecting concurrent IO ops
  if (can_affect_io() || !append_op_event()) {
    send_op();
  }
}
// Build the completion used to finish the request; if an op event was
// journaled it must be committed first, in which case nullptr is
// returned and commit_op_event's callback will finish the request.
template <typename I>
Context *Request<I>::create_context_finisher(int r) {
  // automatically commit the event if required (delete after commit)
  if (m_appended_op_event && !m_committed_op_event &&
      commit_op_event(r)) {
    return nullptr;
  }
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  return util::create_context_callback<Request<I>, &Request<I>::finish>(this);
}
// Finish and delete the request, first committing any uncommitted
// journaled op event (the commit callback deletes the request).
template <typename I>
void Request<I>::finish_and_destroy(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
  // automatically commit the event if required (delete after commit)
  if (m_appended_op_event && !m_committed_op_event &&
      commit_op_event(r)) {
    return;
  }
  AsyncRequest<I>::finish_and_destroy(r);
}
// Invariant: an appended op event must be committed before finishing.
template <typename I>
void Request<I>::finish(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
  ceph_assert(!m_appended_op_event || m_committed_op_event);
  AsyncRequest<I>::finish(r);
}
// Append the op's journal event if the journal is in appending mode.
// Returns true when an event was queued (send_op() then runs from
// handle_op_event_safe once the event is safe on disk).
template <typename I>
bool Request<I>::append_op_event() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  std::shared_lock image_locker{image_ctx.image_lock};
  if (image_ctx.journal != nullptr &&
      image_ctx.journal->is_journal_appending()) {
    append_op_event(util::create_context_callback<
      Request<I>, &Request<I>::handle_op_event_safe>(this));
    return true;
  }
  return false;
}
// Commit the previously-appended op event with result r. Returns true
// when an async commit was started (its C_CommitOpEvent callback
// finishes and deletes the request); false when no commit was needed.
template <typename I>
bool Request<I>::commit_op_event(int r) {
  I &image_ctx = this->m_image_ctx;
  std::shared_lock image_locker{image_ctx.image_lock};
  if (!m_appended_op_event) {
    return false;
  }
  ceph_assert(m_op_tid != 0);
  ceph_assert(!m_committed_op_event);
  m_committed_op_event = true;
  if (image_ctx.journal != nullptr &&
      image_ctx.journal->is_journal_appending()) {
    CephContext *cct = image_ctx.cct;
    ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
    // ops will be canceled / completed before closing journal
    ceph_assert(image_ctx.journal->is_journal_ready());
    image_ctx.journal->commit_op_event(m_op_tid, r,
                                       new C_CommitOpEvent(this, r));
    return true;
  }
  return false;
}
// Completion of the journal op-event commit: log commit failures, but
// let the operation's own (negative) result take precedence.
template <typename I>
void Request<I>::handle_commit_op_event(int r, int original_ret_val) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to commit op event to journal: " << cpp_strerror(r)
               << dendl;
  }
  finish(original_ret_val < 0 ? original_ret_val : r);
}
// Journal-replay path: register the replayed op as ready; on_safe is
// bounced through the async context workqueue.
template <typename I>
void Request<I>::replay_op_ready(Context *on_safe) {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
  ceph_assert(m_op_tid != 0);
  m_appended_op_event = true;
  image_ctx.journal->replay_op_ready(
    m_op_tid, util::create_async_context_callback(image_ctx, on_safe));
}
// Allocate an op tid and append the derived journal event; on_safe
// fires (via C_AppendOpEvent, which records m_appended_op_event) once
// the event is safe on disk.
template <typename I>
void Request<I>::append_op_event(Context *on_safe) {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;
  m_op_tid = image_ctx.journal->allocate_op_tid();
  image_ctx.journal->append_op_event(
    m_op_tid, journal::EventEntry{create_event(m_op_tid)},
    new C_AppendOpEvent(this, on_safe));
}
// Called once the op event is safe: abort on journal failure, otherwise
// start the deferred state machine.
template <typename I>
void Request<I>::handle_op_event_safe(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to commit op event to journal: " << cpp_strerror(r)
               << dendl;
    this->finish(r);
    delete this;
  } else {
    ceph_assert(!can_affect_io());
    // haven't started the request state machine yet
    std::shared_lock owner_locker{image_ctx.owner_lock};
    send_op();
  }
}
} // namespace operation
} // namespace librbd
#ifndef TEST_F
template class librbd::operation::Request<librbd::ImageCtx>;
#endif
| 5,347 | 28.065217 | 78 | cc |
null | ceph-main/src/librbd/operation/Request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_REQUEST_H
#define CEPH_LIBRBD_OPERATION_REQUEST_H
#include "librbd/AsyncRequest.h"
#include "include/Context.h"
#include "librbd/Utils.h"
#include "librbd/Journal.h"
namespace librbd {
class ImageCtx;
namespace operation {
// Base class for journaled maintenance operations (resize, snap create, ...).
// Wraps AsyncRequest with optional journal op-event append/replay/commit
// handling keyed by a journal op tid.
template <typename ImageCtxT = ImageCtx>
class Request : public AsyncRequest<ImageCtxT> {
public:
  Request(ImageCtxT &image_ctx, Context *on_finish,
          uint64_t journal_op_tid = 0);

  void send();

protected:
  void finish(int r) override;
  // Starts the concrete operation's state machine.
  virtual void send_op() = 0;

  // Ops that can affect in-flight IO must append their journal event
  // themselves (via the templated append_op_event below) instead of having
  // Request<I>::send() do it up front.
  virtual bool can_affect_io() const {
    return false;
  }
  // Journal event describing this op for the given op tid.
  virtual journal::Event create_event(uint64_t op_tid) const = 0;

  // Append (or, during journal replay, acknowledge) the op event and invoke
  // request->*MF when it is safe.  Returns false when journaling is disabled
  // or the journal is neither replaying nor appending, in which case the
  // caller should proceed synchronously.
  template <typename T, Context*(T::*MF)(int*)>
  bool append_op_event(T *request) {
    ImageCtxT &image_ctx = this->m_image_ctx;

    ceph_assert(can_affect_io());
    std::scoped_lock locker{image_ctx.owner_lock, image_ctx.image_lock};
    if (image_ctx.journal != nullptr) {
      if (image_ctx.journal->is_journal_replaying()) {
        Context *ctx = util::create_context_callback<T, MF>(request);
        replay_op_ready(ctx);
        return true;
      } else if (image_ctx.journal->is_journal_appending()) {
        Context *ctx = util::create_context_callback<T, MF>(request);
        append_op_event(ctx);
        return true;
      }
    }
    return false;
  }

  bool append_op_event();

  // NOTE: temporary until converted to new state machine format
  Context *create_context_finisher(int r);
  void finish_and_destroy(int r) override;

private:
  // Marks the op event appended (on success) before chaining to on_safe.
  struct C_AppendOpEvent : public Context {
    Request *request;
    Context *on_safe;
    C_AppendOpEvent(Request *request, Context *on_safe)
      : request(request), on_safe(on_safe) {
    }
    void finish(int r) override {
      if (r >= 0) {
        request->m_appended_op_event = true;
      }
      on_safe->complete(r);
    }
  };

  // Final commit of the op event; destroys the request afterwards.
  struct C_CommitOpEvent : public Context {
    Request *request;
    int ret_val;
    C_CommitOpEvent(Request *request, int ret_val)
      : request(request), ret_val(ret_val) {
    }
    void finish(int r) override {
      request->handle_commit_op_event(r, ret_val);
      delete request;
    }
  };

  uint64_t m_op_tid = 0;            // journal op tid (0 = none allocated)
  bool m_appended_op_event = false; // op event persisted in journal
  bool m_committed_op_event = false; // op-finish event committed

  void replay_op_ready(Context *on_safe);
  void append_op_event(Context *on_safe);
  void handle_op_event_safe(int r);

  bool commit_op_event(int r);
  void handle_commit_op_event(int r, int original_ret_val);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::Request<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_REQUEST_H
| 2,749 | 24.700935 | 72 | h |
null | ceph-main/src/librbd/operation/ResizeRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/ResizeRequest.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/operation/TrimRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::operation::ResizeRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace operation {

using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;

template <typename I>
ResizeRequest<I>::ResizeRequest(I &image_ctx, Context *on_finish,
                                uint64_t new_size, bool allow_shrink,
                                ProgressContext &prog_ctx,
                                uint64_t journal_op_tid, bool disable_journal)
  : Request<I>(image_ctx, on_finish, journal_op_tid),
    m_original_size(0), m_new_size(new_size), m_allow_shrink(allow_shrink),
    m_prog_ctx(prog_ctx), m_new_parent_overlap(0),
    m_disable_journal(disable_journal), m_xlist_item(this)
{
}

template <typename I>
ResizeRequest<I>::~ResizeRequest() {
  // Resize requests are serialized per-image via image_ctx.resize_reqs:
  // remove ourselves from the queue and, if another request is pending,
  // start it now.
  I &image_ctx = this->m_image_ctx;
  ResizeRequest *next_req = NULL;
  {
    std::unique_lock image_locker{image_ctx.image_lock};
    ceph_assert(m_xlist_item.remove_myself());
    if (!image_ctx.resize_reqs.empty()) {
      next_req = image_ctx.resize_reqs.front();
    }
  }

  if (next_req != NULL) {
    std::shared_lock owner_locker{image_ctx.owner_lock};
    next_req->send();
  }
}

template <typename I>
void ResizeRequest<I>::send() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

  {
    std::unique_lock image_locker{image_ctx.image_lock};
    if (!m_xlist_item.is_on_list()) {
      // queue behind any in-flight resize; the running request's destructor
      // will start us later
      image_ctx.resize_reqs.push_back(&m_xlist_item);
      if (image_ctx.resize_reqs.front() != this) {
        return;
      }
    }

    ceph_assert(image_ctx.resize_reqs.front() == this);
    // snapshot the current size/overlap while holding image_lock
    m_original_size = image_ctx.size;
    compute_parent_overlap();
  }

  Request<I>::send();
}

template <typename I>
void ResizeRequest<I>::send_op() {
  [[maybe_unused]] I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

  if (this->is_canceled()) {
    this->async_complete(-ERESTART);
  } else {
    send_pre_block_writes();
  }
}

template <typename I>
void ResizeRequest<I>::send_pre_block_writes() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  image_ctx.io_image_dispatcher->block_writes(create_context_callback<
    ResizeRequest<I>, &ResizeRequest<I>::handle_pre_block_writes>(this));
}

template <typename I>
Context *ResizeRequest<I>::handle_pre_block_writes(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to block writes: " << cpp_strerror(*result) << dendl;
    image_ctx.io_image_dispatcher->unblock_writes();
    return this->create_context_finisher(*result);
  }
  return send_append_op_event();
}

template <typename I>
Context *ResizeRequest<I>::send_append_op_event() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;

  // reject shrink unless explicitly allowed by the caller
  if (m_new_size < m_original_size && !m_allow_shrink) {
    ldout(cct, 1) << "shrinking the image is not permitted" << dendl;
    image_ctx.io_image_dispatcher->unblock_writes();
    this->async_complete(-EINVAL);
    return nullptr;
  }

  // append_op_event returns false when journaling is disabled/unavailable;
  // in that case proceed directly
  if (m_disable_journal || !this->template append_op_event<
        ResizeRequest<I>, &ResizeRequest<I>::handle_append_op_event>(this)) {
    return send_grow_object_map();
  }

  ldout(cct, 5) << dendl;
  return nullptr;
}

template <typename I>
Context *ResizeRequest<I>::handle_append_op_event(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to commit journal entry: " << cpp_strerror(*result)
               << dendl;
    image_ctx.io_image_dispatcher->unblock_writes();
    return this->create_context_finisher(*result);
  }
  return send_grow_object_map();
}

template <typename I>
void ResizeRequest<I>::send_trim_image() {
  // shrink path: discard objects beyond the new size
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  std::shared_lock owner_locker{image_ctx.owner_lock};
  TrimRequest<I> *req = TrimRequest<I>::create(
    image_ctx, create_context_callback<
      ResizeRequest<I>, &ResizeRequest<I>::handle_trim_image>(this),
    m_original_size, m_new_size, m_prog_ctx);
  req->send();
}

template <typename I>
Context *ResizeRequest<I>::handle_trim_image(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  if (*result == -ERESTART) {
    ldout(cct, 5) << "resize operation interrupted" << dendl;
    return this->create_context_finisher(*result);
  } else if (*result < 0) {
    lderr(cct) << "failed to trim image: " << cpp_strerror(*result) << dendl;
    return this->create_context_finisher(*result);
  }

  send_post_block_writes();
  return nullptr;
}

template <typename I>
void ResizeRequest<I>::send_flush_cache() {
  // shrink path: flush dirty cache data before invalidating/trimming
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  std::shared_lock owner_locker{image_ctx.owner_lock};
  auto ctx = create_context_callback<
    ResizeRequest<I>, &ResizeRequest<I>::handle_flush_cache>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
    ctx, util::get_image_ctx(&image_ctx), io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
    image_ctx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, aio_comp,
    io::FLUSH_SOURCE_INTERNAL, {});
  req->send();
}

template <typename I>
Context *ResizeRequest<I>::handle_flush_cache(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to flush cache: " << cpp_strerror(*result) << dendl;
    return this->create_context_finisher(*result);
  }

  send_invalidate_cache();
  return nullptr;
}

template <typename I>
void ResizeRequest<I>::send_invalidate_cache() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  // need to invalidate since we're deleting objects, and
  // ObjectCacher doesn't track non-existent objects
  image_ctx.io_image_dispatcher->invalidate_cache(create_context_callback<
    ResizeRequest<I>, &ResizeRequest<I>::handle_invalidate_cache>(this));
}

template <typename I>
Context *ResizeRequest<I>::handle_invalidate_cache(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  // ignore busy error -- writeback was successfully flushed so we might be
  // wasting some cache space for trimmed objects, but they will get purged
  // eventually. Most likely cause of the issue was a in-flight cache read
  if (*result < 0 && *result != -EBUSY) {
    lderr(cct) << "failed to invalidate cache: " << cpp_strerror(*result)
               << dendl;
    return this->create_context_finisher(*result);
  }

  send_trim_image();
  return nullptr;
}

template <typename I>
Context *ResizeRequest<I>::send_grow_object_map() {
  I &image_ctx = this->m_image_ctx;

  {
    std::unique_lock image_locker{image_ctx.image_lock};
    // allow shrinking() to report true from this point on
    m_shrink_size_visible = true;
  }

  if (m_original_size == m_new_size) {
    // no-op resize: nothing to do
    image_ctx.io_image_dispatcher->unblock_writes();
    return this->create_context_finisher(0);
  } else if (m_new_size < m_original_size) {
    // shrink: writes can resume, the flush/invalidate/trim path takes over
    image_ctx.io_image_dispatcher->unblock_writes();
    send_flush_cache();
    return nullptr;
  }

  image_ctx.owner_lock.lock_shared();
  image_ctx.image_lock.lock_shared();
  if (image_ctx.object_map == nullptr) {
    image_ctx.image_lock.unlock_shared();
    image_ctx.owner_lock.unlock_shared();

    // IO is still blocked
    send_update_header();
    return nullptr;
  }

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  // should have been canceled prior to releasing lock
  ceph_assert(image_ctx.exclusive_lock == nullptr ||
              image_ctx.exclusive_lock->is_lock_owner());

  image_ctx.object_map->aio_resize(
    m_new_size, OBJECT_NONEXISTENT, create_context_callback<
      ResizeRequest<I>, &ResizeRequest<I>::handle_grow_object_map>(this));
  image_ctx.image_lock.unlock_shared();
  image_ctx.owner_lock.unlock_shared();
  return nullptr;
}

template <typename I>
Context *ResizeRequest<I>::handle_grow_object_map(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to resize object map: "
               << cpp_strerror(*result) << dendl;
    image_ctx.io_image_dispatcher->unblock_writes();
    return this->create_context_finisher(*result);
  }

  // IO is still blocked
  send_update_header();
  return nullptr;
}

template <typename I>
Context *ResizeRequest<I>::send_shrink_object_map() {
  I &image_ctx = this->m_image_ctx;

  image_ctx.owner_lock.lock_shared();
  image_ctx.image_lock.lock_shared();
  if (image_ctx.object_map == nullptr || m_new_size > m_original_size) {
    // nothing to shrink (no object map or this was a grow)
    image_ctx.image_lock.unlock_shared();
    image_ctx.owner_lock.unlock_shared();

    update_size_and_overlap();
    return this->create_context_finisher(0);
  }

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "original_size=" << m_original_size << ", "
                << "new_size=" << m_new_size << dendl;

  // should have been canceled prior to releasing lock
  ceph_assert(image_ctx.exclusive_lock == nullptr ||
              image_ctx.exclusive_lock->is_lock_owner());

  image_ctx.object_map->aio_resize(
    m_new_size, OBJECT_NONEXISTENT, create_context_callback<
      ResizeRequest<I>, &ResizeRequest<I>::handle_shrink_object_map>(this));
  image_ctx.image_lock.unlock_shared();
  image_ctx.owner_lock.unlock_shared();
  return nullptr;
}

template <typename I>
Context *ResizeRequest<I>::handle_shrink_object_map(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to resize object map: "
               << cpp_strerror(*result) << dendl;
    image_ctx.io_image_dispatcher->unblock_writes();
    return this->create_context_finisher(*result);
  }

  update_size_and_overlap();
  return this->create_context_finisher(0);
}

template <typename I>
void ResizeRequest<I>::send_post_block_writes() {
  // shrink path: re-block writes before updating the on-disk header
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  std::shared_lock owner_locker{image_ctx.owner_lock};
  image_ctx.io_image_dispatcher->block_writes(create_context_callback<
    ResizeRequest<I>, &ResizeRequest<I>::handle_post_block_writes>(this));
}

template <typename I>
Context *ResizeRequest<I>::handle_post_block_writes(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  if (*result < 0) {
    image_ctx.io_image_dispatcher->unblock_writes();
    lderr(cct) << "failed to block writes prior to header update: "
               << cpp_strerror(*result) << dendl;
    return this->create_context_finisher(*result);
  }

  send_update_header();
  return nullptr;
}

template <typename I>
void ResizeRequest<I>::send_update_header() {
  // persist the new size to the image header object
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "original_size=" << m_original_size << ", "
                << "new_size=" << m_new_size << dendl;;

  // should have been canceled prior to releasing lock
  std::shared_lock owner_locker{image_ctx.owner_lock};
  ceph_assert(image_ctx.exclusive_lock == nullptr ||
              image_ctx.exclusive_lock->is_lock_owner());

  librados::ObjectWriteOperation op;
  if (image_ctx.old_format) {
    // rewrite only the size field of the header
    ceph_le64 new_size(m_new_size);
    bufferlist bl;
    bl.append(reinterpret_cast<const char*>(&new_size), sizeof(new_size));
    op.write(offsetof(rbd_obj_header_ondisk, image_size), bl);
  } else {
    cls_client::set_size(&op, m_new_size);
  }

  librados::AioCompletion *rados_completion = create_rados_callback<
    ResizeRequest<I>, &ResizeRequest<I>::handle_update_header>(this);
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid,
                                       rados_completion, &op);
  ceph_assert(r == 0);
  rados_completion->release();
}

template <typename I>
Context *ResizeRequest<I>::handle_update_header(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to update image header: " << cpp_strerror(*result)
               << dendl;
    image_ctx.io_image_dispatcher->unblock_writes();
    return this->create_context_finisher(*result);
  }

  return send_shrink_object_map();
}

template <typename I>
void ResizeRequest<I>::compute_parent_overlap() {
  // new parent overlap cannot exceed the new image size
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));

  if (image_ctx.parent == NULL) {
    m_new_parent_overlap = 0;
  } else {
    m_new_parent_overlap = std::min(m_new_size, image_ctx.parent_md.overlap);
  }
}

template <typename I>
void ResizeRequest<I>::update_size_and_overlap() {
  // publish the new size (and reduced parent overlap on shrink) in-memory
  I &image_ctx = this->m_image_ctx;
  {
    std::unique_lock image_locker{image_ctx.image_lock};
    image_ctx.size = m_new_size;

    if (image_ctx.parent != NULL && m_new_size < m_original_size) {
      image_ctx.parent_md.overlap = m_new_parent_overlap;
    }
  }

  // blocked by PRE_BLOCK_WRITES (grow) or POST_BLOCK_WRITES (shrink) state
  image_ctx.io_image_dispatcher->unblock_writes();
}

} // namespace operation
} // namespace librbd
template class librbd::operation::ResizeRequest<librbd::ImageCtx>;
| 14,389 | 29.813704 | 96 | cc |
null | ceph-main/src/librbd/operation/ResizeRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_RESIZE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_RESIZE_REQUEST_H
#include "librbd/operation/Request.h"
#include "include/xlist.h"
namespace librbd
{
class ImageCtx;
class ProgressContext;
namespace operation {
// Async operation that grows or shrinks an RBD image, updating the on-disk
// header, object map and (on shrink) trimming now-out-of-range objects.
template <typename ImageCtxT = ImageCtx>
class ResizeRequest : public Request<ImageCtxT> {
public:
  static ResizeRequest *create(ImageCtxT &image_ctx, Context *on_finish,
                               uint64_t new_size, bool allow_shrink,
                               ProgressContext &prog_ctx, uint64_t journal_op_tid,
                               bool disable_journal) {
    return new ResizeRequest(image_ctx, on_finish, new_size, allow_shrink, prog_ctx,
                             journal_op_tid, disable_journal);
  }

  ResizeRequest(ImageCtxT &image_ctx, Context *on_finish, uint64_t new_size,
                bool allow_shrink, ProgressContext &prog_ctx, uint64_t journal_op_tid,
                bool disable_journal);
  ~ResizeRequest() override;

  // True once the shrink target size may be observed by callers
  // (m_shrink_size_visible is set early in the state machine).
  inline bool shrinking() const {
    return (m_shrink_size_visible && m_new_size < m_original_size);
  }

  // Target size of the resize operation.
  inline uint64_t get_image_size() const {
    return m_new_size;
  }

  void send() override;

protected:
  void send_op() override;
  bool should_complete(int r) override {
    return true;
  }
  bool can_affect_io() const override {
    return true;
  }
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::ResizeEvent(op_tid, m_new_size);
  }

private:
  /**
   * Resize goes through the following state machine to resize the image
   * and update the object map:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_PRE_BLOCK_WRITES
   *    |
   *    v
   * STATE_APPEND_OP_EVENT (skip if journaling
   *    |                   disabled)
   *    |
   *    | (grow)
   *    |\--------> STATE_GROW_OBJECT_MAP (skip if object map
   *    |                 |                disabled)
   *    |                 v
   *    |           STATE_UPDATE_HEADER ----------------------------\
   *    |                 (unblock writes)                          |
   *    |                                                           |
   *    | (unblock writes)                                          |
   *    |                                                           |
   *    | (shrink)                                                  |
   *    |\--------> STATE_FLUSH_CACHE                               |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_INVALIDATE_CACHE                          |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_TRIM_IMAGE                                |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_POST_BLOCK_WRITES                         |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_UPDATE_HEADER                             |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_SHRINK_OBJECT_MAP (skip if object map     |
   *    |                 |                  disabled)              |
   *    |                 | (unblock writes)                        |
   *    | (no change)     v                                         |
   *    \------------> <finish> <-----------------------------------/
   *
   * @endverbatim
   *
   * The _OBJECT_MAP states are skipped if the object map isn't enabled.
   * The state machine will immediately transition to _FINISHED if there
   * are no objects to trim.
   */

  uint64_t m_original_size;       // image size when the request started
  uint64_t m_new_size;            // requested target size
  bool m_allow_shrink = true;     // permit new_size < original_size
  ProgressContext &m_prog_ctx;
  uint64_t m_new_parent_overlap;  // min(new_size, current parent overlap)
  bool m_shrink_size_visible = false;
  bool m_disable_journal = false; // skip the journal op event when true
  typename xlist<ResizeRequest<ImageCtxT>*>::item m_xlist_item;

  void send_pre_block_writes();
  Context *handle_pre_block_writes(int *result);

  Context *send_append_op_event();
  Context *handle_append_op_event(int *result);

  void send_flush_cache();
  Context *handle_flush_cache(int *result);

  void send_invalidate_cache();
  Context *handle_invalidate_cache(int *result);

  void send_trim_image();
  Context *handle_trim_image(int *result);

  Context *send_grow_object_map();
  Context *handle_grow_object_map(int *result);

  Context *send_shrink_object_map();
  Context *handle_shrink_object_map(int *result);

  void send_post_block_writes();
  Context *handle_post_block_writes(int *result);

  void send_update_header();
  Context *handle_update_header(int *result);

  void compute_parent_overlap();
  void update_size_and_overlap();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::ResizeRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_RESIZE_REQUEST_H
| 5,353 | 33.101911 | 86 | h |
null | ceph-main/src/librbd/operation/SnapshotCreateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/operation/SnapshotCreateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/mirror/snapshot/SetImageStateRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::SnapshotCreateRequest: "
namespace librbd {
namespace operation {

using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;

template <typename I>
SnapshotCreateRequest<I>::SnapshotCreateRequest(I &image_ctx,
                                                Context *on_finish,
                                                const cls::rbd::SnapshotNamespace &snap_namespace,
                                                const std::string &snap_name,
                                                uint64_t journal_op_tid,
                                                uint64_t flags,
                                                ProgressContext &prog_ctx)
  : Request<I>(image_ctx, on_finish, journal_op_tid),
    m_snap_namespace(snap_namespace), m_snap_name(snap_name),
    m_skip_object_map(flags & SNAP_CREATE_FLAG_SKIP_OBJECT_MAP),
    m_skip_notify_quiesce(flags & SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE),
    m_ignore_notify_quiesce_error(flags & SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR),
    m_prog_ctx(prog_ctx) {
}

template <typename I>
void SnapshotCreateRequest<I>::send_op() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;

  // snapshots require a usable data pool for self-managed snap ids
  if (!image_ctx.data_ctx.is_valid()) {
    lderr(cct) << "missing data pool" << dendl;
    this->async_complete(-ENODEV);
    return;
  }

  send_notify_quiesce();
}

template <typename I>
void SnapshotCreateRequest<I>::send_notify_quiesce() {
  if (m_skip_notify_quiesce) {
    send_suspend_requests();
    return;
  }

  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  // ask watchers to quiesce their IO; the watcher fills in m_request_id
  image_ctx.image_watcher->notify_quiesce(
    &m_request_id, m_prog_ctx, create_async_context_callback(
      image_ctx, create_context_callback<SnapshotCreateRequest<I>,
        &SnapshotCreateRequest<I>::handle_notify_quiesce>(this)));
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_notify_quiesce(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0 && !m_ignore_notify_quiesce_error) {
    lderr(cct) << "failed to notify quiesce: " << cpp_strerror(*result)
               << dendl;
    // preserve the error and fall through to the unquiesce cleanup path
    save_result(result);
    send_notify_unquiesce();
    return nullptr;
  }

  std::shared_lock owner_locker{image_ctx.owner_lock};
  send_suspend_requests();
  return nullptr;
}

template <typename I>
void SnapshotCreateRequest<I>::send_suspend_requests() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  // TODO suspend (shrink) resize to ensure consistent RBD mirror
  send_suspend_aio();
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_suspend_requests(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  // TODO
  send_suspend_aio();
  return nullptr;
}

template <typename I>
void SnapshotCreateRequest<I>::send_suspend_aio() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  image_ctx.io_image_dispatcher->block_writes(create_context_callback<
    SnapshotCreateRequest<I>,
    &SnapshotCreateRequest<I>::handle_suspend_aio>(this));
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_suspend_aio(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to block writes: " << cpp_strerror(*result) << dendl;
    save_result(result);
    return send_notify_unquiesce();
  }

  // remember that writes must be unblocked during cleanup
  m_writes_blocked = true;

  send_append_op_event();
  return nullptr;
}

template <typename I>
void SnapshotCreateRequest<I>::send_append_op_event() {
  I &image_ctx = this->m_image_ctx;
  // append_op_event returns false when journaling is unavailable -- proceed
  // straight to snap id allocation in that case
  if (!this->template append_op_event<
        SnapshotCreateRequest<I>,
        &SnapshotCreateRequest<I>::handle_append_op_event>(this)) {
    send_allocate_snap_id();
    return;
  }

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_append_op_event(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to commit journal entry: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
    return send_notify_unquiesce();
  }

  send_allocate_snap_id();
  return nullptr;
}

template <typename I>
void SnapshotCreateRequest<I>::send_allocate_snap_id() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  // reserve a self-managed snapshot id from the data pool
  librados::AioCompletion *rados_completion = create_rados_callback<
    SnapshotCreateRequest<I>,
    &SnapshotCreateRequest<I>::handle_allocate_snap_id>(this);
  image_ctx.data_ctx.aio_selfmanaged_snap_create(&m_snap_id, rados_completion);
  rados_completion->release();
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_allocate_snap_id(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << ", "
                << "snap_id=" << m_snap_id << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to allocate snapshot id: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
    return send_notify_unquiesce();
  }

  send_create_snap();
  return nullptr;
}

template <typename I>
void SnapshotCreateRequest<I>::send_create_snap() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  std::shared_lock owner_locker{image_ctx.owner_lock};
  std::shared_lock image_locker{image_ctx.image_lock};

  // should have been canceled prior to releasing lock
  ceph_assert(image_ctx.exclusive_lock == nullptr ||
              image_ctx.exclusive_lock->is_lock_owner());

  // save current size / parent info for creating snapshot record in ImageCtx
  m_size = image_ctx.size;
  m_parent_info = image_ctx.parent_md;

  librados::ObjectWriteOperation op;
  if (image_ctx.old_format) {
    cls_client::old_snapshot_add(&op, m_snap_id, m_snap_name);
  } else {
    cls_client::snapshot_add(&op, m_snap_id, m_snap_name, m_snap_namespace);
  }

  librados::AioCompletion *rados_completion = create_rados_callback<
    SnapshotCreateRequest<I>,
    &SnapshotCreateRequest<I>::handle_create_snap>(this);
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid,
                                       rados_completion, &op);
  ceph_assert(r == 0);
  rados_completion->release();
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_create_snap(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result == -ESTALE) {
    // allocated snap id is stale relative to the header -- retry with a
    // freshly allocated id
    send_allocate_snap_id();
    return nullptr;
  } else if (*result < 0) {
    save_result(result);
    send_release_snap_id();
    return nullptr;
  }

  return send_create_object_map();
}

template <typename I>
Context *SnapshotCreateRequest<I>::send_create_object_map() {
  I &image_ctx = this->m_image_ctx;

  image_ctx.image_lock.lock_shared();
  if (image_ctx.object_map == nullptr || m_skip_object_map) {
    image_ctx.image_lock.unlock_shared();

    return send_create_image_state();
  }

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  image_ctx.object_map->snapshot_add(
    m_snap_id, create_context_callback<
      SnapshotCreateRequest<I>,
      &SnapshotCreateRequest<I>::handle_create_object_map>(this));
  image_ctx.image_lock.unlock_shared();
  return nullptr;
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_create_object_map(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << this << " " << __func__ << ": failed to snapshot object map: "
               << cpp_strerror(*result) << dendl;

    // the snapshot exists on disk at this point, so still publish it
    save_result(result);
    update_snap_context();
    return send_notify_unquiesce();
  }

  return send_create_image_state();
}

template <typename I>
Context *SnapshotCreateRequest<I>::send_create_image_state() {
  I &image_ctx = this->m_image_ctx;
  // image state is only recorded for primary mirror snapshots
  auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
    &m_snap_namespace);
  if (mirror_ns == nullptr || !mirror_ns->is_primary()) {
    update_snap_context();
    return send_notify_unquiesce();
  }

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  auto req = mirror::snapshot::SetImageStateRequest<I>::create(
    &image_ctx, m_snap_id, create_context_callback<
      SnapshotCreateRequest<I>,
      &SnapshotCreateRequest<I>::handle_create_image_state>(this));
  req->send();
  return nullptr;
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_create_image_state(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  update_snap_context();
  if (*result < 0) {
    lderr(cct) << this << " " << __func__ << ": failed to create image state: "
               << cpp_strerror(*result) << dendl;
    save_result(result);
  }

  return send_notify_unquiesce();
}

template <typename I>
void SnapshotCreateRequest<I>::send_release_snap_id() {
  // error cleanup: drop the snap id we reserved but failed to register
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  ceph_assert(m_snap_id != CEPH_NOSNAP);

  librados::AioCompletion *rados_completion = create_rados_callback<
    SnapshotCreateRequest<I>,
    &SnapshotCreateRequest<I>::handle_release_snap_id>(this);
  image_ctx.data_ctx.aio_selfmanaged_snap_remove(m_snap_id, rados_completion);
  rados_completion->release();
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_release_snap_id(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  return send_notify_unquiesce();
}

template <typename I>
Context *SnapshotCreateRequest<I>::send_notify_unquiesce() {
  // common exit path (success and failure): unblock writes if we blocked
  // them and undo the earlier quiesce notification
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;

  if (m_writes_blocked) {
    image_ctx.io_image_dispatcher->unblock_writes();
  }

  if (m_skip_notify_quiesce) {
    return this->create_context_finisher(m_ret_val);
  }

  ldout(cct, 5) << this << " " << __func__ << dendl;

  image_ctx.image_watcher->notify_unquiesce(
    m_request_id, create_context_callback<
      SnapshotCreateRequest<I>,
      &SnapshotCreateRequest<I>::handle_notify_unquiesce>(this));

  return nullptr;
}

template <typename I>
Context *SnapshotCreateRequest<I>::handle_notify_unquiesce(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to notify unquiesce: " << cpp_strerror(*result)
               << dendl;
    // ignore error
  }

  // propagate the first error saved earlier in the state machine
  *result = m_ret_val;
  return this->create_context_finisher(m_ret_val);
}

template <typename I>
void SnapshotCreateRequest<I>::update_snap_context() {
  I &image_ctx = this->m_image_ctx;

  std::shared_lock owner_locker{image_ctx.owner_lock};
  std::unique_lock image_locker{image_ctx.image_lock};
  if (image_ctx.get_snap_info(m_snap_id) != NULL) {
    // already registered in-memory (idempotent)
    return;
  }

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  // should have been canceled prior to releasing lock
  ceph_assert(image_ctx.exclusive_lock == nullptr ||
              image_ctx.exclusive_lock->is_lock_owner());

  // immediately add a reference to the new snapshot
  utime_t snap_time = ceph_clock_now();
  image_ctx.add_snap(m_snap_namespace, m_snap_name, m_snap_id, m_size,
                     m_parent_info, RBD_PROTECTION_STATUS_UNPROTECTED,
                     0, snap_time);

  // immediately start using the new snap context if we
  // own the exclusive lock
  std::vector<snapid_t> snaps;
  snaps.push_back(m_snap_id);
  snaps.insert(snaps.end(), image_ctx.snapc.snaps.begin(),
               image_ctx.snapc.snaps.end());

  image_ctx.snapc.seq = m_snap_id;
  image_ctx.snapc.snaps.swap(snaps);
  image_ctx.data_ctx.selfmanaged_snap_set_write_ctx(
    image_ctx.snapc.seq, image_ctx.snaps);
  image_ctx.rebuild_data_io_context();

  if (!image_ctx.migration_info.empty()) {
    auto it = image_ctx.migration_info.snap_map.find(CEPH_NOSNAP);
    ceph_assert(it != image_ctx.migration_info.snap_map.end());
    ceph_assert(!it->second.empty());
    if (it->second[0] == CEPH_NOSNAP) {
      ldout(cct, 5) << this << " " << __func__
                    << ": updating migration snap_map" << dendl;
      it->second[0] = m_snap_id;
    }
  }
}

} // namespace operation
} // namespace librbd
template class librbd::operation::SnapshotCreateRequest<librbd::ImageCtx>;
| 14,041 | 30.204444 | 88 | cc |
null | ceph-main/src/librbd/operation/SnapshotCreateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_CREATE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_CREATE_REQUEST_H
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/Types.h"
#include "librbd/operation/Request.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
template <typename ImageCtxT = ImageCtx>
class SnapshotCreateRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Create goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_NOTIFY_QUIESCE * * * * * * * * * * * * *
   *    |                                         *
   *    v                                         *
   * STATE_SUSPEND_REQUESTS                       *
   *    |                                         *
   *    v                                         *
   * STATE_SUSPEND_AIO * * * * * * * * * * * * * *
   *    |                                         *
   *    v                                         *
   * STATE_APPEND_OP_EVENT (skip if journal      *
   *    |                   disabled)             *
   * (retry) v                                    *
   * . . . > STATE_ALLOCATE_SNAP_ID              *
   * .          |                                 *
   * .          v                                 *
   * . . . . STATE_CREATE_SNAP * * * * * * * * * * *
   *             |                       *         *
   *             v                       *         *
   *         STATE_CREATE_OBJECT_MAP (skip if      *
   *             |              disabled)  *       *
   *             v                         *       *
   *         STATE_CREATE_IMAGE_STATE (skip if     *
   *             |               not mirror *      *
   *             |               snapshot)  *      *
   *             |         v                *
   *             |     STATE_RELEASE_SNAP_ID       *
   *             |         |                *      *
   *             |         v                *      *
   *             \------------> STATE_NOTIFY_UNQUIESCE < *
   *                                  |
   *                                  v
   *                               <finish>
   * @endverbatim
   *
   * The _CREATE_STATE state may repeat back to the _ALLOCATE_SNAP_ID state
   * if a stale snapshot context is allocated. If the create operation needs
   * to abort, the error path is followed to record the result in the journal
   * (if enabled) and bubble the originating error code back to the client.
   */
  SnapshotCreateRequest(ImageCtxT &image_ctx, Context *on_finish,
                        const cls::rbd::SnapshotNamespace &snap_namespace,
                        const std::string &snap_name, uint64_t journal_op_tid,
                        uint64_t flags, ProgressContext &prog_ctx);

protected:
  // kicks off the state machine (see diagram above)
  void send_op() override;
  bool should_complete(int r) override {
    return true;
  }

  // snapshot creation must coordinate with in-flight I/O
  bool can_affect_io() const override {
    return true;
  }

  // journal replay event describing this operation
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapCreateEvent(op_tid, m_snap_namespace, m_snap_name);
  }

private:
  cls::rbd::SnapshotNamespace m_snap_namespace;  // namespace of the new snap
  std::string m_snap_name;                       // name of the new snap
  bool m_skip_object_map;           // derived from creation flags
  bool m_skip_notify_quiesce;       // derived from creation flags
  bool m_ignore_notify_quiesce_error;  // derived from creation flags
  ProgressContext &m_prog_ctx;

  uint64_t m_request_id = 0;        // id used for quiesce/unquiesce notify
  int m_ret_val = 0;                // first error encountered (0 == success)
  bool m_writes_blocked = false;    // true while image writes are suspended

  uint64_t m_snap_id = CEPH_NOSNAP; // allocated self-managed snap id
  uint64_t m_size;                  // image size captured at snap time
  ParentImageInfo m_parent_info;    // parent captured at snap time

  void send_notify_quiesce();
  Context *handle_notify_quiesce(int *result);

  void send_suspend_requests();
  Context *handle_suspend_requests(int *result);

  void send_suspend_aio();
  Context *handle_suspend_aio(int *result);

  void send_append_op_event();
  Context *handle_append_op_event(int *result);

  void send_allocate_snap_id();
  Context *handle_allocate_snap_id(int *result);

  void send_create_snap();
  Context *handle_create_snap(int *result);

  Context *send_create_object_map();
  Context *handle_create_object_map(int *result);

  Context *send_create_image_state();
  Context *handle_create_image_state(int *result);

  void send_release_snap_id();
  Context *handle_release_snap_id(int *result);

  Context *send_notify_unquiesce();
  Context *handle_notify_unquiesce(int *result);

  void update_snap_context();

  // record the first failure; later errors do not overwrite it
  void save_result(int *result) {
    if (m_ret_val == 0 && *result < 0) {
      m_ret_val = *result;
    }
  }
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotCreateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_CREATE_REQUEST_H
| 5,112 | 33.315436 | 81 | h |
null | ceph-main/src/librbd/operation/SnapshotLimitRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/SnapshotLimitRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::SnapshotLimitRequest: "
namespace librbd {
namespace operation {
// @param limit maximum snapshot count to persist in the image header
template <typename I>
SnapshotLimitRequest<I>::SnapshotLimitRequest(I &image_ctx,
					      Context *on_finish,
					      uint64_t limit)
  : Request<I>(image_ctx, on_finish), m_snap_limit(limit) {
}
// Entry point invoked by the base Request state machine.
template <typename I>
void SnapshotLimitRequest<I>::send_op() {
  send_limit_snaps();
}
// Invoked when the header update completes; logs any error and always
// finishes the request (this operation has a single state).
template <typename I>
bool SnapshotLimitRequest<I>::should_complete(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << " r=" << r << dendl;
  if (r >= 0) {
    return true;
  }
  lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  return true;
}
// Issues the async cls call that stores the new snapshot limit in the
// image header object; caller must hold the owner lock.
template <typename I>
void SnapshotLimitRequest<I>::send_limit_snaps() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  {
    std::shared_lock image_locker{image_ctx.image_lock};
    librados::ObjectWriteOperation op;
    cls_client::snapshot_set_limit(&op, m_snap_limit);
    librados::AioCompletion *rados_completion =
      this->create_callback_completion();
    // completion is routed back through should_complete()
    int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion,
					 &op);
    ceph_assert(r == 0);
    rados_completion->release();
  }
}
} // namespace operation
} // namespace librbd
template class librbd::operation::SnapshotLimitRequest<librbd::ImageCtx>;
| 1,793 | 25.776119 | 80 | cc |
null | ceph-main/src/librbd/operation/SnapshotLimitRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_LIMIT_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_LIMIT_REQUEST_H
#include "librbd/operation/Request.h"
#include <iosfwd>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Single-state operation that persists a maximum snapshot count in the
// image header (enforced at snapshot-create time).
template <typename ImageCtxT = ImageCtx>
class SnapshotLimitRequest : public Request<ImageCtxT> {
public:
  SnapshotLimitRequest(ImageCtxT &image_ctx, Context *on_finish,
		       uint64_t limit);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // journal replay event describing this operation
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapLimitEvent(op_tid, m_snap_limit);
  }

private:
  uint64_t m_snap_limit;  // limit to persist (see constructor)

  void send_limit_snaps();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotLimitRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_LIMIT_REQUEST_H
| 1,018 | 21.644444 | 80 | h |
null | ceph-main/src/librbd/operation/SnapshotProtectRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/SnapshotProtectRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::SnapshotProtectRequest: "
namespace librbd {
namespace operation {
namespace {
// Pretty-printer for the (single-valued) protect state machine state.
template <typename I>
std::ostream& operator<<(std::ostream& os,
                         const typename SnapshotProtectRequest<I>::State& state) {
  if (state == SnapshotProtectRequest<I>::STATE_PROTECT_SNAP) {
    os << "PROTECT_SNAP";
  }
  return os;
}
} // anonymous namespace
// @param snap_namespace/snap_name identify the snapshot to protect
template <typename I>
SnapshotProtectRequest<I>::SnapshotProtectRequest(I &image_ctx,
                                                  Context *on_finish,
						  const cls::rbd::SnapshotNamespace &snap_namespace,
                                                  const std::string &snap_name)
  : Request<I>(image_ctx, on_finish), m_snap_namespace(snap_namespace),
    m_snap_name(snap_name), m_state(STATE_PROTECT_SNAP) {
}
// Entry point invoked by the base Request state machine.
template <typename I>
void SnapshotProtectRequest<I>::send_op() {
  send_protect_snap();
}
// Invoked when the protect update completes. -EBUSY (already protected)
// is logged at low severity; other failures are reported as errors. The
// request always finishes after its single state.
template <typename I>
bool SnapshotProtectRequest<I>::should_complete(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": state=" << m_state << ", "
                << "r=" << r << dendl;
  if (r == -EBUSY) {
    ldout(cct, 1) << "snapshot is already protected" << dendl;
  } else if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
// Validates preconditions and issues the protect update; completes the
// request asynchronously with an error if validation fails.
template <typename I>
void SnapshotProtectRequest<I>::send_protect_snap() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  int r = verify_and_send_protect_snap();
  if (r < 0) {
    this->async_complete(r);
    return;
  }
}
// Verifies that the image supports layering, that the snapshot exists,
// and that it is not already protected, then issues the async header
// update. Returns 0 when the update was dispatched, otherwise:
//   -ENOSYS layering feature disabled
//   -ENOENT snapshot not found
//   -EBUSY  snapshot already protected
template <typename I>
int SnapshotProtectRequest<I>::verify_and_send_protect_snap() {
  I &image_ctx = this->m_image_ctx;
  std::shared_lock image_locker{image_ctx.image_lock};
  CephContext *cct = image_ctx.cct;
  if ((image_ctx.features & RBD_FEATURE_LAYERING) == 0) {
    lderr(cct) << "image must support layering" << dendl;
    return -ENOSYS;
  }
  uint64_t snap_id = image_ctx.get_snap_id(m_snap_namespace, m_snap_name);
  if (snap_id == CEPH_NOSNAP) {
    return -ENOENT;
  }
  bool is_protected;
  int r = image_ctx.is_snap_protected(snap_id, &is_protected);
  if (r < 0) {
    return r;
  }
  if (is_protected) {
    return -EBUSY;
  }
  librados::ObjectWriteOperation op;
  cls_client::set_protection_status(&op, snap_id,
                                    RBD_PROTECTION_STATUS_PROTECTED);
  librados::AioCompletion *rados_completion =
    this->create_callback_completion();
  // completion is routed back through should_complete()
  r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion,
                                   &op);
  ceph_assert(r == 0);
  rados_completion->release();
  return 0;
}
} // namespace operation
} // namespace librbd
template class librbd::operation::SnapshotProtectRequest<librbd::ImageCtx>;
| 3,244 | 26.268908 | 82 | cc |
null | ceph-main/src/librbd/operation/SnapshotProtectRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_PROTECT_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_PROTECT_REQUEST_H
#include "librbd/operation/Request.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Single-state operation that marks a snapshot as protected so that
// clones may be created from it.
template <typename ImageCtxT = ImageCtx>
class SnapshotProtectRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Protect goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_PROTECT_SNAP
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   *
   */
  enum State {
    STATE_PROTECT_SNAP
  };

  SnapshotProtectRequest(ImageCtxT &image_ctx, Context *on_finish,
			 const cls::rbd::SnapshotNamespace &snap_namespace,
			 const std::string &snap_name);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // journal replay event describing this operation
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapProtectEvent(op_tid, m_snap_namespace, m_snap_name);
  }

private:
  cls::rbd::SnapshotNamespace m_snap_namespace;  // snapshot to protect
  std::string m_snap_name;
  State m_state;

  void send_protect_snap();

  int verify_and_send_protect_snap();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotProtectRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_PROTECT_REQUEST_H
| 1,469 | 20.304348 | 82 | h |
null | ceph-main/src/librbd/operation/SnapshotRemoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/SnapshotRemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/image/DetachChildRequest.h"
#include "librbd/mirror/snapshot/RemoveImageStateRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::SnapshotRemoveRequest: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace operation {
using util::create_context_callback;
using util::create_rados_callback;
// @param snap_namespace/snap_name/snap_id identify the snapshot to remove
template <typename I>
SnapshotRemoveRequest<I>::SnapshotRemoveRequest(
    I &image_ctx, Context *on_finish,
    const cls::rbd::SnapshotNamespace &snap_namespace,
    const std::string &snap_name, uint64_t snap_id)
  : Request<I>(image_ctx, on_finish), m_snap_namespace(snap_namespace),
    m_snap_name(snap_name), m_snap_id(snap_id) {
}
// Entry point: verifies the snapshot is known to the in-memory image
// state before starting the removal state machine.
template <typename I>
void SnapshotRemoveRequest<I>::send_op() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  {
    std::shared_lock image_locker{image_ctx.image_lock};
    if (image_ctx.snap_info.find(m_snap_id) == image_ctx.snap_info.end()) {
      lderr(cct) << "snapshot doesn't exist" << dendl;
      this->async_complete(-ENOENT);
      return;
    }
  }
  trash_snap();
}
// Logs the final result; -EBUSY (clone v2 child still attached) is an
// expected outcome and not reported as an error. Always finishes.
template <typename I>
bool SnapshotRemoveRequest<I>::should_complete(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  bool expected_result = (r >= 0 || r == -EBUSY);
  if (!expected_result) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
// Moves the snapshot into the trash namespace (clone v2 semantics).
// Old-format images skip straight to releasing the snap id; snapshots
// already in the trash namespace skip to fetching snapshot info.
template <typename I>
void SnapshotRemoveRequest<I>::trash_snap() {
  I &image_ctx = this->m_image_ctx;
  if (image_ctx.old_format) {
    release_snap_id();
    return;
  } else if (cls::rbd::get_snap_namespace_type(m_snap_namespace) ==
               cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
    get_snap();
    return;
  }
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;
  librados::ObjectWriteOperation op;
  cls_client::snapshot_trash_add(&op, m_snap_id);
  auto aio_comp = create_rados_callback<
    SnapshotRemoveRequest<I>,
    &SnapshotRemoveRequest<I>::handle_trash_snap>(this);
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Completion of the trash move. -EOPNOTSUPP means the OSD lacks clone v2
// support, so fall back to the legacy detach-child path; -EEXIST (already
// trashed) is treated as success.
template <typename I>
void SnapshotRemoveRequest<I>::handle_trash_snap(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r == -EOPNOTSUPP) {
    // trash / clone v2 not supported
    detach_child();
    return;
  } else if (r < 0 && r != -EEXIST) {
    lderr(cct) << "failed to move snapshot to trash: " << cpp_strerror(r)
               << dendl;
    this->complete(r);
    return;
  }
  m_trashed_snapshot = true;
  get_snap();
}
// Fetches the snapshot record from the image header (primarily to learn
// whether any clone v2 children are still attached).
template <typename I>
void SnapshotRemoveRequest<I>::get_snap() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;
  librados::ObjectReadOperation op;
  cls_client::snapshot_get_start(&op, m_snap_id);
  auto aio_comp = create_rados_callback<
    SnapshotRemoveRequest<I>,
    &SnapshotRemoveRequest<I>::handle_get_snap>(this);
  m_out_bl.clear();
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op,
                                       &m_out_bl);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Decodes the snapshot record; if children are still attached, enumerate
// them to detach any that reference since-deleted images, otherwise
// proceed to detaching this image from its own parent.
template <typename I>
void SnapshotRemoveRequest<I>::handle_get_snap(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r == 0) {
    cls::rbd::SnapshotInfo snap_info;
    auto it = m_out_bl.cbegin();
    r = cls_client::snapshot_get_finish(&it, &snap_info);
    m_child_attached = (snap_info.child_count > 0);
    if (r == 0 && m_child_attached) {
      list_children();
      return;
    }
  }
  if (r < 0) {
    lderr(cct) << "failed to retrieve snapshot: " << cpp_strerror(r)
               << dendl;
    this->complete(r);
    return;
  }
  detach_child();
}
// Retrieves the list of clone v2 children attached to this snapshot.
template <typename I>
void SnapshotRemoveRequest<I>::list_children() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;
  librados::ObjectReadOperation op;
  cls_client::children_list_start(&op, m_snap_id);
  m_out_bl.clear();
  m_child_images.clear();
  auto aio_comp = create_rados_callback<
    SnapshotRemoveRequest<I>,
    &SnapshotRemoveRequest<I>::handle_list_children>(this);
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op,
                                       &m_out_bl);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Decodes the child list; -ENOENT (no children recorded) is not an error.
template <typename I>
void SnapshotRemoveRequest<I>::handle_list_children(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r == 0) {
    auto it = m_out_bl.cbegin();
    r = cls_client::children_list_finish(&it, &m_child_images);
  }
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "failed to retrieve child: " << cpp_strerror(r)
               << dendl;
    this->complete(r);
    return;
  }
  detach_stale_child();
}
// Scans the attached children for ones whose pool no longer exists and
// issues an async child_detach for the first stale entry found; its
// completion re-lists the children, so the scan repeats until only live
// children remain (m_child_attached stays true if any child was seen).
template <typename I>
void SnapshotRemoveRequest<I>::detach_stale_child() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;
  for (auto& child_image : m_child_images) {
    m_child_attached = true;
    IoCtx ioctx;
    int r = util::create_ioctx(image_ctx.md_ctx, "child image",
                               child_image.pool_id,
                               child_image.pool_namespace, &ioctx);
    if (r == -ENOENT) {
      // pool is gone -- the child image can no longer exist, so drop it
      librados::ObjectWriteOperation op;
      cls_client::child_detach(&op, m_snap_id,
                               {child_image.pool_id,
                                child_image.pool_namespace,
                                child_image.image_id});
      auto aio_comp = create_rados_callback<
        SnapshotRemoveRequest<I>,
        &SnapshotRemoveRequest<I>::handle_detach_stale_child>(this);
      r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op);
      ceph_assert(r == 0);
      aio_comp->release();
      return;
    } else if (r < 0) {
      this->async_complete(r);
      return;
    }
  }
  detach_child();
}
// Completion of a stale-child detach; resets the attached flag and
// re-lists the children to continue the scan.
template <typename I>
void SnapshotRemoveRequest<I>::handle_detach_stale_child(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "failed to detach stale child: " << cpp_strerror(r)
               << dendl;
    this->complete(r);
    return;
  }
  m_child_attached = false;
  list_children();
}
// If this snapshot holds the last reference to the image's parent (no
// other snapshot nor the HEAD revision shares the same parent spec),
// detach the image from its parent before removing the snapshot.
template <typename I>
void SnapshotRemoveRequest<I>::detach_child() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;

  bool detach_child = false;
  {
    std::shared_lock image_locker{image_ctx.image_lock};

    cls::rbd::ParentImageSpec our_pspec;
    int r = image_ctx.get_parent_spec(m_snap_id, &our_pspec);
    if (r < 0) {
      if (r == -ENOENT) {
        ldout(cct, 1) << "No such snapshot" << dendl;
      } else {
        lderr(cct) << "failed to retrieve parent spec" << dendl;
      }

      this->async_complete(r);
      return;
    }

    if (image_ctx.parent_md.spec != our_pspec &&
        (scan_for_parents(our_pspec) == -ENOENT)) {
      // no other references to the parent image
      detach_child = true;
    }
  }

  if (!detach_child) {
    // HEAD image or other snapshots still associated with parent
    remove_object_map();
    return;
  }

  ldout(cct, 5) << dendl;
  auto ctx = create_context_callback<
    SnapshotRemoveRequest<I>,
    &SnapshotRemoveRequest<I>::handle_detach_child>(this);
  auto req = image::DetachChildRequest<I>::create(image_ctx, ctx);
  req->send();
}
// Completion of the parent detach; -ENOENT (already detached) is OK.
template <typename I>
void SnapshotRemoveRequest<I>::handle_detach_child(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "failed to detach child from parent: " << cpp_strerror(r)
               << dendl;
    this->complete(r);
    return;
  }
  remove_object_map();
}
// Removes the snapshot's object map (when the feature is enabled). If a
// clone v2 child is still attached the removal cannot proceed: the
// request completes with 0 if the snap was at least moved to the trash,
// otherwise with -EBUSY.
template <typename I>
void SnapshotRemoveRequest<I>::remove_object_map() {
  I &image_ctx = this->m_image_ctx;
  if (m_child_attached) {
    // if a clone v2 child is attached to this snapshot, we cannot
    // proceed. It's only an error if the snap was already in the trash
    this->complete(m_trashed_snapshot ? 0 : -EBUSY);
    return;
  }
  CephContext *cct = image_ctx.cct;
  {
    std::shared_lock owner_lock{image_ctx.owner_lock};
    std::unique_lock image_locker{image_ctx.image_lock};
    if (image_ctx.object_map != nullptr) {
      ldout(cct, 5) << dendl;
      auto ctx = create_context_callback<
        SnapshotRemoveRequest<I>,
        &SnapshotRemoveRequest<I>::handle_remove_object_map>(this);
      image_ctx.object_map->snapshot_remove(m_snap_id, ctx);
      return;
    }
  }
  // object map disabled
  remove_image_state();
}
// Completion of the object map removal; any failure aborts the request.
template <typename I>
void SnapshotRemoveRequest<I>::handle_remove_object_map(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to remove snapshot object map: " << cpp_strerror(r)
               << dendl;
    this->complete(r);
    return;
  }
  remove_image_state();
}
// For mirror snapshots only: removes the associated image-state objects;
// all other snapshot namespaces skip straight to releasing the snap id.
template <typename I>
void SnapshotRemoveRequest<I>::remove_image_state() {
  I &image_ctx = this->m_image_ctx;
  auto type = cls::rbd::get_snap_namespace_type(m_snap_namespace);
  if (type != cls::rbd::SNAPSHOT_NAMESPACE_TYPE_MIRROR) {
    release_snap_id();
    return;
  }
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;
  auto ctx = create_context_callback<
    SnapshotRemoveRequest<I>,
    &SnapshotRemoveRequest<I>::handle_remove_image_state>(this);
  auto req = mirror::snapshot::RemoveImageStateRequest<I>::create(
    &image_ctx, m_snap_id, ctx);
  req->send();
}
// Completion of the image-state removal; -ENOENT (nothing to remove) is
// logged but tolerated.
template <typename I>
void SnapshotRemoveRequest<I>::handle_remove_image_state(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to remove image state: " << cpp_strerror(r)
               << dendl;
    if (r != -ENOENT) {
      this->complete(r);
      return;
    }
  }
  release_snap_id();
}
// Releases the self-managed snap id back to the data pool; skipped when
// no data pool context is available.
template <typename I>
void SnapshotRemoveRequest<I>::release_snap_id() {
  I &image_ctx = this->m_image_ctx;
  if (!image_ctx.data_ctx.is_valid()) {
    remove_snap();
    return;
  }
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "snap_name=" << m_snap_name << ", "
                << "snap_id=" << m_snap_id << dendl;
  auto aio_comp = create_rados_callback<
    SnapshotRemoveRequest<I>,
    &SnapshotRemoveRequest<I>::handle_release_snap_id>(this);
  image_ctx.data_ctx.aio_selfmanaged_snap_remove(m_snap_id, aio_comp);
  aio_comp->release();
}
// Completion of the snap id release; -ENOENT (already released) is OK.
template <typename I>
void SnapshotRemoveRequest<I>::handle_release_snap_id(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "failed to release snap id: " << cpp_strerror(r) << dendl;
    this->complete(r);
    return;
  }
  remove_snap();
}
// Removes the snapshot record from the image header, using the legacy
// cls call for old-format images.
template <typename I>
void SnapshotRemoveRequest<I>::remove_snap() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  librados::ObjectWriteOperation op;
  if (image_ctx.old_format) {
    cls_client::old_snapshot_remove(&op, m_snap_name);
  } else {
    cls_client::snapshot_remove(&op, m_snap_id);
  }

  auto aio_comp = create_rados_callback<
    SnapshotRemoveRequest<I>,
    &SnapshotRemoveRequest<I>::handle_remove_snap>(this);
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Completion of the header update: drops the snapshot from the
// in-memory snap context and finishes the request.
template <typename I>
void SnapshotRemoveRequest<I>::handle_remove_snap(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to remove snapshot: " << cpp_strerror(r) << dendl;
    this->complete(r);
    return;
  }

  remove_snap_context();
  this->complete(0);
}
// Drops the snapshot from the in-memory image state under the image lock.
template <typename I>
void SnapshotRemoveRequest<I>::remove_snap_context() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  std::unique_lock image_locker{image_ctx.image_lock};
  image_ctx.rm_snap(m_snap_namespace, m_snap_name, m_snap_id);
}
// Searches the image's other snapshots for a reference to the same
// parent spec. Returns 0 when the parent is still referenced elsewhere
// (or when no parent is set), -ENOENT when this snapshot held the last
// reference. Caller must hold the image lock.
template <typename I>
int SnapshotRemoveRequest<I>::scan_for_parents(
    cls::rbd::ParentImageSpec &pspec) {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));

  if (pspec.pool_id == -1) {
    // no parent image associated with this snapshot
    return 0;
  }

  bool parent_referenced = false;
  for (const auto& [snap_id, snap_info] : image_ctx.snap_info) {
    // skip our snap id (if checking base image, CEPH_NOSNAP won't match)
    if (snap_id == m_snap_id) {
      continue;
    }
    if (snap_info.parent.spec == pspec) {
      parent_referenced = true;
      break;
    }
  }
  return parent_referenced ? 0 : -ENOENT;
}
} // namespace operation
} // namespace librbd
template class librbd::operation::SnapshotRemoveRequest<librbd::ImageCtx>;
| 13,768 | 26.211462 | 80 | cc |
null | ceph-main/src/librbd/operation/SnapshotRemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_REMOVE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_REMOVE_REQUEST_H
#include "librbd/operation/Request.h"
#include "include/buffer.h"
#include "librbd/Types.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
template <typename ImageCtxT = ImageCtx>
class SnapshotRemoveRequest : public Request<ImageCtxT> {
public:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * TRASH_SNAP
   *    |
   *    v (skip if unsupported)
   * GET_SNAP
   *    |
   *    v (skip if unnecessary)
   * LIST_CHILDREN <-------------\
   *    |                        |
   *    v (skip if unnecessary)  | (repeat as needed)
   * DETACH_STALE_CHILD ---------/
   *    |
   *    v (skip if unnecessary)
   * DETACH_CHILD
   *    |
   *    v (skip if disabled/in-use)
   * REMOVE_OBJECT_MAP
   *    |
   *    v (skip if not mirror snapshot)
   * REMOVE_IMAGE_STATE
   *    |
   *    v (skip if in-use)
   * RELEASE_SNAP_ID
   *    |
   *    v (skip if in-use)
   * REMOVE_SNAP
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  static SnapshotRemoveRequest *create(
      ImageCtxT &image_ctx, const cls::rbd::SnapshotNamespace &snap_namespace,
      const std::string &snap_name, uint64_t snap_id, Context *on_finish) {
    return new SnapshotRemoveRequest(image_ctx, on_finish, snap_namespace,
                                     snap_name, snap_id);
  }

  SnapshotRemoveRequest(ImageCtxT &image_ctx, Context *on_finish,
			const cls::rbd::SnapshotNamespace &snap_namespace,
			const std::string &snap_name,
                        uint64_t snap_id);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // journal replay event describing this operation
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapRemoveEvent(op_tid, m_snap_namespace, m_snap_name);
  }

private:
  cls::rbd::SnapshotNamespace m_snap_namespace;  // snapshot to remove
  cls::rbd::ChildImageSpecs m_child_images;  // clone v2 children of the snap
  std::string m_snap_name;
  uint64_t m_snap_id;
  bool m_trashed_snapshot = false;  // snap was moved to the trash namespace
  bool m_child_attached = false;    // a clone v2 child is still attached

  ceph::bufferlist m_out_bl;  // scratch buffer for cls read replies

  void trash_snap();
  void handle_trash_snap(int r);

  void get_snap();
  void handle_get_snap(int r);

  void list_children();
  void handle_list_children(int r);

  void detach_stale_child();
  void handle_detach_stale_child(int r);

  void detach_child();
  void handle_detach_child(int r);

  void remove_object_map();
  void handle_remove_object_map(int r);

  void remove_image_state();
  void handle_remove_image_state(int r);

  void release_snap_id();
  void handle_release_snap_id(int r);

  void remove_snap();
  void handle_remove_snap(int r);

  void remove_snap_context();
  int scan_for_parents(cls::rbd::ParentImageSpec &pspec);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotRemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_REMOVE_REQUEST_H
| 3,047 | 22.627907 | 81 | h |
null | ceph-main/src/librbd/operation/SnapshotRenameRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/SnapshotRenameRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::SnapshotRenameRequest: "
namespace librbd {
namespace operation {
namespace {
// Pretty-printer for the (single-valued) rename state machine state.
template <typename I>
std::ostream& operator<<(std::ostream& os,
                         const typename SnapshotRenameRequest<I>::State& state) {
  if (state == SnapshotRenameRequest<I>::STATE_RENAME_SNAP) {
    os << "RENAME_SNAP";
  }
  return os;
}
} // anonymous namespace
// @param snap_id   snapshot to rename
// @param snap_name new name for the snapshot
template <typename I>
SnapshotRenameRequest<I>::SnapshotRenameRequest(I &image_ctx,
                                                Context *on_finish,
                                                uint64_t snap_id,
                                                const std::string &snap_name)
  : Request<I>(image_ctx, on_finish), m_snap_id(snap_id),
    m_snap_name(snap_name), m_state(STATE_RENAME_SNAP) {
}
// Builds the journal replay event; the current (source) snapshot name is
// looked up from the in-memory snap info so replay can identify the snap.
template <typename I>
journal::Event SnapshotRenameRequest<I>::create_event(uint64_t op_tid) const {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));

  std::string src_snap_name;
  auto snap_info_it = image_ctx.snap_info.find(m_snap_id);
  if (snap_info_it != image_ctx.snap_info.end()) {
    src_snap_name = snap_info_it->second.name;
  }

  return journal::SnapRenameEvent(op_tid, m_snap_id, src_snap_name,
                                  m_snap_name);
}
// Entry point invoked by the base Request state machine.
template <typename I>
void SnapshotRenameRequest<I>::send_op() {
  send_rename_snap();
}
// Invoked when the rename update completes. -EEXIST (target name taken)
// is logged at low severity; other failures are reported as errors. The
// request always finishes after its single state.
template <typename I>
bool SnapshotRenameRequest<I>::should_complete(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": state=" << m_state << ", "
                << "r=" << r << dendl;
  if (r == -EEXIST) {
    ldout(cct, 1) << "snapshot already exists" << dendl;
  } else if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }
  return true;
}
// Issues the async header update that renames the snapshot, using the
// legacy cls call for old-format images; caller must hold the owner lock.
template <typename I>
void SnapshotRenameRequest<I>::send_rename_snap() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  std::shared_lock image_locker{image_ctx.image_lock};

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  librados::ObjectWriteOperation op;
  if (image_ctx.old_format) {
    cls_client::old_snapshot_rename(&op, m_snap_id, m_snap_name);
  } else {
    cls_client::snapshot_rename(&op, m_snap_id, m_snap_name);
  }

  // completion is routed back through should_complete()
  librados::AioCompletion *rados_completion = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid,
                                       rados_completion, &op);
  ceph_assert(r == 0);
  rados_completion->release();
}
} // namespace operation
} // namespace librbd
template class librbd::operation::SnapshotRenameRequest<librbd::ImageCtx>;
| 2,978 | 27.92233 | 81 | cc |
null | ceph-main/src/librbd/operation/SnapshotRenameRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_RENAME_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_RENAME_REQUEST_H
#include "librbd/operation/Request.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Single-state operation that renames a snapshot in the image header.
template <typename ImageCtxT = ImageCtx>
class SnapshotRenameRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Rename goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_RENAME_SNAP
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   *
   */
  enum State {
    STATE_RENAME_SNAP
  };

  SnapshotRenameRequest(ImageCtxT &image_ctx, Context *on_finish,
                        uint64_t snap_id, const std::string &snap_name);

  journal::Event create_event(uint64_t op_tid) const override;

protected:
  void send_op() override;
  bool should_complete(int r) override;

private:
  uint64_t m_snap_id;       // snapshot to rename
  std::string m_snap_name;  // new snapshot name
  State m_state;

  void send_rename_snap();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotRenameRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_RENAME_REQUEST_H
| 1,288 | 19.140625 | 81 | h |
null | ceph-main/src/librbd/operation/SnapshotRollbackRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/SnapshotRollbackRequest.h"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/operation/ResizeRequest.h"
#include "osdc/Striper.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::SnapshotRollbackRequest: "
namespace librbd {
namespace operation {
using util::create_context_callback;
using util::create_rados_callback;
namespace {
// Per-object rollback task driven by AsyncObjectThrottle: rolls a single
// data object back to the snapshot, skipping objects that neither the
// HEAD object map nor the snapshot object map say may exist.
template <typename I>
class C_RollbackObject : public C_AsyncObjectThrottle<I> {
public:
  C_RollbackObject(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                   uint64_t snap_id, uint64_t object_num,
                   uint64_t head_num_objects,
                   decltype(I::object_map) snap_object_map)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_snap_id(snap_id),
      m_object_num(object_num), m_head_num_objects(head_num_objects),
      m_snap_object_map(snap_object_map) {
  }

  // Returns 1 when the object can be skipped (nonexistent in both HEAD
  // and snapshot), 0 after dispatching the async rollback op.
  int send() override {
    I &image_ctx = this->m_image_ctx;
    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << "C_RollbackObject: " << __func__ << ": object_num="
                   << m_object_num << dendl;

    {
      std::shared_lock image_locker{image_ctx.image_lock};
      if (m_object_num < m_head_num_objects &&
          m_snap_object_map != nullptr &&
          !image_ctx.object_map->object_may_exist(m_object_num) &&
          !m_snap_object_map->object_may_exist(m_object_num)) {
        return 1;
      }
    }

    std::string oid = image_ctx.get_object_name(m_object_num);

    librados::ObjectWriteOperation op;
    op.selfmanaged_snap_rollback(m_snap_id);

    librados::AioCompletion *rados_completion =
      util::create_rados_callback(this);
    image_ctx.data_ctx.aio_operate(oid, rados_completion, &op);
    rados_completion->release();
    return 0;
  }

private:
  uint64_t m_snap_id;           // snapshot being rolled back to
  uint64_t m_object_num;        // data object handled by this task
  uint64_t m_head_num_objects;  // object count of the HEAD revision
  decltype(I::object_map) m_snap_object_map;  // snapshot's object map (may be null)
};
} // anonymous namespace
template <typename I>
// Build a rollback request for the given snapshot; @c on_finish fires when
// the operation completes and progress is reported through @c prog_ctx.
SnapshotRollbackRequest<I>::SnapshotRollbackRequest(
    I &image_ctx, Context *on_finish,
    const cls::rbd::SnapshotNamespace &snap_namespace,
    const std::string &snap_name, uint64_t snap_id, uint64_t snap_size,
    ProgressContext &prog_ctx)
  : Request<I>(image_ctx, on_finish),
    m_snap_namespace(snap_namespace),
    m_snap_name(snap_name),
    m_snap_id(snap_id),
    m_snap_size(snap_size),
    m_prog_ctx(prog_ctx),
    m_object_map(nullptr),
    m_snap_object_map(nullptr) {
}
template <typename I>
// Tear-down: re-enable writes if they are still blocked and release any
// object map references this request still owns.
SnapshotRollbackRequest<I>::~SnapshotRollbackRequest() {
  I &image_ctx = this->m_image_ctx;
  if (m_blocking_writes) {
    image_ctx.io_image_dispatcher->unblock_writes();
  }
  // drop both the HEAD and snapshot object map refs (either may be null)
  for (auto map_ptr : {&m_object_map, &m_snap_object_map}) {
    if (*map_ptr != nullptr) {
      (*map_ptr)->put();
      *map_ptr = nullptr;
    }
  }
}
template <typename I>
// State machine entry point: the rollback starts by quiescing image writes.
void SnapshotRollbackRequest<I>::send_op() {
  send_block_writes();
}
template <typename I>
// Block all image writes before mutating on-disk state; the destructor
// unblocks them if the request aborts early.
void SnapshotRollbackRequest<I>::send_block_writes() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  m_blocking_writes = true;
  image_ctx.io_image_dispatcher->block_writes(create_context_callback<
    SnapshotRollbackRequest<I>,
    &SnapshotRollbackRequest<I>::handle_block_writes>(this));
}
template <typename I>
// Completion of the write-block step; on success proceed to (possibly)
// resizing the image back to the snapshot size.
Context *SnapshotRollbackRequest<I>::handle_block_writes(int *result) {
  I &ictx = this->m_image_ctx;
  auto cct = ictx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result >= 0) {
    send_resize_image();
    return nullptr;
  }
  lderr(cct) << "failed to block writes: " << cpp_strerror(*result) << dendl;
  return this->create_context_finisher(*result);
}
template <typename I>
// Resize the image back to the snapshot's size (skipped when HEAD already
// matches) and record the current object count for later rollback checks.
void SnapshotRollbackRequest<I>::send_resize_image() {
  I &image_ctx = this->m_image_ctx;
  uint64_t current_size;
  {
    std::shared_lock owner_locker{image_ctx.owner_lock};
    std::shared_lock image_locker{image_ctx.image_lock};
    current_size = image_ctx.get_image_size(CEPH_NOSNAP);
  }
  // object count at the pre-rollback HEAD size, used by C_RollbackObject
  m_head_num_objects = Striper::get_num_objects(image_ctx.layout, current_size);
  if (current_size == m_snap_size) {
    // no resize required -- continue with the snapshot object map
    send_get_snap_object_map();
    return;
  }
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  std::shared_lock owner_locker{image_ctx.owner_lock};
  Context *ctx = create_context_callback<
    SnapshotRollbackRequest<I>,
    &SnapshotRollbackRequest<I>::handle_resize_image>(this);
  ResizeRequest<I> *req = ResizeRequest<I>::create(image_ctx, ctx, m_snap_size,
                                                   true, m_no_op_prog_ctx, 0, true);
  req->send();
}
template <typename I>
// Completion of the resize step; on success continue with the snapshot
// object map lookup, otherwise surface the error to the caller.
Context *SnapshotRollbackRequest<I>::handle_resize_image(int *result) {
  I &ictx = this->m_image_ctx;
  auto cct = ictx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result >= 0) {
    send_get_snap_object_map();
    return nullptr;
  }
  lderr(cct) << "failed to resize image for rollback: "
             << cpp_strerror(*result) << dendl;
  return this->create_context_finisher(*result);
}
template <typename I>
// Open the snapshot's object map so per-object existence checks can skip
// work; falls through to the rollback step when the object map feature is
// disabled or the snapshot's map is flagged invalid.
void SnapshotRollbackRequest<I>::send_get_snap_object_map() {
  I &image_ctx = this->m_image_ctx;
  uint64_t flags = 0;
  bool object_map_enabled;
  CephContext *cct = image_ctx.cct;
  {
    std::shared_lock owner_locker{image_ctx.owner_lock};
    std::shared_lock image_locker{image_ctx.image_lock};
    object_map_enabled = (image_ctx.object_map != nullptr);
    int r = image_ctx.get_flags(m_snap_id, &flags);
    if (r < 0) {
      // cannot determine snapshot flags -- treat the map as unusable
      object_map_enabled = false;
    }
  }
  if (object_map_enabled &&
      (flags & RBD_FLAG_OBJECT_MAP_INVALID) != 0) {
    lderr(cct) << "warning: object-map is invalid for snapshot" << dendl;
    object_map_enabled = false;
  }
  if (!object_map_enabled) {
    send_rollback_object_map();
    return;
  }
  ldout(cct, 5) << this << " " << __func__ << dendl;
  m_snap_object_map = image_ctx.create_object_map(m_snap_id);
  Context *ctx = create_context_callback<
    SnapshotRollbackRequest<I>,
    &SnapshotRollbackRequest<I>::handle_get_snap_object_map>(this);
  m_snap_object_map->open(ctx);
  return;
}
template <typename I>
// Completion of the snapshot object map open. The map is an optimization
// only: on failure it is dropped and the rollback proceeds without it.
Context *SnapshotRollbackRequest<I>::handle_get_snap_object_map(int *result) {
  I &ictx = this->m_image_ctx;
  auto cct = ictx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << this << " " << __func__ << ": failed to open object map: "
               << cpp_strerror(*result) << dendl;
    m_snap_object_map->put();
    m_snap_object_map = nullptr;
  }

  send_rollback_object_map();
  return nullptr;
}
template <typename I>
// Roll the HEAD object map back to the snapshot's state (skipped when the
// object map feature is disabled).
void SnapshotRollbackRequest<I>::send_rollback_object_map() {
  I &image_ctx = this->m_image_ctx;
  {
    std::shared_lock owner_locker{image_ctx.owner_lock};
    std::shared_lock image_locker{image_ctx.image_lock};
    if (image_ctx.object_map != nullptr) {
      CephContext *cct = image_ctx.cct;
      ldout(cct, 5) << this << " " << __func__ << dendl;
      Context *ctx = create_context_callback<
        SnapshotRollbackRequest<I>,
        &SnapshotRollbackRequest<I>::handle_rollback_object_map>(this);
      image_ctx.object_map->rollback(m_snap_id, ctx);
      return;
    }
  }
  send_rollback_objects();
}
template <typename I>
// Completion of the object map rollback; on failure run apply() before
// finishing so the swap/cleanup logic still executes (m_object_map is
// asserted null at this point), then propagate the error.
Context *SnapshotRollbackRequest<I>::handle_rollback_object_map(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << this << " " << __func__ << ": failed to roll back object "
               << "map: " << cpp_strerror(*result) << dendl;
    ceph_assert(m_object_map == nullptr);
    apply();
    return this->create_context_finisher(*result);
  }
  send_rollback_objects();
  return nullptr;
}
template <typename I>
// Roll back every data object via a throttled fan-out of C_RollbackObject
// units, bounded by rbd_concurrent_management_ops.
void SnapshotRollbackRequest<I>::send_rollback_objects() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  std::shared_lock owner_locker{image_ctx.owner_lock};
  uint64_t num_objects;
  {
    std::shared_lock image_locker{image_ctx.image_lock};
    num_objects = Striper::get_num_objects(image_ctx.layout,
                                           image_ctx.get_current_size());
  }
  Context *ctx = create_context_callback<
    SnapshotRollbackRequest<I>,
    &SnapshotRollbackRequest<I>::handle_rollback_objects>(this);
  // factory invoked by the throttle: _1 = throttle, _2 = object number
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_RollbackObject<I> >(),
      boost::lambda::_1, &image_ctx, m_snap_id, boost::lambda::_2,
      m_head_num_objects, m_snap_object_map));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, num_objects);
  throttle->start_ops(
    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
// Completion of the per-object rollback fan-out; -ERESTART indicates the
// operation was interrupted (e.g. shut down) rather than a hard failure.
Context *SnapshotRollbackRequest<I>::handle_rollback_objects(int *result) {
  I &ictx = this->m_image_ctx;
  auto cct = ictx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    if (*result == -ERESTART) {
      ldout(cct, 5) << "snapshot rollback operation interrupted" << dendl;
    } else {
      lderr(cct) << "failed to rollback objects: " << cpp_strerror(*result)
                 << dendl;
    }
    return this->create_context_finisher(*result);
  }
  return send_refresh_object_map();
}
template <typename I>
// Re-open a fresh HEAD object map reflecting post-rollback state (skipped
// when the object map feature is disabled); apply() swaps it in later.
Context *SnapshotRollbackRequest<I>::send_refresh_object_map() {
  I &image_ctx = this->m_image_ctx;
  bool object_map_enabled;
  {
    std::shared_lock owner_locker{image_ctx.owner_lock};
    std::shared_lock image_locker{image_ctx.image_lock};
    object_map_enabled = (image_ctx.object_map != nullptr);
  }
  if (!object_map_enabled) {
    return send_invalidate_cache();
  }
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  m_object_map = image_ctx.create_object_map(CEPH_NOSNAP);
  Context *ctx = create_context_callback<
    SnapshotRollbackRequest<I>,
    &SnapshotRollbackRequest<I>::handle_refresh_object_map>(this);
  m_object_map->open(ctx);
  return nullptr;
}
template <typename I>
// Completion of the HEAD object map refresh; on failure drop the map,
// still run apply() so the swap logic executes, and propagate the error.
Context *SnapshotRollbackRequest<I>::handle_refresh_object_map(int *result) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
  if (*result < 0) {
    lderr(cct) << this << " " << __func__ << ": failed to open object map: "
               << cpp_strerror(*result) << dendl;
    m_object_map->put();
    m_object_map = nullptr;
    apply();
    return this->create_context_finisher(*result);
  }
  return send_invalidate_cache();
}
template <typename I>
// Swap in the post-rollback object map (if any) and invalidate the image
// cache so subsequent reads observe the rolled-back data.
Context *SnapshotRollbackRequest<I>::send_invalidate_cache() {
  I &image_ctx = this->m_image_ctx;

  apply();

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  // NOTE(review): when a refreshed object map exists it is passed to the
  // callback factory -- presumably to keep it referenced while the
  // invalidation is in flight (confirm against util::create_context_callback)
  Context *ctx = (m_object_map != nullptr) ?
    create_context_callback<
      SnapshotRollbackRequest<I>,
      &SnapshotRollbackRequest<I>::handle_invalidate_cache>(this,
                                                            m_object_map) :
    create_context_callback<
      SnapshotRollbackRequest<I>,
      &SnapshotRollbackRequest<I>::handle_invalidate_cache>(this);
  image_ctx.io_image_dispatcher->invalidate_cache(ctx);
  return nullptr;
}
template <typename I>
// Final step: report the cache invalidation result. The rollback itself
// has already been applied, so a failure here is logged and propagated.
Context *SnapshotRollbackRequest<I>::handle_invalidate_cache(int *result) {
  I &ictx = this->m_image_ctx;
  auto cct = ictx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to invalidate cache: " << cpp_strerror(*result)
               << dendl;
  }
  return this->create_context_finisher(*result);
}
template <typename I>
// Publish the refreshed object map on the image context; the previous map
// ends up in m_object_map and is released by the destructor.
void SnapshotRollbackRequest<I>::apply() {
  I &ictx = this->m_image_ctx;
  std::shared_lock owner_locker{ictx.owner_lock};
  std::unique_lock image_locker{ictx.image_lock};
  if (ictx.object_map == nullptr) {
    return;
  }
  std::swap(m_object_map, ictx.object_map);
}
} // namespace operation
} // namespace librbd
template class librbd::operation::SnapshotRollbackRequest<librbd::ImageCtx>;
| 13,321 | 30.345882 | 84 | cc |
null | ceph-main/src/librbd/operation/SnapshotRollbackRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_ROLLBACK_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_ROLLBACK_REQUEST_H
#include "librbd/operation/Request.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/journal/Types.h"
#include <string>
class Context;
namespace librbd {
class ProgressContext;
namespace operation {
template <typename ImageCtxT = ImageCtx>
class SnapshotRollbackRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Rollback goes through the following state machine:
   *
   * @verbatim
   *
   * <start> ---------\
   *                  |
   *                  v
   *            STATE_BLOCK_WRITES
   *                  |
   *                  v
   *            STATE_RESIZE_IMAGE (skip if resize not
   *                  |             required)
   *                  v
   *            STATE_GET_SNAP_OBJECT_MAP (skip if object)
   *                  |                    map disabled)
   *                  v
   *            STATE_ROLLBACK_OBJECT_MAP (skip if object
   *                  |                    map disabled)
   *                  v
   *            STATE_ROLLBACK_OBJECTS
   *                  |
   *                  v
   *            STATE_REFRESH_OBJECT_MAP (skip if object
   *                  |                   map disabled)
   *                  v
   *            STATE_INVALIDATE_CACHE (skip if cache
   *                  |                 disabled)
   *                  v
   *            <finish>
   *
   * @endverbatim
   *
   * The _RESIZE_IMAGE state is skipped if the image doesn't need to be resized.
   * The _ROLLBACK_OBJECT_MAP state is skipped if the object map isn't enabled.
   * The _INVALIDATE_CACHE state is skipped if the cache isn't enabled.
   */
  SnapshotRollbackRequest(ImageCtxT &image_ctx, Context *on_finish,
                          const cls::rbd::SnapshotNamespace &snap_namespace,
			  const std::string &snap_name,
                          uint64_t snap_id,
                          uint64_t snap_size, ProgressContext &prog_ctx);
  ~SnapshotRollbackRequest() override;
protected:
  void send_op() override;
  // rollback runs synchronously through its own state machine; nothing
  // further to do once the final context fires
  bool should_complete(int r) override {
    return true;
  }
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapRollbackEvent(op_tid, m_snap_namespace, m_snap_name);
  }
private:
  cls::rbd::SnapshotNamespace m_snap_namespace;
  std::string m_snap_name;
  uint64_t m_snap_id;            // target snapshot id
  uint64_t m_snap_size;          // image size recorded at the snapshot
  uint64_t m_head_num_objects;   // object count at pre-rollback HEAD size
  ProgressContext &m_prog_ctx;
  NoOpProgressContext m_no_op_prog_ctx;  // used for the internal resize
  bool m_blocking_writes = false;  // true while image writes are blocked
  decltype(ImageCtxT::object_map) m_object_map;       // refreshed HEAD map
  decltype(ImageCtxT::object_map) m_snap_object_map;  // snapshot's map
  void send_block_writes();
  Context *handle_block_writes(int *result);
  void send_resize_image();
  Context *handle_resize_image(int *result);
  void send_get_snap_object_map();
  Context *handle_get_snap_object_map(int *result);
  void send_rollback_object_map();
  Context *handle_rollback_object_map(int *result);
  void send_rollback_objects();
  Context *handle_rollback_objects(int *result);
  Context *send_refresh_object_map();
  Context *handle_refresh_object_map(int *result);
  Context *send_invalidate_cache();
  Context *handle_invalidate_cache(int *result);
  // swap the refreshed object map into the image context
  void apply();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotRollbackRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_ROLLBACK_REQUEST_H
| 3,587 | 28.170732 | 83 | h |
null | ceph-main/src/librbd/operation/SnapshotUnprotectRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/SnapshotUnprotectRequest.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include <list>
#include <set>
#include <vector>
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::SnapshotUnprotectRequest: "
namespace librbd {
namespace operation {
namespace {
typedef std::pair<int64_t, std::string> Pool;
typedef std::vector<Pool> Pools;
template <typename I>
// Render a SnapshotUnprotectRequest state for debug logging.
std::ostream& operator<<(std::ostream& os,
                         const typename SnapshotUnprotectRequest<I>::State& state) {
  switch (state) {
  case SnapshotUnprotectRequest<I>::STATE_UNPROTECT_SNAP_START:
    return os << "UNPROTECT_SNAP_START";
  case SnapshotUnprotectRequest<I>::STATE_SCAN_POOL_CHILDREN:
    return os << "SCAN_POOL_CHILDREN";
  case SnapshotUnprotectRequest<I>::STATE_UNPROTECT_SNAP_FINISH:
    return os << "UNPROTECT_SNAP_FINISH";
  case SnapshotUnprotectRequest<I>::STATE_UNPROTECT_SNAP_ROLLBACK:
    return os << "UNPROTECT_SNAP_ROLLBACK";
  default:
    return os << "UNKNOWN (" << static_cast<uint32_t>(state) << ")";
  }
}
template <typename I>
// Per-pool throttle unit: checks one pool for clone children of the
// snapshot being unprotected.  Completes with -EBUSY when children exist,
// 0 when the pool is clean, and returns 1 from send() to skip pools that
// vanished or are cache tiers.
class C_ScanPoolChildren : public C_AsyncObjectThrottle<I> {
public:
  C_ScanPoolChildren(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                     const cls::rbd::ParentImageSpec &pspec, const Pools &pools,
                     size_t pool_idx)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_pspec(pspec),
      m_pool(pools[pool_idx]) {
  }
  int send() override {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    CephContext *cct = image_ctx.cct;
    ldout(cct, 10) << this << " scanning pool '" << m_pool.second << "'"
                   << dendl;
    librados::Rados rados(image_ctx.md_ctx);
    int64_t base_tier;
    int r = rados.pool_get_base_tier(m_pool.first, &base_tier);
    if (r == -ENOENT) {
      // pool deleted since listing -- nothing to scan
      ldout(cct, 1) << "pool '" << m_pool.second << "' no longer exists"
                    << dendl;
      return 1;
    } else if (r < 0) {
      lderr(cct) << "error retrieving base tier for pool '"
                 << m_pool.second << "'" << dendl;
      return r;
    }
    if (m_pool.first != base_tier) {
      // pool is a cache; skip it
      return 1;
    }
    r = util::create_ioctx(image_ctx.md_ctx, "child image", m_pool.first, {},
                           &m_pool_ioctx);
    if (r == -ENOENT) {
      return 1;
    } else if (r < 0) {
      return r;
    }
    // asynchronously list children registered against our parent spec
    librados::ObjectReadOperation op;
    cls_client::get_children_start(&op, m_pspec);
    librados::AioCompletion *rados_completion =
      util::create_rados_callback(this);
    r = m_pool_ioctx.aio_operate(RBD_CHILDREN, rados_completion, &op,
                                 &m_children_bl);
    ceph_assert(r == 0);
    rados_completion->release();
    return 0;
  }
protected:
  void finish(int r) override {
    I &image_ctx = this->m_image_ctx;
    CephContext *cct = image_ctx.cct;
    if (r == 0) {
      auto it = m_children_bl.cbegin();
      r= cls_client::get_children_finish(&it, &m_children);
    }
    ldout(cct, 10) << this << " retrieved children: r=" << r << dendl;
    if (r == -ENOENT) {
      // no children -- proceed with unprotect
      r = 0;
    } else if (r < 0) {
      lderr(cct) << "cannot get children for pool '" << m_pool.second << "'"
                 << dendl;
    } else {
      // any surviving child blocks the unprotect
      lderr(cct) << "cannot unprotect: at least " << m_children.size() << " "
                 << "child(ren) [" << joinify(m_children.begin(),
                                              m_children.end(),
                                              std::string(",")) << "] "
                 << "in pool '" << m_pool.second << "'" << dendl;
      r = -EBUSY;
    }
    C_AsyncObjectThrottle<I>::finish(r);
  }
private:
  cls::rbd::ParentImageSpec m_pspec;   // parent (this image + snap) spec
  Pool m_pool;                         // pool id/name under scan
  IoCtx m_pool_ioctx;
  std::set<std::string> m_children;    // child image ids found
  bufferlist m_children_bl;
};
} // anonymous namespace
template <typename I>
// Build an unprotect request; the state machine starts in
// STATE_UNPROTECT_SNAP_START and the snap id is resolved lazily.
SnapshotUnprotectRequest<I>::SnapshotUnprotectRequest(
    I &image_ctx, Context *on_finish,
    const cls::rbd::SnapshotNamespace &snap_namespace,
    const std::string &snap_name)
  : Request<I>(image_ctx, on_finish),
    m_snap_namespace(snap_namespace),
    m_snap_name(snap_name),
    m_state(STATE_UNPROTECT_SNAP_START),
    m_ret_val(0),
    m_snap_id(CEPH_NOSNAP) {
}
template <typename I>
// State machine entry point: begin by marking the snapshot UNPROTECTING.
void SnapshotUnprotectRequest<I>::send_op() {
  send_unprotect_snap_start();
}
template <typename I>
// Advance the state machine after each async step completes.  Returns true
// once the request is finished.  The first error is latched in m_ret_val
// and switches processing to the rollback error path.
bool SnapshotUnprotectRequest<I>::should_complete(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": state=" << m_state << ", "
                << "r=" << r << dendl;
  if (r < 0) {
    if (r == -EINVAL) {
      ldout(cct, 1) << "snapshot is already unprotected" << dendl;
    } else {
      lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
    }
    if (m_ret_val == 0) {
      m_ret_val = r;
    }
  }
  // use a different state machine once an error is encountered
  if (m_ret_val < 0) {
    return should_complete_error();
  }
  std::shared_lock owner_lock{image_ctx.owner_lock};
  bool finished = false;
  switch (m_state) {
  case STATE_UNPROTECT_SNAP_START:
    send_scan_pool_children();
    break;
  case STATE_SCAN_POOL_CHILDREN:
    send_unprotect_snap_finish();
    break;
  case STATE_UNPROTECT_SNAP_FINISH:
    finished = true;
    break;
  default:
    ceph_abort();
    break;
  }
  return finished;
}
template <typename I>
// Error path: once the scan or finish step has failed, the in-progress
// UNPROTECTING status must be rolled back before the request completes.
bool SnapshotUnprotectRequest<I>::should_complete_error() {
  I &ictx = this->m_image_ctx;
  std::shared_lock owner_locker{ictx.owner_lock};
  CephContext *cct = ictx.cct;
  lderr(cct) << this << " " << __func__ << ": "
             << "ret_val=" << m_ret_val << dendl;

  if (m_state != STATE_SCAN_POOL_CHILDREN &&
      m_state != STATE_UNPROTECT_SNAP_FINISH) {
    return true;
  }
  send_unprotect_snap_rollback();
  return false;
}
template <typename I>
// Kick off the unprotect by validating preconditions and flipping the
// snapshot to UNPROTECTING; validation failures complete asynchronously.
void SnapshotUnprotectRequest<I>::send_unprotect_snap_start() {
  I &ictx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(ictx.owner_lock));

  CephContext *cct = ictx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;

  if (int r = verify_and_send_unprotect_snap_start(); r < 0) {
    this->async_complete(r);
  }
}
template <typename I>
// Scan every pool in the cluster for clone children of this snapshot; any
// child found will fail the scan with -EBUSY.
void SnapshotUnprotectRequest<I>::send_scan_pool_children() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  m_state = STATE_SCAN_POOL_CHILDREN;
  // search all pools for children depending on this snapshot
  // TODO add async version of wait_for_latest_osdmap
  librados::Rados rados(image_ctx.md_ctx);
  rados.wait_for_latest_osdmap();
  // protect against pools being renamed/deleted
  std::list<Pool> pool_list;
  rados.pool_list2(pool_list);
  cls::rbd::ParentImageSpec pspec(image_ctx.md_ctx.get_id(),
                                  image_ctx.md_ctx.get_namespace(),
                                  image_ctx.id, m_snap_id);
  Pools pools(pool_list.begin(), pool_list.end());
  Context *ctx = this->create_callback_context();
  // factory invoked by the throttle: _1 = throttle, _2 = pool index
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_ScanPoolChildren<I> >(),
      boost::lambda::_1, &image_ctx, pspec, pools, boost::lambda::_2));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    nullptr, image_ctx, context_factory, ctx, NULL, 0, pools.size());
  throttle->start_ops(
    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
// No children found: persist the final UNPROTECTED status in the image
// header.
void SnapshotUnprotectRequest<I>::send_unprotect_snap_finish() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  m_state = STATE_UNPROTECT_SNAP_FINISH;
  librados::ObjectWriteOperation op;
  cls_client::set_protection_status(&op, m_snap_id,
                                    RBD_PROTECTION_STATUS_UNPROTECTED);
  librados::AioCompletion *comp = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
template <typename I>
// Error recovery: restore the PROTECTED status after a failed scan/finish
// so the snapshot is not left stuck in UNPROTECTING.
void SnapshotUnprotectRequest<I>::send_unprotect_snap_rollback() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  m_state = STATE_UNPROTECT_SNAP_ROLLBACK;
  librados::ObjectWriteOperation op;
  cls_client::set_protection_status(&op, m_snap_id,
                                    RBD_PROTECTION_STATUS_PROTECTED);
  librados::AioCompletion *comp = this->create_callback_completion();
  int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
template <typename I>
// Validate that the snapshot exists, the image supports layering, and the
// snapshot is currently protected; then asynchronously flip the header
// status to UNPROTECTING.  Returns <0 on a validation failure.
int SnapshotUnprotectRequest<I>::verify_and_send_unprotect_snap_start() {
  I &image_ctx = this->m_image_ctx;
  std::shared_lock image_locker{image_ctx.image_lock};
  CephContext *cct = image_ctx.cct;
  if ((image_ctx.features & RBD_FEATURE_LAYERING) == 0) {
    lderr(cct) << "image must support layering" << dendl;
    return -ENOSYS;
  }
  m_snap_id = image_ctx.get_snap_id(m_snap_namespace, m_snap_name);
  if (m_snap_id == CEPH_NOSNAP) {
    return -ENOENT;
  }
  bool is_unprotected;
  int r = image_ctx.is_snap_unprotected(m_snap_id, &is_unprotected);
  if (r < 0) {
    return r;
  }
  if (is_unprotected) {
    lderr(cct) << "snapshot is already unprotected" << dendl;
    return -EINVAL;
  }
  librados::ObjectWriteOperation op;
  cls_client::set_protection_status(&op, m_snap_id,
                                    RBD_PROTECTION_STATUS_UNPROTECTING);
  librados::AioCompletion *comp = this->create_callback_completion();
  r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
  // TODO legacy code threw a notification post UNPROTECTING update -- required?
  return 0;
}
} // namespace operation
} // namespace librbd
template class librbd::operation::SnapshotUnprotectRequest<librbd::ImageCtx>;
| 10,926 | 29.867232 | 104 | cc |
null | ceph-main/src/librbd/operation/SnapshotUnprotectRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_UNPROTECT_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_UNPROTECT_REQUEST_H
#include "librbd/operation/Request.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
template <typename ImageCtxT = ImageCtx>
class SnapshotUnprotectRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Unprotect goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_UNPROTECT_SNAP_START
   *    |
   *    v
   * STATE_SCAN_POOL_CHILDREN * * * * > STATE_UNPROTECT_SNAP_ROLLBACK
   *    |                                 |
   *    v                                 |
   * STATE_UNPROTECT_SNAP_FINISH          |
   *    |                                 |
   *    v                                 |
   * <finish> <----------------------------/
   *
   * @endverbatim
   *
   * If the unprotect operation needs to abort, the error path is followed
   * to rollback the unprotect in-progress status on the image.
   */
  enum State {
    STATE_UNPROTECT_SNAP_START,
    STATE_SCAN_POOL_CHILDREN,
    STATE_UNPROTECT_SNAP_FINISH,
    STATE_UNPROTECT_SNAP_ROLLBACK
  };
  SnapshotUnprotectRequest(ImageCtxT &image_ctx, Context *on_finish,
			   const cls::rbd::SnapshotNamespace &snap_namespace,
			   const std::string &snap_name);
protected:
  void send_op() override;
  bool should_complete(int r) override;
  // report the latched error (if any); success paths always return 0
  int filter_return_code(int r) const override {
    if (m_ret_val < 0) {
      return m_ret_val;
    }
    return 0;
  }
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapUnprotectEvent(op_tid, m_snap_namespace, m_snap_name);
  }
private:
  cls::rbd::SnapshotNamespace m_snap_namespace;
  std::string m_snap_name;
  State m_state;        // current state-machine state
  int m_ret_val;        // first error encountered (0 on success)
  uint64_t m_snap_id;   // resolved lazily from namespace + name
  bool should_complete_error();
  void send_unprotect_snap_start();
  void send_scan_pool_children();
  void send_unprotect_snap_finish();
  void send_unprotect_snap_rollback();
  int verify_and_send_unprotect_snap_start();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotUnprotectRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_UNPROTECT_REQUEST_H
| 2,390 | 24.168421 | 84 | h |
null | ceph-main/src/librbd/operation/SparsifyRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/SparsifyRequest.h"
#include "cls/rbd/cls_rbd_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/err.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include "librbd/io/ObjectRequest.h"
#include "librbd/io/Utils.h"
#include "osdc/Striper.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
namespace librbd {
namespace operation {
namespace {
// Determine whether the object's tail can be trimmed: walks the sparse-read
// extents in reverse, discounting sparse_size-aligned chunks that are all
// zeros.  On success stores the new (smaller) object end in *new_end_ptr
// and returns true; returns false when no trailing zeros were found.
// @param extent_map sparse-read extent map (offset -> length), data packed
//        contiguously in @c bl in extent order
bool may_be_trimmed(const std::map<uint64_t,uint64_t> &extent_map,
                    const bufferlist &bl, size_t sparse_size,
                    uint64_t *new_end_ptr) {
  if (extent_map.empty()) {
    // fully sparse object -- can be trimmed to zero length
    *new_end_ptr = 0;
    return true;
  }
  uint64_t end = extent_map.rbegin()->first + extent_map.rbegin()->second;
  uint64_t new_end = end;
  uint64_t bl_off = bl.length();
  // scan extents from the end of the object towards the start
  for (auto it = extent_map.rbegin(); it != extent_map.rend(); it++) {
    auto off = it->first;
    auto len = it->second;
    new_end = p2roundup<uint64_t>(off + len, sparse_size);
    uint64_t extent_left = len;
    // the last chunk of the extent may be a partial sparse_size unit
    uint64_t sub_len = len % sparse_size;
    if (sub_len == 0) {
      sub_len = sparse_size;
    }
    while (extent_left > 0) {
      ceph_assert(bl_off >= sub_len);
      bl_off -= sub_len;
      bufferlist sub_bl;
      sub_bl.substr_of(bl, bl_off, sub_len);
      if (!sub_bl.is_zero()) {
        // hit non-zero data -- stop shrinking within this extent
        break;
      }
      new_end -= sparse_size;
      extent_left -= sub_len;
      sub_len = sparse_size;
    }
    if (extent_left > 0) {
      // non-zero data found -- no further extents can be trimmed
      break;
    }
  }
  if (new_end < end) {
    *new_end_ptr = new_end;
    return true;
  }
  return false;
}
} // anonymous namespace
using util::create_context_callback;
using util::create_rados_callback;
#undef dout_prefix
#define dout_prefix *_dout << "librbd::operation::SparsifyObject: " << this \
<< " " << m_oid << " " << __func__ << ": "
template <typename I>
class C_SparsifyObject : public C_AsyncObjectThrottle<I> {
public:
/**
* @verbatim
*
* <start>
* |
* v (not supported)
* SPARSIFY * * * * * * * * * * * * > READ < * * * * * * * * * * (concurrent
* | | * update is
* | (object map disabled) | (can trim) * detected)
* |------------------------\ V *
* | | PRE UPDATE OBJECT MAP *
* | (object map enabled) | | (if needed) *
* v | V *
* PRE UPDATE OBJECT MAP | TRIM * * * * * * * * * * *
* | | |
* v | V
* CHECK EXISTS | POST UPDATE OBJECT MAP
* | | | (if needed)
* v | |
* POST UPDATE OBJECT MAP | |
* | | |
* v | |
* <finish> <------------------/<-------/
*
* @endverbatim
*
*/
  // Per-object throttle unit: sparsifies one data object (see the state
  // diagram above).  @param sparse_size granularity for zero detection.
  C_SparsifyObject(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                   uint64_t object_no, size_t sparse_size)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_cct(image_ctx->cct),
      m_object_no(object_no), m_sparse_size(sparse_size),
      m_oid(image_ctx->get_object_name(object_no)) {
  }
  // Returns 1 to skip the object, <0 on immediate error, 0 when an async
  // sparsify op was issued.
  int send() override {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    ldout(m_cct, 20) << dendl;
    if (!image_ctx.data_ctx.is_valid()) {
      lderr(m_cct) << "missing data pool" << dendl;
      return -ENODEV;
    }
    if (image_ctx.exclusive_lock != nullptr &&
        !image_ctx.exclusive_lock->is_lock_owner()) {
      ldout(m_cct, 1) << "lost exclusive lock during sparsify" << dendl;
      return -ERESTART;
    }
    {
      std::shared_lock image_locker{image_ctx.image_lock};
      if (image_ctx.object_map != nullptr &&
          !image_ctx.object_map->object_may_exist(m_object_no)) {
        // can skip because the object does not exist
        return 1;
      }
      uint64_t raw_overlap = 0;
      uint64_t object_overlap = 0;
      int r = image_ctx.get_parent_overlap(CEPH_NOSNAP, &raw_overlap);
      ceph_assert(r == 0);
      if (raw_overlap > 0) {
        auto [parent_extents, area] = io::util::object_to_area_extents(
            &image_ctx, m_object_no, {{0, image_ctx.layout.object_size}});
        object_overlap = image_ctx.prune_parent_extents(parent_extents, area,
                                                        raw_overlap, false);
      }
      // only remove an emptied object when no parent data shows through it
      m_remove_empty = object_overlap == 0;
    }
    send_sparsify();
    return 0;
  }
void send_sparsify() {
I &image_ctx = this->m_image_ctx;
ldout(m_cct, 20) << dendl;
librados::ObjectWriteOperation op;
cls_client::sparsify(&op, m_sparse_size, m_remove_empty);
auto comp = create_rados_callback<
C_SparsifyObject, &C_SparsifyObject::handle_sparsify>(this);
int r = image_ctx.data_ctx.aio_operate(m_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
void handle_sparsify(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
if (r == -EOPNOTSUPP) {
m_trying_trim = true;
send_read();
return;
}
if (r == -ENOENT) {
finish_op(0);
return;
}
if (r < 0) {
lderr(m_cct) << "failed to sparsify: " << cpp_strerror(r) << dendl;
finish_op(r);
return;
}
send_pre_update_object_map();
}
  // Flip the object map entry to OBJECT_PENDING before a potential object
  // removal; skipped (jumping straight to trim/finish) when the object map
  // feature is disabled or no removal can occur.
  void send_pre_update_object_map() {
    I &image_ctx = this->m_image_ctx;
    if (m_trying_trim) {
      if (!m_remove_empty || m_new_end != 0 ||
          !image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
        send_trim();
        return;
      }
    } else if (!m_remove_empty ||
               !image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
      finish_op(0);
      return;
    }
    ldout(m_cct, 20) << dendl;
    // manual lock/unlock: the locks must be dropped before the early-exit
    // finish_op() calls below
    image_ctx.owner_lock.lock_shared();
    image_ctx.image_lock.lock_shared();
    if (image_ctx.object_map == nullptr) {
      // possible that exclusive lock was lost in background
      lderr(m_cct) << "object map is not initialized" << dendl;
      image_ctx.image_lock.unlock_shared();
      image_ctx.owner_lock.unlock_shared();
      finish_op(-EINVAL);
      return;
    }
    int r;
    m_finish_op_ctx = image_ctx.exclusive_lock->start_op(&r);
    if (m_finish_op_ctx == nullptr) {
      lderr(m_cct) << "lost exclusive lock" << dendl;
      image_ctx.image_lock.unlock_shared();
      image_ctx.owner_lock.unlock_shared();
      finish_op(r);
      return;
    }
    auto ctx = create_context_callback<
      C_SparsifyObject<I>,
      &C_SparsifyObject<I>::handle_pre_update_object_map>(this);
    bool sent = image_ctx.object_map->template aio_update<
      Context, &Context::complete>(CEPH_NOSNAP, m_object_no, OBJECT_PENDING,
                                   OBJECT_EXISTS, {}, false, ctx);
    // NOTE: state machine might complete before we reach here
    image_ctx.image_lock.unlock_shared();
    image_ctx.owner_lock.unlock_shared();
    if (!sent) {
      // no state transition required -- complete inline
      finish_op(0);
    }
  }
void handle_pre_update_object_map(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to update object map: " << cpp_strerror(r)
<< dendl;
finish_op(r);
return;
}
if (m_trying_trim) {
send_trim();
} else {
send_check_exists();
}
}
void send_check_exists() {
I &image_ctx = this->m_image_ctx;
ldout(m_cct, 20) << dendl;
librados::ObjectReadOperation op;
op.stat(NULL, NULL, NULL);
m_bl.clear();
auto comp = create_rados_callback<
C_SparsifyObject, &C_SparsifyObject::handle_check_exists>(this);
int r = image_ctx.data_ctx.aio_operate(m_oid, comp, &op, &m_bl);
ceph_assert(r == 0);
comp->release();
}
// Stat result: -ENOENT means the object is gone; other errors abort.
void handle_check_exists(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "stat failed: " << cpp_strerror(r) << dendl;
    finish_op(r);
    return;
  }

  bool object_exists = (r == 0);
  send_post_update_object_map(object_exists);
}
void send_post_update_object_map(bool exists) {
I &image_ctx = this->m_image_ctx;
ldout(m_cct, 20) << dendl;
auto ctx = create_context_callback<
C_SparsifyObject<I>,
&C_SparsifyObject<I>::handle_post_update_object_map>(this);
bool sent;
{
std::shared_lock owner_locker{image_ctx.owner_lock};
std::shared_lock image_locker{image_ctx.image_lock};
assert(image_ctx.exclusive_lock->is_lock_owner());
assert(image_ctx.object_map != nullptr);
sent = image_ctx.object_map->template aio_update<
Context, &Context::complete>(CEPH_NOSNAP, m_object_no,
exists ? OBJECT_EXISTS : OBJECT_NONEXISTENT,
OBJECT_PENDING, {}, false, ctx);
}
if (!sent) {
ctx->complete(0);
}
}
// Final object-map transition done; terminate this per-object state machine.
void handle_post_update_object_map(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to update object map: " << cpp_strerror(r)
                 << dendl;
  }
  finish_op(r < 0 ? r : 0);
}
// Sparse-read the whole object so may_be_trimmed() can decide whether its
// tail is reclaimable zeroes.
void send_read() {
  I &image_ctx = this->m_image_ctx;
  ldout(m_cct, 20) << dendl;

  librados::ObjectReadOperation op;
  m_bl.clear();
  op.sparse_read(0, image_ctx.layout.object_size, &m_extent_map, &m_bl,
                 nullptr);

  auto comp = create_rados_callback<
    C_SparsifyObject, &C_SparsifyObject::handle_read>(this);
  int r = image_ctx.data_ctx.aio_operate(m_oid, comp, &op, &m_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Examine the sparse-read result and proceed only when the object can
// actually be trimmed.
void handle_read(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r == -ENOENT) {
    // object disappeared underneath us -- nothing left to sparsify
    finish_op(0);
    return;
  } else if (r < 0) {
    lderr(m_cct) << "failed to read object: " << cpp_strerror(r) << dendl;
    finish_op(r);
    return;
  }

  if (may_be_trimmed(m_extent_map, m_bl, m_sparse_size, &m_new_end)) {
    send_pre_update_object_map();
  } else {
    finish_op(0);
  }
}
// Atomically verify the tail [m_new_end, object_size) is still all zeroes
// (cmpext) and then remove or truncate the object.  A concurrent write
// breaks the compare and surfaces as r <= -MAX_ERRNO in handle_trim().
void send_trim() {
  I &image_ctx = this->m_image_ctx;
  ldout(m_cct, 20) << dendl;

  ceph_assert(m_new_end < image_ctx.layout.object_size);

  librados::ObjectWriteOperation op;
  m_bl.clear();
  m_bl.append_zero(image_ctx.layout.object_size - m_new_end);
  op.cmpext(m_new_end, m_bl, nullptr);
  if (m_new_end == 0 && m_remove_empty) {
    // entire object is zeroes -- delete it outright
    op.remove();
  } else {
    op.truncate(m_new_end);
  }

  auto comp = create_rados_callback<
    C_SparsifyObject, &C_SparsifyObject::handle_trim>(this);
  int r = image_ctx.data_ctx.aio_operate(m_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Trim result.  A cmpext mismatch (r <= -MAX_ERRNO) means the object was
// written concurrently: release the current exclusive-lock op context and
// restart from a fresh read.
void handle_trim(int r) {
  I &image_ctx = this->m_image_ctx;
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r <= -MAX_ERRNO) {
    // cmpext mismatch -- re-read the object and retry
    m_finish_op_ctx->complete(0);
    m_finish_op_ctx = nullptr;
    send_read();
    return;
  }

  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "failed to trim: " << cpp_strerror(r) << dendl;
    finish_op(r);
    return;
  }

  if (!m_remove_empty || m_new_end != 0 ||
      !image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
    finish_op(0);
    return;
  }

  // whole object was removed -- record OBJECT_NONEXISTENT in the object map
  send_post_update_object_map(false);
}
// Complete this per-object state machine, first releasing the
// exclusive-lock op context if one is still held.
void finish_op(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (m_finish_op_ctx) {
    m_finish_op_ctx->complete(0);
  }
  this->complete(r);
}
private:
  CephContext *m_cct;
  uint64_t m_object_no;
  size_t m_sparse_size;       // granularity passed to may_be_trimmed()
  std::string m_oid;          // backing RADOS object name

  bool m_remove_empty = false;  // whole-object removal permitted
  bool m_trying_trim = false;   // currently on the cmpext+trim retry path
  bufferlist m_bl;              // scratch buffer (sparse read / zero compare)
  std::map<uint64_t,uint64_t> m_extent_map;  // sparse-read extent map
  uint64_t m_new_end = 0;       // object length to truncate down to
  Context *m_finish_op_ctx = nullptr;  // exclusive-lock op tracking context
};
#undef dout_prefix
#define dout_prefix *_dout << "librbd::operation::SparsifyRequest: " << this \
<< " " << __func__ << ": "
// Single-stage request: the throttle completion is the terminal event.
template <typename I>
bool SparsifyRequest<I>::should_complete(int r) {
  CephContext *cct = this->m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }

  // nothing further to drive -- always finish
  return true;
}
// Entry point invoked by the operation framework; launches the throttle.
template <typename I>
void SparsifyRequest<I>::send_op() {
  sparsify_objects();
}
// Launch a C_SparsifyObject state machine for every object in the image,
// bounded by rbd_concurrent_management_ops.  (A redundant plain assert()
// duplicating the ceph_assert() lock check was removed.)
template <typename I>
void SparsifyRequest<I>::sparsify_objects() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << dendl;

  uint64_t objects = 0;
  {
    std::shared_lock image_locker{image_ctx.image_lock};
    objects = image_ctx.get_object_count(CEPH_NOSNAP);
  }

  auto ctx = create_context_callback<
    SparsifyRequest<I>,
    &SparsifyRequest<I>::handle_sparsify_objects>(this);
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_SparsifyObject<I> >(),
      boost::lambda::_1, &image_ctx, boost::lambda::_2, m_sparse_size));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, objects);
  throttle->start_ops(
    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
// Completion of the object throttle: propagate interruption or error,
// otherwise report success.
template <typename I>
void SparsifyRequest<I>::handle_sparsify_objects(int r) {
  CephContext *cct = this->m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r == -ERESTART) {
    ldout(cct, 5) << "sparsify operation interrupted" << dendl;
  } else if (r < 0) {
    lderr(cct) << "sparsify encountered an error: " << cpp_strerror(r)
               << dendl;
  } else {
    r = 0;
  }
  this->complete(r);
}
} // namespace operation
} // namespace librbd
template class librbd::operation::SparsifyRequest<librbd::ImageCtx>;
| 14,249 | 26.403846 | 82 | cc |
null | ceph-main/src/librbd/operation/SparsifyRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SPARSIFY_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SPARSIFY_REQUEST_H
#include "librbd/operation/Request.h"
#include "common/snap_types.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Image operation that reclaims space by trimming or removing backing
// objects whose data is (entirely or in their tail) zeroes.
template <typename ImageCtxT = ImageCtx>
class SparsifyRequest : public Request<ImageCtxT>
{
public:
  // sparse_size: granularity forwarded to each per-object sparsify unit
  SparsifyRequest(ImageCtxT &image_ctx, size_t sparse_size, Context *on_finish,
                  ProgressContext &prog_ctx)
    : Request<ImageCtxT>(image_ctx, on_finish), m_sparse_size(sparse_size),
      m_prog_ctx(prog_ctx) {
  }

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // this operation can affect in-flight I/O (see Request<I>::can_affect_io)
  bool can_affect_io() const override {
    return true;
  }

  // sparsify is not journaled -- this must never be invoked
  journal::Event create_event(uint64_t op_tid) const override {
    ceph_abort();
    return journal::UnknownEvent();
  }

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * SPARSIFY OBJECTS
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  size_t m_sparse_size;         // sparse unit size forwarded to workers
  ProgressContext &m_prog_ctx;  // caller progress reporting

  void sparsify_objects();
  void handle_sparsify_objects(int r);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SparsifyRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SPARSIFY_REQUEST_H
| 1,404 | 20.615385 | 79 | h |
null | ceph-main/src/librbd/operation/TrimRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/TrimRequest.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "common/ContextCompletion.h"
#include "common/dout.h"
#include "common/errno.h"
#include "osdc/Striper.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#include <boost/scope_exit.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::TrimRequest: "
namespace librbd {
namespace operation {
// Per-object throttle unit used during trim: discards the whole object
// (triggering a copyup so snapshot reads still see parent data) with
// object-map updates explicitly disabled.
template <typename I>
class C_CopyupObject : public C_AsyncObjectThrottle<I> {
public:
  C_CopyupObject(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                 IOContext io_context, uint64_t object_no)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_io_context(io_context),
      m_object_no(object_no)
  {
  }

  // Issue a full-object discard; requires the exclusive lock (when enabled)
  // to be owned by this client.
  int send() override {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    ceph_assert(image_ctx.exclusive_lock == nullptr ||
                image_ctx.exclusive_lock->is_lock_owner());

    std::string oid = image_ctx.get_object_name(m_object_no);
    ldout(image_ctx.cct, 10) << "removing (with copyup) " << oid << dendl;

    auto object_dispatch_spec = io::ObjectDispatchSpec::create_discard(
      &image_ctx, io::OBJECT_DISPATCH_LAYER_NONE, m_object_no, 0,
      image_ctx.layout.object_size, m_io_context,
      io::OBJECT_DISCARD_FLAG_DISABLE_OBJECT_MAP_UPDATE, 0, {}, this);
    object_dispatch_spec->send();
    return 0;
  }
private:
  IOContext m_io_context;   // data I/O context captured at trim start
  uint64_t m_object_no;     // image object index to discard
};
// Per-object throttle unit that removes one whole backing object.  Returns
// 1 from send() (immediate completion, per AsyncObjectThrottle convention)
// when the object map indicates the object cannot exist.
template <typename I>
class C_RemoveObject : public C_AsyncObjectThrottle<I> {
public:
  C_RemoveObject(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                 uint64_t object_no)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_object_no(object_no)
  {
  }

  int send() override {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    ceph_assert(image_ctx.exclusive_lock == nullptr ||
                image_ctx.exclusive_lock->is_lock_owner());

    {
      std::shared_lock image_locker{image_ctx.image_lock};
      if (image_ctx.object_map != nullptr &&
          !image_ctx.object_map->object_may_exist(m_object_no)) {
        // object already absent -- skip the RADOS remove
        return 1;
      }
    }

    std::string oid = image_ctx.get_object_name(m_object_no);
    ldout(image_ctx.cct, 10) << "removing " << oid << dendl;

    librados::AioCompletion *rados_completion =
      util::create_rados_callback(this);
    int r = image_ctx.data_ctx.aio_remove(oid, rados_completion);
    ceph_assert(r == 0);
    rados_completion->release();
    return 0;
  }

private:
  uint64_t m_object_no;   // image object index to remove
};
// Compute the deletion geometry for shrinking from original_size to
// new_size: m_delete_off is the first byte past the last fully-retained
// stripe period, m_delete_start the first whole object to remove.
template <typename I>
TrimRequest<I>::TrimRequest(I &image_ctx, Context *on_finish,
                            uint64_t original_size, uint64_t new_size,
                            ProgressContext &prog_ctx)
  : AsyncRequest<I>(image_ctx, on_finish), m_new_size(new_size),
    m_prog_ctx(prog_ctx)
{
  uint64_t period = image_ctx.get_stripe_period();
  uint64_t new_num_periods = ((m_new_size + period - 1) / period);
  m_delete_off = std::min(new_num_periods * period, original_size);
  // first object we can delete free and clear
  m_delete_start = new_num_periods * image_ctx.get_stripe_count();
  m_delete_start_min = m_delete_start;
  m_num_objects = Striper::get_num_objects(image_ctx.layout, original_size);

  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << this << " trim image " << original_size << " -> "
		 << m_new_size << " periods " << new_num_periods
                 << " discard to offset " << m_delete_off
                 << " delete objects " << m_delete_start
                 << " to " << m_num_objects << dendl;
}
// Drive the trim state machine: each completed stage schedules the next.
// Returns true only when the request has fully finished or hit a fatal
// error / interruption.
template <typename I>
bool TrimRequest<I>::should_complete(int r)
{
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << this << " should_complete: r=" << r << dendl;
  if (r == -ERESTART) {
    ldout(cct, 5) << "trim operation interrupted" << dendl;
    return true;
  } else if (r < 0) {
    lderr(cct) << "trim encountered an error: " << cpp_strerror(r) << dendl;
    return true;
  }

  std::shared_lock owner_lock{image_ctx.owner_lock};
  switch (m_state) {
  case STATE_PRE_TRIM:
    ldout(cct, 5) << " PRE_TRIM" << dendl;
    send_copyup_objects();
    break;

  case STATE_COPYUP_OBJECTS:
    ldout(cct, 5) << " COPYUP_OBJECTS" << dendl;
    send_remove_objects();
    break;

  case STATE_REMOVE_OBJECTS:
    ldout(cct, 5) << " REMOVE_OBJECTS" << dendl;
    send_post_trim();
    break;

  case STATE_POST_TRIM:
    ldout(cct, 5) << " POST_TRIM" << dendl;
    send_clean_boundary();
    break;

  case STATE_CLEAN_BOUNDARY:
    ldout(cct, 5) << "CLEAN_BOUNDARY" << dendl;
    send_finish(0);
    break;

  case STATE_FINISHED:
    ldout(cct, 5) << "FINISHED" << dendl;
    return true;

  default:
    lderr(cct) << "invalid state: " << m_state << dendl;
    ceph_abort();
    break;
  }
  return false;
}
// Start the trim state machine; fail fast when the data pool is missing.
template <typename I>
void TrimRequest<I>::send() {
  I &image_ctx = this->m_image_ctx;

  if (image_ctx.data_ctx.is_valid()) {
    send_pre_trim();
    return;
  }

  lderr(image_ctx.cct) << "missing data pool" << dendl;
  send_finish(-ENODEV);
}
// Stage 1: when an object map exists, flag all to-be-deleted objects as
// OBJECT_PENDING before touching data.  Jumps straight to boundary cleanup
// when the shrink removes no whole objects.
template<typename I>
void TrimRequest<I>::send_pre_trim() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

  if (m_delete_start >= m_num_objects) {
    // no whole objects to delete -- only partial-object cleanup remains
    send_clean_boundary();
    return;
  }

  {
    std::shared_lock image_locker{image_ctx.image_lock};
    if (image_ctx.object_map != nullptr) {
      ldout(image_ctx.cct, 5) << this << " send_pre_trim: "
			      << " delete_start_min=" << m_delete_start_min
			      << " num_objects=" << m_num_objects << dendl;
      m_state = STATE_PRE_TRIM;

      ceph_assert(image_ctx.exclusive_lock->is_lock_owner());

      // aio_update returns false when no entries needed changing
      if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
            CEPH_NOSNAP, m_delete_start_min, m_num_objects, OBJECT_PENDING,
            OBJECT_EXISTS, {}, false, this)) {
        return;
      }
    }
  }

  send_copyup_objects();
}
// Stage 2: objects within the parent overlap must be copied up before
// removal when snapshots exist, so snapshot reads continue to see parent
// data.  Adjusts m_delete_start so stage 3 removes only the remainder.
template<typename I>
void TrimRequest<I>::send_copyup_objects() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

  IOContext io_context;
  bool has_snapshots;
  uint64_t copyup_end;
  {
    std::shared_lock image_locker{image_ctx.image_lock};

    io_context = image_ctx.get_data_io_context();
    has_snapshots = !image_ctx.snaps.empty();

    uint64_t crypto_header_objects = Striper::get_num_objects(
        image_ctx.layout,
        image_ctx.get_area_size(io::ImageArea::CRYPTO_HEADER));

    uint64_t raw_overlap;
    int r = image_ctx.get_parent_overlap(CEPH_NOSNAP, &raw_overlap);
    ceph_assert(r == 0);
    auto overlap = image_ctx.reduce_parent_overlap(raw_overlap, false);
    uint64_t data_overlap_objects = Striper::get_num_objects(
        image_ctx.layout,
        (overlap.second == io::ImageArea::DATA ? overlap.first : 0));

    // copyup is only required for portion of image that overlaps parent
    ceph_assert(m_delete_start >= crypto_header_objects);
    copyup_end = crypto_header_objects + data_overlap_objects;
  }

  // TODO: protect against concurrent shrink and snap create?
  // skip to remove if no copyup is required.
  if (copyup_end <= m_delete_start || !has_snapshots) {
    send_remove_objects();
    return;
  }

  // copyup range is [copyup_start, copyup_end); plain removal resumes there
  uint64_t copyup_start = m_delete_start;
  m_delete_start = copyup_end;

  ldout(image_ctx.cct, 5) << this << " send_copyup_objects: "
			  << " start object=" << copyup_start << ", "
			  << " end object=" << copyup_end << dendl;
  m_state = STATE_COPYUP_OBJECTS;

  Context *ctx = this->create_callback_context();
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_CopyupObject<I> >(),
      boost::lambda::_1, &image_ctx, io_context, boost::lambda::_2));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    this, image_ctx, context_factory, ctx, &m_prog_ctx, copyup_start,
    copyup_end);
  throttle->start_ops(
    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
// Stage 3: delete all whole objects in [m_delete_start, m_num_objects)
// through a bounded AsyncObjectThrottle of C_RemoveObject units.
template <typename I>
void TrimRequest<I>::send_remove_objects() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

  ldout(image_ctx.cct, 5) << this << " send_remove_objects: "
			    << " delete_start=" << m_delete_start
			    << " num_objects=" << m_num_objects << dendl;
  m_state = STATE_REMOVE_OBJECTS;

  Context *ctx = this->create_callback_context();
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_RemoveObject<I> >(),
      boost::lambda::_1, &image_ctx, boost::lambda::_2));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    this, image_ctx, context_factory, ctx, &m_prog_ctx, m_delete_start,
    m_num_objects);
  throttle->start_ops(
    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
// Stage 4: flip the object-map entries for the removed range from
// OBJECT_PENDING to OBJECT_NONEXISTENT (skipped without an object map).
template<typename I>
void TrimRequest<I>::send_post_trim() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

  {
    std::shared_lock image_locker{image_ctx.image_lock};
    if (image_ctx.object_map != nullptr) {
      ldout(image_ctx.cct, 5) << this << " send_post_trim:"
                              << " delete_start_min=" << m_delete_start_min
			      << " num_objects=" << m_num_objects << dendl;
      m_state = STATE_POST_TRIM;

      ceph_assert(image_ctx.exclusive_lock->is_lock_owner());

      // aio_update returns false when no entries needed changing
      if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
            CEPH_NOSNAP, m_delete_start_min, m_num_objects, OBJECT_NONEXISTENT,
            OBJECT_PENDING, {}, false, this)) {
        return;
      }
    }
  }

  send_clean_boundary();
}
// Stage 5: discard the partial tail [m_new_size, m_delete_off) that falls
// within the last retained stripe period.  An extent starting at object
// offset 0 is widened to the full object so it is deleted, not just zeroed.
template <typename I>
void TrimRequest<I>::send_clean_boundary() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  if (m_delete_off <= m_new_size) {
    send_finish(0);
    return;
  }

  // should have been canceled prior to releasing lock
  ceph_assert(image_ctx.exclusive_lock == nullptr ||
              image_ctx.exclusive_lock->is_lock_owner());
  uint64_t delete_len = m_delete_off - m_new_size;
  ldout(image_ctx.cct, 5) << this << " send_clean_boundary: "
			    << " delete_off=" << m_delete_off
			    << " length=" << delete_len << dendl;
  m_state = STATE_CLEAN_BOUNDARY;

  IOContext io_context;
  {
    std::shared_lock image_locker{image_ctx.image_lock};
    io_context = image_ctx.get_data_io_context();
  }

  // discard the weird boundary
  std::vector<ObjectExtent> extents;
  Striper::file_to_extents(cct, image_ctx.format_string,
			   &image_ctx.layout, m_new_size, delete_len, 0,
                           extents);

  ContextCompletion *completion =
    new ContextCompletion(this->create_async_callback_context(), true);
  for (auto& extent : extents) {
    ldout(cct, 20) << " ex " << extent << dendl;
    Context *req_comp = new C_ContextCompletion(*completion);

    if (extent.offset == 0) {
      // treat as a full object delete on the boundary
      extent.length = image_ctx.layout.object_size;
    }

    auto object_dispatch_spec = io::ObjectDispatchSpec::create_discard(
      &image_ctx, io::OBJECT_DISPATCH_LAYER_NONE, extent.objectno, extent.offset,
      extent.length, io_context, 0, 0, {}, req_comp);
    object_dispatch_spec->send();
  }
  completion->finish_adding_requests();
}
// Mark the state machine finished and complete the request asynchronously.
template <typename I>
void TrimRequest<I>::send_finish(int r) {
  m_state = STATE_FINISHED;
  this->async_complete(r);
}
} // namespace operation
} // namespace librbd
template class librbd::operation::TrimRequest<librbd::ImageCtx>;
| 12,254 | 30.997389 | 82 | cc |
null | ceph-main/src/librbd/operation/TrimRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_TRIM_REQUEST_H
#define CEPH_LIBRBD_OPERATION_TRIM_REQUEST_H
#include "librbd/AsyncRequest.h"
namespace librbd
{
class ImageCtx;
class ProgressContext;
namespace operation {
// Async request that shrinks an image's backing store: copies up, removes
// whole objects, updates the object map and discards the partial boundary.
template <typename ImageCtxT = ImageCtx>
class TrimRequest : public AsyncRequest<ImageCtxT>
{
public:
  static TrimRequest *create(ImageCtxT &image_ctx, Context *on_finish,
                             uint64_t original_size, uint64_t new_size,
                             ProgressContext &prog_ctx) {
    return new TrimRequest(image_ctx, on_finish, original_size, new_size,
                           prog_ctx);
  }

  TrimRequest(ImageCtxT &image_ctx, Context *on_finish,
	      uint64_t original_size, uint64_t new_size,
	      ProgressContext &prog_ctx);

  void send() override;

protected:
  /**
   * Trim goes through the following state machine to remove whole objects,
   * clean partially trimmed objects, and update the object map:
   *
   * @verbatim
   *
   * <start>  . . . . . . . . . . . . . . . . .
   *    |                                     .
   *    v  (skip if not needed)               .
   * STATE_PRE_TRIM                           .
   *    |                                     .
   *    v  (skip if not needed)               .
   * STATE_COPYUP_OBJECTS                     .
   *    |                                     .
   *    v  (skip if not needed)               .
   * STATE_REMOVE_OBJECTS                     .
   *    |                                     .
   *    v  (skip if not needed)               .
   * STATE_POST_TRIM                          .
   *    |                                     .
   *    v  (skip if not needed)               .
   * STATE_CLEAN_BOUNDARY                     .
   *    |                                     .
   *    v                                     .
   * STATE_FINISHED < . . . . . . . . . . . . . . .
   *    |
   *    v
   * <finish>
   *
   * The _COPYUP_OBJECTS state is skipped if there is no parent overlap
   * within the new image size and the image does not have any snapshots.
   * The _PRE_TRIM/_POST_TRIM states are skipped if the object map
   * isn't enabled. The _REMOVE_OBJECTS state is skipped if no whole objects
   * are removed.  The _CLEAN_BOUNDARY state is skipped if no boundary
   * objects are cleaned.  The state machine will immediately transition
   * to _FINISHED state if there are no bytes to trim.
   */

  enum State {
    STATE_PRE_TRIM,
    STATE_COPYUP_OBJECTS,
    STATE_REMOVE_OBJECTS,
    STATE_POST_TRIM,
    STATE_CLEAN_BOUNDARY,
    STATE_FINISHED
  };

  bool should_complete(int r) override;

  State m_state = STATE_PRE_TRIM;

private:
  uint64_t m_delete_start;          // first whole object to remove (may advance past copyup range)
  uint64_t m_delete_start_min = 0;  // original start, used for object-map updates
  uint64_t m_num_objects;           // object count at the original image size
  uint64_t m_delete_off;            // byte offset where whole-period deletion begins
  uint64_t m_new_size;              // target image size in bytes
  ProgressContext &m_prog_ctx;      // caller progress reporting

  void send_pre_trim();
  void send_copyup_objects();
  void send_remove_objects();
  void send_post_trim();

  void send_clean_boundary();
  void send_finish(int r);
};
} // namespace librbd
extern template class librbd::operation::TrimRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_TRIM_REQUEST_H
| 3,361 | 30.12963 | 76 | h |
null | ceph-main/src/librbd/plugin/Api.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Timer.h"
#include "librbd/plugin/Api.h"
#include "librbd/ImageCtx.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/Utils.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
namespace librbd {
namespace plugin {
// Delegate a parent-image read for the given object to the shared
// io::util helper.
template <typename I>
void Api<I>::read_parent(
    I *image_ctx, uint64_t object_no, io::ReadExtents* extents,
    librados::snap_t snap_id, const ZTracer::Trace &trace,
    Context* on_finish) {
  io::util::read_parent<I>(image_ctx, object_no, extents, snap_id, trace,
                           on_finish);
}
// Forward an image-metadata set to the image's Operations state machine.
template <typename I>
void Api<I>::execute_image_metadata_set(
    I *image_ctx, const std::string &key,
    const std::string &value, Context *on_finish) {
  util::get_image_ctx(image_ctx)->operations->execute_metadata_set(
    key, value, on_finish);
}
// Forward an image-metadata removal to the image's Operations state machine.
template <typename I>
void Api<I>::execute_image_metadata_remove(
    I *image_ctx, const std::string &key, Context *on_finish) {
  util::get_image_ctx(image_ctx)->operations->execute_metadata_remove(
    key, on_finish);
}
// Expose the shared per-process image timer (SafeTimer + its lock).
template <typename I>
void Api<I>::get_image_timer_instance(
    CephContext *cct, SafeTimer **timer, ceph::mutex **timer_lock) {
  ImageCtx::get_timer_instance(cct, timer, timer_lock);
}

// Test whether the given feature bits are currently enabled on the image.
template <typename I>
bool Api<I>::test_image_features(I *image_ctx, uint64_t features) {
  return image_ctx->test_features(features);
}
// Prime an AioCompletion for a read: record the sub-request count, move in
// the read result, wire the target extents and start in-flight tracking.
// (the move must precede set_image_extents -- it applies to the member)
template <typename I>
void Api<I>::update_aio_comp(io::AioCompletion* aio_comp,
                             uint32_t request_count,
                             io::ReadResult &read_result,
                             io::Extents &image_extents) {
  aio_comp->set_request_count(request_count);
  aio_comp->read_result = std::move(read_result);
  aio_comp->read_result.set_image_extents(image_extents);
  start_in_flight_io(aio_comp);
}

// Prime an AioCompletion for a non-read operation (no read result needed).
template <typename I>
void Api<I>::update_aio_comp(
    io::AioCompletion* aio_comp, uint32_t request_count) {
  aio_comp->set_request_count(request_count);
  start_in_flight_io(aio_comp);
}

// Allocate a C_ImageReadRequest that writes its data into the completion's
// read result starting at buffer_offset.
template <typename I>
io::ReadResult::C_ImageReadRequest* Api<I>::create_image_read_request(
    io::AioCompletion* aio_comp, uint64_t buffer_offset,
    const Extents& image_extents) {
  return new io::ReadResult::C_ImageReadRequest(
    aio_comp, buffer_offset, image_extents);
}
// Allocate a C_AioRequest wrapper tied to the supplied completion.
template <typename I>
io::C_AioRequest* Api<I>::create_aio_request(io::AioCompletion* aio_comp) {
  return new io::C_AioRequest(aio_comp);
}
// Begin async-op tracking exactly once per completion.
template <typename I>
void Api<I>::start_in_flight_io(io::AioCompletion* aio_comp) {
  if (aio_comp->async_op.started()) {
    return;  // already tracking this op
  }
  aio_comp->start_op();
}
} // namespace plugin
} // namespace librbd
template class librbd::plugin::Api<librbd::ImageCtx>;
| 2,862 | 29.784946 | 75 | cc |
null | ceph-main/src/librbd/plugin/Api.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_API_H
#define CEPH_LIBRBD_PLUGIN_API_H
#include "common/Timer.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "librbd/io/Types.h"
#include "librbd/io/ReadResult.h"
namespace ZTracer { struct Trace; }
namespace librbd {
namespace io {
class AioCompletion;
class C_AioRequest;
}
struct ImageCtx;
namespace plugin {
// Facade over librbd internals handed to plugins so they can interact with
// an image without depending on private implementation headers; virtual so
// tests can mock it.
template <typename ImageCtxT>
struct Api {
  using Extents = librbd::io::Extents;

  Api() {}
  virtual ~Api() {}

  // read data for one object from the parent image
  virtual void read_parent(
      ImageCtxT *image_ctx, uint64_t object_no, io::ReadExtents* extents,
      librados::snap_t snap_id, const ZTracer::Trace &trace,
      Context* on_finish);

  // set / remove an image-metadata key via the Operations state machine
  virtual void execute_image_metadata_set(
      ImageCtxT *image_ctx,
      const std::string &key,
      const std::string &value,
      Context *on_finish);

  virtual void execute_image_metadata_remove(
      ImageCtxT *image_ctx,
      const std::string &key,
      Context *on_finish);

  // obtain the shared per-process image timer and its guarding lock
  virtual void get_image_timer_instance(
      CephContext *cct, SafeTimer **timer,
      ceph::mutex **timer_lock);

  virtual bool test_image_features(
      ImageCtxT *image_ctx,
      uint64_t features);

  // prime an AioCompletion (read variant attaches result + extents)
  virtual void update_aio_comp(
      io::AioCompletion* aio_comp,
      uint32_t request_count,
      io::ReadResult& read_result,
      io::Extents &image_extents);

  virtual void update_aio_comp(
      io::AioCompletion* aio_comp,
      uint32_t request_count);

  virtual io::ReadResult::C_ImageReadRequest* create_image_read_request(
      io::AioCompletion* aio_comp, uint64_t buffer_offset,
      const Extents& image_extents);

  virtual io::C_AioRequest* create_aio_request(io::AioCompletion* aio_comp);

private:
  // start async-op tracking on the completion (idempotent)
  void start_in_flight_io(io::AioCompletion* aio_comp);
};
} // namespace plugin
} // namespace librbd
extern template class librbd::plugin::Api<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_PLUGIN_API_H
| 2,076 | 23.435294 | 76 | h |
null | ceph-main/src/librbd/plugin/ParentCache.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/plugin/ParentCache.h"
#include "ceph_ver.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/PluginRegistry.h"
#include "librbd/ImageCtx.h"
#include "librbd/cache/ParentCacheObjectDispatch.h"
// C-linkage entry points consumed by ceph::PluginRegistry when loading
// this plugin.
extern "C" {

// report the plugin build version for compatibility checking
const char *__ceph_plugin_version() {
  return CEPH_GIT_NICE_VER;
}

// register a ParentCache plugin instance under the given type/name
int __ceph_plugin_init(CephContext *cct, const std::string& type,
                       const std::string& name) {
  auto plugin_registry = cct->get_plugin_registry();
  return plugin_registry->add(
    type, name, new librbd::plugin::ParentCache<librbd::ImageCtx>(cct));
}

} // extern "C"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::plugin::ParentCache: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace plugin {
// Attach the parent-cache object dispatch layer to the image.  Completes
// immediately (success) when image_ctx->child is null, the
// rbd_parent_cache_enabled option is off, or the data pool is invalid.
template <typename I>
void ParentCache<I>::init(I* image_ctx, Api<I>& api,
                          cache::ImageWritebackInterface& image_writeback,
                          PluginHookPoints& hook_points_list,
                          Context* on_finish) {
  bool parent_cache_enabled = image_ctx->config.template get_val<bool>(
    "rbd_parent_cache_enabled");
  if (image_ctx->child == nullptr || !parent_cache_enabled ||
      !image_ctx->data_ctx.is_valid()) {
    on_finish->complete(0);
    return;
  }

  auto cct = image_ctx->cct;
  ldout(cct, 5) << dendl;

  auto parent_cache = cache::ParentCacheObjectDispatch<I>::create(
    image_ctx, api);
  on_finish = new LambdaContext([this, on_finish, parent_cache](int r) {
      if (r < 0) {
        // the object dispatcher will handle cleanup if successfully initialized
        delete parent_cache;
      }
      handle_init_parent_cache(r, on_finish);
    });
  parent_cache->init(on_finish);
}
// Completion of ParentCacheObjectDispatch::init(): log any failure and
// forward the result to the caller.
template <typename I>
void ParentCache<I>::handle_init_parent_cache(int r, Context* on_finish) {
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "Failed to initialize parent cache object dispatch layer: "
               << cpp_strerror(r) << dendl;
  }
  on_finish->complete(r < 0 ? r : 0);
}
} // namespace plugin
} // namespace librbd
template class librbd::plugin::ParentCache<librbd::ImageCtx>;
| 2,338 | 27.52439 | 80 | cc |
null | ceph-main/src/librbd/plugin/ParentCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_PARENT_CACHE_H
#define CEPH_LIBRBD_PLUGIN_PARENT_CACHE_H
#include "librbd/plugin/Types.h"
#include "include/Context.h"
namespace librbd {
struct ImageCtx;
namespace plugin {
// Plugin that layers a parent-image read cache (ParentCacheObjectDispatch)
// onto the image at open time.
template <typename ImageCtxT>
class ParentCache : public Interface<ImageCtxT> {
public:
  ParentCache(CephContext* cct) : Interface<ImageCtxT>(cct) {
  }

  // install the parent-cache dispatch layer; completes on_finish when done
  void init(ImageCtxT* image_ctx, Api<ImageCtxT>& api,
            cache::ImageWritebackInterface& image_writeback,
            PluginHookPoints& hook_points_list,
            Context* on_finish) override;

private:
  // completion handler for ParentCacheObjectDispatch::init()
  void handle_init_parent_cache(int r, Context* on_finish);

  using ceph::Plugin::cct;
};
} // namespace plugin
} // namespace librbd
extern template class librbd::plugin::ParentCache<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_PLUGIN_PARENT_CACHE_H
| 927 | 22.794872 | 70 | h |
null | ceph-main/src/librbd/plugin/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_TYPES_H
#define CEPH_LIBRBD_PLUGIN_TYPES_H
#include "include/common_fwd.h"
#include "include/Context.h"
#include "common/PluginRegistry.h"
#include "librbd/cache/ImageWriteback.h"
namespace librbd {
namespace plugin {
template <typename> struct Api;
// Lifecycle callbacks a plugin may contribute; invoked by librbd around
// exclusive-lock transitions and image discard.
struct HookPoints {
  virtual ~HookPoints() {
  }
  virtual void acquired_exclusive_lock(Context* on_finish) = 0;
  virtual void prerelease_exclusive_lock(Context* on_finish) = 0;
  // optional hook; defaults to immediate success
  virtual void discard(Context* on_finish) {
    on_finish->complete(0);
  }
};

// hook-point collections contributed by all loaded plugins
typedef std::list<std::unique_ptr<HookPoints>> PluginHookPoints;

// Base class for all librbd plugins: concrete plugins implement init() to
// wire themselves into the image (optionally appending hook points).
template <typename ImageCtxT>
struct Interface : public ceph::Plugin {
  Interface(CephContext* cct) : Plugin(cct) {
  }

  virtual ~Interface() {
  }

  // must complete on_finish (0 on success) once initialization is done
  virtual void init(ImageCtxT* image_ctx, Api<ImageCtxT>& api,
                    librbd::cache::ImageWritebackInterface& image_writeback,
                    PluginHookPoints& hook_points_list, Context* on_finish) = 0;
};
} // namespace plugin
} // namespace librbd
#endif // CEPH_LIBRBD_PLUGIN_TYPES_H
| 1,152 | 24.065217 | 80 | h |
null | ceph-main/src/librbd/plugin/WriteLogImageCache.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ceph_ver.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/PluginRegistry.h"
#include "librbd/ImageCtx.h"
#include "librbd/cache/WriteLogImageDispatch.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/cache/Utils.h"
#include "librbd/cache/pwl/DiscardRequest.h"
#include "librbd/cache/pwl/InitRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/plugin/WriteLogImageCache.h"
// C-linkage entry points consumed by ceph::PluginRegistry when loading
// this plugin.
extern "C" {

// report the plugin build version for compatibility checking
const char *__ceph_plugin_version() {
  return CEPH_GIT_NICE_VER;
}

// register a WriteLogImageCache plugin instance under the given type/name
int __ceph_plugin_init(CephContext *cct, const std::string& type,
                       const std::string& name) {
  auto plugin_registry = cct->get_plugin_registry();
  return plugin_registry->add(
    type, name, new librbd::plugin::WriteLogImageCache<librbd::ImageCtx>(cct));
}

} // extern "C"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::plugin::WriteLogImageCache: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace plugin {
// Register the write-log cache hook points for this image.  No-op when pwl
// caching is not enabled for the image or the data pool is invalid.
template <typename I>
void WriteLogImageCache<I>::init(I* image_ctx, Api<I>& api,
                                 cache::ImageWritebackInterface& image_writeback,
                                 PluginHookPoints& hook_points_list,
                                 Context* on_finish) {
  bool pwl_enabled = librbd::cache::util::is_pwl_enabled(*image_ctx);
  if (!pwl_enabled || !image_ctx->data_ctx.is_valid()) {
    on_finish->complete(0);
    return;
  }

  auto cct = image_ctx->cct;
  ldout(cct, 5) << dendl;

  auto hook_points = std::make_unique<WriteLogImageCache::HookPoints>(
    image_ctx, image_writeback, api);
  hook_points_list.emplace_back(std::move(hook_points));

  on_finish->complete(0);
}
// out-of-line (trivial) destructor
template <typename I>
WriteLogImageCache<I>::~WriteLogImageCache() {
}

// Capture the image context, writeback interface and plugin API used by
// the lock-transition hooks below.
template <typename I>
WriteLogImageCache<I>::HookPoints::HookPoints(
    I* image_ctx, cache::ImageWritebackInterface& image_writeback,
    plugin::Api<I>& plugin_api)
  : m_image_ctx(image_ctx), m_image_writeback(image_writeback),
    m_plugin_api(plugin_api)
{
}

// out-of-line (trivial) destructor
template <typename I>
WriteLogImageCache<I>::HookPoints::~HookPoints() {
}
// Exclusive lock acquired: bring up the write-log cache via an InitRequest.
template <typename I>
void WriteLogImageCache<I>::HookPoints::acquired_exclusive_lock(
    Context* on_finish) {
  auto init_req = cache::pwl::InitRequest<I>::create(
    *m_image_ctx, m_image_writeback, m_plugin_api, on_finish);
  init_req->send();
}
// Before releasing the exclusive lock, shut down the writeback-cache image
// dispatch layer so it no longer intercepts I/O.
template <typename I>
void WriteLogImageCache<I>::HookPoints::prerelease_exclusive_lock(
    Context* on_finish) {
  m_image_ctx->io_image_dispatcher->shut_down_dispatch(
    io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, on_finish);
}
// Discard any persisted write-log state for this image.
template <typename I>
void WriteLogImageCache<I>::HookPoints::discard(
    Context* on_finish) {
  auto discard_req = cache::pwl::DiscardRequest<I>::create(
    *m_image_ctx, m_plugin_api, on_finish);
  discard_req->send();
}
} // namespace plugin
} // namespace librbd
template class librbd::plugin::WriteLogImageCache<librbd::ImageCtx>;
| 3,129 | 28.809524 | 81 | cc |
null | ceph-main/src/librbd/plugin/WriteLogImageCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_WRITELOG_IMAGE_CACHE_H
#define CEPH_LIBRBD_PLUGIN_WRITELOG_IMAGE_CACHE_H
#include "librbd/plugin/Types.h"
#include "include/Context.h"
namespace librbd {
struct ImageCtx;
namespace plugin {
// Plugin wrapper exposing the persistent write-log image cache to librbd's
// generic plugin framework.  init() registers a HookPoints instance that
// ties cache lifecycle to exclusive-lock transitions.
template <typename ImageCtxT>
class WriteLogImageCache : public Interface<ImageCtxT> {
public:
  WriteLogImageCache(CephContext* cct) : Interface<ImageCtxT>(cct) {
  }
  ~WriteLogImageCache() override;
  // Registers hook points when the pwl cache is enabled (no-op otherwise).
  void init(ImageCtxT* image_ctx, Api<ImageCtxT>& api,
            cache::ImageWritebackInterface& image_writeback,
            PluginHookPoints& hook_points_list,
            Context* on_finish) override;
  // Lifecycle callbacks invoked by the plugin framework; see the .cc for
  // the per-hook behavior (init on lock acquire, dispatch shutdown on
  // lock prerelease, cache discard on discard).
  class HookPoints : public plugin::HookPoints {
  public:
    HookPoints(ImageCtxT* image_ctx,
               cache::ImageWritebackInterface& image_writeback,
               plugin::Api<ImageCtxT>& plugin_api);
    ~HookPoints() override;
    void acquired_exclusive_lock(Context* on_finish) override;
    void prerelease_exclusive_lock(Context* on_finish) override;
    void discard(Context* on_finish) override;
  private:
    // non-owning references; lifetimes managed by the image context/plugin
    ImageCtxT* m_image_ctx;
    cache::ImageWritebackInterface& m_image_writeback;
    plugin::Api<ImageCtxT>& m_plugin_api;
  };
};
} // namespace plugin
} // namespace librbd
extern template class librbd::plugin::WriteLogImageCache<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_PLUGIN_WRITELOG_IMAGE_CACHE_H
| 1,455 | 25.962963 | 75 | h |
null | ceph-main/src/librbd/trash/MoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/trash/MoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::trash::MoveRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace trash {
using util::create_context_callback;
using util::create_rados_callback;
// Entry point: starts the TRASH_ADD -> REMOVE_ID -> DIRECTORY_REMOVE chain
// (see the state diagram in the header).
template <typename I>
void MoveRequest<I>::send() {
  trash_add();
}
// Step 1: asynchronously add the image to the pool's RBD_TRASH object.
template <typename I>
void MoveRequest<I>::trash_add() {
  ldout(m_cct, 10) << dendl;
  librados::ObjectWriteOperation op;
  librbd::cls_client::trash_add(&op, m_image_id, m_trash_image_spec);
  auto aio_comp = create_rados_callback<
    MoveRequest<I>, &MoveRequest<I>::handle_trash_add>(this);
  int r = m_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// -EEXIST is tolerated: a previous deferred remove already left a trash
// entry for this image; any other error aborts the request.
template <typename I>
void MoveRequest<I>::handle_trash_add(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r == -EEXIST) {
    ldout(m_cct, 10) << "previous unfinished deferred remove for image: "
                     << m_image_id << dendl;
  } else if (r < 0) {
    lderr(m_cct) << "failed to add image to trash: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  remove_id();
}
// Step 2: remove the name->id mapping object for the image.
template <typename I>
void MoveRequest<I>::remove_id() {
  ldout(m_cct, 10) << dendl;
  auto aio_comp = create_rados_callback<
    MoveRequest<I>, &MoveRequest<I>::handle_remove_id>(this);
  int r = m_io_ctx.aio_remove(util::id_obj_name(m_trash_image_spec.name),
                              aio_comp);
  ceph_assert(r == 0);
  aio_comp->release();
}
// -ENOENT is fine (id object already gone); other errors abort.
template <typename I>
void MoveRequest<I>::handle_remove_id(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "failed to remove image id object: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  directory_remove();
}
// Step 3: drop the image from the pool's RBD_DIRECTORY listing.
template <typename I>
void MoveRequest<I>::directory_remove() {
  ldout(m_cct, 10) << dendl;
  librados::ObjectWriteOperation op;
  librbd::cls_client::dir_remove_image(&op, m_trash_image_spec.name,
                                       m_image_id);
  auto aio_comp = create_rados_callback<
    MoveRequest<I>, &MoveRequest<I>::handle_directory_remove>(this);
  int r = m_io_ctx.aio_operate(RBD_DIRECTORY, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// -ENOENT (already absent from the directory) is treated as success.
template <typename I>
void MoveRequest<I>::handle_directory_remove(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r == -ENOENT) {
    r = 0;
  }
  if (r < 0) {
    lderr(m_cct) << "failed to remove image from directory: " << cpp_strerror(r)
                 << dendl;
  }
  finish(r);
}
// Completes the user callback and self-destructs (heap-allocated request).
template <typename I>
void MoveRequest<I>::finish(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace trash
} // namespace librbd
template class librbd::trash::MoveRequest<librbd::ImageCtx>;
| 3,186 | 24.094488 | 80 | cc |
null | ceph-main/src/librbd/trash/MoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_TRASH_MOVE_REQUEST_H
#define CEPH_LIBRBD_TRASH_MOVE_REQUEST_H
#include "include/common_fwd.h"
#include "include/utime.h"
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
namespace trash {
template <typename ImageCtxT = librbd::ImageCtx>
// Asynchronous request that moves an RBD image into the pool's trash:
// adds a trash entry, removes the name->id object and the directory entry.
// Self-deleting; completes on_finish with the final result.
template <typename ImageCtxT = librbd::ImageCtx>
class MoveRequest {
public:
  static MoveRequest* create(librados::IoCtx& io_ctx,
                             const std::string& image_id,
                             const cls::rbd::TrashImageSpec& trash_image_spec,
                             Context* on_finish) {
    return new MoveRequest(io_ctx, image_id, trash_image_spec, on_finish);
  }
  MoveRequest(librados::IoCtx& io_ctx, const std::string& image_id,
              const cls::rbd::TrashImageSpec& trash_image_spec,
              Context* on_finish)
    : m_io_ctx(io_ctx), m_image_id(image_id),
      m_trash_image_spec(trash_image_spec), m_on_finish(on_finish),
      m_cct(reinterpret_cast<CephContext *>(io_ctx.cct())) {
  }
  void send();
private:
  /*
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * TRASH_ADD
   *    |
   *    v
   * REMOVE_ID
   *    |
   *    v
   * DIRECTORY_REMOVE
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  librados::IoCtx &m_io_ctx;
  std::string m_image_id;
  cls::rbd::TrashImageSpec m_trash_image_spec;
  Context *m_on_finish;
  CephContext *m_cct;
  void trash_add();
  void handle_trash_add(int r);
  void remove_id();
  void handle_remove_id(int r);
  void directory_remove();
  void handle_directory_remove(int r);
  void finish(int r);
};
} // namespace trash
} // namespace librbd
extern template class librbd::trash::MoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_TRASH_MOVE_REQUEST_H
| 1,897 | 20.568182 | 78 | h |
null | ceph-main/src/librbd/trash/RemoveRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/trash/RemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/image/RemoveRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::trash::RemoveRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace trash {
using util::create_context_callback;
using util::create_rados_callback;
// Entry point: first transition the trash entry to the REMOVING state
// (see the state diagram in the header).
template <typename I>
void RemoveRequest<I>::send() {
  set_state();
}
// Asynchronously CAS the trash entry's state from m_trash_expect_state to
// m_trash_set_state on the RBD_TRASH object.
template <typename I>
void RemoveRequest<I>::set_state() {
  ldout(m_cct, 10) << dendl;
  librados::ObjectWriteOperation op;
  cls_client::trash_state_set(&op, m_image_id, m_trash_set_state,
                              m_trash_expect_state);
  auto aio_comp = create_rados_callback<
    RemoveRequest<I>, &RemoveRequest<I>::handle_set_state>(this);
  int r = m_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Handles both state transitions of the request:
//  * -> REMOVING: proceed to the actual image removal (on error, close any
//    open image and finish with the error);
//  * -> NORMAL (rollback after a failed removal): finish, preferring any
//    previously recorded error in m_ret_val.
// -EOPNOTSUPP is ignored so older clusters without trash_state_set still work.
template <typename I>
void RemoveRequest<I>::handle_set_state(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r < 0 && r != -EOPNOTSUPP) {
    lderr(m_cct) << "error setting trash image state: " << cpp_strerror(r)
                 << dendl;
    if (m_ret_val == 0) {
      m_ret_val = r;
    }
    if (m_trash_set_state == cls::rbd::TRASH_IMAGE_STATE_REMOVING) {
      close_image();
    } else {
      finish(m_ret_val);
    }
    return;
  }
  if (m_trash_set_state == cls::rbd::TRASH_IMAGE_STATE_REMOVING) {
    remove_image();
  } else {
    ceph_assert(m_trash_set_state == cls::rbd::TRASH_IMAGE_STATE_NORMAL);
    finish(m_ret_val < 0 ? m_ret_val : r);
  }  // fixed: removed stray ';' that formed an empty declaration here
}
// Error path: close a previously opened image context (if any) before
// completing with the stored error code.
template <typename I>
void RemoveRequest<I>::close_image() {
  if (m_image_ctx == nullptr) {
    finish(m_ret_val);
    return;
  }
  ldout(m_cct, 10) << dendl;
  auto ctx = create_context_callback<
    RemoveRequest<I>, &RemoveRequest<I>::handle_close_image>(this);
  m_image_ctx->state->close(ctx);
}
// Close failures are only logged; the request still finishes with m_ret_val.
template <typename I>
void RemoveRequest<I>::handle_close_image(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r < 0) {
    ldout(m_cct, 5) << "failed to close image:" << cpp_strerror(r) << dendl;
  }
  m_image_ctx = nullptr;
  finish(m_ret_val);
}
// Delegates the actual deletion to image::RemoveRequest, using either the
// already-open image context or just the image id.
template <typename I>
void RemoveRequest<I>::remove_image() {
  ldout(m_cct, 10) << dendl;
  auto ctx = create_context_callback<
    RemoveRequest<I>, &RemoveRequest<I>::handle_remove_image>(this);
  if (m_image_ctx != nullptr) {
    auto req = librbd::image::RemoveRequest<I>::create(
      m_io_ctx, m_image_ctx, m_force, true, m_prog_ctx, m_op_work_queue, ctx);
    req->send();
  } else {
    auto req = librbd::image::RemoveRequest<I>::create(
      m_io_ctx, "", m_image_id, m_force, true, m_prog_ctx, m_op_work_queue,
      ctx);
    req->send();
  }
}
// On failure, roll the trash entry back to the NORMAL state (expecting
// REMOVING) and finish with the error; on success remove the trash entry.
template <typename I>
void RemoveRequest<I>::handle_remove_image(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r < 0) {
    ldout(m_cct, 5) << "failed to remove image:" << cpp_strerror(r) << dendl;
    m_ret_val = r;
    m_trash_set_state = cls::rbd::TRASH_IMAGE_STATE_NORMAL;
    m_trash_expect_state = cls::rbd::TRASH_IMAGE_STATE_REMOVING;
    set_state();
    return;
  }
  // image::RemoveRequest closed/consumed the image ctx on success
  m_image_ctx = nullptr;
  remove_trash_entry();
}
// Final step: asynchronously delete the image's entry from RBD_TRASH.
template <typename I>
void RemoveRequest<I>::remove_trash_entry() {
  ldout(m_cct, 10) << dendl;
  librados::ObjectWriteOperation op;
  cls_client::trash_remove(&op, m_image_id);
  auto aio_comp = create_rados_callback<
    RemoveRequest<I>, &RemoveRequest<I>::handle_remove_trash_entry>(this);
  int r = m_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Best-effort: errors (other than -ENOENT) are logged but the overall
// request still completes with success since the image itself is gone.
template <typename I>
void RemoveRequest<I>::handle_remove_trash_entry(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "error removing trash entry: " << cpp_strerror(r) << dendl;
  }
  finish(0);
}
// Completes the user callback and self-destructs (heap-allocated request).
template <typename I>
void RemoveRequest<I>::finish(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace trash
} // namespace librbd
template class librbd::trash::RemoveRequest<librbd::ImageCtx>;
| 4,427 | 24.894737 | 80 | cc |
null | ceph-main/src/librbd/trash/RemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_TRASH_REMOVE_REQUEST_H
#define CEPH_LIBRBD_TRASH_REMOVE_REQUEST_H
#include "include/common_fwd.h"
#include "include/utime.h"
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
class Context;
namespace librbd {
struct ImageCtx;
class ProgressContext;
namespace asio { struct ContextWQ; }
namespace trash {
template <typename ImageCtxT = librbd::ImageCtx>
// Asynchronous request that permanently removes a trashed image: marks the
// trash entry REMOVING, deletes the image, then drops the trash entry.  On
// removal failure the entry is rolled back to NORMAL.  Self-deleting.
template <typename ImageCtxT = librbd::ImageCtx>
class RemoveRequest {
public:
  // Create by image id (image will be opened internally as needed).
  static RemoveRequest* create(librados::IoCtx &io_ctx,
                               const std::string &image_id,
                               asio::ContextWQ *op_work_queue, bool force,
                               ProgressContext &prog_ctx, Context *on_finish) {
    return new RemoveRequest(io_ctx, image_id, op_work_queue, force, prog_ctx,
                             on_finish);
  }
  // Create from an already-open image context (ownership of the close is
  // taken over by this request on the error path).
  static RemoveRequest* create(librados::IoCtx &io_ctx, ImageCtxT *image_ctx,
                               asio::ContextWQ *op_work_queue, bool force,
                               ProgressContext &prog_ctx, Context *on_finish) {
    return new RemoveRequest(io_ctx, image_ctx, op_work_queue, force, prog_ctx,
                             on_finish);
  }
  RemoveRequest(librados::IoCtx &io_ctx, const std::string &image_id,
                asio::ContextWQ *op_work_queue, bool force,
                ProgressContext &prog_ctx, Context *on_finish)
    : m_io_ctx(io_ctx), m_image_id(image_id), m_op_work_queue(op_work_queue),
      m_force(force), m_prog_ctx(prog_ctx), m_on_finish(on_finish),
      m_cct(reinterpret_cast<CephContext *>(io_ctx.cct())) {
  }
  RemoveRequest(librados::IoCtx &io_ctx, ImageCtxT *image_ctx,
                asio::ContextWQ *op_work_queue, bool force,
                ProgressContext &prog_ctx, Context *on_finish)
    : m_io_ctx(io_ctx), m_image_ctx(image_ctx), m_image_id(m_image_ctx->id),
      m_op_work_queue(op_work_queue), m_force(force), m_prog_ctx(prog_ctx),
      m_on_finish(on_finish),
      m_cct(reinterpret_cast<CephContext *>(io_ctx.cct())) {
  }
  void send();
private:
  /*
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * SET_STATE (removing) * * * * * * *> CLOSE_IMAGE
   *    |                                    |
   *    v                                    |
   * REMOVE_IMAGE * * *> SET_STATE (normal)  |
   *    |                     |              |
   *    v                     |              |
   * REMOVE_TRASH_ENTRY       |              |
   *    |                     |              |
   *    v                     |              |
   * <finish> <-------------/<---------------/
   *
   * @endverbatim
   */
  librados::IoCtx &m_io_ctx;
  ImageCtxT *m_image_ctx = nullptr;
  std::string m_image_id;
  asio::ContextWQ *m_op_work_queue;
  bool m_force;
  ProgressContext &m_prog_ctx;
  Context *m_on_finish;
  CephContext *m_cct;
  // current CAS transition for the trash entry; flipped to NORMAL/REMOVING
  // for rollback when image removal fails
  cls::rbd::TrashImageState m_trash_set_state =
    cls::rbd::TRASH_IMAGE_STATE_REMOVING;
  cls::rbd::TrashImageState m_trash_expect_state =
    cls::rbd::TRASH_IMAGE_STATE_NORMAL;
  // first error encountered; reported to on_finish
  int m_ret_val = 0;
  void set_state();
  void handle_set_state(int r);
  void close_image();
  void handle_close_image(int r);
  void remove_image();
  void handle_remove_image(int r);
  void remove_trash_entry();
  void handle_remove_trash_entry(int r);
  void finish(int r);
};
} // namespace trash
} // namespace librbd
extern template class librbd::trash::RemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_TRASH_REMOVE_REQUEST_H
| 3,589 | 29.168067 | 79 | h |
null | ceph-main/src/librbd/trash_watcher/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Formatter.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "librbd/trash_watcher/Types.h"
#include "librbd/watcher/Utils.h"
namespace librbd {
namespace trash_watcher {
namespace {
// boost::variant visitor that dumps the payload's notify op name plus the
// payload's own fields into a Formatter.
class DumpPayloadVisitor : public boost::static_visitor<void> {
public:
  explicit DumpPayloadVisitor(Formatter *formatter) : m_formatter(formatter) {}
  template <typename Payload>
  inline void operator()(const Payload &payload) const {
    NotifyOp notify_op = Payload::NOTIFY_OP;
    m_formatter->dump_string("notify_op", stringify(notify_op));
    payload.dump(m_formatter);
  }
private:
  ceph::Formatter *m_formatter;  // not owned
};
} // anonymous namespace
// Wire encoding for ImageAddedPayload: image id followed by the trash spec.
// Field order is part of the on-wire format — do not reorder.
void ImageAddedPayload::encode(bufferlist &bl) const {
  using ceph::encode;
  encode(image_id, bl);
  encode(trash_image_spec, bl);
}
void ImageAddedPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
  using ceph::decode;
  decode(image_id, iter);
  decode(trash_image_spec, iter);
}
void ImageAddedPayload::dump(Formatter *f) const {
  f->dump_string("image_id", image_id);
  f->open_object_section("trash_image_spec");
  trash_image_spec.dump(f);
  f->close_section();
}
// ImageRemovedPayload carries only the image id.
void ImageRemovedPayload::encode(bufferlist &bl) const {
  using ceph::encode;
  encode(image_id, bl);
}
void ImageRemovedPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
  using ceph::decode;
  decode(image_id, iter);
}
void ImageRemovedPayload::dump(Formatter *f) const {
  f->dump_string("image_id", image_id);
}
// UnknownPayload must never be sent (abort on encode); decoding/dumping an
// unknown op is a no-op so newer peers' messages are tolerated.
void UnknownPayload::encode(bufferlist &bl) const {
  ceph_abort();
}
void UnknownPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void UnknownPayload::dump(Formatter *f) const {
}
// Envelope encoding: versioned header, then the active payload's op code and
// body via the generic encode visitor.
void NotifyMessage::encode(bufferlist& bl) const {
  ENCODE_START(1, 1, bl);
  boost::apply_visitor(watcher::util::EncodePayloadVisitor(bl), payload);
  ENCODE_FINISH(bl);
}
void NotifyMessage::decode(bufferlist::const_iterator& iter) {
  DECODE_START(1, iter);
  uint32_t notify_op;
  decode(notify_op, iter);
  // select the correct payload variant based upon the encoded op;
  // unrecognized ops map to UnknownPayload (ignored on decode)
  switch (notify_op) {
  case NOTIFY_OP_IMAGE_ADDED:
    payload = ImageAddedPayload();
    break;
  case NOTIFY_OP_IMAGE_REMOVED:
    payload = ImageRemovedPayload();
    break;
  default:
    payload = UnknownPayload();
    break;
  }
  apply_visitor(watcher::util::DecodePayloadVisitor(struct_v, iter), payload);
  DECODE_FINISH(iter);
}
void NotifyMessage::dump(Formatter *f) const {
  apply_visitor(DumpPayloadVisitor(f), payload);
}
// Sample instances for ceph-dencoder round-trip tests.
void NotifyMessage::generate_test_instances(std::list<NotifyMessage *> &o) {
  o.push_back(new NotifyMessage{ImageAddedPayload{
    "id", {cls::rbd::TRASH_IMAGE_SOURCE_USER, "name", {}, {}}}});
  o.push_back(new NotifyMessage{ImageRemovedPayload{"id"}});
}
// Renders a trash-watcher notify op as a human-readable name; unrecognized
// values print "Unknown (<numeric value>)".
std::ostream &operator<<(std::ostream &out, const NotifyOp &op) {
  switch (op) {
  case NOTIFY_OP_IMAGE_ADDED:
    return out << "ImageAdded";
  case NOTIFY_OP_IMAGE_REMOVED:
    return out << "ImageRemoved";
  default:
    return out << "Unknown (" << static_cast<uint32_t>(op) << ")";
  }
}
} // namespace trash_watcher
} // namespace librbd
| 3,267 | 23.946565 | 82 | cc |
null | ceph-main/src/librbd/trash_watcher/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_TRASH_WATCHER_TYPES_H
#define CEPH_LIBRBD_TRASH_WATCHER_TYPES_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "cls/rbd/cls_rbd_types.h"
#include <iosfwd>
#include <list>
#include <string>
#include <boost/variant.hpp>
namespace librbd {
namespace trash_watcher {
enum NotifyOp {
NOTIFY_OP_IMAGE_ADDED = 0,
NOTIFY_OP_IMAGE_REMOVED = 1
};
// Payload sent when an image is added to the pool trash.
struct ImageAddedPayload {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_IMAGE_ADDED;
  std::string image_id;
  cls::rbd::TrashImageSpec trash_image_spec;
  ImageAddedPayload() {
  }
  ImageAddedPayload(const std::string& image_id,
                    const cls::rbd::TrashImageSpec& trash_image_spec)
    : image_id(image_id), trash_image_spec(trash_image_spec) {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};
// Payload sent when an image is removed from the pool trash.
struct ImageRemovedPayload {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_IMAGE_REMOVED;
  std::string image_id;
  ImageRemovedPayload() {
  }
  ImageRemovedPayload(const std::string& image_id)
    : image_id(image_id) {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};
// Placeholder for ops this build does not understand; encode() aborts and
// decode() ignores the body (forward compatibility).
struct UnknownPayload {
  static const NotifyOp NOTIFY_OP = static_cast<NotifyOp>(-1);
  UnknownPayload() {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};
typedef boost::variant<ImageAddedPayload,
                       ImageRemovedPayload,
                       UnknownPayload> Payload;
// Versioned envelope carrying exactly one of the payload variants.
struct NotifyMessage {
  NotifyMessage(const Payload &payload = UnknownPayload()) : payload(payload) {
  }
  Payload payload;
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
  static void generate_test_instances(std::list<NotifyMessage *> &o);
};
WRITE_CLASS_ENCODER(NotifyMessage);
std::ostream &operator<<(std::ostream &out, const NotifyOp &op);
} // namespace trash_watcher
} // namespace librbd
using librbd::trash_watcher::encode;
using librbd::trash_watcher::decode;
#endif // CEPH_LIBRBD_TRASH_WATCHER_TYPES_H
| 2,375 | 23.244898 | 79 | h |
null | ceph-main/src/librbd/watcher/Notifier.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/watcher/Notifier.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/watcher/Types.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::watcher::Notifier: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace watcher {
const uint64_t Notifier::NOTIFY_TIMEOUT = 5000;
// Completion context for a single aio_notify; holds the raw reply buffer
// and the optional decoded-response destination.
Notifier::C_AioNotify::C_AioNotify(Notifier *notifier, NotifyResponse *response,
                                   Context *on_finish)
  : notifier(notifier), response(response), on_finish(on_finish) {
}
// On completion, decode the acks/timeouts reply into *response (0 and
// -ETIMEDOUT both carry a reply buffer); a malformed reply becomes -EBADMSG.
// Always forwards to Notifier::handle_notify for bookkeeping.
void Notifier::C_AioNotify::finish(int r) {
  if (response != nullptr) {
    if (r == 0 || r == -ETIMEDOUT) {
      try {
        auto it = out_bl.cbegin();
        decode(*response, it);
      } catch (const buffer::error &err) {
        r = -EBADMSG;
      }
    }
  }
  notifier->handle_notify(r, on_finish);
}
Notifier::Notifier(asio::ContextWQ *work_queue, IoCtx &ioctx,
                   const std::string &oid)
  : m_work_queue(work_queue), m_ioctx(ioctx), m_oid(oid),
    m_aio_notify_lock(ceph::make_mutex(util::unique_lock_name(
      "librbd::object_watcher::Notifier::m_aio_notify_lock", this))) {
  m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
}
// All in-flight notifications must have completed before destruction.
Notifier::~Notifier() {
  std::lock_guard aio_notify_locker{m_aio_notify_lock};
  ceph_assert(m_pending_aio_notifies == 0);
}
// Invokes on_finish once all currently pending notifications complete;
// fires immediately (via the work queue) when none are outstanding.
void Notifier::flush(Context *on_finish) {
  std::lock_guard aio_notify_locker{m_aio_notify_lock};
  if (m_pending_aio_notifies == 0) {
    m_work_queue->queue(on_finish, 0);
    return;
  }
  m_aio_notify_flush_ctxs.push_back(on_finish);
}
// Sends an async watch notification on m_oid; the optional response will be
// populated with per-client acks/timeouts before on_finish runs.
void Notifier::notify(bufferlist &bl, NotifyResponse *response,
                      Context *on_finish) {
  {
    std::lock_guard aio_notify_locker{m_aio_notify_lock};
    ++m_pending_aio_notifies;
    ldout(m_cct, 20) << "pending=" << m_pending_aio_notifies << dendl;
  }
  C_AioNotify *ctx = new C_AioNotify(this, response, on_finish);
  librados::AioCompletion *comp = util::create_rados_callback(ctx);
  int r = m_ioctx.aio_notify(m_oid, comp, bl, NOTIFY_TIMEOUT, &ctx->out_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Completion bookkeeping: decrements the pending count, releases any queued
// flush contexts when it hits zero, then queues the caller's on_finish.
void Notifier::handle_notify(int r, Context *on_finish) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  std::lock_guard aio_notify_locker{m_aio_notify_lock};
  ceph_assert(m_pending_aio_notifies > 0);
  --m_pending_aio_notifies;
  ldout(m_cct, 20) << "pending=" << m_pending_aio_notifies << dendl;
  if (m_pending_aio_notifies == 0) {
    for (auto ctx : m_aio_notify_flush_ctxs) {
      m_work_queue->queue(ctx, 0);
    }
    m_aio_notify_flush_ctxs.clear();
  }
  if (on_finish != nullptr) {
    m_work_queue->queue(on_finish, r);
  }
}
} // namespace watcher
} // namespace librbd
| 2,897 | 27.98 | 80 | cc |
null | ceph-main/src/librbd/watcher/Notifier.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_NOTIFIER_H
#define CEPH_LIBRBD_WATCHER_NOTIFIER_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include <list>
namespace librbd {
namespace asio { struct ContextWQ; }
namespace watcher {
struct NotifyResponse;
// Thread-safe helper for sending watch/notify messages on a single RADOS
// object and tracking in-flight notifications so they can be flushed.
class Notifier {
public:
  // notify timeout passed to librados aio_notify (presumably milliseconds —
  // confirm against the librados API)
  static const uint64_t NOTIFY_TIMEOUT;
  Notifier(asio::ContextWQ *work_queue, librados::IoCtx &ioctx,
           const std::string &oid);
  ~Notifier();
  // Completes on_finish once all pending notifications have finished.
  void flush(Context *on_finish);
  // Sends bl as a notification; optional response collects acks/timeouts.
  void notify(bufferlist &bl, NotifyResponse *response, Context *on_finish);
private:
  typedef std::list<Context*> Contexts;
  // per-notification completion state; see the .cc for decode behavior
  struct C_AioNotify : public Context {
    Notifier *notifier;
    NotifyResponse *response;
    Context *on_finish;
    bufferlist out_bl;
    C_AioNotify(Notifier *notifier, NotifyResponse *response,
                Context *on_finish);
    void finish(int r) override;
  };
  asio::ContextWQ *m_work_queue;
  librados::IoCtx &m_ioctx;
  CephContext *m_cct;
  std::string m_oid;
  // guards the pending counter and flush list below
  ceph::mutex m_aio_notify_lock;
  size_t m_pending_aio_notifies = 0;
  Contexts m_aio_notify_flush_ctxs;
  void handle_notify(int r, Context *on_finish);
};
} // namespace watcher
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_NOTIFIER_H
| 1,437 | 21.123077 | 76 | h |
null | ceph-main/src/librbd/watcher/RewatchRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/watcher/RewatchRequest.h"
#include "common/ceph_mutex.h"
#include "common/errno.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::watcher::RewatchRequest: " \
<< this << " " << __func__ << " "
namespace librbd {
using util::create_context_callback;
using util::create_rados_callback;
namespace watcher {
using std::string;
// All pointers/references are borrowed; *watch_handle is both input (current
// handle) and output (new handle after the rewatch).
RewatchRequest::RewatchRequest(librados::IoCtx& ioctx, const string& oid,
                               ceph::shared_mutex &watch_lock,
                               librados::WatchCtx2 *watch_ctx,
                               uint64_t *watch_handle, Context *on_finish)
  : m_ioctx(ioctx), m_oid(oid), m_watch_lock(watch_lock),
    m_watch_ctx(watch_ctx), m_watch_handle(watch_handle),
    m_on_finish(on_finish) {
}
// Entry point: drop the stale watch first, then re-establish it.
void RewatchRequest::send() {
  unwatch();
}
// Asynchronously tears down the existing watch.  Caller must hold the watch
// lock for write; the stored handle is cleared up-front so concurrent
// readers see the watch as gone.  Skips straight to rewatch when no watch
// is registered.
void RewatchRequest::unwatch() {
  ceph_assert(ceph_mutex_is_wlocked(m_watch_lock));
  if (*m_watch_handle == 0) {
    rewatch();
    return;
  }
  CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
  ldout(cct, 10) << dendl;
  uint64_t watch_handle = 0;
  std::swap(*m_watch_handle, watch_handle);
  librados::AioCompletion *aio_comp = create_rados_callback<
    RewatchRequest, &RewatchRequest::handle_unwatch>(this);
  int r = m_ioctx.aio_unwatch(watch_handle, aio_comp);
  ceph_assert(r == 0);
  aio_comp->release();
}
// -EBLOCKLISTED is fatal (no point re-watching); any other unwatch error is
// logged but the rewatch is still attempted.
void RewatchRequest::handle_unwatch(int r) {
  CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
  ldout(cct, 10) << "r=" << r << dendl;
  if (r == -EBLOCKLISTED) {
    lderr(cct) << "client blocklisted" << dendl;
    finish(r);
    return;
  } else if (r < 0) {
    lderr(cct) << "failed to unwatch: " << cpp_strerror(r) << dendl;
  }
  rewatch();
}
// Asynchronously re-registers the watch on m_oid with the original WatchCtx2.
void RewatchRequest::rewatch() {
  CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
  ldout(cct, 10) << dendl;
  librados::AioCompletion *aio_comp = create_rados_callback<
    RewatchRequest, &RewatchRequest::handle_rewatch>(this);
  int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_rewatch_handle, m_watch_ctx);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Publishes the new handle (0 on failure) under the watch lock before
// completing with the watch result.
void RewatchRequest::handle_rewatch(int r) {
  CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
  ldout(cct, 10) << "r=" << r << dendl;
  if (r < 0) {
    lderr(cct) << "failed to watch object: " << cpp_strerror(r)
               << dendl;
    m_rewatch_handle = 0;
  }
  {
    std::unique_lock watch_locker{m_watch_lock};
    *m_watch_handle = m_rewatch_handle;
  }
  finish(r);
}
// Completes the user callback and self-destructs (heap-allocated request).
void RewatchRequest::finish(int r) {
  CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
  ldout(cct, 10) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace watcher
} // namespace librbd
| 2,987 | 26.412844 | 79 | cc |
null | ceph-main/src/librbd/watcher/RewatchRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
#define CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
#include "common/ceph_mutex.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
struct Context;
namespace librbd {
namespace watcher {
// Async request that re-establishes a RADOS watch after it was lost:
// unwatches the stale handle, watches again, and stores the new handle.
// Self-deleting; send() requires the watch lock held for write.
class RewatchRequest {
public:
  static RewatchRequest *create(librados::IoCtx& ioctx, const std::string& oid,
                                ceph::shared_mutex &watch_lock,
                                librados::WatchCtx2 *watch_ctx,
                                uint64_t *watch_handle, Context *on_finish) {
    return new RewatchRequest(ioctx, oid, watch_lock, watch_ctx, watch_handle,
                              on_finish);
  }
  RewatchRequest(librados::IoCtx& ioctx, const std::string& oid,
                 ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx,
                 uint64_t *watch_handle, Context *on_finish);
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * UNWATCH
   *    |
   *    |  . . . .
   *    |  .     . (recoverable error)
   *    v  v     .
   * REWATCH . . .
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  librados::IoCtx& m_ioctx;
  std::string m_oid;
  ceph::shared_mutex &m_watch_lock;     // guards *m_watch_handle
  librados::WatchCtx2 *m_watch_ctx;
  uint64_t *m_watch_handle;             // caller-owned; updated in place
  Context *m_on_finish;
  uint64_t m_rewatch_handle = 0;        // handle returned by aio_watch
  void unwatch();
  void handle_unwatch(int r);
  void rewatch();
  void handle_rewatch(int r);
  void finish(int r);
};
} // namespace watcher
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
| 1,697 | 21.342105 | 80 | h |
null | ceph-main/src/librbd/watcher/Types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/watcher/Types.h"
#include "common/Formatter.h"
namespace librbd {
namespace watcher {
// Wire encoding for ClientId: gid then handle.  Field order is part of the
// on-wire format — do not reorder.
void ClientId::encode(bufferlist &bl) const {
  using ceph::encode;
  encode(gid, bl);
  encode(handle, bl);
}
void ClientId::decode(bufferlist::const_iterator &iter) {
  using ceph::decode;
  decode(gid, iter);
  decode(handle, iter);
}
void ClientId::dump(Formatter *f) const {
  f->dump_unsigned("gid", gid);
  f->dump_unsigned("handle", handle);
}
// Wire encoding for NotifyResponse: ack map then timed-out client list.
void NotifyResponse::encode(bufferlist& bl) const {
  using ceph::encode;
  encode(acks, bl);
  encode(timeouts, bl);
}
void NotifyResponse::decode(bufferlist::const_iterator& iter) {
  using ceph::decode;
  decode(acks, iter);
  decode(timeouts, iter);
}
// Renders a client id as "[gid,handle]".
std::ostream &operator<<(std::ostream &out,
                         const ClientId &client_id) {
  return out << '[' << client_id.gid << ',' << client_id.handle << ']';
}
} // namespace watcher
} // namespace librbd
| 1,041 | 21.652174 | 70 | cc |
null | ceph-main/src/librbd/watcher/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_TYPES_H
#define CEPH_LIBRBD_WATCHER_TYPES_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
namespace ceph { class Formatter; }
namespace librbd {
class Watcher;
namespace watcher {
// Identifies a watch/notify client instance by global id + watch handle.
// Default-constructed (0,0) means "no client" (see is_valid()).
struct ClientId {
  uint64_t gid;
  uint64_t handle;
  ClientId() : gid(0), handle(0) {}
  ClientId(uint64_t gid, uint64_t handle) : gid(gid), handle(handle) {}
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
  // true unless this is the default (0,0) sentinel
  inline bool is_valid() const {
    return (*this != ClientId());
  }
  inline bool operator==(const ClientId &rhs) const {
    return (gid == rhs.gid && handle == rhs.handle);
  }
  inline bool operator!=(const ClientId &rhs) const {
    return !(*this == rhs);
  }
  // lexicographic (gid, handle) ordering for use as a map key
  inline bool operator<(const ClientId &rhs) const {
    if (gid != rhs.gid) {
      return gid < rhs.gid;
    } else {
      return handle < rhs.handle;
    }
  }
};
// Aggregated notify result: per-client ack payloads plus the clients that
// failed to respond before the timeout.
struct NotifyResponse {
  std::map<ClientId, bufferlist> acks;
  std::vector<ClientId> timeouts;
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
};
// Traits hook allowing tests to substitute the Watcher type.
template <typename ImageCtxT>
struct Traits {
  typedef librbd::Watcher Watcher;
};
std::ostream &operator<<(std::ostream &out,
const ClientId &client);
WRITE_CLASS_ENCODER(ClientId);
WRITE_CLASS_ENCODER(NotifyResponse);
} // namespace watcher
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_TYPES_H
| 1,597 | 21.194444 | 71 | h |
null | ceph-main/src/librbd/watcher/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_UTILS_H
#define CEPH_LIBRBD_WATCHER_UTILS_H
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "include/Context.h"
#include "librbd/Watcher.h"
namespace ceph { class Formatter; }
namespace librbd {
namespace watcher {
namespace util {
// boost::variant visitor dispatching a decoded payload to the watcher's
// handle_payload() overload; auto-acks unless the handler keeps the ack
// context for asynchronous completion (returns false).
template <typename Watcher>
struct HandlePayloadVisitor : public boost::static_visitor<void> {
  Watcher *watcher;
  uint64_t notify_id;
  uint64_t handle;
  HandlePayloadVisitor(Watcher *watcher_, uint64_t notify_id_,
                       uint64_t handle_)
    : watcher(watcher_), notify_id(notify_id_), handle(handle_)
  {
  }
  template <typename P>
  inline void operator()(const P &payload) const {
    typename Watcher::C_NotifyAck *ctx =
      new typename Watcher::C_NotifyAck(watcher, notify_id, handle);
    if (watcher->handle_payload(payload, ctx)) {
      ctx->complete(0);
    }
  }
};
// Visitor that writes the payload's op code followed by its body — the
// standard envelope format shared by the watcher message types.
class EncodePayloadVisitor : public boost::static_visitor<void> {
public:
  explicit EncodePayloadVisitor(bufferlist &bl) : m_bl(bl) {}
  template <typename P>
  inline void operator()(const P &payload) const {
    using ceph::encode;
    encode(static_cast<uint32_t>(P::NOTIFY_OP), m_bl);
    payload.encode(m_bl);
  }
private:
  bufferlist &m_bl;
};
// Visitor that decodes the payload body (the op code was already consumed
// to select the variant alternative).
class DecodePayloadVisitor : public boost::static_visitor<void> {
public:
  DecodePayloadVisitor(__u8 version, bufferlist::const_iterator &iter)
    : m_version(version), m_iter(iter) {}
  template <typename P>
  inline void operator()(P &payload) const {
    payload.decode(m_version, m_iter);
  }
private:
  __u8 m_version;
  bufferlist::const_iterator &m_iter;
};
} // namespace util
} // namespace watcher
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_UTILS_H
| 1,804 | 23.066667 | 70 | h |
null | ceph-main/src/log/Entry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_LOG_ENTRY_H
#define __CEPH_LOG_ENTRY_H
#include "log/LogClock.h"
#include "common/StackStringStream.h"
#include "boost/container/small_vector.hpp"
#include <pthread.h>
#include <string_view>
namespace ceph {
namespace logging {
/// Base class for one log line: captures the timestamp, emitting thread,
/// priority and subsystem at construction time.  The message text itself is
/// supplied by subclasses via strv()/size().
class Entry {
public:
  using time = log_time;

  Entry() = delete;
  // Stamp the entry immediately so the logged time reflects when the message
  // was generated, not when the flusher thread got around to writing it.
  Entry(short pr, short sub) :
    m_stamp(clock().now()),
    m_thread(pthread_self()),
    m_prio(pr),
    m_subsys(sub)
  {}
  Entry(const Entry &) = default;
  Entry& operator=(const Entry &) = default;
  Entry(Entry &&e) = default;
  Entry& operator=(Entry &&e) = default;
  virtual ~Entry() = default;

  /// View of the formatted message text (storage owned by the subclass).
  virtual std::string_view strv() const = 0;
  /// Length of the message text in bytes.
  virtual std::size_t size() const = 0;

  time m_stamp;
  pthread_t m_thread;
  short m_prio, m_subsys;

  // Process-wide clock shared by all entries; switched between coarse and
  // fine resolution via Log::set_coarse_timestamps().
  static log_clock& clock() {
    static log_clock clock;
    return clock;
  }
};
/* This should never be moved to the heap! Only allocate this on the stack. See
 * CachedStackStringStream for rationale.
 */
class MutableEntry : public Entry {
public:
  MutableEntry() = delete;
  MutableEntry(short pr, short sub) : Entry(pr, sub) {}
  // Non-copyable and non-movable: the entry owns a cached stack-stream slot
  // that must not outlive or escape the constructing stack frame.
  MutableEntry(const MutableEntry&) = delete;
  MutableEntry& operator=(const MutableEntry&) = delete;
  MutableEntry(MutableEntry&&) = delete;
  MutableEntry& operator=(MutableEntry&&) = delete;
  ~MutableEntry() override = default;

  /// Stream into which the caller builds the message text in place.
  std::ostream& get_ostream() {
    return *cos;
  }

  std::string_view strv() const override {
    return cos->strv();
  }
  std::size_t size() const override {
    return cos->strv().size();
  }

private:
  CachedStackStringStream cos;
};
/// Owning, heap-friendly snapshot of an Entry: copies the message text out of
/// a (stack-only) MutableEntry into inline small-vector storage so entries
/// can be queued and retained in the recent-events ring buffer.
class ConcreteEntry : public Entry {
public:
  ConcreteEntry() = delete;

  /// Deep-copy any Entry (typically a MutableEntry), including its message.
  ConcreteEntry(const Entry& e) : Entry(e) {
    auto strv = e.strv();
    str.reserve(strv.size());
    str.insert(str.end(), strv.begin(), strv.end());
  }
  ConcreteEntry& operator=(const Entry& e) {
    Entry::operator=(e);
    auto strv = e.strv();
    str.reserve(strv.size());
    str.assign(strv.begin(), strv.end());
    return *this;
  }

  // Both move operations are noexcept (matching the move constructor's
  // existing declaration) so std::vector / boost::circular_buffer relocate
  // entries with moves rather than falling back to copies.
  ConcreteEntry(ConcreteEntry&& e) noexcept : Entry(e), str(std::move(e.str)) {}
  ConcreteEntry& operator=(ConcreteEntry&& e) noexcept {
    Entry::operator=(e);
    str = std::move(e.str);
    return *this;
  }
  ~ConcreteEntry() override = default;

  std::string_view strv() const override {
    return std::string_view(str.data(), str.size());
  }
  std::size_t size() const override {
    return str.size();
  }

private:
  // Inline storage for typical messages; spills to the heap past 1024 bytes.
  boost::container::small_vector<char, 1024> str;
};
}
}
#endif
| 2,598 | 21.405172 | 80 | h |
null | ceph-main/src/log/Log.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Log.h"
#include "common/errno.h"
#include "common/safe_io.h"
#include "common/Graylog.h"
#include "common/Journald.h"
#include "common/valgrind.h"
#include "include/ceph_assert.h"
#include "include/compat.h"
#include "include/on_exit.h"
#include "include/uuid.h"
#include "Entry.h"
#include "LogClock.h"
#include "SubsystemMap.h"
#include <boost/container/vector.hpp>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <syslog.h>
#include <algorithm>
#include <iostream>
#include <set>
#include <fmt/format.h>
#include <fmt/ostream.h>
#define MAX_LOG_BUF 65536
namespace ceph {
namespace logging {
static OnExitManager exit_callbacks;
static void log_on_exit(void *p)
{
Log *l = *(Log **)p;
if (l)
l->flush();
delete (Log **)p;// Delete allocated pointer (not Log object, the pointer only!)
}
// Construct an (unstarted) log: size the recent-entry ring, pre-reserve the
// flush buffer, and probe stderr so a FIFO-backed stderr gets the
// atomic-write path in _log_stderr().
Log::Log(const SubsystemMap *s)
  : m_indirect_this(nullptr),
    m_subs(s),
    m_recent(DEFAULT_MAX_RECENT)
{
  m_log_buf.reserve(MAX_LOG_BUF);
  _configure_stderr();
}

Log::~Log()
{
  // Disarm the exit callback (if set_flush_on_exit() registered one) so
  // log_on_exit() becomes a no-op for this instance.
  if (m_indirect_this) {
    *m_indirect_this = nullptr;
  }

  ceph_assert(!is_started());
  if (m_fd >= 0) {
    VOID_TEMP_FAILURE_RETRY(::close(m_fd));
    m_fd = -1;
  }
}
// Inspect the stderr fd; when it is a FIFO (e.g. a container runtime's log
// pipe), switch it to O_NONBLOCK and enable the poll-based chunked-write
// path in _log_stderr() so individual log lines are never torn.
void Log::_configure_stderr()
{
#ifndef _WIN32
  struct stat info;
  if (int rc = fstat(m_fd_stderr, &info); rc == -1) {
    std::cerr << "failed to stat stderr: " << cpp_strerror(errno) << std::endl;
    return;
  }

  if (S_ISFIFO(info.st_mode)) {
    /* Set O_NONBLOCK on FIFO stderr file. We want to ensure atomic debug log
     * writes so they do not get partially read by e.g. buggy container
     * runtimes. See also IEEE Std 1003.1-2017 and Log::_log_stderr below.
     *
     * This isn't required on Windows.
     */
    int flags = fcntl(m_fd_stderr, F_GETFL);
    if (flags == -1) {
      std::cerr << "failed to get fcntl flags for stderr: " << cpp_strerror(errno) << std::endl;
      return;
    }
    if (!(flags & O_NONBLOCK)) {
      flags |= O_NONBLOCK;
      flags = fcntl(m_fd_stderr, F_SETFL, flags);
      if (flags == -1) {
        std::cerr << "failed to set fcntl flags for stderr: " << cpp_strerror(errno) << std::endl;
        return;
      }
    }
    do_stderr_poll = true;
  }
#endif // !_WIN32
}
// Choose coarse (cheaper) or fine-grained timestamps for subsequently
// created entries; delegates to the shared Entry clock.
void Log::set_coarse_timestamps(bool coarse) {
  std::scoped_lock lock(m_flush_mutex);
  if (coarse)
    Entry::clock().coarsen();
  else
    Entry::clock().refine();
}

void Log::set_flush_on_exit()
{
  std::scoped_lock lock(m_flush_mutex);

  // Make sure we flush on shutdown.  We do this by deliberately
  // leaking an indirect pointer to ourselves (on_exit() can't
  // unregister a callback).  This is not racy only because we
  // assume that exit() won't race with ~Log().
  if (m_indirect_this == NULL) {
    m_indirect_this = new (Log*)(this);
    exit_callbacks.add_callback(log_on_exit, m_indirect_this);
  }
}
// Cap on queued-but-unflushed entries; submit_entry() blocks submitters
// once the queue exceeds this size.
void Log::set_max_new(std::size_t n)
{
  std::scoped_lock lock(m_queue_mutex);
  m_max_new = n;
}

// Resize the ring of already-flushed entries retained for dump_recent().
void Log::set_max_recent(std::size_t n)
{
  std::scoped_lock lock(m_flush_mutex);
  m_recent.set_capacity(n);
}

// Record the log file path; takes effect on the next reopen_log_file().
void Log::set_log_file(std::string_view fn)
{
  std::scoped_lock lock(m_flush_mutex);
  m_log_file = fn;
}

// Prefix prepended to every line written to stderr.
void Log::set_log_stderr_prefix(std::string_view p)
{
  std::scoped_lock lock(m_flush_mutex);
  m_log_stderr_prefix = p;
}
// Close and reopen the log file (after rotation or a path change).  No-op
// until the flusher thread has been started.
void Log::reopen_log_file()
{
  std::scoped_lock lock(m_flush_mutex);
  if (!is_started()) {
    return;
  }
  m_flush_mutex_holder = pthread_self();
  if (m_fd >= 0) {
    VOID_TEMP_FAILURE_RETRY(::close(m_fd));
    m_fd = -1;
  }
  if (m_log_file.length()) {
    m_fd = ::open(m_log_file.c_str(), O_CREAT|O_WRONLY|O_APPEND|O_CLOEXEC, 0644);
    if (m_fd >= 0 && (m_uid || m_gid)) {
      // re-apply the requested ownership on the freshly created file
      if (::fchown(m_fd, m_uid, m_gid) < 0) {
        int e = errno;
        std::cerr << "failed to chown " << m_log_file << ": " << cpp_strerror(e)
                  << std::endl;
      }
    }
  }
  m_flush_mutex_holder = 0;
}
// Change ownership of the open log file and remember the requested
// uid/gid.  Persisting them matters because reopen_log_file() consults
// m_uid/m_gid to restore ownership after rotation, and nothing else in this
// translation unit ever assigns those members.
void Log::chown_log_file(uid_t uid, gid_t gid)
{
  std::scoped_lock lock(m_flush_mutex);
  m_uid = uid;
  m_gid = gid;
  if (m_fd >= 0) {
    int r = ::fchown(m_fd, uid, gid);
    if (r < 0) {
      r = -errno;
      std::cerr << "failed to chown " << m_log_file << ": " << cpp_strerror(r)
	   << std::endl;
    }
  }
}
// Per-sink level thresholds.  `log` and `crash` are consulted by _flush()
// and _log_message(); see those functions for exactly how each is applied.
void Log::set_syslog_level(int log, int crash)
{
  std::scoped_lock lock(m_flush_mutex);
  m_syslog_log = log;
  m_syslog_crash = crash;
}

void Log::set_stderr_level(int log, int crash)
{
  std::scoped_lock lock(m_flush_mutex);
  m_stderr_log = log;
  m_stderr_crash = crash;
}

void Log::set_graylog_level(int log, int crash)
{
  std::scoped_lock lock(m_flush_mutex);
  m_graylog_log = log;
  m_graylog_crash = crash;
}

// Lazily create the Graylog sink; repeated calls are no-ops.
void Log::start_graylog(const std::string& host,
			const uuid_d& fsid)
{
  std::scoped_lock lock(m_flush_mutex);
  if (! m_graylog.get()) {
    m_graylog = std::make_shared<Graylog>(m_subs, "dlog");
    m_graylog->set_hostname(host);
    m_graylog->set_fsid(fsid);
  }
}

void Log::stop_graylog()
{
  std::scoped_lock lock(m_flush_mutex);
  m_graylog.reset();
}

void Log::set_journald_level(int log, int crash)
{
  std::scoped_lock lock(m_flush_mutex);
  m_journald_log = log;
  m_journald_crash = crash;
}

// Lazily create the systemd-journal sink; repeated calls are no-ops.
void Log::start_journald_logger()
{
  std::scoped_lock lock(m_flush_mutex);
  if (!m_journald) {
    m_journald = std::make_unique<JournaldLogger>(m_subs);
  }
}

void Log::stop_journald_logger()
{
  std::scoped_lock lock(m_flush_mutex);
  m_journald.reset();
}
// Queue one entry for the flusher thread.  Applies backpressure: while more
// than m_max_new entries are pending, the submitter blocks until the flusher
// drains the queue — unless the log is stopping or was never started.
void Log::submit_entry(Entry&& e)
{
  std::unique_lock lock(m_queue_mutex);
  m_queue_mutex_holder = pthread_self();

  if (unlikely(m_inject_segv))
    *(volatile int *)(0) = 0xdead;  // deliberate crash, armed by inject_segv() (tests)

  // wait for flush to catch up
  while (is_started() &&
	 m_new.size() > m_max_new) {
    if (m_stop) break; // force addition
    m_cond_loggers.wait(lock);
  }

  m_new.emplace_back(std::move(e));
  m_cond_flusher.notify_all();
  m_queue_mutex_holder = 0;
}
// Drain all queued entries to the configured sinks.  Swapping m_new into the
// preallocated m_flush vector keeps the queue lock held only briefly, so
// submitters are not blocked for the duration of the sink writes.
void Log::flush()
{
  std::scoped_lock lock1(m_flush_mutex);
  m_flush_mutex_holder = pthread_self();
  {
    std::scoped_lock lock2(m_queue_mutex);
    m_queue_mutex_holder = pthread_self();
    assert(m_flush.empty());
    m_flush.swap(m_new);
    m_cond_loggers.notify_all();  // wake submitters waiting on backpressure
    m_queue_mutex_holder = 0;
  }
  _flush(m_flush, false);
  m_flush_mutex_holder = 0;
}
// Write one buffer to the log file, reporting a failure to stderr only when
// the outcome differs from the previous write (avoids flooding stderr while
// the file stays unwritable).
void Log::_log_safe_write(std::string_view sv)
{
  if (m_fd < 0) {
    return;
  }
  const int rc = safe_write(m_fd, sv.data(), sv.size());
  if (rc == m_fd_last_error) {
    return;  // same outcome as last time; stay quiet
  }
  if (rc < 0) {
    std::cerr << "problem writing to " << m_log_file
              << ": " << cpp_strerror(rc)
              << std::endl;
  }
  m_fd_last_error = rc;
}
// Redirect stderr output to an arbitrary fd (used by the pipe tests) and
// re-probe it for FIFO/atomic-write handling.
void Log::set_stderr_fd(int fd)
{
  m_fd_stderr = fd;
  _configure_stderr();
}

// Emit one formatted line to stderr.  On a FIFO (do_stderr_poll set by
// _configure_stderr), the prefixed line is written in chunks of at most
// PIPE_BUF bytes — which POSIX guarantees are written atomically to a pipe —
// polling for writability whenever the O_NONBLOCK pipe is full.
void Log::_log_stderr(std::string_view strv)
{
  if (do_stderr_poll) {
    auto& prefix = m_log_stderr_prefix;
    size_t const len = prefix.size() + strv.size();
    // assemble "<prefix><line>\0" contiguously so chunk boundaries are ours
    boost::container::small_vector<char, PIPE_BUF> buf;
    buf.resize(len+1, '\0');
    memcpy(buf.data(), prefix.c_str(), prefix.size());
    memcpy(buf.data()+prefix.size(), strv.data(), strv.size());

    char const* const start = buf.data();
    char const* current = start;
    while ((size_t)(current-start) < len) {
      auto chunk = std::min<ssize_t>(PIPE_BUF, len-(ssize_t)(current-start));
      while (1) {
	ssize_t rc = write(m_fd_stderr, current, chunk);
	if (rc == chunk) {
	  current += chunk;
	  break;
	} else if (rc > 0) {
	  /* According to IEEE Std 1003.1-2017, this cannot happen:
	   *
	   * Write requests to a pipe or FIFO shall be handled in the same way as a regular file with the following exceptions:
	   * ...
	   * If the O_NONBLOCK flag is set ...
	   * ...
	   * A write request for {PIPE_BUF} or fewer bytes shall have the
	   * following effect: if there is sufficient space available in
	   * the pipe, write() shall transfer all the data and return the
	   * number of bytes requested. Otherwise, write() shall transfer
	   * no data and return -1 with errno set to [EAGAIN].
	   *
	   * In any case, handle misbehavior gracefully by incrementing current.
	   */
	  current += rc;
	  break;
	} else if (rc == -1) {
	  if (errno == EAGAIN) {
	    // pipe full: block in poll() until the reader makes room
	    struct pollfd pfd[1];
	    pfd[0].fd = m_fd_stderr;
	    pfd[0].events = POLLOUT;
	    poll(pfd, 1, -1);
	    /* ignore errors / success, just retry the write */
	  } else if (errno == EINTR) {
	    continue;
	  } else {
	    /* some other kind of error, no point logging if stderr writes fail */
	    return;
	  }
	}
      }
    }
  } else {
    fmt::print(std::cerr, "{}{}", m_log_stderr_prefix, strv);
  }
}
// Write whatever has accumulated in m_log_buf to the log file, then empty
// the buffer (capacity is retained for reuse).
void Log::_flush_logbuf()
{
  if (m_log_buf.empty()) {
    return;
  }
  _log_safe_write(std::string_view(m_log_buf.data(), m_log_buf.size()));
  m_log_buf.clear();
}
// Write each entry in t to every enabled sink (file, syslog, stderr,
// graylog, journald) and then retire it into the m_recent ring.  When
// crash=true (the dump_recent path), every entry is emitted regardless of
// the subsystem's configured level and each line is prefixed with a
// negative countdown index.
void Log::_flush(EntryVector& t, bool crash)
{
  long len = 0;
  if (t.empty()) {
    assert(m_log_buf.empty());
    return;
  }
  if (crash) {
    len = t.size();
  }
  for (auto& e : t) {
    auto prio = e.m_prio;
    auto stamp = e.m_stamp;
    auto sub = e.m_subsys;
    auto thread = e.m_thread;
    auto str = e.strv();

    bool should_log = crash || m_subs->get_log_level(sub) >= prio;
    bool do_fd = m_fd >= 0 && should_log;
    // NOTE(review): the *_crash thresholds are consulted even when
    // crash=false; confirm this is intended rather than
    // (crash ? m_*_crash : m_*_log).
    bool do_syslog = m_syslog_crash >= prio && should_log;
    bool do_stderr = m_stderr_crash >= prio && should_log;
    bool do_graylog2 = m_graylog_crash >= prio && should_log;
    bool do_journald = m_journald_crash >= prio && should_log;

    if (do_fd || do_syslog || do_stderr) {
      // Build "[countdown> ]<timestamp> <thread> <prio> <message>\n" in
      // place at the end of m_log_buf; 80 bytes covers the prefix fields.
      const std::size_t cur = m_log_buf.size();
      std::size_t used = 0;
      const std::size_t allocated = e.size() + 80;
      m_log_buf.resize(cur + allocated);

      char* const start = m_log_buf.data();
      char* pos = start + cur;

      if (crash) {
	used += (std::size_t)snprintf(pos + used, allocated - used, "%6ld> ", -(--len));
      }
      used += (std::size_t)append_time(stamp, pos + used, allocated - used);
      used += (std::size_t)snprintf(pos + used, allocated - used, " %lx %2d ", (unsigned long)thread, prio);
      memcpy(pos + used, str.data(), str.size());
      used += str.size();
      pos[used] = '\0';
      ceph_assert((used + 1 /* '\n' */) < allocated);

      // syslog wants the NUL-terminated line without the trailing newline
      if (do_syslog) {
	syslog(LOG_USER|LOG_INFO, "%s", pos);
      }

      /* now add newline */
      pos[used++] = '\n';

      if (do_stderr) {
	_log_stderr(std::string_view(pos, used));
      }

      if (do_fd) {
	m_log_buf.resize(cur + used);  // keep only the bytes actually formatted
      } else {
	m_log_buf.resize(0);
      }

      if (m_log_buf.size() > MAX_LOG_BUF) {
	_flush_logbuf();
      }
    }

    if (do_graylog2 && m_graylog) {
      m_graylog->log_entry(e);
    }
    if (do_journald && m_journald) {
      m_journald->log_entry(e);
    }

    m_recent.push_back(std::move(e));
  }
  t.clear();

  _flush_logbuf();
}
// Emit an out-of-band message (the headers/footers written by dump_recent)
// directly to the file, syslog and stderr, bypassing the entry queue and
// the m_recent ring.
void Log::_log_message(std::string_view s, bool crash)
{
  if (m_fd >= 0) {
    std::string b = fmt::format("{}\n", s);
    int r = safe_write(m_fd, b.data(), b.size());
    if (r < 0)
      std::cerr << "problem writing to " << m_log_file << ": " << cpp_strerror(r) << std::endl;
  }
  if ((crash ? m_syslog_crash : m_syslog_log) >= 0) {
    syslog(LOG_USER|LOG_INFO, "%.*s", static_cast<int>(s.size()), s.data());
  }
  if ((crash ? m_stderr_crash : m_stderr_log) >= 0) {
    std::cerr << s << std::endl;
  }
}
// Render a thread id as an integer for printing: pthread_t is an arithmetic
// type on some platforms and a pointer type on others, so the conversion is
// selected at compile time.
template<typename T>
static uint64_t tid_to_int(T tid)
{
  if constexpr (!std::is_pointer_v<T>) {
    return static_cast<uint64_t>(tid);
  } else {
    return reinterpret_cast<std::uintptr_t>(tid);
  }
}
// Dump everything we have after a failure: flush the pending queue, then
// re-emit the retained m_recent ring at crash verbosity, followed by the
// logging-level table and a pthread-id/name mapping for the threads seen.
void Log::dump_recent()
{
  std::scoped_lock lock1(m_flush_mutex);
  m_flush_mutex_holder = pthread_self();

  {
    std::scoped_lock lock2(m_queue_mutex);
    m_queue_mutex_holder = pthread_self();
    assert(m_flush.empty());
    m_flush.swap(m_new);
    m_queue_mutex_holder = 0;
  }

  _flush(m_flush, false);

  _log_message("--- begin dump of recent events ---", true);
  std::set<pthread_t> recent_pthread_ids;
  {
    // move the ring contents out and replay them through _flush(crash=true);
    // note _flush() pushes them back into m_recent as it goes
    EntryVector t;
    t.insert(t.end(), std::make_move_iterator(m_recent.begin()), std::make_move_iterator(m_recent.end()));
    m_recent.clear();
    for (const auto& e : t) {
      recent_pthread_ids.emplace(e.m_thread);
    }
    _flush(t, true);
  }

  _log_message("--- logging levels ---", true);
  for (const auto& p : m_subs->m_subsys) {
    _log_message(fmt::format("  {:2d}/{:2d} {}",
			     p.log_level, p.gather_level, p.name), true);
  }

  _log_message(fmt::format("  {:2d}/{:2d} (syslog threshold)",
			   m_syslog_log, m_syslog_crash), true);
  _log_message(fmt::format("  {:2d}/{:2d} (stderr threshold)",
			   m_stderr_log, m_stderr_crash), true);

  _log_message("--- pthread ID / name mapping for recent threads ---", true);
  for (const auto pthread_id : recent_pthread_ids)
  {
    char pthread_name[16] = {0}; //limited by 16B include terminating null byte.
    ceph_pthread_getname(pthread_id, pthread_name, sizeof(pthread_name));
    // we want the ID to be printed in the same format as we use for a log entry.
    // The reason is easier grepping.
    _log_message(fmt::format("  {:x} / {}",
			     tid_to_int(pthread_id), pthread_name), true);
  }

  _log_message(fmt::format("  max_recent {:9}", m_recent.capacity()), true);
  _log_message(fmt::format("  max_new    {:9}", m_max_new), true);
  _log_message(fmt::format("  log_file {}", m_log_file), true);

  _log_message("--- end dump of recent events ---", true);

  assert(m_log_buf.empty());

  m_flush_mutex_holder = 0;
}
// Launch the flusher thread.  Must not already be running.
void Log::start()
{
  ceph_assert(!is_started());
  {
    std::scoped_lock lock(m_queue_mutex);
    m_stop = false;
  }
  create("log");
}

// Signal the flusher thread to exit, wake anyone sleeping on the queue
// condition variables, and join the thread.  Safe to call when not started.
void Log::stop()
{
  if (!is_started()) {
    return;
  }
  {
    std::scoped_lock lock(m_queue_mutex);
    m_stop = true;
    m_cond_flusher.notify_one();
    m_cond_loggers.notify_all();
  }
  join();
}
// Flusher thread main loop: open the log file, then flush whenever entries
// are queued, sleeping on m_cond_flusher in between.  A final flush runs
// after stop() sets m_stop so nothing queued is lost.
void *Log::entry()
{
  reopen_log_file();
  {
    std::unique_lock lock(m_queue_mutex);
    m_queue_mutex_holder = pthread_self();
    while (!m_stop) {
      if (!m_new.empty()) {
	// drop the queue lock while writing so submitters can proceed
	m_queue_mutex_holder = 0;
	lock.unlock();
	flush();
	lock.lock();
	m_queue_mutex_holder = pthread_self();
	continue;
      }
      m_cond_flusher.wait(lock);
    }
    m_queue_mutex_holder = 0;
  }
  flush();
  return NULL;
}
bool Log::is_inside_log_lock()
{
return
pthread_self() == m_queue_mutex_holder ||
pthread_self() == m_flush_mutex_holder;
}
void Log::inject_segv()
{
m_inject_segv = true;
}
void Log::reset_segv()
{
m_inject_segv = false;
}
} // ceph::logging::
} // ceph::
| 14,755 | 23.390083 | 127 | cc |
null | ceph-main/src/log/Log.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_LOG_LOG_H
#define __CEPH_LOG_LOG_H
#include <boost/circular_buffer.hpp>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <string_view>
#include "common/Thread.h"
#include "common/likely.h"
#include "log/Entry.h"
#include <unistd.h>
struct uuid_d;
namespace ceph {
namespace logging {
class Graylog;
class JournaldLogger;
class SubsystemMap;
/// Asynchronous logger: producers queue entries via submit_entry(); a
/// dedicated flusher thread (the private Thread base) writes them to the
/// configured sinks (file, syslog, stderr, graylog, journald) and retains a
/// ring of recent entries for post-mortem dumps.
class Log : private Thread
{
public:
  using Thread::is_started;

  Log(const SubsystemMap *s);
  ~Log() override;

  void set_flush_on_exit();

  void set_coarse_timestamps(bool coarse);
  void set_max_new(std::size_t n);
  void set_max_recent(std::size_t n);
  void set_log_file(std::string_view fn);
  void reopen_log_file();
  void chown_log_file(uid_t uid, gid_t gid);
  void set_log_stderr_prefix(std::string_view p);
  void set_stderr_fd(int fd);

  void flush();

  /// Flush, then re-emit the retained recent entries at crash verbosity.
  void dump_recent();

  void set_syslog_level(int log, int crash);
  void set_stderr_level(int log, int crash);
  void set_graylog_level(int log, int crash);

  void start_graylog(const std::string& host,
		     const uuid_d& fsid);
  void stop_graylog();

  void set_journald_level(int log, int crash);

  void start_journald_logger();
  void stop_journald_logger();

  std::shared_ptr<Graylog> graylog() { return m_graylog; }

  /// Queue one entry; may block when the queue exceeds max_new.
  void submit_entry(Entry&& e);

  void start();
  void stop();

  /// true if the log lock is held by our thread
  bool is_inside_log_lock();

  /// induce a segv on the next log event
  void inject_segv();
  void reset_segv();

protected:
  using EntryVector = std::vector<ConcreteEntry>;

  virtual void _flush(EntryVector& q, bool crash);

private:
  using EntryRing = boost::circular_buffer<ConcreteEntry>;

  static const std::size_t DEFAULT_MAX_NEW = 100;
  static const std::size_t DEFAULT_MAX_RECENT = 10000;

  // cell leaked to the exit callback; nulled by ~Log() to disarm it
  Log **m_indirect_this;

  const SubsystemMap *m_subs;

  // m_queue_mutex guards m_new (producers); m_flush_mutex guards the sinks,
  // m_recent and the fd state.  The *_holder fields record the owning
  // thread for is_inside_log_lock().
  std::mutex m_queue_mutex;
  std::mutex m_flush_mutex;
  std::condition_variable m_cond_loggers;
  std::condition_variable m_cond_flusher;

  pthread_t m_queue_mutex_holder;
  pthread_t m_flush_mutex_holder;

  EntryVector m_new;    ///< new entries
  EntryRing m_recent; ///< recent (less new) entries we've already written at low detail
  EntryVector m_flush; ///< entries to be flushed (here to optimize heap allocations)

  std::string m_log_file;
  int m_fd = -1;
  uid_t m_uid = 0;
  gid_t m_gid = 0;

  int m_fd_stderr = STDERR_FILENO;

  int m_fd_last_error = 0;  ///< last error we saw writing to fd (if any)

  int m_syslog_log = -2, m_syslog_crash = -2;
  int m_stderr_log = -1, m_stderr_crash = -1;
  int m_graylog_log = -3, m_graylog_crash = -3;
  int m_journald_log = -3, m_journald_crash = -3;

  std::string m_log_stderr_prefix;
  bool do_stderr_poll = false;  ///< stderr is a FIFO: use chunked atomic writes

  std::shared_ptr<Graylog> m_graylog;
  std::unique_ptr<JournaldLogger> m_journald;

  std::vector<char> m_log_buf;  ///< scratch buffer for formatting file output

  bool m_stop = false;

  std::size_t m_max_new = DEFAULT_MAX_NEW;

  bool m_inject_segv = false;

  void *entry() override;

  void _log_safe_write(std::string_view sv);
  void _flush_logbuf();
  void _log_message(std::string_view s, bool crash);
  void _configure_stderr();
  void _log_stderr(std::string_view strv);
};
}
}
#endif
| 3,335 | 20.803922 | 88 | h |
null | ceph-main/src/log/LogClock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LOG_CLOCK_H
#define CEPH_LOG_CLOCK_H
#include <cstdio>
#include <chrono>
#include <ctime>
#include <sys/time.h>
#include "include/ceph_assert.h"
#include "common/ceph_time.h"
#ifndef HAVE_SUSECONDS_T
typedef long suseconds_t;
#endif
namespace ceph {
namespace logging {
namespace _logclock {
// Because the underlying representations of a duration can be any
// arithmetic type we wish, slipping a coarseness tag there is the
// least hacky way to tag them. I'd also considered doing bit-stealing
// and just setting the low bit of the representation unconditionally
// to mark it as fine, BUT that would cut our nanosecond precision in
// half which sort of obviates the point of 'fine'…admittedly real
// computers probably don't care. More to the point it wouldn't be
// durable under arithmetic unless we wrote a whole class to support
// it /anyway/, and if I'm going to do that I may as well add a bool.
// (Yes I know we don't do arithmetic on log timestamps, but I don't
// want everything to suddenly break because someone did something
// that the std::chrono::timepoint contract actually supports.)
// Duration representation carrying a "coarse" tag next to the tick count.
// Combining a coarse value with a fine one can only lose precision, so
// arithmetic ORs the tags together.
struct taggedrep {
  uint64_t count;
  bool coarse;

  explicit taggedrep(uint64_t count) : count(count), coarse(true) {}
  taggedrep(uint64_t count, bool coarse) : count(count), coarse(coarse) {}

  explicit operator uint64_t() {
    return count;
  }
};

// Arithmetic: operate on the tick counts, propagate coarseness.
inline taggedrep operator +(const taggedrep& l, const taggedrep& r) {
  return taggedrep(l.count + r.count, l.coarse || r.coarse);
}
inline taggedrep operator -(const taggedrep& l, const taggedrep& r) {
  return taggedrep(l.count - r.count, l.coarse || r.coarse);
}
inline taggedrep operator *(const taggedrep& l, const taggedrep& r) {
  return taggedrep(l.count * r.count, l.coarse || r.coarse);
}
inline taggedrep operator /(const taggedrep& l, const taggedrep& r) {
  return taggedrep(l.count / r.count, l.coarse || r.coarse);
}
inline taggedrep operator %(const taggedrep& l, const taggedrep& r) {
  return taggedrep(l.count % r.count, l.coarse || r.coarse);
}

// Comparisons look only at the tick count, so coarse and fine stamps order
// consistently; everything is defined in terms of == and <.
inline bool operator ==(const taggedrep& l, const taggedrep& r) {
  return l.count == r.count;
}
inline bool operator !=(const taggedrep& l, const taggedrep& r) {
  return !(l == r);
}
inline bool operator <(const taggedrep& l, const taggedrep& r) {
  return l.count < r.count;
}
inline bool operator <=(const taggedrep& l, const taggedrep& r) {
  return !(r < l);
}
inline bool operator >=(const taggedrep& l, const taggedrep& r) {
  return !(l < r);
}
inline bool operator >(const taggedrep& l, const taggedrep& r) {
  return r < l;
}
}
/// Chrono-compatible clock whose resolution can be switched at runtime
/// between a coarse source (cheap) and a fine one; each produced time_point
/// remembers which source made it via the tagged representation.
class log_clock {
public:
  using rep = _logclock::taggedrep;
  using period = std::nano;
  using duration = std::chrono::duration<rep, period>;
  // The second template parameter defaults to the clock's duration
  // type.
  using time_point = std::chrono::time_point<log_clock>;
  static constexpr const bool is_steady = false;

  time_point now() noexcept {
    return appropriate_now();
  }

  void coarsen() {
    appropriate_now = coarse_now;
  }

  void refine() {
    appropriate_now = fine_now;
  }

  // Since our formatting is done in microseconds and we're using it
  // anyway, we may as well keep this one
  static timeval to_timeval(time_point t) {
    auto rep = t.time_since_epoch().count();
    timespan ts(rep.count);
#ifndef _WIN32
    return { static_cast<time_t>(std::chrono::duration_cast<std::chrono::seconds>(ts).count()),
	     static_cast<suseconds_t>(std::chrono::duration_cast<std::chrono::microseconds>(
	       ts % std::chrono::seconds(1)).count()) };
#else
    return { static_cast<long>(std::chrono::duration_cast<std::chrono::seconds>(ts).count()),
	     static_cast<long>(std::chrono::duration_cast<std::chrono::microseconds>(
	       ts % std::chrono::seconds(1)).count()) };
#endif
  }
private:
  // Wrap the underlying clocks, tagging each sample as coarse or fine.
  static time_point coarse_now() {
    return time_point(
      duration(_logclock::taggedrep(coarse_real_clock::now()
				    .time_since_epoch().count(), true)));
  }
  static time_point fine_now() {
    return time_point(
      duration(_logclock::taggedrep(real_clock::now()
				    .time_since_epoch().count(), false)));
  }
  // Indirection through a function pointer lets coarsen()/refine() switch
  // sources without any branching in now().
  time_point(*appropriate_now)() = coarse_now;
};
using log_time = log_clock::time_point;

// Format t into out as "YYYY-MM-DDTHH:MM:SS.<frac><tz>" in local time, using
// millisecond precision for coarse stamps and microsecond precision for fine
// ones.  Returns the number of characters written (excluding the NUL).
// NOTE(review): snprintf returns the would-be length; if outlen were ever too
// small the caller's offset arithmetic would run past the written data —
// callers currently size buffers generously (see Log::_flush), confirm.
inline int append_time(const log_time& t, char *out, int outlen) {
  bool coarse = t.time_since_epoch().count().coarse;
  auto tv = log_clock::to_timeval(t);
  std::tm bdt;
  time_t t_sec = tv.tv_sec;
  localtime_r(&t_sec, &bdt);
  char tz[32] = { 0 };
  strftime(tz, sizeof(tz), "%z", &bdt);

  int r;
  if (coarse) {
    r = std::snprintf(out, outlen, "%04d-%02d-%02dT%02d:%02d:%02d.%03ld%s",
		      bdt.tm_year + 1900, bdt.tm_mon + 1, bdt.tm_mday,
		      bdt.tm_hour, bdt.tm_min, bdt.tm_sec,
		      static_cast<long>(tv.tv_usec / 1000), tz);
  } else {
    r = std::snprintf(out, outlen, "%04d-%02d-%02dT%02d:%02d:%02d.%06ld%s",
		      bdt.tm_year + 1900, bdt.tm_mon + 1, bdt.tm_mday,
		      bdt.tm_hour, bdt.tm_min, bdt.tm_sec,
		      static_cast<long>(tv.tv_usec), tz);
  }
  // Since our caller just adds the return value to something without
  // checking it…
  ceph_assert(r >= 0);
  return r;
}
}
}
#endif
| 5,688 | 32.662722 | 95 | h |
null | ceph-main/src/log/SubsystemMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LOG_SUBSYSTEMS
#define CEPH_LOG_SUBSYSTEMS
#include <string>
#include <vector>
#include <algorithm>
#include "common/likely.h"
#include "common/subsys_types.h"
#include "include/ceph_assert.h"
namespace ceph {
namespace logging {
/// Per-subsystem log/gather level table.  The effective gather level for a
/// subsystem is always max(log_level, gather_level), cached in the compact
/// m_gather_levels array so should_gather() stays a single byte compare.
class SubsystemMap {
  // Access to the current gathering levels must be *FAST* as they are
  // read over and over from all places in the code (via should_gather()
  // by i.e. dout).
  std::array<uint8_t, ceph_subsys_get_num()> m_gather_levels;

  // The rest. Should be as small as possible to not unnecessarily
  // enlarge md_config_t and spread it other elements across cache
  // lines. Access can be slow.
  std::vector<ceph_subsys_item_t> m_subsys;

  friend class Log;

public:
  SubsystemMap() {
    constexpr auto s = ceph_subsys_get_as_array();
    m_subsys.reserve(s.size());

    std::size_t i = 0;
    for (const ceph_subsys_item_t& item : s) {
      m_subsys.emplace_back(item);
      m_gather_levels[i++] = std::max(item.log_level, item.gather_level);
    }
  }

  constexpr static std::size_t get_num() {
    return ceph_subsys_get_num();
  }

  constexpr static std::size_t get_max_subsys_len() {
    return ceph_subsys_max_name_length();
  }

  // Out-of-range subsystem ids fall back to subsystem 0.
  int get_log_level(unsigned subsys) const {
    if (subsys >= get_num())
      subsys = 0;
    return m_subsys[subsys].log_level;
  }

  int get_gather_level(unsigned subsys) const {
    if (subsys >= get_num())
      subsys = 0;
    return m_subsys[subsys].gather_level;
  }

  // TODO(rzarzynski): move to string_view?
  constexpr const char* get_name(unsigned subsys) const {
    if (subsys >= get_num())
      subsys = 0;
    return ceph_subsys_get_as_array()[subsys].name;
  }

  // Compile-time-level variant used by the dout macros.
  template <unsigned SubV, int LvlV>
  bool should_gather() const {
    static_assert(SubV < get_num(), "wrong subsystem ID");
    static_assert(LvlV >= -1 && LvlV <= 200);

    if constexpr (LvlV <= 0) {
      // handle the -1 and 0 levels entirely at compile-time.
      // Such debugs are intended to be gathered regardless even
      // of the user configuration.
      return true;
    } else {
      // we expect that setting level different than the default
      // is rather unusual.
      return expect(LvlV <= static_cast<int>(m_gather_levels[SubV]),
		    LvlV <= ceph_subsys_get_max_default_level(SubV));
    }
  }
  bool should_gather(const unsigned sub, int level) const {
    ceph_assert(sub < m_subsys.size());
    return level <= static_cast<int>(m_gather_levels[sub]);
  }

  // Setters keep the cached effective gather level consistent.
  void set_log_level(unsigned subsys, uint8_t log)
  {
    ceph_assert(subsys < m_subsys.size());
    m_subsys[subsys].log_level = log;
    m_gather_levels[subsys] = \
      std::max(log, m_subsys[subsys].gather_level);
  }

  void set_gather_level(unsigned subsys, uint8_t gather)
  {
    ceph_assert(subsys < m_subsys.size());
    m_subsys[subsys].gather_level = gather;
    m_gather_levels[subsys] = \
      std::max(m_subsys[subsys].log_level, gather);
  }
};
}
}
#endif
| 3,065 | 25.894737 | 73 | h |
null | ceph-main/src/log/test.cc | #include <gtest/gtest.h>
#include "log/Log.h"
#include "common/Clock.h"
#include "include/coredumpctl.h"
#include "SubsystemMap.h"
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "global/global_context.h"
#include "common/dout.h"
#include <unistd.h>
#include <limits.h>
using namespace std;
using namespace ceph::logging;
// Basic lifecycle: configure mixed per-subsystem levels, submit entries at
// varying priorities, then flush, dump recent and stop.
TEST(Log, Simple)
{
  SubsystemMap subs;
  subs.set_log_level(0, 10);
  subs.set_gather_level(0, 10);

  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 1);

  subs.set_log_level(2, 20);
  subs.set_gather_level(2, 2);

  subs.set_log_level(3, 10);
  subs.set_gather_level(3, 3);

  Log log(&subs);
  log.start();

  log.set_log_file("foo");
  log.reopen_log_file();

  log.set_stderr_level(5, -1);

  for (int i=0; i<100; i++) {
    int sys = i % 4;
    int l = 5 + (i%4);
    if (subs.should_gather(sys, l)) {
      MutableEntry e(l, sys);
      log.submit_entry(std::move(e));
    }
  }

  log.flush();

  log.dump_recent();

  log.stop();
}

// A stream put into a bad state (writing a null streambuf sets badbit) must
// not poison later, independently constructed entries.
TEST(Log, ReuseBad)
{
  SubsystemMap subs;
  subs.set_log_level(1, 1);
  subs.set_gather_level(1, 1);
  Log log(&subs);
  log.start();
  log.set_log_file("foo");
  log.reopen_log_file();

  const int l = 0;
  {
    MutableEntry e(l, 1);
    auto& out = e.get_ostream();
    out << (std::streambuf*)nullptr;
    EXPECT_TRUE(out.bad()); // writing nullptr to a stream sets its badbit
    log.submit_entry(std::move(e));
  }
  {
    MutableEntry e(l, 1);
    auto& out = e.get_ostream();
    EXPECT_FALSE(out.bad()); // should not see failures from previous log entry
    out << "hello world";
    log.submit_entry(std::move(e));
  }

  log.flush();
  log.stop();
}
// Shared iteration count for the throughput-style tests below.
int many = 10000;

// Levels are set to 1 while entries are at 10, so should_gather() rejects
// everything: exercises the cheap (no-submit) path.
TEST(Log, ManyNoGather)
{
  SubsystemMap subs;
  subs.set_log_level(1, 1);
  subs.set_gather_level(1, 1);
  Log log(&subs);
  log.start();
  log.set_log_file("big");
  log.reopen_log_file();
  for (int i=0; i<many; i++) {
    int l = 10;
    if (subs.should_gather(1, l))
      log.submit_entry(MutableEntry(1, 0));
  }
  log.flush();
  log.stop();
}

// Entries are gathered and carry a moderate message.
TEST(Log, ManyGatherLog)
{
  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 10);
  Log log(&subs);
  log.start();
  log.set_log_file("big");
  log.reopen_log_file();
  for (int i=0; i<many; i++) {
    int l = 10;
    if (subs.should_gather(1, l)) {
      MutableEntry e(l, 1);
      e.get_ostream() << "this is a long string asdf asdf asdf asdf asdf asdf asd fasd fasdf ";
      log.submit_entry(std::move(e));
    }
  }
  log.flush();
  log.stop();
}

// Message sized to exceed the entry's inline buffer (sizeof(e) * 2), forcing
// the stream's spill-over path.
TEST(Log, ManyGatherLogStackSpillover)
{
  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 10);
  Log log(&subs);
  log.start();
  log.set_log_file("big");
  log.reopen_log_file();
  for (int i=0; i<many; i++) {
    int l = 10;
    if (subs.should_gather(1, l)) {
      MutableEntry e(l, 1);
      auto& s = e.get_ostream();
      s << "foo";
      s << std::string(sizeof(e) * 2, '-');
      log.submit_entry(std::move(e));
    }
  }
  log.flush();
  log.stop();
}

// Entries are gathered but carry no message text.
TEST(Log, ManyGather)
{
  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 1);
  Log log(&subs);
  log.start();
  log.set_log_file("big");
  log.reopen_log_file();
  for (int i=0; i<many; i++) {
    int l = 10;
    if (subs.should_gather(1, l))
      log.submit_entry(MutableEntry(l, 1));
  }
  log.flush();
  log.stop();
}
/* Child-process read loop for the stderr pipe tests: drain the pipe and,
 * when verify is set, check that every chunk read ends on a newline
 * boundary (i.e. no torn writes from the logger).  Exit codes report the
 * outcome to the waiting parent:
 *   0 - EOF reached cleanly         2 - chunk contained no newline at all
 *   1 - read(2) failed              3 - chunk did not end on a newline
 */
static void readpipe(int fd, int verify)
{
  while (1) {
    /* Use larger buffer on receiver as Linux will allow pipes buffers to
     * exceed PIPE_BUF. We can't avoid tearing due to small read buffers from
     * the Ceph side.
     */
    char buf[65536] = "";
    ssize_t rc = read(fd, buf, (sizeof buf) - 1);
    if (rc == 0) {
      _exit(0);
    } else if (rc == -1) {
      _exit(1);
    } else if (rc > 0) {
      if (verify) {
        char* p = strrchr(buf, '\n');
        /* verify no torn writes */
        if (p == NULL) {
          _exit(2);
        } else if (p[1] != '\0') {
          /* best-effort dump of the offending chunk; a failed write to
           * stderr cannot be reported anywhere, so ignore the result */
          ssize_t ignored = write(2, buf, strlen(buf));
          (void)ignored;
          _exit(3);
        }
      }
    } else _exit(100);
    usleep(500);
  }
}
// Fork a child that reads the pipe and verifies every chunk ends on a
// newline: lines up to PIPE_BUF (minus prefix headroom) must be written
// atomically by _log_stderr().
TEST(Log, StderrPipeAtomic)
{
  int pfd[2] = {-1, -1};
  int rc = pipe(pfd);
  ASSERT_EQ(rc, 0);
  pid_t pid = fork();
  if (pid == 0) {
    close(pfd[1]);
    readpipe(pfd[0], 1);
  } else if (pid == (pid_t)-1) {
    ASSERT_EQ(0, 1);
  }
  close(pfd[0]);

  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 10);
  Log log(&subs);
  log.start();
  log.set_log_file("");
  log.reopen_log_file();
  log.set_stderr_fd(pfd[1]);
  log.set_stderr_level(1, 20);
  /* -128 for prefix space */
  for (int i = 0; i < PIPE_BUF-128; i++) {
    MutableEntry e(1, 1);
    auto& s = e.get_ostream();
    for (int j = 0; j < i; j++) {
      char c = 'a';
      c += (j % 26);
      s << c;
    }
    log.submit_entry(std::move(e));
  }
  log.flush();
  log.stop();
  close(pfd[1]);
  int status;
  pid_t waited = waitpid(pid, &status, 0);
  ASSERT_EQ(pid, waited);
  ASSERT_NE(WIFEXITED(status), 0);
  ASSERT_EQ(WEXITSTATUS(status), 0);
}

// Same setup with lines larger than PIPE_BUF: tearing is unavoidable, so the
// child only drains the pipe without verification.
TEST(Log, StderrPipeBig)
{
  int pfd[2] = {-1, -1};
  int rc = pipe(pfd);
  ASSERT_EQ(rc, 0);
  pid_t pid = fork();
  if (pid == 0) {
    /* no verification as some reads will be torn due to size > PIPE_BUF */
    close(pfd[1]);
    readpipe(pfd[0], 0);
  } else if (pid == (pid_t)-1) {
    ASSERT_EQ(0, 1);
  }
  close(pfd[0]);

  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 10);
  Log log(&subs);
  log.start();
  log.set_log_file("");
  log.reopen_log_file();
  log.set_stderr_fd(pfd[1]);
  log.set_stderr_level(1, 20);
  /* -128 for prefix space */
  for (int i = 0; i < PIPE_BUF*2; i++) {
    MutableEntry e(1, 1);
    auto& s = e.get_ostream();
    for (int j = 0; j < i; j++) {
      char c = 'a';
      c += (j % 26);
      s << c;
    }
    log.submit_entry(std::move(e));
  }
  log.flush();
  log.stop();
  close(pfd[1]);
  int status;
  pid_t waited = waitpid(pid, &status, 0);
  ASSERT_EQ(pid, waited);
  ASSERT_NE(WIFEXITED(status), 0);
  ASSERT_EQ(WEXITSTATUS(status), 0);
}
// Helper for the InternalSegv death test: arms Log's fault injection so the
// next submit_entry() dereferences invalid memory and the process segfaults.
void do_segv()
{
  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 1);
  Log log(&subs);
  log.start();
  log.set_log_file("big");
  log.reopen_log_file();
  // arm the injected fault
  log.inject_segv();
  MutableEntry e(10, 1);
  {
    // PrCtl presumably clears the dumpable flag to suppress a core dump
    // while we crash on purpose -- TODO confirm PrCtl semantics
    PrCtl unset_dumpable;
    log.submit_entry(std::move(e)); // this should segv
  }
  log.flush();
  log.stop();
}
// Death test: do_segv() must terminate the process (any diagnostic output).
TEST(Log, InternalSegv)
{
  ASSERT_DEATH(do_segv(), ".*");
}
// Exercise submission of a single very large (10 MB) log entry.
TEST(Log, LargeLog)
{
  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 10);
  Log log(&subs);
  log.start();
  log.set_log_file("big");
  log.reopen_log_file();
  const int level = 10;
  {
    // one entry carrying a 10,000,000-byte payload of 'a'
    MutableEntry entry(level, 1);
    entry.get_ostream() << std::string(10000000, 'a');
    log.submit_entry(std::move(entry));
  }
  log.flush();
  log.stop();
}
// Build one huge log entry by streaming a million small (10-byte) chunks
// into the same entry, then submit it.
TEST(Log, LargeFromSmallLog)
{
  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 10);
  Log log(&subs);
  log.start();
  log.set_log_file("big");
  log.reopen_log_file();
  const int level = 10;
  {
    MutableEntry entry(level, 1);
    auto& os = entry.get_ostream();
    const std::string chunk(10, 'a');
    for (int i = 0; i < 1000000; ++i) {
      os << chunk;
    }
    log.submit_entry(std::move(entry));
  }
  log.flush();
  log.stop();
}
// Make sure nothing bad happens when we switch
TEST(Log, TimeSwitch)
{
  SubsystemMap subs;
  subs.set_log_level(1, 20);
  subs.set_gather_level(1, 10);
  Log log(&subs);
  log.start();
  log.set_log_file("time_switch_log");
  log.reopen_log_file();
  int l = 10;
  bool coarse = true;
  for (auto i = 0U; i < 300; ++i) {
    MutableEntry e(l, 1);
    e.get_ostream() << "SQUID THEFT! PUNISHABLE BY DEATH!";
    log.submit_entry(std::move(e));
    // NOTE(review): `i % 50` is true for 49 of every 50 iterations, so the
    // clock is toggled almost every pass rather than every 50th; presumably
    // `i % 50 == 0` was intended -- either way the test only checks that
    // switching does not misbehave. TODO confirm intent.
    if (i % 50)
      log.set_coarse_timestamps(coarse = !coarse);
  }
  log.flush();
  log.stop();
}
// Check the fractional-seconds width of formatted timestamps: 8 digits after
// the last '.' for the coarse clock, 11 for the refined clock.
TEST(Log, TimeFormat)
{
  static constexpr auto buflen = 128u;
  char buf[buflen];
  ceph::logging::log_clock clock;
  {
    clock.coarsen();
    auto t = clock.now();
    ceph::logging::append_time(t, buf, buflen);
    auto c = std::strrchr(buf, '.');
    ASSERT_NE(c, nullptr);
    ASSERT_EQ(8u, strlen(c + 1));
  }
  {
    clock.refine();
    auto t = clock.now();
    ceph::logging::append_time(t, buf, buflen);
    auto c = std::strrchr(buf, '.');
    ASSERT_NE(c, nullptr);
    ASSERT_EQ(11u, std::strlen(c + 1));
  }
}
#define dout_subsys ceph_subsys_context
// Primary template: one node of a compile-time-generated binary tree of
// logging call sites (see do_log::log below).
template <int depth, int x> struct do_log
{
  void log(CephContext* cct);
};
// Specialization terminating the recursion at depth 12.
template <int x> struct do_log<12, x>
{
  void log(CephContext* cct);
};
// Log at this node, then randomly descend into one of two children
// (x*2 or x*2+1) at depth+1, instantiating many distinct call sites.
template<int depth, int x> void do_log<depth,x>::log(CephContext* cct)
{
  ldout(cct, 20) << "Log depth=" << depth << " x=" << x << dendl;
  if (rand() % 2) {
    do_log<depth+1, x*2> log;
    log.log(cct);
  } else {
    do_log<depth+1, x*2+1> log;
    log.log(cct);
  }
}
// Helper that itself logs while being evaluated inside a log statement,
// exercising re-entrant use of ldout from within a dout expression.
std::string recursion(CephContext* cct)
{
  ldout(cct, 20) << "Preparing recursion string" << dendl;
  return "here-recursion";
}
// Leaf of the do_log tree; occasionally (1 in 16) logs via recursion() to
// exercise nested logging.
template<int x> void do_log<12, x>::log(CephContext* cct)
{
  if ((rand() % 16) == 0) {
    ldout(cct, 20) << "End " << recursion(cct) << "x=" << x << dendl;
  } else {
    ldout(cct, 20) << "End x=" << x << dendl;
  }
}
// Throughput with gathering enabled (gather level 30 > message level 20),
// so every entry is collected even though nothing is emitted (log level 0).
TEST(Log, Speed_gather)
{
  do_log<0,0> start;
  g_ceph_context->_conf->subsys.set_gather_level(ceph_subsys_context, 30);
  g_ceph_context->_conf->subsys.set_log_level(ceph_subsys_context, 0);
  for (int i=0; i<100000;i++) {
    ldout(g_ceph_context, 20) << "Iteration " << i << dendl;
    start.log(g_ceph_context);
  }
}
// Same workload with gathering disabled (gather level 0), measuring the
// cost of the fast reject path in ldout.
TEST(Log, Speed_nogather)
{
  do_log<0,0> start;
  g_ceph_context->_conf->subsys.set_gather_level(ceph_subsys_context, 0);
  g_ceph_context->_conf->subsys.set_log_level(ceph_subsys_context, 0);
  for (int i=0; i<100000;i++) {
    ldout(g_ceph_context, 20) << "Iteration " << i << dendl;
    start.log(g_ceph_context);
  }
}
// Stream a null std::streambuf* into an entry (which puts the ostream into a
// failed state) and verify the logger still recovers and writes subsequent
// content: the resulting file must exceed 2000 bytes.
TEST(Log, GarbleRecovery)
{
  static const char* test_file="log_for_moment";
  // temporarily swap our Log into the global context
  Log* saved = g_ceph_context->_log;
  Log log(&g_ceph_context->_conf->subsys);
  log.start();
  unlink(test_file);
  log.set_log_file(test_file);
  log.reopen_log_file();
  g_ceph_context->_log = &log;
  std::string long_message(1000,'c');
  ldout(g_ceph_context, 0) << long_message << dendl;
  // streaming a null streambuf* garbles/fails this entry's stream
  ldout(g_ceph_context, 0) << "Prologue" << (std::streambuf*)nullptr << long_message << dendl;
  ldout(g_ceph_context, 0) << "Epitaph" << long_message << dendl;
  g_ceph_context->_log = saved;
  log.flush();
  log.stop();
  struct stat file_status;
  ASSERT_EQ(stat(test_file, &file_status), 0);
  // at least the first and last long messages must have made it to disk
  ASSERT_GT(file_status.st_size, 2000);
}
// Test entry point: bring up a minimal CephContext, then run all gtest cases.
int main(int argc, char **argv)
{
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
			 CODE_ENVIRONMENT_UTILITY,
			 CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 10,860 | 20.506931 | 95 | cc |
null | ceph-main/src/mds/Anchor.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "mds/Anchor.h"
#include "common/Formatter.h"
// Serialize to the versioned wire format (current v2, compat v1).
// v2 added `frags`; field order must match decode() exactly.
// Note: omap_idx is in-memory-only state and is not encoded.
void Anchor::encode(bufferlist &bl) const
{
  ENCODE_START(2, 1, bl);
  encode(ino, bl);
  encode(dirino, bl);
  encode(d_name, bl);
  encode(d_type, bl);
  encode(frags, bl);
  ENCODE_FINISH(bl);
}
// Deserialize, tolerating v1 payloads (which lack `frags`).
void Anchor::decode(bufferlist::const_iterator &bl)
{
  DECODE_START(2, bl);
  decode(ino, bl);
  decode(dirino, bl);
  decode(d_name, bl);
  decode(d_type, bl);
  // frags only present from v2 on
  if (struct_v >= 2)
    decode(frags, bl);
  DECODE_FINISH(bl);
}
// Emit the core identifying fields to a Formatter (e.g. for admin-socket
// output). `frags` and `omap_idx` are not included in the dump.
void Anchor::dump(Formatter *f) const
{
  f->dump_unsigned("ino", ino);
  f->dump_unsigned("dirino", dirino);
  f->dump_string("d_name", d_name);
  f->dump_unsigned("d_type", d_type);
}
// Produce test instances for encode/decode round-trip checks: one
// default-constructed Anchor and one with every field populated.
void Anchor::generate_test_instances(std::list<Anchor*>& ls)
{
  ls.push_back(new Anchor);
  ls.push_back(new Anchor(1, 2, "hello", DT_DIR));
}
// Compact debug form: a(<ino> <dirino>/'<name>' <d_type>).
std::ostream& operator<<(std::ostream& out, const Anchor &a)
{
  return out << "a(" << a.ino << " " << a.dirino << "/'" << a.d_name << "' " << a.d_type << ")";
}
| 1,464 | 21.890625 | 96 | cc |
null | ceph-main/src/mds/Anchor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ANCHOR_H
#define CEPH_ANCHOR_H
#include <string>
#include "include/types.h"
#include "mdstypes.h"
#include "include/buffer.h"
/*
* Anchor represents primary linkage of an inode. When adding inode to an
* anchor table, MDS ensures that the table also contains inode's ancestor
* inodes. MDS can get inode's path by looking up anchor table recursively.
*/
class Anchor {
public:
  Anchor() {}
  Anchor(inodeno_t i, inodeno_t di, std::string_view str, __u8 tp) :
    ino(i), dirino(di), d_name(str), d_type(tp) {}

  void encode(bufferlist &bl) const;
  void decode(bufferlist::const_iterator &bl);
  void dump(Formatter *f) const;
  static void generate_test_instances(std::list<Anchor*>& ls);

  // Equality covers the encoded fields (including frags); omap_idx is
  // transient bookkeeping and is deliberately excluded.
  bool operator==(const Anchor &r) const {
    return ino == r.ino && dirino == r.dirino &&
	   d_name == r.d_name && d_type == r.d_type &&
	   frags == r.frags;
  }

  inodeno_t ino; // anchored ino
  inodeno_t dirino;            // parent directory's ino
  std::string d_name;          // dentry name within dirino
  __u8 d_type = 0;             // dentry type (DT_* from dirent)
  std::set<frag_t> frags;      // dir fragments (v2 of the encoding)

  int omap_idx = -1; // stored in which omap object
};
WRITE_CLASS_ENCODER(Anchor)
// Anchor loaded back from the on-disk table, with a hint of which MDS rank
// was authoritative for it.
class RecoveredAnchor : public Anchor {
public:
  RecoveredAnchor() {}

  mds_rank_t auth = MDS_RANK_NONE; // auth hint
};
// In-memory anchor with a reference count of anchored children.
class OpenedAnchor : public Anchor {
public:
  OpenedAnchor(inodeno_t i, inodeno_t di, std::string_view str, __u8 tp, int nr) :
    Anchor(i, di, str, tp),
    nref(nr)
  {}

  mutable int nref = 0; // how many children
};
std::ostream& operator<<(std::ostream& out, const Anchor &a);
#endif
| 1,904 | 24.743243 | 82 | h |
null | ceph-main/src/mds/BatchOp.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/debug.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#include "BatchOp.h"
// Forward all batched requests to another MDS rank; logs the batch (via
// print() into the open dout stream) before delegating to _forward().
void BatchOp::forward(mds_rank_t target)
{
  dout(20) << __func__ << ": forwarding batch ops to " << target << ": ";
  print(*_dout);
  *_dout << dendl;
  _forward(target);
}
// Reply to all batched requests with result code r; logs the batch before
// delegating to _respond().
void BatchOp::respond(int r)
{
  dout(20) << __func__ << ": responding to batch ops with result=" << r << ": ";
  print(*_dout);
  *_dout << dendl;
  _respond(r);
}
| 861 | 22.944444 | 80 | cc |
null | ceph-main/src/mds/BatchOp.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef MDS_BATCHOP_H
#define MDS_BATCHOP_H
#include "common/ref.h"
#include "mdstypes.h"
/**
 * Abstract batch of MDS requests that are forwarded or answered as a unit.
 * Concrete subclasses implement storage of the batched requests plus the
 * _forward/_respond hooks; forward()/respond() add logging around them.
 */
class BatchOp {
public:
  virtual ~BatchOp() {}

  // Add another request to this batch.
  virtual void add_request(const ceph::ref_t<class MDRequestImpl>& mdr) = 0;
  // Pick a new head request for the batch.
  virtual ceph::ref_t<class MDRequestImpl> find_new_head() = 0;

  // Describe the batch for logging.
  virtual void print(std::ostream&) = 0;

  void forward(mds_rank_t target);
  void respond(int r);

protected:
  virtual void _forward(mds_rank_t) = 0;
  // Fix: respond(int r) forwards a *result code*, not an MDS rank, so the
  // hook takes int (signature-compatible: mds_rank_t is a typedef of int32_t).
  virtual void _respond(int) = 0;
};
#endif
| 889 | 20.707317 | 76 | h |
null | ceph-main/src/mds/Beacon.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/dout.h"
#include "common/likely.h"
#include "common/HeartbeatMap.h"
#include "include/stringify.h"
#include "include/util.h"
#include "mon/MonClient.h"
#include "mds/MDLog.h"
#include "mds/MDSRank.h"
#include "mds/MDSMap.h"
#include "mds/Locker.h"
#include "Beacon.h"
#include <chrono>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "mds.beacon." << name << ' '
using std::map;
using std::string;
using namespace std::chrono_literals;
// Construct with the beacon interval taken from mds_beacon_interval and the
// full MDS compat set; the sender thread is not started until init().
Beacon::Beacon(CephContext *cct, MonClient *monc, std::string_view name)
  :
    Dispatcher(cct),
    beacon_interval(g_conf()->mds_beacon_interval),
    monc(monc),
    name(name),
    compat(MDSMap::get_compat_set_all())
{
}
// Ensure the sender thread is stopped and joined before destruction.
Beacon::~Beacon()
{
  shutdown();
}
// Stop the sender thread (idempotent). The mutex is released before join()
// so the sender loop can reacquire it, observe `finished`, and exit.
void Beacon::shutdown()
{
  std::unique_lock<std::mutex> lock(mutex);
  if (!finished) {
    finished = true;
    lock.unlock();
    if (sender.joinable())
      sender.join();
  }
}
// Record the current MDSMap epoch and start the background sender thread.
// The thread sends a beacon roughly every beacon_interval seconds (retrying
// after 500ms when _send() refuses due to an unhealthy heartbeat map), and
// treats a condvar timeout immediately after a send as a missed beacon ack.
void Beacon::init(const MDSMap &mdsmap)
{
  std::unique_lock lock(mutex);

  _notify_mdsmap(mdsmap);

  sender = std::thread([this]() {
    std::unique_lock<std::mutex> lock(mutex);
    bool sent;
    while (!finished) {
      auto now = clock::now();
      auto since = std::chrono::duration<double>(now-last_send).count();
      auto interval = beacon_interval;
      sent = false;
      // send slightly early (at 90% of the interval) to absorb scheduling jitter
      if (since >= interval*.90) {
        if (!_send()) {
          interval = 0.5; /* 500ms */
        }
        else {
          sent = true;
        }
      } else {
        interval -= since;
      }
      dout(20) << "sender thread waiting interval " << interval << "s" << dendl;
      // woken early by handle_mds_beacon (ack) or shutdown; timeout right
      // after a send means the monitors never acked it
      if (cvar.wait_for(lock, interval*1s) == std::cv_status::timeout) {
        if (sent) {
          //missed beacon ack because we timedout after a beacon send
          dout(0) << "missed beacon ack from the monitors" << dendl;
          missed_beacon_ack_dump = true;
        }
      }
    }
  });
}
// Beacon replies are the only message type we fast-dispatch.
bool Beacon::ms_can_fast_dispatch2(const cref_t<Message>& m) const
{
  return m->get_type() == MSG_MDS_BEACON;
}
// Fast path simply reuses the normal dispatch; it must consume the message.
void Beacon::ms_fast_dispatch2(const ref_t<Message>& m)
{
  bool handled = ms_dispatch2(m);
  ceph_assert(handled);
}
bool Beacon::ms_dispatch2(const ref_t<Message>& m)
{
if (m->get_type() == MSG_MDS_BEACON) {
if (m->get_connection()->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
handle_mds_beacon(ref_cast<MMDSBeacon>(m));
}
return true;
}
return false;
}
/**
* Update lagginess state based on response from remote MDSMonitor
*/
// Process a beacon ack from the monitor: record the ack time, clear the
// laggy flag if the round trip beat mds_beacon_grace, drop all acked seq
// entries, and wake send_and_wait()/the sender thread.
void Beacon::handle_mds_beacon(const cref_t<MMDSBeacon> &m)
{
  std::unique_lock lock(mutex);

  version_t seq = m->get_seq();

  // update lab
  auto it = seq_stamp.find(seq);
  if (it != seq_stamp.end()) {
    auto now = clock::now();

    last_acked_stamp = it->second;
    auto rtt = std::chrono::duration<double>(now - last_acked_stamp).count();

    dout(5) << "received beacon reply " << ceph_mds_state_name(m->get_state()) << " seq " << m->get_seq() << " rtt " << rtt << dendl;

    if (laggy && rtt < g_conf()->mds_beacon_grace) {
      dout(0) << " MDS is no longer laggy" << dendl;
      laggy = false;
      last_laggy = now;
    }

    // clean up seq_stamp map: erase everything up to and including this seq
    seq_stamp.erase(seq_stamp.begin(), ++it);

    // Wake a waiter up if present
    cvar.notify_all();
  } else {
    dout(1) << "discarding unexpected beacon reply " << ceph_mds_state_name(m->get_state())
	    << " seq " << m->get_seq() << " dne" << dendl;
  }
}
// Send a beacon immediately (public, locking wrapper around _send()).
void Beacon::send()
{
  std::unique_lock lock(mutex);
  _send();
}
// Send a beacon and block until that beacon's seq is acked or ~`duration`
// seconds (95% budget) elapse; a timeout is recorded as a missed ack.
void Beacon::send_and_wait(const double duration)
{
  std::unique_lock lock(mutex);
  _send();
  auto awaiting_seq = last_seq;
  dout(20) << __func__ << ": awaiting " << awaiting_seq
           << " for up to " << duration << "s" << dendl;

  auto start = clock::now();
  while (!seq_stamp.empty() && seq_stamp.begin()->first <= awaiting_seq) {
    auto now = clock::now();
    auto s = duration*.95-std::chrono::duration<double>(now-start).count();
    if (s < 0) {
      //missed beacon ACKs
      missed_beacon_ack_dump = true;
      break;
    }
    cvar.wait_for(lock, s*1s);
  }
}
/**
* Call periodically, or when you have updated the desired state
*/
// Build and send one MMDSBeacon to the monitors. Returns false (and sends
// nothing) when the internal heartbeat map is unhealthy, so the monitors
// will see us as laggy. Caller must hold `mutex`.
bool Beacon::_send()
{
  auto now = clock::now();
  auto since = std::chrono::duration<double>(now-last_acked_stamp).count();

  if (!cct->get_heartbeat_map()->is_healthy()) {
    /* If anything isn't progressing, let avoid sending a beacon so that
     * the MDS will consider us laggy */
    dout(0) << "Skipping beacon heartbeat to monitors (last acked " << since << "s ago); MDS internal heartbeat is not healthy!" << dendl;
    //missed internal heartbeat
    missed_internal_heartbeat_dump = true;
    return false;
  }

  ++last_seq;
  dout(5) << "Sending beacon " << ceph_mds_state_name(want_state) << " seq " << last_seq << dendl;

  // remember when this seq was sent so handle_mds_beacon can compute the rtt
  seq_stamp[last_seq] = now;

  ceph_assert(want_state != MDSMap::STATE_NULL);
  
  auto beacon = make_message<MMDSBeacon>(
      monc->get_fsid(), mds_gid_t(monc->get_global_id()),
      name,
      epoch,
      want_state,
      last_seq,
      CEPH_FEATURES_SUPPORTED_DEFAULT);
  beacon->set_health(health);
  beacon->set_compat(compat);
  beacon->set_fs(g_conf().get_val<std::string>("mds_join_fs"));
  // piggyback the sys info on beacon msg
  if (want_state == MDSMap::STATE_BOOT) {
    map<string, string> sys_info;
    collect_sys_info(&sys_info, cct);
    sys_info["addr"] = stringify(monc->get_myaddrs());
    beacon->set_sys_info(sys_info);
  }
  monc->send_mon_message(beacon.detach());
  last_send = now;
  return true;
}
/**
* Call this when there is a new MDSMap available
*/
// Public, locking wrapper: record the epoch of a newly received MDSMap.
void Beacon::notify_mdsmap(const MDSMap &mdsmap)
{
  std::unique_lock lock(mutex);

  _notify_mdsmap(mdsmap);
}
// Record the epoch of the given map; epochs must never move backwards.
// Caller must hold `mutex`.
void Beacon::_notify_mdsmap(const MDSMap &mdsmap)
{
  const auto e = mdsmap.get_epoch();
  ceph_assert(e >= epoch);
  if (e >= epoch)
    epoch = e;
}
// True when no beacon ack has arrived within mds_beacon_grace seconds;
// logs (once) and latches the laggy flag on the transition.
bool Beacon::is_laggy()
{
  std::unique_lock lock(mutex);

  auto now = clock::now();
  auto since = std::chrono::duration<double>(now-last_acked_stamp).count();
  if (since > g_conf()->mds_beacon_grace) {
    if (!laggy) {
      dout(1) << "MDS connection to Monitors appears to be laggy; " << since
	      << "s since last acked beacon" << dendl;
    }
    laggy = true;
    return true;
  }
  return false;
}
// Update the state we advertise in beacons; the map epoch is refreshed under
// the same lock so a beacon never pairs a new state with a stale epoch.
void Beacon::set_want_state(const MDSMap &mdsmap, MDSMap::DaemonState newstate)
{
  std::unique_lock lock(mutex);

  // Update mdsmap epoch atomically with updating want_state, so that when
  // we send a beacon with the new want state it has the latest epoch, and
  // once we have updated to the latest epoch, we are not sending out
  // a stale want_state (i.e. one from before making it through MDSMap
  // handling)
  _notify_mdsmap(mdsmap);

  if (want_state != newstate) {
    dout(5) << __func__ << ": "
      << ceph_mds_state_name(want_state) << " -> "
      << ceph_mds_state_name(newstate) << dendl;
    want_state = newstate;
  }
}
/**
* We are 'shown' an MDS briefly in order to update
* some health metrics that we will send in the next
* beacon.
*/
// Rebuild the MDSHealth metric list that rides along with every beacon, by
// inspecting the MDS rank's damage table, log trimming backlog, client cap
// behaviour, slow requests/IOs, read-only state, cache size and laggy
// clients. Caller must hold mds->mds_lock.
void Beacon::notify_health(MDSRank const *mds)
{
  std::unique_lock lock(mutex);
  if (!mds) {
    // No MDS rank held
    return;
  }

  // I'm going to touch this MDS, so it must be locked
  ceph_assert(ceph_mutex_is_locked_by_me(mds->mds_lock));

  health.metrics.clear();

  if (unlikely(g_conf().get_val<bool>("mds_inject_health_dummy"))) {
    MDSHealthMetric m(MDS_HEALTH_DUMMY, HEALTH_ERR, std::string("dummy"));
    health.metrics.push_back(m);
  }

  // Detect presence of entries in DamageTable
  if (!mds->damage_table.empty()) {
    MDSHealthMetric m(MDS_HEALTH_DAMAGE, HEALTH_ERR, std::string(
          "Metadata damage detected"));
    health.metrics.push_back(m);
  }

  // Detect MDS_HEALTH_TRIM condition
  // Indicates MDS is not trimming promptly
  {
    if (mds->mdlog->get_num_segments() > (size_t)(g_conf()->mds_log_max_segments * g_conf().get_val<double>("mds_log_warn_factor"))) {
      CachedStackStringStream css;
      *css << "Behind on trimming (" << mds->mdlog->get_num_segments()
        << "/" << g_conf()->mds_log_max_segments << ")";

      MDSHealthMetric m(MDS_HEALTH_TRIM, HEALTH_WARN, css->strv());
      m.metadata["num_segments"] = stringify(mds->mdlog->get_num_segments());
      m.metadata["max_segments"] = stringify(g_conf()->mds_log_max_segments);
      health.metrics.push_back(m);
    }
  }

  // Detect clients failing to respond to modifications to capabilities in
  // CLIENT_CAPS messages.
  {
    auto&& late_clients = mds->locker->get_late_revoking_clients(mds->mdsmap->get_session_timeout());
    std::vector<MDSHealthMetric> late_cap_metrics;

    for (const auto& client : late_clients) {
      // client_t is equivalent to session.info.inst.name.num
      // Construct an entity_name_t to lookup into SessionMap
      entity_name_t ename(CEPH_ENTITY_TYPE_CLIENT, client.v);
      Session const *s = mds->sessionmap.get_session(ename);
      if (s == NULL) {
        // Shouldn't happen, but not worth crashing if it does as this is
        // just health-reporting code.
        derr << "Client ID without session: " << client.v << dendl;
        continue;
      }

      CachedStackStringStream css;
      *css << "Client " << s->get_human_name() << " failing to respond to capability release";
      MDSHealthMetric m(MDS_HEALTH_CLIENT_LATE_RELEASE, HEALTH_WARN, css->strv());
      m.metadata["client_id"] = stringify(client.v);
      late_cap_metrics.emplace_back(std::move(m));
    }

    // summarize into one metric when too many clients are affected
    if (late_cap_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
      auto&& m = late_cap_metrics;
      health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m));
    } else {
      CachedStackStringStream css;
      *css << "Many clients (" << late_cap_metrics.size()
          << ") failing to respond to capability release";
      MDSHealthMetric m(MDS_HEALTH_CLIENT_LATE_RELEASE_MANY, HEALTH_WARN, css->strv());
      m.metadata["client_count"] = stringify(late_cap_metrics.size());
      health.metrics.push_back(std::move(m));
    }
  }

  // Detect clients failing to generate cap releases from CEPH_SESSION_RECALL_STATE
  // messages. May be due to buggy client or resource-hogging application.
  //
  // Detect clients failing to advance their old_client_tid
  {
    std::set<Session*> sessions;
    mds->sessionmap.get_client_session_set(sessions);

    const auto min_caps_working_set = g_conf().get_val<uint64_t>("mds_min_caps_working_set");
    const auto recall_warning_threshold = g_conf().get_val<Option::size_t>("mds_recall_warning_threshold");
    const auto max_completed_requests = g_conf()->mds_max_completed_requests;
    const auto max_completed_flushes = g_conf()->mds_max_completed_flushes;
    std::vector<MDSHealthMetric> late_recall_metrics;
    std::vector<MDSHealthMetric> large_completed_requests_metrics;
    for (auto& session : sessions) {
      const uint64_t num_caps = session->get_num_caps();
      const uint64_t recall_caps = session->get_recall_caps();
      if (recall_caps > recall_warning_threshold && num_caps > min_caps_working_set) {
        dout(2) << "Session " << *session <<
             " is not releasing caps fast enough. Recalled caps at " << recall_caps
          << " > " << recall_warning_threshold << " (mds_recall_warning_threshold)." << dendl;
        CachedStackStringStream css;
        *css << "Client " << session->get_human_name() << " failing to respond to cache pressure";
        MDSHealthMetric m(MDS_HEALTH_CLIENT_RECALL, HEALTH_WARN, css->strv());
        m.metadata["client_id"] = stringify(session->get_client());
        late_recall_metrics.emplace_back(std::move(m));
      }
      if ((session->get_num_trim_requests_warnings() > 0 &&
	   session->get_num_completed_requests() >= max_completed_requests) ||
	  (session->get_num_trim_flushes_warnings() > 0 &&
	   session->get_num_completed_flushes() >= max_completed_flushes)) {
	CachedStackStringStream css;
	*css << "Client " << session->get_human_name() << " failing to advance its oldest client/flush tid. ";
	MDSHealthMetric m(MDS_HEALTH_CLIENT_OLDEST_TID, HEALTH_WARN, css->strv());
	m.metadata["client_id"] = stringify(session->get_client());
	large_completed_requests_metrics.emplace_back(std::move(m));
      }
    }

    if (late_recall_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
      auto&& m = late_recall_metrics;
      health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m));
    } else {
      CachedStackStringStream css;
      *css << "Many clients (" << late_recall_metrics.size()
          << ") failing to respond to cache pressure";
      MDSHealthMetric m(MDS_HEALTH_CLIENT_RECALL_MANY, HEALTH_WARN, css->strv());
      m.metadata["client_count"] = stringify(late_recall_metrics.size());
      health.metrics.push_back(m);
      late_recall_metrics.clear();
    }

    if (large_completed_requests_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
      auto&& m = large_completed_requests_metrics;
      health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m));
    } else {
      CachedStackStringStream css;
      *css << "Many clients (" << large_completed_requests_metrics.size()
	<< ") failing to advance their oldest client/flush tid";
      MDSHealthMetric m(MDS_HEALTH_CLIENT_OLDEST_TID_MANY, HEALTH_WARN, css->strv());
      m.metadata["client_count"] = stringify(large_completed_requests_metrics.size());
      health.metrics.push_back(m);
      large_completed_requests_metrics.clear();
    }
  }

  // Detect MDS_HEALTH_SLOW_REQUEST condition
  {
    int slow = mds->get_mds_slow_req_count();
    if (slow) {
      dout(20) << slow << " slow request found" << dendl;
      CachedStackStringStream css;
      *css << slow << " slow requests are blocked > " << g_conf()->mds_op_complaint_time << " secs";

      MDSHealthMetric m(MDS_HEALTH_SLOW_REQUEST, HEALTH_WARN, css->strv());
      health.metrics.push_back(m);
    }
  }

  // Detect slow metadata IOs (objecter ops outstanding beyond the complaint time)
  {
    auto complaint_time = g_conf()->osd_op_complaint_time;
    auto now = clock::now();
    auto cutoff = now - ceph::make_timespan(complaint_time);

    std::string count;
    ceph::coarse_mono_time oldest;
    if (MDSIOContextBase::check_ios_in_flight(cutoff, count, oldest)) {
      dout(20) << count << " slow metadata IOs found" << dendl;

      auto oldest_secs = std::chrono::duration<double>(now - oldest).count();
      CachedStackStringStream css;
      *css << count << " slow metadata IOs are blocked > " << complaint_time
	   << " secs, oldest blocked for " << (int64_t)oldest_secs << " secs";

      MDSHealthMetric m(MDS_HEALTH_SLOW_METADATA_IO, HEALTH_WARN, css->strv());
      health.metrics.push_back(m);
    }
  }

  // Report a health warning if we are readonly
  if (mds->mdcache->is_readonly()) {
    MDSHealthMetric m(MDS_HEALTH_READ_ONLY, HEALTH_WARN,
                      "MDS in read-only mode");
    health.metrics.push_back(m);
  }

  // Report if we have significantly exceeded our cache size limit
  if (mds->mdcache->cache_overfull()) {
    CachedStackStringStream css;
    *css << "MDS cache is too large (" << bytes2str(mds->mdcache->cache_size())
        << "/" << bytes2str(mds->mdcache->cache_limit_memory()) << "); "
        << mds->mdcache->num_inodes_with_caps << " inodes in use by clients, "
        << mds->mdcache->get_num_strays() << " stray files";

    MDSHealthMetric m(MDS_HEALTH_CACHE_OVERSIZED, HEALTH_WARN, css->strv());
    health.metrics.push_back(m);
  }

  // Report laggy client(s) due to laggy OSDs
  {
    auto&& laggy_clients = mds->server->get_laggy_clients();
    if (!laggy_clients.empty()) {
      std::vector<MDSHealthMetric> laggy_clients_metrics;
      for (const auto& laggy_client: laggy_clients) {
	CachedStackStringStream css;
	*css << "Client " << laggy_client << " is laggy; not evicted"
	     << " because some OSD(s) is/are laggy";
	MDSHealthMetric m(MDS_HEALTH_CLIENTS_LAGGY, HEALTH_WARN, css->strv());
	laggy_clients_metrics.emplace_back(std::move(m));
      }
      auto&& m = laggy_clients_metrics;
      health.metrics.insert(std::end(health.metrics), std::cbegin(m),
			    std::cend(m));
    }
  }
}
// Thread-safe read of the state we are currently advertising to the monitors.
MDSMap::DaemonState Beacon::get_want_state() const
{
  std::lock_guard lock(mutex);
  return want_state;
}
| 16,836 | 31.131679 | 138 | cc |
null | ceph-main/src/mds/Beacon.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef BEACON_STATE_H
#define BEACON_STATE_H
#include <mutex>
#include <string_view>
#include <thread>
#include "include/types.h"
#include "include/Context.h"
#include "msg/Dispatcher.h"
#include "messages/MMDSBeacon.h"
class MonClient;
class MDSRank;
/**
* One of these per MDS. Handle beacon logic in this separate class so
* that a busy MDS holding its own lock does not hold up sending beacon
* messages to the mon and cause false lagginess.
*
* So that we can continue to operate while the MDS is holding its own lock,
* we keep copies of the data needed to generate beacon messages. The MDS is
* responsible for calling Beacon::notify_* when things change.
*/
class Beacon : public Dispatcher
{
public:
  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;

  // Flags latched by the sender thread / send paths when acks or internal
  // heartbeats go missing; read by the MDS for diagnostics.
  bool missed_beacon_ack_dump = false;
  bool missed_internal_heartbeat_dump = false;

  Beacon(CephContext *cct, MonClient *monc, std::string_view name);
  ~Beacon() override;

  // init() starts the background sender thread; shutdown() joins it.
  void init(const MDSMap &mdsmap);
  void shutdown();

  bool ms_can_fast_dispatch_any() const override { return true; }
  bool ms_can_fast_dispatch2(const cref_t<Message>& m) const override;
  void ms_fast_dispatch2(const ref_t<Message>& m) override;
  bool ms_dispatch2(const ref_t<Message> &m) override;
  void ms_handle_connect(Connection *c) override {}
  bool ms_handle_reset(Connection *c) override {return false;}
  void ms_handle_remote_reset(Connection *c) override {}
  bool ms_handle_refused(Connection *c) override {return false;}

  void notify_mdsmap(const MDSMap &mdsmap);
  void notify_health(const MDSRank *mds);

  void handle_mds_beacon(const cref_t<MMDSBeacon> &m);
  void send();

  void set_want_state(const MDSMap &mdsmap, MDSMap::DaemonState newstate);
  MDSMap::DaemonState get_want_state() const;

  /**
   * Send a beacon, and block until the ack is received from the mon
   * or `duration` seconds pass, whichever happens sooner.  Useful
   * for emitting a last message on shutdown.
   */
  void send_and_wait(const double duration);

  bool is_laggy();
  // Seconds since the laggy flag was last cleared.
  double last_cleared_laggy() const {
    std::unique_lock lock(mutex);
    return std::chrono::duration<double>(clock::now()-last_laggy).count();
  }

private:
  void _notify_mdsmap(const MDSMap &mdsmap);
  bool _send();

  // Guards all mutable state below; also used with `cvar` by the sender thread.
  mutable std::mutex mutex;
  std::thread sender;
  std::condition_variable cvar;
  time last_send = clock::zero();
  double beacon_interval = 5.0;
  bool finished = false;
  MonClient* monc;

  // Items we duplicate from the MDS to have access under our own lock
  std::string name;
  version_t epoch = 0;
  CompatSet compat;
  MDSMap::DaemonState want_state = MDSMap::STATE_BOOT;

  // Internal beacon state
  version_t last_seq = 0; // last seq sent to monitor
  std::map<version_t,time>  seq_stamp;    // seq # -> time sent
  time last_acked_stamp = clock::zero();  // last time we sent a beacon that got acked
  bool laggy = false;
  time last_laggy = clock::zero();

  // Health status to be copied into each beacon message
  MDSHealth health;
};
#endif // BEACON_STATE_H
| 3,476 | 28.717949 | 86 | h |
null | ceph-main/src/mds/CDentry.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "CDentry.h"
#include "CInode.h"
#include "CDir.h"
#include "SnapClient.h"
#include "MDSRank.h"
#include "MDCache.h"
#include "Locker.h"
#include "LogSegment.h"
#include "messages/MLock.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "mds." << dir->mdcache->mds->get_nodeid() << ".cache.den(" << dir->dirfrag() << " " << name << ") "
using namespace std;
// Debug-line prefix: wall-clock time plus mds rank, parent dir ino and name.
ostream& CDentry::print_db_line_prefix(ostream& out)
{
  return out << ceph_clock_now() << " mds." << dir->mdcache->mds->get_nodeid() << ".cache.den(" << dir->ino() << " " << name << ") ";
}
LockType CDentry::lock_type(CEPH_LOCK_DN);
LockType CDentry::versionlock_type(CEPH_LOCK_DVERSION);
// CDentry
// Full debug dump of a dentry: path, snap range, auth/replica info, linkage
// (null/remote), locks, versions, auth pins, linked ino, state flags, pins
// and alternate name.
ostream& operator<<(ostream& out, const CDentry& dn)
{
  filepath path;
  dn.make_path(path);
  
  out << "[dentry " << path;
  
  // `true ||` keeps the snapid range unconditionally printed
  if (true || dn.first != 0 || dn.last != CEPH_NOSNAP) {
    out << " [" << dn.first << ",";
    if (dn.last == CEPH_NOSNAP) 
      out << "head";
    else
      out << dn.last;
    out << ']';
  }

  if (dn.is_auth()) {
    out << " auth";
    if (dn.is_replicated()) 
      out << dn.get_replicas();
  } else {
    mds_authority_t a = dn.authority();
    out << " rep@" << a.first;
    if (a.second != CDIR_AUTH_UNKNOWN)
      out << "," << a.second;
    out << "." << dn.get_replica_nonce();
  }

  if (dn.get_linkage()->is_null()) out << " NULL";
  if (dn.get_linkage()->is_remote()) {
    out << " REMOTE(";
    out << dn.get_linkage()->get_remote_d_type_string();
    out << ")";
  }

  // only show locks that are in a non-default state
  if (!dn.lock.is_sync_and_unlocked())
    out << " " << dn.lock;
  if (!dn.versionlock.is_sync_and_unlocked())
    out << " " << dn.versionlock;

  if (dn.get_projected_version() != dn.get_version())
    out << " pv=" << dn.get_projected_version();
  out << " v=" << dn.get_version();

  if (dn.get_num_auth_pins()) {
    out << " ap=" << dn.get_num_auth_pins();
#ifdef MDS_AUTHPIN_SET
    dn.print_authpin_set(out);
#endif
  }

  {
    const CInode *inode = dn.get_linkage()->get_inode();
    out << " ino=";
     if (inode) {
       out << inode->ino();
     } else {
       out << "(nil)";
     }
  }

  out << " state=" << dn.get_state();
  if (dn.is_new()) out << "|new";
  if (dn.state_test(CDentry::STATE_BOTTOMLRU)) out << "|bottomlru";

  if (dn.state_test(CDentry::STATE_UNLINKING)) out << "|unlinking";
  if (dn.state_test(CDentry::STATE_REINTEGRATING)) out << "|reintegrating";

  if (dn.get_num_ref()) {
    out << " |";
    dn.print_pin_set(out);
  }

  if (dn.get_alternate_name().size()) {
    out << " altname=" << binstrprint(dn.get_alternate_name(), 16);
  }

  out << " " << &dn;
  out << "]";
  return out;
}
// Strict weak ordering on dentries: lexicographic over
// (parent dir ino, name, last snapid).
bool operator<(const CDentry& l, const CDentry& r)
{
  const auto lino = l.get_dir()->ino();
  const auto rino = r.get_dir()->ino();
  if (lino != rino)
    return lino < rino;
  if (l.get_name() != r.get_name())
    return l.get_name() < r.get_name();
  return l.last < r.last;
}
// Stream the full debug representation (see operator<< above).
void CDentry::print(ostream& out)
{
  out << *this;
}
/*
inodeno_t CDentry::get_ino()
{
if (get_inode())
return get_inode()->ino();
return inodeno_t();
}
*/
// A dentry's authority is simply that of its containing dirfrag.
mds_authority_t CDentry::authority() const
{
  return dir->authority();
}
// Queue a waiter context. Freeze/auth waits apply to the whole dirfrag, so
// those tags are redirected to the containing CDir.
void CDentry::add_waiter(uint64_t tag, MDSContext *c)
{
  // wait on the directory?
  if (tag & (WAIT_UNFREEZE|WAIT_SINGLEAUTH)) {
    dir->add_waiter(tag, c);
    return;
  }
  MDSCacheObject::add_waiter(tag, c);
}
// Reserve (project) a new dirty version from the containing dir, at least
// `min`; returns the projected version to be passed to mark_dirty() later.
version_t CDentry::pre_dirty(version_t min)
{
  projected_version = dir->pre_dirty(min);
  dout(10) << __func__ << " " << *this << dendl;
  return projected_version;
}
// Internal: flip the DIRTY state, pin, and register on the dirfrag's and
// log segment's dirty lists.  Idempotent for the state/pin part.
void CDentry::_mark_dirty(LogSegment *ls)
{
  // state+pin
  if (!state_test(STATE_DIRTY)) {
    state_set(STATE_DIRTY);
    get(PIN_DIRTY);
    dir->inc_num_dirty();
    dir->dirty_dentries.push_back(&item_dir_dirty);
    ceph_assert(ls);
  }
  if (ls)
    ls->dirty_dentries.push_back(&item_dirty);
}

// Mark dirty at projected version pv (previously reserved by pre_dirty()),
// and propagate dirtiness to the containing dirfrag.
void CDentry::mark_dirty(version_t pv, LogSegment *ls)
{
  dout(10) << __func__ << " " << *this << dendl;

  // i now live in this new dir version
  ceph_assert(pv <= projected_version);
  version = pv;
  _mark_dirty(ls);

  // mark dir too
  dir->mark_dirty(ls, pv);
}

// Undo mark_dirty: clear state, drop list memberships, release the pin.
void CDentry::mark_clean()
{
  dout(10) << __func__ << " " << *this << dendl;
  ceph_assert(is_dirty());

  // not always true for recalc_auth_bits during resolve finish
  //assert(dir->get_version() == 0 || version <= dir->get_version());  // hmm?

  state_clear(STATE_DIRTY|STATE_NEW);
  dir->dec_num_dirty();

  item_dir_dirty.remove_myself();
  item_dirty.remove_myself();

  put(PIN_DIRTY);
}

// Flag the dentry as newly created (never yet committed).
void CDentry::mark_new()
{
  dout(10) << __func__ << " " << *this << dendl;
  state_set(STATE_NEW);
}
// Gain authority over this dentry; moving between LRU lists is delegated
// to the dirfrag since list membership depends on auth-ness.
void CDentry::mark_auth()
{
  if (is_auth())
    return;
  state_set(STATE_AUTH);
  dir->adjust_dentry_lru(this);
}

// Drop authority over this dentry and re-home it on the proper LRU.
void CDentry::clear_auth()
{
  if (!is_auth())
    return;
  state_clear(STATE_AUTH);
  dir->adjust_dentry_lru(this);
}
// Build a human-readable path string for this dentry ("<dirpath>/<name>").
// Falls back to "???" for the directory part if the dentry is unlinked.
void CDentry::make_path_string(string& s, bool projected) const
{
  if (dir) {
    dir->inode->make_path_string(s, projected);
  } else {
    s = "???";
  }
  s += "/";
  s.append(name.data(), name.length());
}

// Build a filepath for this dentry by extending the parent dir's path.
void CDentry::make_path(filepath& fp, bool projected) const
{
  ceph_assert(dir);
  dir->inode->make_path(fp, projected);
  fp.push_dentry(get_name());
}
/*
 * we only add ourselves to remote_parents when the linkage is
 * active (no longer projected).  if the passed dnl is projected,
 * don't link in, and do that work later in pop_projected_linkage().
 */
void CDentry::link_remote(CDentry::linkage_t *dnl, CInode *in)
{
  ceph_assert(dnl->is_remote());
  ceph_assert(in->ino() == dnl->get_remote_ino());
  dnl->inode = in;

  // only the durable (non-projected) linkage registers as a remote parent
  if (dnl == &linkage)
    in->add_remote_parent(this);

  // check for reintegration
  dir->mdcache->eval_remote(this);
}
// Detach the cached inode pointer from a remote linkage (the remote_ino
// itself stays; only the in-memory CInode reference is dropped).
void CDentry::unlink_remote(CDentry::linkage_t *dnl)
{
  ceph_assert(dnl->is_remote());
  ceph_assert(dnl->inode);

  // only the durable (non-projected) linkage is tracked in remote_parents
  if (dnl == &linkage)
    dnl->inode->remove_remote_parent(this);

  dnl->inode = nullptr;  // modern idiom: nullptr instead of 0 for pointer clear
}
// Project a null linkage (e.g. for an upcoming unlink).  Removing a
// primary link from a stray dir means a stray is about to go away.
void CDentry::push_projected_linkage()
{
  _project_linkage();

  if (is_auth()) {
    CInode *diri = dir->inode;
    if (diri->is_stray())
      diri->mdcache->notify_stray_removed();
  }
}

// Project a primary linkage to 'inode'.  Dirty-rstat membership lives in
// the projected plane, so it is temporarily cleared around the projection.
void CDentry::push_projected_linkage(CInode *inode)
{
  // dirty rstat tracking is in the projected plane
  bool dirty_rstat = inode->is_dirty_rstat();
  if (dirty_rstat)
    inode->clear_dirty_rstat();

  _project_linkage()->inode = inode;
  inode->push_projected_parent(this);

  if (dirty_rstat)
    inode->mark_dirty_rstat();

  if (is_auth()) {
    CInode *diri = dir->inode;
    if (diri->is_stray())
      diri->mdcache->notify_stray_created();
  }
}

// Make the oldest projected linkage durable and return the (now updated)
// real linkage.
CDentry::linkage_t *CDentry::pop_projected_linkage()
{
  ceph_assert(projected.size());

  linkage_t& n = projected.front();

  /*
   * the idea here is that the link_remote_inode(), link_primary_inode(),
   * etc. calls should make linkage identical to &n (and we assert as
   * much).
   */

  if (n.remote_ino) {
    dir->link_remote_inode(this, n.remote_ino, n.remote_d_type);
    if (n.inode) {
      linkage.inode = n.inode;
      linkage.inode->add_remote_parent(this);
    }
  } else {
    if (n.inode) {
      dir->link_primary_inode(this, n.inode);
      n.inode->pop_projected_parent();
    }
  }

  ceph_assert(n.inode == linkage.inode);
  ceph_assert(n.remote_ino == linkage.remote_ino);
  ceph_assert(n.remote_d_type == linkage.remote_d_type);

  projected.pop_front();

  return &linkage;
}
// ----------------------------
// auth pins

// Auth pins on this dentry, plus (for a primary link) those of its inode.
int CDentry::get_num_dir_auth_pins() const
{
  ceph_assert(!is_projected());
  if (get_linkage()->is_primary())
    return auth_pins + get_linkage()->get_inode()->get_num_auth_pins();
  return auth_pins;
}

// A dentry is auth-pinnable iff its containing dirfrag is.
bool CDentry::can_auth_pin(int *err_ret) const
{
  ceph_assert(dir);
  return dir->can_auth_pin(err_ret);
}
// Take an auth pin; the first pin also takes a cache-object pin, and the
// count is propagated to the containing dirfrag as a nested pin.
void CDentry::auth_pin(void *by)
{
  if (auth_pins == 0)
    get(PIN_AUTHPIN);
  auth_pins++;

#ifdef MDS_AUTHPIN_SET
  auth_pin_set.insert(by);
#endif

  dout(10) << "auth_pin by " << by << " on " << *this << " now " << auth_pins << dendl;

  dir->adjust_nested_auth_pins(1, by);
}

// Release an auth pin taken by auth_pin(); mirrors its bookkeeping.
void CDentry::auth_unpin(void *by)
{
  auth_pins--;

#ifdef MDS_AUTHPIN_SET
  {
    auto it = auth_pin_set.find(by);
    ceph_assert(it != auth_pin_set.end());
    auth_pin_set.erase(it);
  }
#endif

  if (auth_pins == 0)
    put(PIN_AUTHPIN);

  dout(10) << "auth_unpin by " << by << " on " << *this << " now " << auth_pins << dendl;

  ceph_assert(auth_pins >= 0);

  dir->adjust_nested_auth_pins(-1, by);
}

// Forward nested auth-pin adjustments from a linked inode to the dirfrag.
void CDentry::adjust_nested_auth_pins(int diradj, void *by)
{
  dir->adjust_nested_auth_pins(diradj, by);
}

// Freeze state is tracked at dirfrag granularity.
bool CDentry::is_frozen() const
{
  return dir->is_frozen();
}

bool CDentry::is_freezing() const
{
  return dir->is_freezing();
}
// ----------------------------
// locking

// Identify this dentry (dirfrag + name + snapid) in lock messages.
void CDentry::set_object_info(MDSCacheObjectInfo &info)
{
  info.dirfrag = dir->dirfrag();
  info.dname = name;
  info.snapid = last;
}

// Encode replicated lock state: 'first', then a linkage-type byte and ino
// (1 = primary, 2 = remote); a null linkage encodes nothing after 'first'.
void CDentry::encode_lock_state(int type, bufferlist& bl)
{
  encode(first, bl);

  // null, ino, or remote_ino?
  char c;
  if (linkage.is_primary()) {
    c = 1;
    encode(c, bl);
    encode(linkage.get_inode()->ino(), bl);
  }
  else if (linkage.is_remote()) {
    c = 2;
    encode(c, bl);
    encode(linkage.get_remote_ino(), bl);
  }
  else if (linkage.is_null()) {
    // encode nothing.
  }
  else ceph_abort();
}
// Decode lock state produced by encode_lock_state() on the auth MDS.
// On a replica this may advance 'first'; an empty payload after 'first'
// means the auth's linkage is null.
void CDentry::decode_lock_state(int type, const bufferlist& bl)
{
  auto p = bl.cbegin();

  snapid_t newfirst;
  decode(newfirst, p);

  if (!is_auth() && newfirst != first) {
    dout(10) << __func__ << " first " << first << " -> " << newfirst << dendl;
    ceph_assert(newfirst > first);
    first = newfirst;
  }

  if (p.end()) {
    // null
    ceph_assert(linkage.is_null());
    return;
  }

  char c;
  inodeno_t ino;
  decode(c, p);

  switch (c) {
  case 1:   // primary linkage on the auth
  case 2:   // remote linkage on the auth
    decode(ino, p);
    // newly linked?
    if (linkage.is_null() && !is_auth()) {
      // force trim from cache!
      dout(10) << __func__ << " replica dentry null -> non-null, must trim" << dendl;
      //assert(get_num_ref() == 0);
    } else {
      // verify?
    }
    break;
  default:
    ceph_abort();
  }
}
// Return the existing lease for client c, or create a new one.  The first
// lease on a dentry takes a pin and a lease ref on the dentry lock.
ClientLease *CDentry::add_client_lease(client_t c, Session *session)
{
  // single lookup instead of count() followed by operator[]
  auto it = client_lease_map.find(c);
  if (it != client_lease_map.end())
    return it->second;

  dout(20) << __func__ << " client." << c << " on " << lock << dendl;
  if (client_lease_map.empty()) {
    get(PIN_CLIENTLEASE);
    lock.get_client_lease();
  }
  ClientLease *l = client_lease_map[c] = new ClientLease(c, this);
  l->seq = ++session->lease_seq;

  return l;
}
// Remove one client lease.  Dropping the last lease releases the pin and
// the lock's lease ref, and may let a gathering lock make progress.
void CDentry::remove_client_lease(ClientLease *l, Locker *locker)
{
  ceph_assert(l->parent == this);

  bool gather = false;

  dout(20) << __func__ << " client." << l->client << " on " << lock << dendl;

  client_lease_map.erase(l->client);
  l->item_lease.remove_myself();
  l->item_session_lease.remove_myself();
  delete l;

  if (client_lease_map.empty()) {
    gather = !lock.is_stable();
    lock.put_client_lease();
    put(PIN_CLIENTLEASE);
  }

  if (gather)
    locker->eval_gather(&lock);
}

// Drop all client leases on this dentry.
void CDentry::remove_client_leases(Locker *locker)
{
  while (!client_lease_map.empty())
    remove_client_lease(client_lease_map.begin()->second, locker);
}

// Called when refs drop low enough that a stray primary inode may become
// eligible for purge/reintegration evaluation.
void CDentry::_put()
{
  if (get_num_ref() <= ((int)is_dirty() + 1)) {
    CDentry::linkage_t *dnl = get_projected_linkage();
    if (dnl->is_primary()) {
      CInode *in = dnl->get_inode();
      if (get_num_ref() == (int)is_dirty() + !!in->get_num_ref())
	in->mdcache->maybe_eval_stray(in, true);
    }
  }
}
// Serialize a remote-link dentry: 'l' marker, then versioned (ino, d_type,
// alternate_name).  Counterpart of decode_remote().
void CDentry::encode_remote(inodeno_t& ino, unsigned char d_type,
                            std::string_view alternate_name,
                            bufferlist &bl)
{
  bl.append('l');  // remote link

  // marker, name, ino
  ENCODE_START(2, 1, bl);
  encode(ino, bl);
  encode(d_type, bl);
  encode(alternate_name, bl);
  ENCODE_FINISH(bl);
}

// Deserialize a remote link.  'l' is the current versioned encoding
// (alternate_name present from v2); 'L' is the legacy unversioned form.
void CDentry::decode_remote(char icode, inodeno_t& ino, unsigned char& d_type,
                            mempool::mds_co::string& alternate_name,
                            ceph::buffer::list::const_iterator& bl)
{
  if (icode == 'l') {
    DECODE_START(2, bl);
    decode(ino, bl);
    decode(d_type, bl);
    if (struct_v >= 2)
      decode(alternate_name, bl);
    DECODE_FINISH(bl);
  } else if (icode == 'L') {
    decode(ino, bl);
    decode(d_type, bl);
  } else ceph_assert(0);
}
// Dump this dentry's state to a Formatter (admin socket / debug output).
// NOTE: field order is part of the observed output; do not reorder.
void CDentry::dump(Formatter *f) const
{
  ceph_assert(f != NULL);

  filepath path;
  make_path(path);

  f->dump_string("path", path.get_path());
  f->dump_unsigned("path_ino", path.get_ino().val);
  f->dump_unsigned("snap_first", first);
  f->dump_unsigned("snap_last", last);

  f->dump_bool("is_primary", get_linkage()->is_primary());
  f->dump_bool("is_remote", get_linkage()->is_remote());
  f->dump_bool("is_null", get_linkage()->is_null());
  f->dump_bool("is_new", is_new());
  if (get_linkage()->get_inode()) {
    f->dump_unsigned("inode", get_linkage()->get_inode()->ino());
  } else {
    f->dump_unsigned("inode", 0);
  }

  if (linkage.is_remote()) {
    f->dump_string("remote_type", linkage.get_remote_d_type_string());
  } else {
    f->dump_string("remote_type", "");
  }

  f->dump_unsigned("version", get_version());
  f->dump_unsigned("projected_version", get_projected_version());

  f->dump_int("auth_pins", auth_pins);

  MDSCacheObject::dump(f);

  f->open_object_section("lock");
  lock.dump(f);
  f->close_section();

  f->open_object_section("versionlock");
  versionlock.dump(f);
  f->close_section();

  f->open_array_section("states");
  MDSCacheObject::dump_states(f);
  if (state_test(STATE_NEW))
    f->dump_string("state", "new");
  if (state_test(STATE_FRAGMENTING))
    f->dump_string("state", "fragmenting");
  if (state_test(STATE_PURGING))
    f->dump_string("state", "purging");
  if (state_test(STATE_BADREMOTEINO))
    f->dump_string("state", "badremoteino");
  if (state_test(STATE_STRAY))
    f->dump_string("state", "stray");
  f->close_section();
}
// Map the stored dirent d_type of a remote link to a short type name.
// Aborts on an unrecognized mode (corrupt or unsupported d_type).
std::string CDentry::linkage_t::get_remote_d_type_string() const
{
  const unsigned mode = DTTOIF(remote_d_type);
  switch (mode) {
  case S_IFREG:  return "reg";
  case S_IFDIR:  return "dir";
  case S_IFLNK:  return "lnk";
  case S_IFCHR:  return "chr";
  case S_IFBLK:  return "blk";
  case S_IFIFO:  return "fifo";
  case S_IFSOCK: return "sock";
  default: ceph_abort(); return "";
  }
}
// Scrub-time sanity check of this dentry's [first,last] snap range.
// Returns true if damage was found (and the dentry marked bad via
// go_bad_dentry), false otherwise.
bool CDentry::scrub(snapid_t next_seq)
{
  dout(20) << "scrubbing " << *this << " next_seq = " << next_seq << dendl;

  /* attempt to locate damage in first of CDentry, see:
   * https://tracker.ceph.com/issues/56140
   */
  /* skip projected dentries as first/last may have placeholder values */
  if (!is_projected()) {
    CDir* dir = get_dir();
    if (first > next_seq) {
      derr << __func__ << ": first > next_seq (" << next_seq << ") " << *this << dendl;
      dir->go_bad_dentry(last, get_name());
      return true;
    } else if (first > last) {
      derr << __func__ << ": first > last " << *this << dendl;
      dir->go_bad_dentry(last, get_name());
      return true;
    }

    // a non-head dentry whose range covers no live snapshot is stale
    auto&& realm = dir->get_inode()->find_snaprealm();
    if (realm) {
      auto&& snaps = realm->get_snaps();
      auto it = snaps.lower_bound(first);
      bool stale = last != CEPH_NOSNAP && (it == snaps.end() || *it > last);
      if (stale) {
        dout(20) << "is stale" << dendl;
        /* TODO: maybe trim? */
      }
    }
  }
  return false;
}
// Detect a corrupt snap range (first > last, or first beyond the next
// snap sequence).  'load' distinguishes corruption found when reading
// from disk (tolerated, flagged) from corruption about to be written
// (optionally fatal via mds_abort_on_newly_corrupt_dentry).
// Returns true if the dentry is corrupt.
bool CDentry::check_corruption(bool load)
{
  auto&& snapclient = dir->mdcache->mds->snapclient;
  auto next_snap = snapclient->get_last_seq()+1;
  if (first > last || (snapclient->is_server_ready() && first > next_snap)) {
    if (load) {
      dout(1) << "loaded already corrupt dentry: " << *this << dendl;
      corrupt_first_loaded = true;
    } else {
      derr << "newly corrupt dentry to be committed: " << *this << dendl;
    }
    if (g_conf().get_val<bool>("mds_go_bad_corrupt_dentry")) {
      dir->go_bad_dentry(last, get_name());
    }
    if (!load && g_conf().get_val<bool>("mds_abort_on_newly_corrupt_dentry")) {
      dir->mdcache->mds->clog->error() << "MDS abort because newly corrupt dentry to be committed: " << *this;
      ceph_abort("detected newly corrupt dentry"); /* avoid writing out newly corrupted dn */
    }
    return true;
  }
  return false;
}
MEMPOOL_DEFINE_OBJECT_FACTORY(CDentry, co_dentry, mds_co);
| 16,857 | 22.188446 | 133 | cc |
null | ceph-main/src/mds/CDentry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CDENTRY_H
#define CEPH_CDENTRY_H
#include <string>
#include <string_view>
#include <set>
#include "include/counter.h"
#include "include/types.h"
#include "include/buffer_fwd.h"
#include "include/lru.h"
#include "include/elist.h"
#include "include/filepath.h"
#include "BatchOp.h"
#include "MDSCacheObject.h"
#include "MDSContext.h"
#include "Mutation.h"
#include "SimpleLock.h"
#include "LocalLockC.h"
#include "ScrubHeader.h"
class CInode;
class CDir;
class Locker;
class CDentry;
class LogSegment;
class Session;
// define an ordering
bool operator<(const CDentry& l, const CDentry& r);
// dentry
// In-memory directory entry: binds a name within a dirfrag (CDir) to a
// primary inode, a remote inode number, or nothing (null) -- see linkage_t.
class CDentry : public MDSCacheObject, public LRUObject, public Counter<CDentry> {
public:
  MEMPOOL_CLASS_HELPERS();
  friend class CDir;

  struct linkage_t {
    CInode *inode = nullptr;
    inodeno_t remote_ino = 0;
    unsigned char remote_d_type = 0;

    linkage_t() {}

    // dentry type is primary || remote || null
    // inode ptr is required for primary, optional for remote, undefined for null
    bool is_primary() const { return remote_ino == 0 && inode != 0; }
    bool is_remote() const { return remote_ino > 0; }
    bool is_null() const { return remote_ino == 0 && inode == 0; }

    CInode *get_inode() { return inode; }
    const CInode *get_inode() const { return inode; }
    inodeno_t get_remote_ino() const { return remote_ino; }
    unsigned char get_remote_d_type() const { return remote_d_type; }
    std::string get_remote_d_type_string() const;

    void set_remote(inodeno_t ino, unsigned char d_type) {
      remote_ino = ino;
      remote_d_type = d_type;
      inode = 0;
    }
  };

  // -- state --
  static const int STATE_NEW =          (1<<0);
  static const int STATE_FRAGMENTING =  (1<<1);
  static const int STATE_PURGING =      (1<<2);
  static const int STATE_BADREMOTEINO = (1<<3);
  static const int STATE_EVALUATINGSTRAY = (1<<4);
  static const int STATE_PURGINGPINNED =  (1<<5);
  static const int STATE_BOTTOMLRU =    (1<<6);
  static const int STATE_UNLINKING =    (1<<7);
  static const int STATE_REINTEGRATING = (1<<8);
  // stray dentry needs notification of releasing reference
  static const int STATE_STRAY =	STATE_NOTIFYREF;
  static const int MASK_STATE_IMPORT_KEPT = STATE_BOTTOMLRU;

  // -- pins --
  static const int PIN_INODEPIN =     1;  // linked inode is pinned
  static const int PIN_FRAGMENTING = -2;  // containing dir is refragmenting
  static const int PIN_PURGING =      3;
  static const int PIN_SCRUBPARENT =  4;
  static const int PIN_WAITUNLINKSTATE = 5;

  static const unsigned EXPORT_NONCE = 1;

  // -- wait masks --
  const static uint64_t WAIT_UNLINK_STATE      = (1<<0);
  const static uint64_t WAIT_UNLINK_FINISH     = (1<<1);
  const static uint64_t WAIT_REINTEGRATE_FINISH = (1<<2);

  uint32_t replica_unlinking_ref = 0;

  CDentry(std::string_view n, __u32 h,
          mempool::mds_co::string alternate_name,
	  snapid_t f, snapid_t l) :
    hash(h),
    first(f), last(l),
    item_dirty(this),
    lock(this, &lock_type),
    versionlock(this, &versionlock_type),
    name(n),
    alternate_name(std::move(alternate_name))
  {}
  CDentry(std::string_view n, __u32 h,
          mempool::mds_co::string alternate_name,
          inodeno_t ino, unsigned char dt,
	  snapid_t f, snapid_t l) :
    hash(h),
    first(f), last(l),
    item_dirty(this),
    lock(this, &lock_type),
    versionlock(this, &versionlock_type),
    name(n),
    alternate_name(std::move(alternate_name))
  {
    linkage.remote_ino = ino;
    linkage.remote_d_type = dt;
  }

  ~CDentry() override {
    ceph_assert(batch_ops.empty());
  }

  std::string_view pin_name(int p) const override {
    switch (p) {
    case PIN_INODEPIN: return "inodepin";
    case PIN_FRAGMENTING: return "fragmenting";
    case PIN_PURGING: return "purging";
    case PIN_SCRUBPARENT: return "scrubparent";
    case PIN_WAITUNLINKSTATE: return "waitunlinkstate";
    default: return generic_pin_name(p);
    }
  }

  // -- wait --
  //static const int WAIT_LOCK_OFFSET = 8;

  void add_waiter(uint64_t tag, MDSContext *c) override;

  bool is_lt(const MDSCacheObject *r) const override {
    return *this < *static_cast<const CDentry*>(r);
  }

  dentry_key_t key() {
    return dentry_key_t(last, name.c_str(), hash);
  }

  bool check_corruption(bool load);

  const CDir *get_dir() const { return dir; }
  CDir *get_dir() { return dir; }
  std::string_view get_name() const { return std::string_view(name); }
  std::string_view get_alternate_name() const {
    return std::string_view(alternate_name);
  }
  void set_alternate_name(mempool::mds_co::string altn) {
    alternate_name = std::move(altn);
  }
  void set_alternate_name(std::string_view altn) {
    alternate_name = mempool::mds_co::string(altn);
  }

  __u32 get_hash() const { return hash; }

  // linkage
  const linkage_t *get_linkage() const { return &linkage; }
  linkage_t *get_linkage() { return &linkage; }

  linkage_t *_project_linkage() {
    projected.push_back(linkage_t());
    return &projected.back();
  }
  void push_projected_linkage();
  void push_projected_linkage(inodeno_t ino, char d_type) {
    linkage_t *p = _project_linkage();
    p->remote_ino = ino;
    p->remote_d_type = d_type;
  }
  void push_projected_linkage(CInode *inode);
  linkage_t *pop_projected_linkage();

  bool is_projected() const { return !projected.empty(); }

  linkage_t *get_projected_linkage() {
    if (!projected.empty())
      return &projected.back();
    return &linkage;
  }

  const linkage_t *get_projected_linkage() const {
    if (!projected.empty())
      return &projected.back();
    return &linkage;
  }

  CInode *get_projected_inode() {
    return get_projected_linkage()->inode;
  }

  bool use_projected(client_t client, const MutationRef& mut) const {
    return lock.can_read_projected(client) ||
	   lock.get_xlock_by() == mut;
  }
  linkage_t *get_linkage(client_t client, const MutationRef& mut) {
    return use_projected(client, mut) ? get_projected_linkage() : get_linkage();
  }

  // ref counts: pin ourselves in the LRU when we're pinned.
  void first_get() override {
    lru_pin();
  }
  void last_put() override {
    lru_unpin();
  }
  void _put() override;

  // auth pins
  bool can_auth_pin(int *err_ret=nullptr) const override;
  void auth_pin(void *by) override;
  void auth_unpin(void *by) override;
  void adjust_nested_auth_pins(int diradj, void *by);
  bool is_frozen() const override;
  bool is_freezing() const override;
  int get_num_dir_auth_pins() const;

  // remote links
  void link_remote(linkage_t *dnl, CInode *in);
  void unlink_remote(linkage_t *dnl);

  // copy cons
  CDentry(const CDentry& m);
  const CDentry& operator= (const CDentry& right);

  // misc
  void make_path_string(std::string& s, bool projected=false) const;
  void make_path(filepath& fp, bool projected=false) const;

  // -- version --
  version_t get_version() const { return version; }
  void set_version(version_t v) { projected_version = version = v; }
  version_t get_projected_version() const { return projected_version; }
  void set_projected_version(version_t v) { projected_version = v; }

  mds_authority_t authority() const override;

  version_t pre_dirty(version_t min=0);
  void _mark_dirty(LogSegment *ls);
  void mark_dirty(version_t pv, LogSegment *ls);
  void mark_clean();

  void mark_new();
  bool is_new() const { return state_test(STATE_NEW); }
  void clear_new() { state_clear(STATE_NEW); }

  void mark_auth();
  void clear_auth();

  bool scrub(snapid_t next_seq);

  // -- exporting
  // note: this assumes the dentry already exists.
  // i.e., the name is already extracted... so we just need the other state.
  void encode_export(ceph::buffer::list& bl) {
    ENCODE_START(1, 1, bl);
    encode(first, bl);
    encode(state, bl);
    encode(version, bl);
    encode(projected_version, bl);
    encode(lock, bl);
    encode(get_replicas(), bl);
    get(PIN_TEMPEXPORTING);
    ENCODE_FINISH(bl);
  }
  void finish_export() {
    // twiddle
    clear_replica_map();
    replica_nonce = EXPORT_NONCE;
    clear_auth();
    if (is_dirty())
      mark_clean();
    put(PIN_TEMPEXPORTING);
  }
  void abort_export() {
    put(PIN_TEMPEXPORTING);
  }
  void decode_import(ceph::buffer::list::const_iterator& blp, LogSegment *ls) {
    DECODE_START(1, blp);
    decode(first, blp);
    __u32 nstate;
    decode(nstate, blp);
    decode(version, blp);
    decode(projected_version, blp);
    decode(lock, blp);
    decode(get_replicas(), blp);

    // twiddle
    state &= MASK_STATE_IMPORT_KEPT;
    mark_auth();
    if (nstate & STATE_DIRTY)
      _mark_dirty(ls);
    if (is_replicated())
      get(PIN_REPLICATED);
    replica_nonce = 0;
    DECODE_FINISH(blp);
  }

  // -- locking --
  SimpleLock* get_lock(int type) override {
    ceph_assert(type == CEPH_LOCK_DN);
    return &lock;
  }
  void set_object_info(MDSCacheObjectInfo &info) override;
  void encode_lock_state(int type, ceph::buffer::list& bl) override;
  void decode_lock_state(int type, const ceph::buffer::list& bl) override;

  // ---------------------------------------------
  // replicas (on clients)
  bool is_any_leases() const {
    return !client_lease_map.empty();
  }
  const ClientLease *get_client_lease(client_t c) const {
    if (client_lease_map.count(c))
      return client_lease_map.find(c)->second;
    return 0;
  }
  ClientLease *get_client_lease(client_t c) {
    if (client_lease_map.count(c))
      return client_lease_map.find(c)->second;
    return 0;
  }
  bool have_client_lease(client_t c) const {
    const ClientLease *l = get_client_lease(c);
    if (l)
      return true;
    else
      return false;
  }

  ClientLease *add_client_lease(client_t c, Session *session);
  void remove_client_lease(ClientLease *r, Locker *locker);  // returns remaining mask (if any), and kicks locker eval_gathers
  void remove_client_leases(Locker *locker);

  std::ostream& print_db_line_prefix(std::ostream& out) override;
  void print(std::ostream& out) override;
  void dump(ceph::Formatter *f) const;

  static void encode_remote(inodeno_t& ino, unsigned char d_type,
                            std::string_view alternate_name,
                            bufferlist &bl);
  static void decode_remote(char icode, inodeno_t& ino, unsigned char& d_type,
                            mempool::mds_co::string& alternate_name,
                            ceph::buffer::list::const_iterator& bl);

  __u32 hash;
  snapid_t first, last;
  bool corrupt_first_loaded = false; /* dentry was already corrupt when loaded from disk; see check_corruption() */

  elist<CDentry*>::item item_dirty, item_dir_dirty;
  elist<CDentry*>::item item_stray;

  // lock
  static LockType lock_type;
  static LockType versionlock_type;

  SimpleLock lock; // FIXME referenced containers not in mempool
  LocalLockC versionlock; // FIXME referenced containers not in mempool

  mempool::mds_co::map<client_t,ClientLease*> client_lease_map;
  std::map<int, std::unique_ptr<BatchOp>> batch_ops;

protected:
  friend class Migrator;
  friend class Locker;
  friend class MDCache;
  friend class StrayManager;
  friend class CInode;
  friend class C_MDC_XlockRequest;

  CDir *dir = nullptr;     // containing dirfrag
  linkage_t linkage;       /* durable */
  mempool::mds_co::list<linkage_t> projected;

  version_t version = 0;           // dir version when last touched.
  version_t projected_version = 0; // what it will be when i unlock/commit.

private:
  mempool::mds_co::string name;
  mempool::mds_co::string alternate_name;
};
std::ostream& operator<<(std::ostream& out, const CDentry& dn);
#endif
| 12,047 | 28.171913 | 126 | h |
null | ceph-main/src/mds/CDir.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <string_view>
#include <algorithm>
#include "include/types.h"
#include "CDir.h"
#include "CDentry.h"
#include "CInode.h"
#include "Mutation.h"
#include "MDSMap.h"
#include "MDSRank.h"
#include "MDCache.h"
#include "Locker.h"
#include "MDLog.h"
#include "LogSegment.h"
#include "MDBalancer.h"
#include "SnapClient.h"
#include "common/bloom_filter.hpp"
#include "common/likely.h"
#include "include/Context.h"
#include "common/Clock.h"
#include "osdc/Objecter.h"
#include "common/config.h"
#include "include/ceph_assert.h"
#include "include/compat.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "mds." << mdcache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") "
using namespace std;
int CDir::num_frozen_trees = 0;
int CDir::num_freezing_trees = 0;
CDir::fnode_const_ptr CDir::empty_fnode = CDir::allocate_fnode();
// Base for synchronous MDS contexts operating on a dirfrag; resolves the
// owning MDSRank through the dirfrag's cache.
class CDirContext : public MDSContext
{
protected:
  CDir *dir;
  MDSRank* get_mds() override {return dir->mdcache->mds;}

public:
  explicit CDirContext(CDir *d) : dir(d) {
    ceph_assert(dir != NULL);
  }
};


// Base for I/O-completion contexts operating on a dirfrag.
class CDirIOContext : public MDSIOContextBase
{
protected:
  CDir *dir;
  MDSRank* get_mds() override {return dir->mdcache->mds;}

public:
  explicit CDirIOContext(CDir *d) : dir(d) {
    ceph_assert(dir != NULL);
  }
};
// PINS
//int cdir_pins[CDIR_NUM_PINS] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
// Debug print of a dirfrag: path, auth/replica info, dir_auth, auth pins,
// state flags, fragstat/rstat (projected values when debugging scatterstat),
// item counts, dirty count, pins, and the object address.
ostream& operator<<(ostream& out, const CDir& dir)
{
  out << "[dir " << dir.dirfrag() << " " << dir.get_path() << "/"
      << " [" << dir.first << ",head]";
  if (dir.is_auth()) {
    out << " auth";
    if (dir.is_replicated())
      out << dir.get_replicas();

    if (dir.is_projected())
      out << " pv=" << dir.get_projected_version();
    out << " v=" << dir.get_version();
    out << " cv=" << dir.get_committing_version();
    out << "/" << dir.get_committed_version();
  } else {
    mds_authority_t a = dir.authority();
    out << " rep@" << a.first;
    if (a.second != CDIR_AUTH_UNKNOWN)
      out << "," << a.second;
    out << "." << dir.get_replica_nonce();
  }

  if (dir.is_rep()) out << " REP";

  if (dir.get_dir_auth() != CDIR_AUTH_DEFAULT) {
    if (dir.get_dir_auth().second == CDIR_AUTH_UNKNOWN)
      out << " dir_auth=" << dir.get_dir_auth().first;
    else
      out << " dir_auth=" << dir.get_dir_auth();
  }

  if (dir.get_auth_pins() || dir.get_dir_auth_pins()) {
    out << " ap=" << dir.get_auth_pins()
	<< "+" << dir.get_dir_auth_pins();
#ifdef MDS_AUTHPIN_SET
    dir.print_authpin_set(out);
#endif
  }

  out << " state=" << dir.get_state();
  if (dir.state_test(CDir::STATE_COMPLETE)) out << "|complete";
  if (dir.state_test(CDir::STATE_FREEZINGTREE)) out << "|freezingtree";
  if (dir.state_test(CDir::STATE_FROZENTREE)) out << "|frozentree";
  if (dir.state_test(CDir::STATE_AUXSUBTREE)) out << "|auxsubtree";
  if (dir.state_test(CDir::STATE_FROZENDIR)) out << "|frozendir";
  if (dir.state_test(CDir::STATE_FREEZINGDIR)) out << "|freezingdir";
  if (dir.state_test(CDir::STATE_EXPORTBOUND)) out << "|exportbound";
  if (dir.state_test(CDir::STATE_IMPORTBOUND)) out << "|importbound";
  if (dir.state_test(CDir::STATE_BADFRAG)) out << "|badfrag";
  if (dir.state_test(CDir::STATE_FRAGMENTING)) out << "|fragmenting";
  if (dir.state_test(CDir::STATE_CREATING)) out << "|creating";
  if (dir.state_test(CDir::STATE_COMMITTING)) out << "|committing";
  if (dir.state_test(CDir::STATE_FETCHING)) out << "|fetching";
  if (dir.state_test(CDir::STATE_EXPORTING)) out << "|exporting";
  if (dir.state_test(CDir::STATE_IMPORTING)) out << "|importing";
  if (dir.state_test(CDir::STATE_STICKY)) out << "|sticky";
  if (dir.state_test(CDir::STATE_DNPINNEDFRAG)) out << "|dnpinnedfrag";
  if (dir.state_test(CDir::STATE_ASSIMRSTAT)) out << "|assimrstat";

  // fragstat
  out << " " << dir.get_fnode()->fragstat;
  if (!(dir.get_fnode()->fragstat == dir.get_fnode()->accounted_fragstat))
    out << "/" << dir.get_fnode()->accounted_fragstat;
  if (g_conf()->mds_debug_scatterstat && dir.is_projected()) {
    const auto& pf = dir.get_projected_fnode();
    out << "->" << pf->fragstat;
    if (!(pf->fragstat == pf->accounted_fragstat))
      out << "/" << pf->accounted_fragstat;
  }

  // rstat
  out << " " << dir.get_fnode()->rstat;
  if (!(dir.get_fnode()->rstat == dir.get_fnode()->accounted_rstat))
    out << "/" << dir.get_fnode()->accounted_rstat;
  if (g_conf()->mds_debug_scatterstat && dir.is_projected()) {
    const auto& pf = dir.get_projected_fnode();
    out << "->" << pf->rstat;
    if (!(pf->rstat == pf->accounted_rstat))
      out << "/" << pf->accounted_rstat;
  }

  out << " hs=" << dir.get_num_head_items() << "+" << dir.get_num_head_null();
  out << ",ss=" << dir.get_num_snap_items() << "+" << dir.get_num_snap_null();
  if (dir.get_num_dirty())
    out << " dirty=" << dir.get_num_dirty();

  if (dir.get_num_ref()) {
    out << " |";
    dir.print_pin_set(out);
  }

  out << " " << &dir;
  return out << "]";
}

// Print this dirfrag via the stream operator above.
void CDir::print(ostream& out)
{
  out << *this;
}

// Standard debug-line prefix (timestamp, rank, dirfrag) for this object.
ostream& CDir::print_db_line_prefix(ostream& out)
{
  return out << ceph_clock_now() << " mds." << mdcache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") ";
}
// -------------------------------------------------------------------
// CDir
// Construct a dirfrag for fragment 'fg' of directory inode 'in'.
// 'auth' marks whether this MDS is authoritative for the fragment.
CDir::CDir(CInode *in, frag_t fg, MDCache *mdc, bool auth) :
  mdcache(mdc), inode(in), frag(fg),
  dirty_rstat_inodes(member_offset(CInode, dirty_rstat_item)),
  dirty_dentries(member_offset(CDentry, item_dir_dirty)),
  item_dirty(this), item_new(this),
  lock_caches_with_auth_pins(member_offset(MDLockCache::DirItem, item_dir)),
  freezing_inodes(member_offset(CInode, item_freezing_inode)),
  dir_rep(REP_NONE),
  pop_me(mdc->decayrate),
  pop_nested(mdc->decayrate),
  pop_auth_subtree(mdc->decayrate),
  pop_auth_subtree_nested(mdc->decayrate),
  pop_lru_subdirs(member_offset(CInode, item_pop_lru)),
  dir_auth(CDIR_AUTH_DEFAULT)
{
  // auth
  ceph_assert(in->is_dir());
  if (auth)
    state_set(STATE_AUTH);
}
/**
 * Check the recursive statistics on size for consistency.
 * If mds_debug_scatterstat is enabled, assert for correctness,
 * otherwise just print out the mismatch and continue.
 */
bool CDir::check_rstats(bool scrub)
{
  if (!g_conf()->mds_debug_scatterstat && !scrub)
    return true;

  dout(25) << "check_rstats on " << this << dendl;
  if (!is_complete() || !is_auth() || is_frozen()) {
    dout(3) << "check_rstats " << (scrub ? "(scrub) " : "")
            << "bailing out -- incomplete or non-auth or frozen dir on "
            << *this << dendl;
    return !scrub;
  }

  // recompute fragstat/rstat from the head dentries and compare to fnode
  frag_info_t frag_info;
  nest_info_t nest_info;
  for (auto i = items.begin(); i != items.end(); ++i) {
    if (i->second->last != CEPH_NOSNAP)
      continue;
    CDentry::linkage_t *dnl = i->second->get_linkage();
    if (dnl->is_primary()) {
      CInode *in = dnl->get_inode();
      nest_info.add(in->get_inode()->accounted_rstat);
      if (in->is_dir())
	frag_info.nsubdirs++;
      else
	frag_info.nfiles++;
    } else if (dnl->is_remote())
      frag_info.nfiles++;
  }

  bool good = true;
  // fragstat
  if(!frag_info.same_sums(fnode->fragstat)) {
    dout(1) << "mismatch between head items and fnode.fragstat! printing dentries" << dendl;
    dout(1) << "get_num_head_items() = " << get_num_head_items()
            << "; fnode.fragstat.nfiles=" << fnode->fragstat.nfiles
            << " fnode.fragstat.nsubdirs=" << fnode->fragstat.nsubdirs << dendl;
    good = false;
  } else {
    dout(20) << "get_num_head_items() = " << get_num_head_items()
             << "; fnode.fragstat.nfiles=" << fnode->fragstat.nfiles
             << " fnode.fragstat.nsubdirs=" << fnode->fragstat.nsubdirs << dendl;
  }

  // rstat
  if (!nest_info.same_sums(fnode->rstat)) {
    dout(1) << "mismatch between child accounted_rstats and my rstats!" << dendl;
    dout(1) << "total of child dentries: " << nest_info << dendl;
    dout(1) << "my rstats:               " << fnode->rstat << dendl;
    good = false;
  } else {
    dout(20) << "total of child dentries: " << nest_info << dendl;
    dout(20) << "my rstats:               " << fnode->rstat << dendl;
  }

  if (!good) {
    // asserts only when not scrubbing (scrub reports instead of aborting)
    if (!scrub) {
      for (auto i = items.begin(); i != items.end(); ++i) {
        CDentry *dn = i->second;
        if (dn->get_linkage()->is_primary()) {
          CInode *in = dn->get_linkage()->inode;
          dout(1) << *dn << " rstat " << in->get_inode()->accounted_rstat << dendl;
        } else {
          dout(1) << *dn << dendl;
        }
      }

      ceph_assert(frag_info.nfiles == fnode->fragstat.nfiles);
      ceph_assert(frag_info.nsubdirs == fnode->fragstat.nsubdirs);
      ceph_assert(nest_info.rbytes == fnode->rstat.rbytes);
      ceph_assert(nest_info.rfiles == fnode->rstat.rfiles);
      ceph_assert(nest_info.rsubdirs == fnode->rstat.rsubdirs);
    }
  }
  dout(10) << "check_rstats complete on " << this << dendl;
  return good;
}
void CDir::adjust_num_inodes_with_caps(int d)
{
// FIXME: smarter way to decide if adding 'this' to open file table
if (num_inodes_with_caps == 0 && d > 0)
mdcache->open_file_table.add_dirfrag(this);
else if (num_inodes_with_caps > 0 && num_inodes_with_caps == -d)
mdcache->open_file_table.remove_dirfrag(this);
num_inodes_with_caps += d;
ceph_assert(num_inodes_with_caps >= 0);
}
// Look up the dentry for 'name' that is live at snapid 'snap'.
// Dentries are keyed by (last-snapid, name); lower_bound finds the first
// dentry whose 'last' >= snap, and the hit is confirmed by checking that
// the candidate's [first,last] range actually covers 'snap'.
// Returns nullptr on a miss.
CDentry *CDir::lookup(std::string_view name, snapid_t snap)
{
  dout(20) << "lookup (" << name << ", '" << snap << "')" << dendl;
  auto iter = items.lower_bound(dentry_key_t(snap, name, inode->hash_dentry_name(name)));
  if (iter == items.end())
    return 0;
  if (iter->second->get_name() == name &&
      iter->second->first <= snap &&
      iter->second->last >= snap) {
    dout(20) << " hit -> " << iter->first << dendl;
    return iter->second;
  }
  dout(20) << " miss -> " << iter->first << dendl;
  return 0;
}
CDentry *CDir::lookup_exact_snap(std::string_view name, snapid_t last) {
  // Exact-key lookup: unlike lookup(), the dentry's 'last' snapid must
  // equal 'last' precisely; a dentry whose range merely covers it does
  // not match.  Returns nullptr when absent.
  dout(20) << __func__ << " (" << last << ", '" << name << "')" << dendl;
  dentry_key_t key(last, name, inode->hash_dentry_name(name));
  auto it = items.find(key);
  return it == items.end() ? nullptr : it->second;
}
void CDir::adjust_dentry_lru(CDentry *dn)
{
  // Decide which LRU this dentry belongs on.  Bottom LRU is used for
  // non-auth null dentries and for primary dentries of a non-auth stray
  // dirfrag; remote dentries always live on the main LRU.
  bool want_bottom;
  const auto *dnl = dn->get_linkage();
  if (dnl->is_primary())
    want_bottom = !is_auth() && inode->is_stray();
  else if (dnl->is_remote())
    want_bottom = false;
  else
    want_bottom = !is_auth();

  // Move between lists only when the current placement is wrong.
  const bool on_bottom = dn->state_test(CDentry::STATE_BOTTOMLRU);
  if (want_bottom == on_bottom)
    return;

  if (want_bottom) {
    mdcache->lru.lru_remove(dn);
    mdcache->bottom_lru.lru_insert_mid(dn);
    dn->state_set(CDentry::STATE_BOTTOMLRU);
  } else {
    mdcache->bottom_lru.lru_remove(dn);
    mdcache->lru.lru_insert_mid(dn);
    dn->state_clear(CDentry::STATE_BOTTOMLRU);
  }
}
/***
* linking fun
*/
// Create and insert a null (no inode linked) dentry for 'dname'
// covering snaps [first,last].  A dentry with this exact snap range
// must not already exist.  Auth dentries go on the main LRU; replica
// null dentries go on the bottom LRU.
CDentry* CDir::add_null_dentry(std::string_view dname,
			       snapid_t first, snapid_t last)
{
  // must not already exist for this exact snap range
  ceph_assert(lookup_exact_snap(dname, last) == 0);
   
  // create dentry
  CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), "", first, last);
  dn->dir = this;
  dn->version = get_projected_version();
  dn->check_corruption(true);
  if (is_auth()) {
    dn->state_set(CDentry::STATE_AUTH);
    mdcache->lru.lru_insert_mid(dn);
  } else {
    mdcache->bottom_lru.lru_insert_mid(dn);
    dn->state_set(CDentry::STATE_BOTTOMLRU);
  }

  // add to dir
  ceph_assert(items.count(dn->key()) == 0);
  //assert(null_items.count(dn->get_name()) == 0);

  items[dn->key()] = dn;
  if (last == CEPH_NOSNAP)
    num_head_null++;
  else
    num_snap_null++;

  // keep the dentry pinned while a fragment operation is in flight
  if (state_test(CDir::STATE_DNPINNEDFRAG)) {
    dn->get(CDentry::PIN_FRAGMENTING);
    dn->state_set(CDentry::STATE_FRAGMENTING);
  }    

  dout(12) << __func__ << " " << *dn << dendl;

  // first child pins the dirfrag
  if (get_num_any() == 1)
    get(PIN_CHILD);
  
  ceph_assert(get_num_any() == items.size());
  return dn;
}
// Create a dentry for 'dname' and link inode 'in' as its primary.
// A dentry with this exact snap range must not already exist.
// Primary dentries of a non-auth stray dirfrag go on the bottom LRU.
CDentry* CDir::add_primary_dentry(std::string_view dname, CInode *in,
                                  mempool::mds_co::string alternate_name,
				  snapid_t first, snapid_t last) 
{
  // primary
  ceph_assert(lookup_exact_snap(dname, last) == 0);
  
  // create dentry
  CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), std::move(alternate_name), first, last);
  dn->dir = this;
  dn->version = get_projected_version();
  dn->check_corruption(true);
  if (is_auth())
    dn->state_set(CDentry::STATE_AUTH);
  if (is_auth() || !inode->is_stray()) {
    mdcache->lru.lru_insert_mid(dn);
  } else {
    mdcache->bottom_lru.lru_insert_mid(dn);
    dn->state_set(CDentry::STATE_BOTTOMLRU);
  }

  // add to dir
  ceph_assert(items.count(dn->key()) == 0);
  //assert(null_items.count(dn->get_name()) == 0);

  items[dn->key()] = dn;

  // link the inode and update per-dir accounting
  dn->get_linkage()->inode = in;

  link_inode_work(dn, in);

  if (dn->last == CEPH_NOSNAP)
    num_head_items++;
  else
    num_snap_items++;
  
  // keep the dentry pinned while a fragment operation is in flight
  if (state_test(CDir::STATE_DNPINNEDFRAG)) {
    dn->get(CDentry::PIN_FRAGMENTING);
    dn->state_set(CDentry::STATE_FRAGMENTING);
  }    

  dout(12) << __func__ << " " << *dn << dendl;

  // first child pins the dirfrag
  if (get_num_any() == 1)
    get(PIN_CHILD);
  ceph_assert(get_num_any() == items.size());
  return dn;
}
// Create a remote dentry for 'dname' referring to inode number 'ino'
// with dirent type 'd_type'.  A dentry with this exact snap range must
// not already exist.  Remote dentries always live on the main LRU.
CDentry* CDir::add_remote_dentry(std::string_view dname, inodeno_t ino, unsigned char d_type,
                                 mempool::mds_co::string alternate_name,
				 snapid_t first, snapid_t last) 
{
  // foreign
  ceph_assert(lookup_exact_snap(dname, last) == 0);

  // create dentry
  CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), std::move(alternate_name), ino, d_type, first, last);
  dn->dir = this;
  dn->version = get_projected_version();
  dn->check_corruption(true);
  if (is_auth()) 
    dn->state_set(CDentry::STATE_AUTH);
  mdcache->lru.lru_insert_mid(dn);

  // add to dir
  ceph_assert(items.count(dn->key()) == 0);
  //assert(null_items.count(dn->get_name()) == 0);

  items[dn->key()] = dn;
  if (last == CEPH_NOSNAP)
    num_head_items++;
  else
    num_snap_items++;

  // keep the dentry pinned while a fragment operation is in flight
  if (state_test(CDir::STATE_DNPINNEDFRAG)) {
    dn->get(CDentry::PIN_FRAGMENTING);
    dn->state_set(CDentry::STATE_FRAGMENTING);
  }    

  dout(12) << __func__ << " " << *dn << dendl;

  // first child pins the dirfrag
  if (get_num_any() == 1)
    get(PIN_CHILD);
  ceph_assert(get_num_any() == items.size());
  return dn;
}
// Remove a dentry from this dirfrag and delete it: unlink any attached
// inode, fix up the head/snap item counters, detach from the LRU and,
// if this was the last child, drop the PIN_CHILD reference.
void CDir::remove_dentry(CDentry *dn) 
{
  dout(12) << __func__ << " " << *dn << dendl;

  // there should be no client leases at this point!
  ceph_assert(dn->client_lease_map.empty());

  if (state_test(CDir::STATE_DNPINNEDFRAG)) {
    dn->put(CDentry::PIN_FRAGMENTING);
    dn->state_clear(CDentry::STATE_FRAGMENTING);
  }    

  // adjust the null/linked counters for the snap class of this dentry
  if (dn->get_linkage()->is_null()) {
    if (dn->last == CEPH_NOSNAP)
      num_head_null--;
    else
      num_snap_null--;
  } else {
    if (dn->last == CEPH_NOSNAP)
      num_head_items--;
    else
      num_snap_items--;
  }

  if (!dn->get_linkage()->is_null())
    // detach inode and dentry
    unlink_inode_work(dn);
  
  // remove from list
  ceph_assert(items.count(dn->key()) == 1);
  items.erase(dn->key());

  // clean?
  if (dn->is_dirty())
    dn->mark_clean();

  if (dn->state_test(CDentry::STATE_BOTTOMLRU))
    mdcache->bottom_lru.lru_remove(dn);
  else
    mdcache->lru.lru_remove(dn);
  delete dn;

  // unpin?
  if (get_num_any() == 0)
    put(PIN_CHILD);
  ceph_assert(get_num_any() == items.size());
}
void CDir::link_remote_inode(CDentry *dn, CInode *in)
{
  // Convenience overload: derive the remote ino and dirent type from
  // the inode's projected mode.
  const auto mode = in->get_projected_inode()->mode;
  link_remote_inode(dn, in->ino(), IFTODT(mode));
}
// Turn a null dentry into a remote link to inode number 'ino'.
// Moves the dentry off the bottom LRU if necessary and shifts the
// null/linked counters accordingly.
void CDir::link_remote_inode(CDentry *dn, inodeno_t ino, unsigned char d_type)
{
  dout(12) << __func__ << " " << *dn << " remote " << ino << dendl;
  ceph_assert(dn->get_linkage()->is_null());

  dn->get_linkage()->set_remote(ino, d_type);

  // remote dentries belong on the main LRU
  if (dn->state_test(CDentry::STATE_BOTTOMLRU)) {
    mdcache->bottom_lru.lru_remove(dn);
    mdcache->lru.lru_insert_mid(dn);
    dn->state_clear(CDentry::STATE_BOTTOMLRU);
  }

  if (dn->last == CEPH_NOSNAP) {
    num_head_items++;
    num_head_null--;
  } else {
    num_snap_items++;
    num_snap_null--;
  }
  ceph_assert(get_num_any() == items.size());
}
// Turn a null dentry into the primary link for inode 'in'.
// Performs the shared link bookkeeping (link_inode_work), fixes LRU
// placement, and shifts the null/linked counters.
void CDir::link_primary_inode(CDentry *dn, CInode *in)
{
  dout(12) << __func__ << " " << *dn << " " << *in << dendl;
  ceph_assert(dn->get_linkage()->is_null());

  dn->get_linkage()->inode = in;

  link_inode_work(dn, in);

  // a primary dentry only stays on the bottom LRU for a non-auth stray
  if (dn->state_test(CDentry::STATE_BOTTOMLRU) &&
      (is_auth() || !inode->is_stray())) {
    mdcache->bottom_lru.lru_remove(dn);
    mdcache->lru.lru_insert_mid(dn);
    dn->state_clear(CDentry::STATE_BOTTOMLRU);
  }
  
  if (dn->last == CEPH_NOSNAP) {
    num_head_items++;
    num_head_null--;
  } else {
    num_snap_items++;
    num_snap_null--;
  }

  ceph_assert(get_num_any() == items.size());
}
// Shared bookkeeping for attaching a primary inode to a dentry:
// parent back-pointer, pin propagation, open-file-table notification,
// caps accounting, nested auth pins, freeze tracking and snaprealm
// parent adjustment.  The dentry's linkage must already point at 'in'.
void CDir::link_inode_work( CDentry *dn, CInode *in)
{
  ceph_assert(dn->get_linkage()->get_inode() == in);
  in->set_primary_parent(dn);

  // set inode version
  //in->inode.version = dn->get_version();
  
  // a referenced inode keeps its dentry pinned
  if (in->get_num_ref())
    dn->get(CDentry::PIN_INODEPIN);

  if (in->state_test(CInode::STATE_TRACKEDBYOFT))
    mdcache->open_file_table.notify_link(in);
  if (in->is_any_caps())
    adjust_num_inodes_with_caps(1);
  
  // adjust auth pin count
  if (in->auth_pins)
    dn->adjust_nested_auth_pins(in->auth_pins, NULL);

  // track freezing/frozen inodes under this dirfrag
  if (in->is_freezing_inode())
    freezing_inodes.push_back(&in->item_freezing_inode);
  else if (in->is_frozen_inode() || in->is_frozen_auth_pin())
    num_frozen_inodes++;

  // verify open snaprealm parent
  if (in->snaprealm)
    in->snaprealm->adjust_parent();
  else if (in->is_any_caps())
    in->move_to_realm(inode->find_snaprealm());
}
// Detach the inode (primary or remote) from 'dn', leaving a null
// dentry.  When 'adjust_lru' is set, a non-auth dentry is demoted to
// the bottom LRU.  Counter bookkeeping mirrors link_*_inode().
void CDir::unlink_inode(CDentry *dn, bool adjust_lru)
{
  if (dn->get_linkage()->is_primary()) {
    dout(12) << __func__ << " " << *dn << " " << *dn->get_linkage()->get_inode() << dendl;
  } else {
    dout(12) << __func__ << " " << *dn << dendl;
  }

  unlink_inode_work(dn);

  // the now-null replica dentry is cheap to trim; move to bottom LRU
  if (adjust_lru && !is_auth() &&
      !dn->state_test(CDentry::STATE_BOTTOMLRU)) {
    mdcache->lru.lru_remove(dn);
    mdcache->bottom_lru.lru_insert_mid(dn);
    dn->state_set(CDentry::STATE_BOTTOMLRU);
  }

  if (dn->last == CEPH_NOSNAP) {
    num_head_items--;
    num_head_null++;
  } else {
    num_snap_items--;
    num_snap_null++;
  }
  ceph_assert(get_num_any() == items.size());
}
// Opportunistically delete a null dentry that was created and unlinked
// before ever being committed.  Only removes the dentry if it holds no
// references beyond a possible dirty pin and is still marked new.
void CDir::try_remove_unlinked_dn(CDentry *dn)
{
  ceph_assert(dn->dir == this);
  ceph_assert(dn->get_linkage()->is_null());
  
  // no pins (besides dirty)?
  if (dn->get_num_ref() != dn->is_dirty()) 
    return;
  
  // was the dn new?
  if (dn->is_new()) {
    dout(10) << __func__ << " " << *dn << " in " << *this << dendl;
    if (dn->is_dirty())
      dn->mark_clean();
    remove_dentry(dn);

    // NOTE: we may not have any more dirty dentries, but the fnode
    // still changed, so the directory must remain dirty.
  }
}
// Core of unlink_inode(): detach the remote or primary linkage from
// 'dn', reversing the bookkeeping done by link_inode_work() for the
// primary case (pins, open file table, caps, auth pins, freeze state).
void CDir::unlink_inode_work(CDentry *dn)
{
  CInode *in = dn->get_linkage()->get_inode();

  if (dn->get_linkage()->is_remote()) {
    // remote
    if (in) 
      dn->unlink_remote(dn->get_linkage());

    dn->get_linkage()->set_remote(0, 0);
  } else if (dn->get_linkage()->is_primary()) {
    // primary
    // unpin dentry?
    if (in->get_num_ref())
      dn->put(CDentry::PIN_INODEPIN);

    if (in->state_test(CInode::STATE_TRACKEDBYOFT))
      mdcache->open_file_table.notify_unlink(in);
    if (in->is_any_caps())
      adjust_num_inodes_with_caps(-1);
    
    // unlink auth_pin count
    if (in->auth_pins)
      dn->adjust_nested_auth_pins(-in->auth_pins, nullptr);

    // stop tracking the inode's freeze state under this dirfrag
    if (in->is_freezing_inode())
      in->item_freezing_inode.remove_myself();
    else if (in->is_frozen_inode() || in->is_frozen_auth_pin())
      num_frozen_inodes--;

    // detach inode
    in->remove_primary_parent(dn);
    if (in->is_dir())
      in->item_pop_lru.remove_myself();
    dn->get_linkage()->inode = 0;
  } else {
    ceph_assert(!dn->get_linkage()->is_null());
  }
}
// Record a head dentry's name in this dirfrag's bloom filter, which is
// used to avoid fetching for names that are definitely absent.  The
// filter is lazily allocated for complete, non-standby-replay dirfrags.
void CDir::add_to_bloom(CDentry *dn)
{
  ceph_assert(dn->last == CEPH_NOSNAP);
  if (!bloom) {
    /* not create bloom filter for incomplete dir that was added by log replay */
    if (!is_complete())
      return;

    /* don't maintain bloom filters in standby replay (saves cycles, and also
     * avoids need to implement clearing it in EExport for #16924) */
    if (mdcache->mds->is_standby_replay()) {
      return;
    }

    /* This size and false positive probability is completely random.
     * Size at least 100 entries. */
    unsigned size = get_num_head_items() + get_num_snap_items();
    if (size < 100) size = 100;
    bloom.reset(new bloom_filter(size, 1.0 / size, 0));
  }
  bloom->insert(dn->get_name().data(), dn->get_name().size());
}
bool CDir::is_in_bloom(std::string_view name)
{
  // Without a filter there is no cached membership information; treat
  // the name as "not known to be present".
  return bloom && bloom->contains(name.data(), name.size());
}
void CDir::remove_null_dentries() {
  dout(12) << __func__ << " " << *this << dendl;

  // Walk the dentry map, advancing the iterator before any erase so it
  // stays valid while remove_dentry() mutates 'items'.
  for (auto it = items.begin(); it != items.end(); ) {
    CDentry *dn = (it++)->second;
    if (dn->get_linkage()->is_null() && !dn->is_projected())
      remove_dentry(dn);
  }

  ceph_assert(num_snap_null == 0);
  ceph_assert(num_head_null == 0);
  ceph_assert(get_num_any() == items.size());
}
/** remove dirty null dentries for deleted directory. the dirfrag will be
* deleted soon, so it's safe to not commit dirty dentries.
*
* This is called when a directory is being deleted, a prerequisite
* of which is that its children have been unlinked: we expect to only see
* null, unprojected dentries here.
*/
void CDir::try_remove_dentries_for_stray()
{
  dout(10) << __func__ << dendl;
  ceph_assert(get_parent_dir()->inode->is_stray());

  // clear dirty only when the directory was not snapshotted
  bool clear_dirty = !inode->snaprealm;

  // iterator advanced before any erase so it survives remove_dentry()
  auto p = items.begin();
  while (p != items.end()) {
    CDentry *dn = p->second;
    ++p;
    if (dn->last == CEPH_NOSNAP) {
      // head dentries of a deleted dir must already be null and unprojected
      ceph_assert(!dn->is_projected());
      ceph_assert(dn->get_linkage()->is_null());
      if (clear_dirty && dn->is_dirty())
	dn->mark_clean();
      // It's OK to remove lease prematurely because we will never link
      // the dentry to inode again.
      if (dn->is_any_leases())
	dn->remove_client_leases(mdcache->mds->locker);
      if (dn->get_num_ref() == 0)
	remove_dentry(dn);
    } else {
      // snapped dentries may still be primary; drop the inode with them
      ceph_assert(!dn->is_projected());
      CDentry::linkage_t *dnl= dn->get_linkage();
      CInode *in = NULL;
      if (dnl->is_primary()) {
	in = dnl->get_inode();
	if (clear_dirty && in->is_dirty())
	  in->mark_clean();
      }
      if (clear_dirty && dn->is_dirty())
	dn->mark_clean();
      if (dn->get_num_ref() == 0) {
	remove_dentry(dn);
	if (in)
	  mdcache->remove_inode(in);
      }
    }
  }

  if (clear_dirty && is_dirty())
    mark_clean();
}
// Purge a snapped dentry (and its primary inode, if any) when no
// retained snapid falls within its [first,last] range and neither the
// dentry nor the inode holds references beyond a dirty pin.
// Returns true if the dentry was removed.
bool CDir::try_trim_snap_dentry(CDentry *dn, const set<snapid_t>& snaps)
{
  // head dentries are never trimmed here
  if (dn->last == CEPH_NOSNAP) {
    return false;
  }

  // first retained snap >= dn->first; if it is past dn->last, no snap
  // keeps this dentry alive
  set<snapid_t>::const_iterator p = snaps.lower_bound(dn->first);
  CDentry::linkage_t *dnl= dn->get_linkage();
  CInode *in = 0;
  if (dnl->is_primary())
    in = dnl->get_inode();
  if ((p == snaps.end() || *p > dn->last) &&
      (dn->get_num_ref() == dn->is_dirty()) &&
      (!in || in->get_num_ref() == in->is_dirty())) {
    dout(10) << " purging snapped " << *dn << dendl;
    if (in && in->is_dirty())
      in->mark_clean();
    remove_dentry(dn);
    if (in) {
      dout(10) << " purging snapped " << *in << dendl;
      mdcache->remove_inode(in);
    }
    return true;
  }
  return false;
}
/**
* steal_dentry -- semi-violently move a dentry from one CDir to another
* (*) violently, in that nitems, most pins, etc. are not correctly maintained
* on the old CDir corpse; must call finish_old_fragment() when finished.
*/
void CDir::steal_dentry(CDentry *dn)
{
  dout(15) << __func__ << " " << *dn << dendl;

  items[dn->key()] = dn;

  dn->dir->items.erase(dn->key());
  if (dn->dir->items.empty())
    dn->dir->put(PIN_CHILD);

  // first stolen child pins this dirfrag
  if (get_num_any() == 0)
    get(PIN_CHILD);
  if (dn->get_linkage()->is_null()) {
    if (dn->last == CEPH_NOSNAP)
      num_head_null++;
    else
      num_snap_null++;
  } else if (dn->last == CEPH_NOSNAP) {
    num_head_items++;

    // head items contribute to fragstat/rstat; fold this dentry's
    // contribution into our fnode
    auto _fnode = _get_fnode();

    if (dn->get_linkage()->is_primary()) {
      CInode *in = dn->get_linkage()->get_inode();
      const auto& pi = in->get_projected_inode();
      if (in->is_dir()) {
	_fnode->fragstat.nsubdirs++;
	if (in->item_pop_lru.is_on_list())
	  pop_lru_subdirs.push_back(&in->item_pop_lru);
      } else {
	_fnode->fragstat.nfiles++;
      }
      _fnode->rstat.rbytes += pi->accounted_rstat.rbytes;
      _fnode->rstat.rfiles += pi->accounted_rstat.rfiles;
      _fnode->rstat.rsubdirs += pi->accounted_rstat.rsubdirs;
      _fnode->rstat.rsnaps += pi->accounted_rstat.rsnaps;
      if (pi->accounted_rstat.rctime > fnode->rstat.rctime)
	_fnode->rstat.rctime = pi->accounted_rstat.rctime;

      if (in->is_any_caps())
	adjust_num_inodes_with_caps(1);

      // move dirty inode rstat to new dirfrag
      if (in->is_dirty_rstat())
	dirty_rstat_inodes.push_back(&in->dirty_rstat_item);
    } else if (dn->get_linkage()->is_remote()) {
      if (dn->get_linkage()->get_remote_d_type() == DT_DIR)
	_fnode->fragstat.nsubdirs++;
      else
	_fnode->fragstat.nfiles++;
    }
  } else {
    num_snap_items++;
    if (dn->get_linkage()->is_primary()) {
      CInode *in = dn->get_linkage()->get_inode();
      if (in->is_dirty_rstat())
	dirty_rstat_inodes.push_back(&in->dirty_rstat_item);
    }
  }

  // transfer the dentry's nested auth pins from the old dirfrag to us
  {
    int dap = dn->get_num_dir_auth_pins();
    if (dap) {
      adjust_nested_auth_pins(dap, NULL);
      dn->dir->adjust_nested_auth_pins(-dap, NULL);
    }
  }

  if (dn->is_dirty()) {
    dirty_dentries.push_back(&dn->item_dir_dirty);
    num_dirty++;
  }

  dn->dir = this;
}
// Ready this (soon to be discarded) dirfrag for a split/merge: auth-pin
// it for the duration and hand all pending per-dentry waiters back to
// the caller so they can be redistributed to the new fragment(s).
void CDir::prepare_old_fragment(map<string_snap_t, MDSContext::vec >& dentry_waiters, bool replay)
{
  // auth_pin old fragment for duration so that any auth_pinning
  // during the dentry migration doesn't trigger side effects
  if (!replay && is_auth())
    auth_pin(this);

  if (!waiting_on_dentry.empty()) {
    for (const auto &p : waiting_on_dentry) {
      std::copy(p.second.begin(), p.second.end(),
                std::back_inserter(dentry_waiters[p.first]));
    }
    waiting_on_dentry.clear();
    put(PIN_DNWAITER);
  }
}
// Ready a freshly created dirfrag to receive stolen dentries: auth
// fragments are frozen (and marked complete, being empty) until the
// fragment operation finishes, then the frag is registered on the inode.
void CDir::prepare_new_fragment(bool replay)
{
  if (!replay && is_auth()) {
    _freeze_dir();
    mark_complete();
  }
  inode->add_dirfrag(this);
}
// Tear down an old dirfrag after its dentries have been stolen: collect
// remaining waiters, unfreeze, zero the counters and drop every pin so
// the object can be destroyed (only a STICKY ref may remain).
void CDir::finish_old_fragment(MDSContext::vec& waiters, bool replay)
{
  // take waiters _before_ unfreeze...
  if (!replay) {
    take_waiting(WAIT_ANY_MASK, waiters);
    if (is_auth()) {
      auth_unpin(this);  // pinned in prepare_old_fragment
      ceph_assert(is_frozen_dir());
      unfreeze_dir();
    }
  }

  ceph_assert(dir_auth_pins == 0);
  ceph_assert(auth_pins == 0);

  num_head_items = num_head_null = 0;
  num_snap_items = num_snap_null = 0;
  adjust_num_inodes_with_caps(-num_inodes_with_caps);

  // this mirrors init_fragment_pins()
  if (is_auth()) 
    clear_replica_map();
  if (is_dirty())
    mark_clean();
  if (state_test(STATE_IMPORTBOUND))
    put(PIN_IMPORTBOUND);
  if (state_test(STATE_EXPORTBOUND))
    put(PIN_EXPORTBOUND);
  if (is_subtree_root())
    put(PIN_SUBTREE);

  if (auth_pins > 0)
    put(PIN_AUTHPIN);

  ceph_assert(get_num_ref() == (state_test(STATE_STICKY) ? 1:0));
}
// Acquire the reference pins implied by the state/replica flags carried
// over onto a new fragment (mirrored by the puts in finish_old_fragment).
void CDir::init_fragment_pins()
{
  if (is_replicated())
    get(PIN_REPLICATED);
  if (state_test(STATE_DIRTY))
    get(PIN_DIRTY);
  if (state_test(STATE_EXPORTBOUND))
    get(PIN_EXPORTBOUND);
  if (state_test(STATE_IMPORTBOUND))
    get(PIN_IMPORTBOUND);
  if (is_subtree_root())
    get(PIN_SUBTREE);
}
// Split this dirfrag into 2^bits subfragments.  Dentries are
// redistributed by name hash, load vectors are scaled down evenly, and
// any outstanding fragstat/rstat differential is given to the first
// subfragment so totals stay consistent.  New frags are appended to
// *subs; waiters from the old frag are returned in 'waiters'.
void CDir::split(int bits, std::vector<CDir*>* subs, MDSContext::vec& waiters, bool replay)
{
  dout(10) << "split by " << bits << " bits on " << *this << dendl;

  ceph_assert(replay || is_complete() || !is_auth());

  frag_vec_t frags;
  frag.split(bits, frags);

  vector<CDir*> subfrags(1 << bits);
  
  double fac = 1.0 / (double)(1 << bits);  // for scaling load vecs

  version_t rstat_version = inode->get_projected_inode()->rstat.version;
  version_t dirstat_version = inode->get_projected_inode()->dirstat.version;

  // un-accounted deltas on this frag, to be handed to subfrags[0] below
  nest_info_t rstatdiff;
  frag_info_t fragstatdiff;
  if (fnode->accounted_rstat.version == rstat_version)
    rstatdiff.add_delta(fnode->accounted_rstat, fnode->rstat);
  if (fnode->accounted_fragstat.version == dirstat_version)
    fragstatdiff.add_delta(fnode->accounted_fragstat, fnode->fragstat);
  dout(10) << " rstatdiff " << rstatdiff << " fragstatdiff " << fragstatdiff << dendl;

  map<string_snap_t, MDSContext::vec > dentry_waiters;
  prepare_old_fragment(dentry_waiters, replay);

  // create subfrag dirs
  int n = 0;
  for (const auto& fg : frags) {
    CDir *f = new CDir(inode, fg, mdcache, is_auth());
    f->state_set(state & (MASK_STATE_FRAGMENT_KEPT | STATE_COMPLETE));
    f->get_replicas() = get_replicas();
    f->pop_me = pop_me;
    f->pop_me.scale(fac);

    // FIXME; this is an approximation
    f->pop_nested = pop_nested;
    f->pop_nested.scale(fac);
    f->pop_auth_subtree = pop_auth_subtree;
    f->pop_auth_subtree.scale(fac);
    f->pop_auth_subtree_nested = pop_auth_subtree_nested;
    f->pop_auth_subtree_nested.scale(fac);

    dout(10) << " subfrag " << fg << " " << *f << dendl;
    subfrags[n++] = f;
    subs->push_back(f);

    f->set_dir_auth(get_dir_auth());
    f->freeze_tree_state = freeze_tree_state;
    f->prepare_new_fragment(replay);
    f->init_fragment_pins();
  }
  
  // repartition dentries: subfrag index comes from the extra hash bits
  // between the old frag mask and the new one
  while (!items.empty()) {
    auto p = items.begin();
    
    CDentry *dn = p->second;
    frag_t subfrag = inode->pick_dirfrag(dn->get_name());
    int n = (subfrag.value() & (subfrag.mask() ^ frag.mask())) >> subfrag.mask_shift();
    dout(15) << " subfrag " << subfrag << " n=" << n << " for " << p->first << dendl;
    CDir *f = subfrags[n];
    f->steal_dentry(dn);
  }

  // redistribute per-dentry waiters the same way
  for (const auto &p : dentry_waiters) {
    frag_t subfrag = inode->pick_dirfrag(p.first.name);
    int n = (subfrag.value() & (subfrag.mask() ^ frag.mask())) >> subfrag.mask_shift();
    CDir *f = subfrags[n];

    if (f->waiting_on_dentry.empty())
      f->get(PIN_DNWAITER);
    std::copy(p.second.begin(), p.second.end(),
	      std::back_inserter(f->waiting_on_dentry[p.first]));
  }

  // FIXME: handle dirty old rstat

  // fix up new frag fragstats
  for (int i = 0; i < n; i++) {
    CDir *f = subfrags[i];
    auto _fnode = f->_get_fnode();
    _fnode->version = f->projected_version = get_version();
    _fnode->rstat.version = rstat_version;
    _fnode->accounted_rstat = _fnode->rstat;
    _fnode->fragstat.version = dirstat_version;
    _fnode->accounted_fragstat = _fnode->fragstat;
    dout(10) << " rstat " << _fnode->rstat << " fragstat " << _fnode->fragstat
	     << " on " << *f << dendl;

    if (i == 0) {
      // give any outstanding frag stat differential to first frag
      dout(10) << " giving rstatdiff " << rstatdiff << " fragstatdiff" << fragstatdiff
               << " to " << *subfrags[0] << dendl;
      _fnode->accounted_rstat.add(rstatdiff);
      _fnode->accounted_fragstat.add(fragstatdiff);
    }
  }

  finish_old_fragment(waiters, replay);
}
// Merge the given subfragments into this dirfrag: steal their dentries,
// union replica maps, take the maximum version, combine kept state
// flags, and fold each subfrag's un-accounted fragstat/rstat deltas
// into our accounted stats.  All subfrags must share dir_auth and
// freeze_tree_state; they are closed on the inode as they are consumed.
void CDir::merge(const std::vector<CDir*>& subs, MDSContext::vec& waiters, bool replay)
{
  dout(10) << "merge " << subs << dendl;

  ceph_assert(subs.size() > 0);

  set_dir_auth(subs.front()->get_dir_auth());
  freeze_tree_state = subs.front()->freeze_tree_state;

  for (const auto& dir : subs) {
    ceph_assert(get_dir_auth() == dir->get_dir_auth());
    ceph_assert(freeze_tree_state == dir->freeze_tree_state);
  }

  prepare_new_fragment(replay);

  auto _fnode = _get_fnode();

  nest_info_t rstatdiff;
  frag_info_t fragstatdiff;
  // initialize: add_delta() only has defined output for these when it
  // reports a change; don't propagate indeterminate values
  bool touched_mtime = false, touched_chattr = false;
  version_t rstat_version = inode->get_projected_inode()->rstat.version;
  version_t dirstat_version = inode->get_projected_inode()->dirstat.version;

  map<string_snap_t, MDSContext::vec > dentry_waiters;

  for (const auto& dir : subs) {
    dout(10) << " subfrag " << dir->get_frag() << " " << *dir << dendl;
    ceph_assert(!dir->is_auth() || dir->is_complete() || replay);

    // accumulate each subfrag's un-accounted stat deltas
    if (dir->get_fnode()->accounted_rstat.version == rstat_version)
      rstatdiff.add_delta(dir->get_fnode()->accounted_rstat, dir->get_fnode()->rstat);
    if (dir->get_fnode()->accounted_fragstat.version == dirstat_version)
      fragstatdiff.add_delta(dir->get_fnode()->accounted_fragstat, dir->get_fnode()->fragstat,
			     &touched_mtime, &touched_chattr);

    dir->prepare_old_fragment(dentry_waiters, replay);

    // steal dentries
    while (!dir->items.empty()) 
      steal_dentry(dir->items.begin()->second);
    
    // merge replica map: keep the highest nonce per replica
    for (const auto &p : dir->get_replicas()) {
      unsigned cur = get_replicas()[p.first];
      if (p.second > cur)
	get_replicas()[p.first] = p.second;
    }
    
    // merge version
    if (dir->get_version() > _fnode->version)
      _fnode->version = projected_version = dir->get_version();

    // merge state
    state_set(dir->get_state() & MASK_STATE_FRAGMENT_KEPT);

    dir->finish_old_fragment(waiters, replay);
    inode->close_dirfrag(dir->get_frag());
  }

  // adopt the collected per-dentry waiters
  if (!dentry_waiters.empty()) {
    get(PIN_DNWAITER);
    for (const auto &p : dentry_waiters) {
      std::copy(p.second.begin(), p.second.end(),
		std::back_inserter(waiting_on_dentry[p.first]));
    }
  }

  if (is_auth() && !replay)
    mark_complete();

  // FIXME: merge dirty old rstat

  // apply the accumulated diffs so accounted stats stay consistent
  _fnode->rstat.version = rstat_version;
  _fnode->accounted_rstat = _fnode->rstat;
  _fnode->accounted_rstat.add(rstatdiff);

  _fnode->fragstat.version = dirstat_version;
  _fnode->accounted_fragstat = _fnode->fragstat;
  _fnode->accounted_fragstat.add(fragstatdiff);

  init_fragment_pins();
}
// Bring the projected fnode's fragstat version in line with the inode's
// dirstat version and declare the current fragstat fully accounted.
void CDir::resync_accounted_fragstat()
{
  auto pf = _get_projected_fnode();
  const auto& pi = inode->get_projected_inode();

  if (pf->accounted_fragstat.version != pi->dirstat.version) {
    pf->fragstat.version = pi->dirstat.version;
    dout(10) << __func__ << " " << pf->accounted_fragstat << " -> " << pf->fragstat << dendl;
    pf->accounted_fragstat = pf->fragstat;
  }
}
/*
* resync rstat and accounted_rstat with inode
*/
void CDir::resync_accounted_rstat()
{
  auto pf = _get_projected_fnode();
  const auto& pi = inode->get_projected_inode();

  if (pf->accounted_rstat.version != pi->rstat.version) {
    pf->rstat.version = pi->rstat.version;
    dout(10) << __func__ << " " << pf->accounted_rstat << " -> " << pf->rstat << dendl;
    pf->accounted_rstat = pf->rstat;
    // old per-snap rstat deltas are superseded by the resync
    dirty_old_rstat.clear();
  }
}
// Project each non-frozen dirty-rstat child inode's stats into this
// dirfrag under the given mutation, and flag the dir with ASSIMRSTAT so
// assimilate_dirty_rstat_inodes_finish() will journal the result.
void CDir::assimilate_dirty_rstat_inodes(MutationRef& mut)
{
  dout(10) << __func__ << dendl;
  for (elist<CInode*>::iterator p = dirty_rstat_inodes.begin_use_current();
       !p.end(); ++p) {
    CInode *in = *p;
    ceph_assert(in->is_auth());
    if (in->is_frozen())
      continue;

    mut->auth_pin(in);

    auto pi = in->project_inode(mut);
    pi.inode->version = in->pre_dirty();

    mdcache->project_rstat_inode_to_frag(mut, in, this, 0, 0, nullptr);
  }
  state_set(STATE_ASSIMRSTAT);
  dout(10) << __func__ << " done" << dendl;
}
// Second half of rstat assimilation: journal each non-frozen child that
// was processed, clear its dirty-rstat flag, and if anything remains
// dirty mark the inode's nestlock scatterlock updated.  No-op unless
// assimilate_dirty_rstat_inodes() ran first.
void CDir::assimilate_dirty_rstat_inodes_finish(EMetaBlob *blob)
{
  if (!state_test(STATE_ASSIMRSTAT))
    return;
  state_clear(STATE_ASSIMRSTAT);
  dout(10) << __func__ << dendl;
  elist<CInode*>::iterator p = dirty_rstat_inodes.begin_use_current();
  while (!p.end()) {
    CInode *in = *p;
    ++p;

    if (in->is_frozen())
      continue;

    CDentry *dn = in->get_projected_parent_dn();

    in->clear_dirty_rstat();
    blob->add_primary_dentry(dn, in, true);
  }

  if (!dirty_rstat_inodes.empty())
    mdcache->mds->locker->mark_updated_scatterlock(&inode->nestlock);
}
/****************************************
* WAITING
*/
void CDir::add_dentry_waiter(std::string_view dname, snapid_t snapid, MDSContext *c)
{
  // The first waiter on any dentry takes a pin on the dirfrag; it is
  // released when the last waiter is drained.
  const bool was_empty = waiting_on_dentry.empty();
  if (was_empty)
    get(PIN_DNWAITER);
  string_snap_t key(dname, snapid);
  waiting_on_dentry[key].push_back(c);
  dout(10) << __func__ << " dentry " << dname
	   << " snap " << snapid
	   << " " << c << " on " << *this << dendl;
}
// Move all waiters registered for 'dname' with snapids in [first,last]
// into 'ls', dropping the PIN_DNWAITER pin if no waiters remain.
void CDir::take_dentry_waiting(std::string_view dname, snapid_t first, snapid_t last,
			       MDSContext::vec& ls)
{
  if (waiting_on_dentry.empty())
    return;
  
  // scan the (name, snapid) range [lb, ub] inclusive
  string_snap_t lb(dname, first);
  string_snap_t ub(dname, last);
  auto it = waiting_on_dentry.lower_bound(lb);
  while (it != waiting_on_dentry.end() &&
	 !(ub < it->first)) {
    dout(10) << __func__ << " " << dname
	     << " [" << first << "," << last << "] found waiter on snap "
	     << it->first.snapid
	     << " on " << *this << dendl;
    std::copy(it->second.begin(), it->second.end(), std::back_inserter(ls));
    waiting_on_dentry.erase(it++);
  }

  if (waiting_on_dentry.empty())
    put(PIN_DNWAITER);
}
// Register a generic waiter.  Waits tagged ATSUBTREEROOT are forwarded
// up the hierarchy until they land on an actual subtree root.
void CDir::add_waiter(uint64_t tag, MDSContext *c) 
{
  // hierarchical?

  // at subtree root?
  if (tag & WAIT_ATSUBTREEROOT) {
    if (!is_subtree_root()) {
      // try parent
      dout(10) << "add_waiter " << std::hex << tag << std::dec << " " << c << " should be ATSUBTREEROOT, " << *this << " is not root, trying parent" << dendl;
      inode->parent->dir->add_waiter(tag, c);
      return;
    }
  }

  // WAIT_CREATED only makes sense while the dirfrag is being created
  ceph_assert(!(tag & WAIT_CREATED) || state_test(STATE_CREATING));

  MDSCacheObject::add_waiter(tag, c);
}
/* NOTE: this checks dentry waiters too */
/* NOTE: this checks dentry waiters too */
// Drain waiters matching 'mask' into 'ls'; WAIT_DENTRY additionally
// drains every per-dentry waiter and drops the PIN_DNWAITER pin.
void CDir::take_waiting(uint64_t mask, MDSContext::vec& ls)
{
  if ((mask & WAIT_DENTRY) && !waiting_on_dentry.empty()) {
    // take all dentry waiters
    for (const auto &p : waiting_on_dentry) {
      dout(10) << "take_waiting dentry " << p.first.name
	       << " snap " << p.first.snapid << " on " << *this << dendl;
      std::copy(p.second.begin(), p.second.end(), std::back_inserter(ls));
    }
    waiting_on_dentry.clear();
    put(PIN_DNWAITER);
  }
  
  // waiting
  MDSCacheObject::take_waiting(mask, ls);
}
void CDir::finish_waiting(uint64_t mask, int result) 
{
  dout(11) << __func__ << " mask " << hex << mask << dec << " result " << result << " on " << *this << dendl;

  // Drain everything matching the mask, then complete: errors are
  // delivered synchronously, successes are queued with the MDS.
  MDSContext::vec waiters;
  take_waiting(mask, waiters);
  if (result < 0) {
    finish_contexts(g_ceph_context, waiters, result);
  } else {
    mdcache->mds->queue_waiters(waiters);
  }
}
// dirty/clean
// Push a new projected fnode (a copy of the current projection) and
// return it for modification.  If 'mut' already projected this dir, the
// existing back entry is returned instead.  Pending scrub stamps are
// folded into the new projection.
CDir::fnode_ptr CDir::project_fnode(const MutationRef& mut)
{
  ceph_assert(get_version() != 0);

  if (mut && mut->is_projected(this))
    return std::const_pointer_cast<fnode_t>(projected_fnode.back());

  auto pf = allocate_fnode(*get_projected_fnode());

  if (scrub_infop && scrub_infop->last_scrub_dirty) {
    pf->localized_scrub_stamp = scrub_infop->last_local.time;
    pf->localized_scrub_version = scrub_infop->last_local.version;
    pf->recursive_scrub_stamp = scrub_infop->last_recursive.time;
    pf->recursive_scrub_version = scrub_infop->last_recursive.version;
    scrub_infop->last_scrub_dirty = false;
    scrub_maybe_delete_info();
  }

  projected_fnode.emplace_back(pf);
  if (mut)
    mut->add_projected_node(this);
  dout(10) << __func__ <<  " " << pf.get() << dendl;
  return pf;
}
// Commit the oldest projected fnode: make it the live fnode, detach it
// from the mutation, and mark the dirfrag dirty in log segment 'ls'.
void CDir::pop_and_dirty_projected_fnode(LogSegment *ls, const MutationRef& mut)
{
  ceph_assert(!projected_fnode.empty());
  auto pf = std::move(projected_fnode.front());
  dout(15) << __func__ << " " << pf.get() << " v" << pf->version << dendl;

  projected_fnode.pop_front();
  if (mut)
    mut->remove_projected_node(this);

  reset_fnode(std::move(pf));
  _mark_dirty(ls);
}
version_t CDir::pre_dirty(version_t min)
{
  // Advance the projected version past both its current value and the
  // caller-supplied floor, then return the reserved version.
  if (projected_version < min)
    projected_version = min;
  projected_version++;
  dout(10) << __func__ << " " << projected_version << dendl;
  return projected_version;
}
// Mark this auth dirfrag dirty in log segment 'ls'.  If 'pv' is given,
// sanity-check that it lies between the committed version and the
// oldest projected fnode's version.
void CDir::mark_dirty(LogSegment *ls, version_t pv)
{
  ceph_assert(is_auth());

  if (pv) {
    ceph_assert(get_version() < pv);
    ceph_assert(pv <= projected_version);
    ceph_assert(!projected_fnode.empty() &&
		pv <= projected_fnode.front()->version);
  }

  _mark_dirty(ls);
}
// Internal dirty transition: set the dirty flag (taking PIN_DIRTY via
// _set_dirty_flag) and register on the log segment's dirty list; a
// never-committed dirfrag is also tracked on the new_dirfrags list so
// the journal cannot trim mentions of it prematurely.
void CDir::_mark_dirty(LogSegment *ls)
{
  if (!state_test(STATE_DIRTY)) {
    dout(10) << __func__ << " (was clean) " << *this << " version " << get_version() << dendl;
    _set_dirty_flag();
    ceph_assert(ls);
  } else {
    dout(10) << __func__ << " (already dirty) " << *this << " version " << get_version() << dendl;
  }
  if (ls) {
    ls->dirty_dirfrags.push_back(&item_dirty);

    // if i've never committed, i need to be before _any_ mention of me is trimmed from the journal.
    if (committed_version == 0 && !item_new.is_on_list())
      ls->new_dirfrags.push_back(&item_new);
  }
}
// Record this dirfrag as newly created in log segment 'ls', end the
// CREATING state, and wake anyone waiting on WAIT_CREATED.
void CDir::mark_new(LogSegment *ls)
{
  ls->new_dirfrags.push_back(&item_new);
  state_clear(STATE_CREATING);

  MDSContext::vec waiters;
  take_waiting(CDir::WAIT_CREATED, waiters);
  mdcache->mds->queue_waiters(waiters);
}
// Install a freshly loaded fnode on an auth dirfrag with no pending
// projections or in-flight commit, synchronizing all version counters
// to it.  Resolves a rejoin-undefined dirfrag if applicable.
void CDir::set_fresh_fnode(fnode_const_ptr&& ptr) {
  ceph_assert(inode->is_auth());
  ceph_assert(!is_projected());
  ceph_assert(!state_test(STATE_COMMITTING));
  reset_fnode(std::move(ptr));
  projected_version = committing_version = committed_version = get_version();

  if (state_test(STATE_REJOINUNDEF)) {
    ceph_assert(mdcache->mds->is_rejoin());
    state_clear(STATE_REJOINUNDEF);
    mdcache->opened_undef_dirfrag(this);
  }
}
void CDir::mark_clean()
{
  dout(10) << __func__ << " " << *this << " version " << get_version() << dendl;
  if (!state_test(STATE_DIRTY))
    return;  // already clean

  // Drop off the log segment's dirty/new lists and release the pin
  // taken when the dirfrag became dirty.
  item_dirty.remove_myself();
  item_new.remove_myself();

  state_clear(STATE_DIRTY);
  put(PIN_DIRTY);
}
// caller should hold auth pin of this
// Force this dirfrag dirty in the current log segment by bumping its
// fnode version.  No-op if already dirty or a projection will dirty it.
void CDir::log_mark_dirty()
{
  if (is_dirty() || projected_version > get_version())
    return; // noop if it is already dirty or will be dirty

  // copy-on-write the fnode with a freshly reserved version
  auto _fnode = allocate_fnode(*get_fnode());
  _fnode->version = pre_dirty();
  reset_fnode(std::move(_fnode));
  mark_dirty(mdcache->mds->mdlog->get_current_segment());
}
void CDir::mark_complete() {
  // A complete dirfrag answers negative lookups directly, so the bloom
  // filter is no longer needed.
  state_set(STATE_COMPLETE);
  bloom.reset();
}
// First reference on the dirfrag pins the owning inode.
void CDir::first_get()
{
  inode->get(CInode::PIN_DIRFRAG);
}
// Last reference dropped: release the pin on the owning inode.
void CDir::last_put()
{
  inode->put(CInode::PIN_DIRFRAG);
}
/******************************************************************************
* FETCH and COMMIT
*/
// -----------------------
// FETCH
// Fetch this dirfrag's contents from the omap object.  If 'dname' is
// given (and prefetch is disabled) only that single head dentry is
// fetched via fetch_keys(); otherwise a full fetch is started and 'c'
// waits for completion.  Unfreezable dirs queue the context unless
// 'ignore_authpinnability' is set.
void CDir::fetch(std::string_view dname, snapid_t last,
		 MDSContext *c, bool ignore_authpinnability)
{
  if (dname.empty())
    dout(10) << "fetch on " << *this << dendl;
  else
    dout(10) << "fetch key(" << dname << ", '" << last << "')" << dendl;
  
  ceph_assert(is_auth());
  ceph_assert(!is_complete());

  if (!ignore_authpinnability && !can_auth_pin()) {
    if (c) {
      dout(7) << "fetch waiting for authpinnable" << dendl;
      add_waiter(WAIT_UNFREEZE, c);
    } else
      dout(7) << "fetch not authpinnable and no context" << dendl;
    return;
  }

  // unlinked directory inode shouldn't have any entry
  if (CDir *pdir = get_parent_dir();
      pdir && pdir->inode->is_stray() && !inode->snaprealm) {
    dout(7) << "fetch dirfrag for unlinked directory, mark complete" << dendl;
    if (get_version() == 0) {
      // synthesize an empty fnode rather than reading the object
      auto _fnode = allocate_fnode();
      _fnode->version = 1;
      set_fresh_fnode(std::move(_fnode));
    }
    mark_complete();

    if (c)
      mdcache->mds->queue_waiter(c);
    return;
  }

  // FIXME: to fetch a snap dentry, we need to get omap key in range
  // [(name, last), (name, CEPH_NOSNAP))
  if (!dname.empty() && last == CEPH_NOSNAP && !g_conf().get_val<bool>("mds_dir_prefetch")) {
    dentry_key_t key(last, dname, inode->hash_dentry_name(dname));
    fetch_keys({key}, c);
    return;
  }

  if (c)
    add_waiter(WAIT_COMPLETE, c);
  
  // already fetching?
  if (state_test(CDir::STATE_FETCHING)) {
    dout(7) << "already fetching; waiting" << dendl;
    return;
  }

  auth_pin(this);
  state_set(CDir::STATE_FETCHING);

  _omap_fetch(nullptr, nullptr);

  if (mdcache->mds->logger)
    mdcache->mds->logger->inc(l_mds_dir_fetch_complete);
  mdcache->mds->balancer->hit_dir(this, META_POP_FETCH);
}
// Fetch only the given head dentry keys from the omap object.  Each key
// registers a dentry waiter; keys already being fetched are skipped
// (the context attaches to the first such in-flight key).  Falls back
// to a full fetch for unlinked stray directories, and waits on a
// full fetch already in progress.
void CDir::fetch_keys(const std::vector<dentry_key_t>& keys, MDSContext *c)
{
  dout(10) << __func__ << " " << keys.size() << " keys on " << *this << dendl;
  ceph_assert(is_auth());
  ceph_assert(!is_complete());

  if (CDir *pdir = get_parent_dir();
      pdir && pdir->inode->is_stray() && !inode->snaprealm) {
    fetch(c, true);
    return;
  }

  MDSContext::vec_alloc<mempool::mds_co::pool_allocator> *fallback_waiting = nullptr;
  std::set<std::string> str_keys;
  for (auto& key : keys) {
    ceph_assert(key.snapid == CEPH_NOSNAP);
    if (waiting_on_dentry.empty())
      get(PIN_DNWAITER);
    // emplace fails if a fetch for this key is already in flight
    auto em = waiting_on_dentry.emplace(std::piecewise_construct,
					std::forward_as_tuple(key.name, key.snapid),
					std::forward_as_tuple());
    if (!em.second) {
      if (!fallback_waiting)
	fallback_waiting = &em.first->second;
      continue;
    }

    if (c) {
      em.first->second.push_back(c);
      c = nullptr;
    }
    string str;
    key.encode(str);
    str_keys.emplace(std::move(str));
  }

  if (str_keys.empty()) {
    // everything is already being fetched; piggyback on the first key
    if (c && fallback_waiting) {
      fallback_waiting->push_back(c);
      c = nullptr;
    }

    if (get_version() > 0) {
      dout(7) << "fetch keys, all are already being fetched" << dendl;
      ceph_assert(!c);
      return;
    }
  }

  if (state_test(CDir::STATE_FETCHING)) {
    dout(7) << "fetch keys, waiting for full fetch" << dendl;
    if (c)
      add_waiter(WAIT_COMPLETE, c);
    return;
  }

  auth_pin(this);

  _omap_fetch(&str_keys, c);

  if (mdcache->mds->logger)
    mdcache->mds->logger->inc(l_mds_dir_fetch_keys);
  mdcache->mds->balancer->hit_dir(this, META_POP_FETCH);
}
class C_IO_Dir_OMAP_FetchedMore : public CDirIOContext {
MDSContext *fin;
public:
const version_t omap_version;
bufferlist hdrbl;
bool more = false;
map<string, bufferlist> omap; ///< carry-over from before
map<string, bufferlist> omap_more; ///< new batch
int ret;
C_IO_Dir_OMAP_FetchedMore(CDir *d, version_t v, MDSContext *f) :
CDirIOContext(d), fin(f), omap_version(v), ret(0) { }
void finish(int r) {
if (omap_version < dir->get_committed_version()) {
omap.clear();
dir->_omap_fetch(nullptr, fin);
return;
}
// merge results
if (omap.empty()) {
omap.swap(omap_more);
} else {
omap.insert(omap_more.begin(), omap_more.end());
}
if (more) {
dir->_omap_fetch_more(omap_version, hdrbl, omap, fin);
} else {
dir->_omap_fetched(hdrbl, omap, true, {}, r);
if (fin)
fin->complete(r);
}
}
void print(ostream& out) const override {
out << "dirfrag_fetch_more(" << dir->dirfrag() << ")";
}
};
/**
 * Completion for the initial dirfrag omap read.  Verifies the optional
 * directory backtrace, then either chains into _omap_fetch_more() when
 * the result set was truncated, or finalizes via _omap_fetched().
 */
class C_IO_Dir_OMAP_Fetched : public CDirIOContext {
  MDSContext *fin;
public:
  const version_t omap_version;  ///< committing version when the fetch started
  bool complete = true;          ///< false for a keyed (partial) fetch
  std::set<string> keys;         ///< requested keys (keyed fetch only)
  bufferlist hdrbl;              ///< fnode header blob
  bool more = false;             ///< result set truncated; need another read
  map<string, bufferlist> omap;
  bufferlist btbl;               ///< "parent" backtrace xattr (verification only)
  int ret1, ret2, ret3;          ///< per-op rc: header, vals, backtrace
  C_IO_Dir_OMAP_Fetched(CDir *d, MDSContext *f) :
    CDirIOContext(d), fin(f),
    omap_version(d->get_committing_version()),
    ret1(0), ret2(0), ret3(0) { }
  void finish(int r) override {
    // check the correctness of backtrace
    // (ret3 == -CEPHFS_ECANCELED means the check was skipped)
    if (r >= 0 && ret3 != -CEPHFS_ECANCELED)
      dir->inode->verify_diri_backtrace(btbl, ret3);
    if (r >= 0) r = ret1;
    if (r >= 0) r = ret2;
    if (more) {
      if (omap_version < dir->get_committed_version()) {
	// committed under us; our results are stale, restart
	dir->_omap_fetch(nullptr, fin);
      } else {
	dir->_omap_fetch_more(omap_version, hdrbl, omap, fin);
      }
      return;
    }
    dir->_omap_fetched(hdrbl, omap, complete, keys, r);
    if (fin)
      fin->complete(r);
  }
  void print(ostream& out) const override {
    out << "dirfrag_fetch(" << dir->dirfrag() << ")";
  }
};
/**
 * Issue the object read that loads this dirfrag from its omap object.
 *
 * @param keys if non-null, fetch only these encoded dentry keys (keyed
 *             fetch); otherwise start a full fetch, reading up to
 *             mds_dir_keys_per_op values (continued by _omap_fetch_more
 *             if truncated)
 * @param c    waiter handed to the completion; must be null for a full
 *             fetch (those waiters use WAIT_COMPLETE instead)
 */
void CDir::_omap_fetch(std::set<string> *keys, MDSContext *c)
{
  C_IO_Dir_OMAP_Fetched *fin = new C_IO_Dir_OMAP_Fetched(this, c);
  object_t oid = get_ondisk_object();
  object_locator_t oloc(mdcache->mds->mdsmap->get_metadata_pool());
  ObjectOperation rd;
  rd.omap_get_header(&fin->hdrbl, &fin->ret1);
  if (keys) {
    fin->complete = false;
    fin->keys.swap(*keys);
    rd.omap_get_vals_by_keys(fin->keys, &fin->omap, &fin->ret2);
  } else {
    ceph_assert(!c);
    rd.omap_get_vals("", "", g_conf()->mds_dir_keys_per_op,
		     &fin->omap, &fin->more, &fin->ret2);
  }
  // check the correctness of backtrace
  // (only for the root fragment, and only when verification is enabled)
  if (g_conf()->mds_verify_backtrace > 0 && frag == frag_t()) {
    rd.getxattr("parent", &fin->btbl, &fin->ret3);
    rd.set_last_op_flags(CEPH_OSD_OP_FLAG_FAILOK);
  } else {
    fin->ret3 = -CEPHFS_ECANCELED;  // marker: backtrace check skipped
  }
  mdcache->mds->objecter->read(oid, oloc, rd, CEPH_NOSNAP, NULL, 0,
			       new C_OnFinisher(fin, mdcache->mds->finisher));
}
/**
 * Continue a truncated full fetch: read the next batch of omap values,
 * resuming after the last key we already have.
 *
 * @param omap_version committing version captured when the fetch began
 * @param hdrbl        fnode header from the first read (carried through)
 * @param omap         keys/values accumulated so far (must be non-empty)
 * @param c            waiter handed to the completion chain
 */
void CDir::_omap_fetch_more(version_t omap_version, bufferlist& hdrbl,
			    map<string, bufferlist>& omap, MDSContext *c)
{
  // we have more omap keys to fetch!
  object_t oid = get_ondisk_object();
  object_locator_t oloc(mdcache->mds->mdsmap->get_metadata_pool());
  auto fin = new C_IO_Dir_OMAP_FetchedMore(this, omap_version, c);
  fin->hdrbl = std::move(hdrbl);
  fin->omap.swap(omap);
  ObjectOperation rd;
  // resume strictly after the highest key fetched so far
  rd.omap_get_vals(fin->omap.rbegin()->first,
		   "", /* filter prefix */
		   g_conf()->mds_dir_keys_per_op,
		   &fin->omap_more,
		   &fin->more,
		   &fin->ret);
  mdcache->mds->objecter->read(oid, oloc, rd, CEPH_NOSNAP, NULL, 0,
			       new C_OnFinisher(fin, mdcache->mds->finisher));
}
/**
 * Decode one dentry from its omap value and instantiate it in cache.
 *
 * The value layout is: snapid first, one-byte type marker, then a
 * type-specific payload ('L'/'l' = remote/hard link, 'I'/'i' = inlined
 * inode; lowercase markers carry a versioned envelope with an
 * alternate_name).
 *
 * @param key   raw omap key (recorded in stale_items for stale entries)
 * @param dname dentry name decoded from the key
 * @param last  last valid snapid of the dentry
 * @param bl    omap value to decode
 * @param pos   position within the omap batch (error reporting only)
 * @param snaps realm snap set for stale-snap detection, or nullptr
 * @param rand_threshold ephemeral-random pin threshold for new inodes
 * @param force_dirty set to true when a stale entry must be purged by a
 *                    later commit
 * @return the (possibly pre-existing) CDentry, or nullptr if the entry
 *         was stale and not cached
 * @throws buffer::malformed_input on an unrecognized type marker
 */
CDentry *CDir::_load_dentry(
    std::string_view key,
    std::string_view dname,
    const snapid_t last,
    bufferlist &bl,
    const int pos,
    const std::set<snapid_t> *snaps,
    double rand_threshold,
    bool *force_dirty)
{
  auto q = bl.cbegin();

  snapid_t first;
  decode(first, q);

  // marker
  char type;
  decode(type, q);

  dout(20) << "_fetched pos " << pos << " marker '" << type << "' dname '" << dname
	   << " [" << first << "," << last << "]"
	   << dendl;

  // a snap dentry whose [first,last] range no longer intersects any live
  // snapshot is stale and should be dropped from the object
  bool stale = false;
  if (snaps && last != CEPH_NOSNAP) {
    set<snapid_t>::const_iterator p = snaps->lower_bound(first);
    if (p == snaps->end() || *p > last) {
      dout(10) << " skipping stale dentry on [" << first << "," << last << "]" << dendl;
      stale = true;
    }
  }

  /*
   * look for existing dentry for _last_ snap, because unlink +
   * create may leave a "hole" (epochs during which the dentry
   * doesn't exist) but for which no explicit negative dentry is in
   * the cache.
   */
  CDentry *dn;
  if (stale)
    dn = lookup_exact_snap(dname, last);
  else
    dn = lookup(dname, last);

  if (type == 'L' || type == 'l') {
    // hard link
    inodeno_t ino;
    unsigned char d_type;
    mempool::mds_co::string alternate_name;

    CDentry::decode_remote(type, ino, d_type, alternate_name, q);

    if (stale) {
      if (!dn) {
	// remember it so the next commit removes it from the object
	stale_items.insert(mempool::mds_co::string(key));
	*force_dirty = true;
      }
      return dn;
    }

    if (dn) {
      CDentry::linkage_t *dnl = dn->get_linkage();
      dout(12) << "_fetched had " << (dnl->is_null() ? "NEG" : "") << " dentry " << *dn << dendl;
      if (committed_version == 0 &&
	  dnl->is_remote() &&
	  dn->is_dirty() &&
	  ino == dnl->get_remote_ino() &&
	  d_type == dnl->get_remote_d_type() &&
	  alternate_name == dn->get_alternate_name()) {
	// see comment below
	dout(10) << "_fetched had underwater dentry " << *dn << ", marking clean" << dendl;
	dn->mark_clean();
      }
    } else {
      // (remote) link
      dn = add_remote_dentry(dname, ino, d_type, std::move(alternate_name), first, last);

      // link to inode?
      CInode *in = mdcache->get_inode(ino);   // we may or may not have it.
      if (in) {
	dn->link_remote(dn->get_linkage(), in);
	dout(12) << "_fetched got remote link " << ino << " which we have " << *in << dendl;
      } else {
	dout(12) << "_fetched got remote link " << ino << " (don't have it)" << dendl;
      }
    }
  }
  else if (type == 'I' || type == 'i') {
    InodeStore inode_data;
    mempool::mds_co::string alternate_name;
    // inode
    // Load inode data before looking up or constructing CInode
    if (type == 'i') {
      DECODE_START(2, q);
      if (struct_v >= 2) {
	decode(alternate_name, q);
      }
      inode_data.decode(q);
      DECODE_FINISH(q);
    } else {
      inode_data.decode_bare(q);
    }

    if (stale) {
      if (!dn) {
	// remember it so the next commit removes it from the object
	stale_items.insert(mempool::mds_co::string(key));
	*force_dirty = true;
      }
      return dn;
    }

    bool undef_inode = false;
    if (dn) {
      CDentry::linkage_t *dnl = dn->get_linkage();
      dout(12) << "_fetched had " << (dnl->is_null() ? "NEG" : "") << " dentry " << *dn << dendl;

      if (dnl->is_primary()) {
	CInode *in = dnl->get_inode();
	if (in->state_test(CInode::STATE_REJOINUNDEF)) {
	  // placeholder inode from rejoin; refill it from disk below
	  undef_inode = true;
	} else if (committed_version == 0 &&
		   dn->is_dirty() &&
		   inode_data.inode->ino == in->ino() &&
		   inode_data.inode->version == in->get_version()) {
	  /* clean underwater item?
	   * Underwater item is something that is dirty in our cache from
	   * journal replay, but was previously flushed to disk before the
	   * mds failed.
	   *
	   * We only do this is committed_version == 0. that implies either
	   * - this is a fetch after from a clean/empty CDir is created
	   *   (and has no effect, since the dn won't exist); or
	   * - this is a fetch after _recovery_, which is what we're worried
	   *   about.  Items that are marked dirty from the journal should be
	   *   marked clean if they appear on disk.
	   */
	  dout(10) << "_fetched had underwater dentry " << *dn << ", marking clean" << dendl;
	  dn->mark_clean();

	  dout(10) << "_fetched had underwater inode " << *dnl->get_inode() << ", marking clean" << dendl;
	  in->mark_clean();
	}
      }
    }

    if (!dn || undef_inode) {
      // add inode
      CInode *in = mdcache->get_inode(inode_data.inode->ino, last);
      if (!in || undef_inode) {
	if (undef_inode && in)
	  in->first = first;
	else
	  in = new CInode(mdcache, true, first, last);

	in->reset_inode(std::move(inode_data.inode));
	in->reset_xattrs(std::move(inode_data.xattrs));
	// symlink?
	if (in->is_symlink())
	  in->symlink = inode_data.symlink;

	in->dirfragtree.swap(inode_data.dirfragtree);
	in->reset_old_inodes(std::move(inode_data.old_inodes));
	if (in->is_any_old_inodes()) {
	  // 'first' can never precede the newest old_inode's range
	  snapid_t min_first = in->get_old_inodes()->rbegin()->first + 1;
	  if (min_first > in->first)
	    in->first = min_first;
	}

	in->oldest_snap = inode_data.oldest_snap;
	in->decode_snap_blob(inode_data.snap_blob);
	if (snaps && !in->snaprealm)
	  in->purge_stale_snap_data(*snaps);

	if (!undef_inode) {
	  mdcache->add_inode(in); // add
	  mdcache->insert_taken_inos(in->ino());
	  dn = add_primary_dentry(dname, in, std::move(alternate_name), first, last); // link
	}
	dout(12) << "_fetched  got " << *dn << " " << *in << dendl;

	if (in->get_inode()->is_dirty_rstat())
	  in->mark_dirty_rstat();

	in->maybe_ephemeral_rand(rand_threshold);
	//in->hack_accessed = false;
	//in->hack_load_stamp = ceph_clock_now();
	//num_new_inodes_loaded++;
      } else if (g_conf().get_val<bool>("mds_hack_allow_loading_invalid_metadata")) {
	dout(20) << "hack: adding duplicate dentry for " << *in << dendl;
	dn = add_primary_dentry(dname, in, std::move(alternate_name), first, last);
      } else {
	// duplicate inode: on-disk metadata is inconsistent
	dout(0) << "_fetched  badness: got (but i already had) " << *in
		<< " mode " << in->get_inode()->mode
		<< " mtime " << in->get_inode()->mtime << dendl;
	string dirpath, inopath;
	this->inode->make_path_string(dirpath);
	in->make_path_string(inopath);
	mdcache->mds->clog->error() << "loaded dup inode " << inode_data.inode->ino
	  << " [" << first << "," << last << "] v" << inode_data.inode->version
	  << " at " << dirpath << "/" << dname
	  << ", but inode " << in->vino() << " v" << in->get_version()
	  << " already exists at " << inopath;
	return dn;
      }
    }
  } else {
    CachedStackStringStream css;
    *css << "Invalid tag char '" << type << "' pos " << pos;
    throw buffer::malformed_input(css->str());
  }

  return dn;
}
/**
 * Process the results of an omap fetch: decode the fnode header, load
 * every returned dentry into cache, purge stale snap dentries, add
 * negative dentries for keys that came back missing (keyed fetch), and
 * wake the relevant waiters.
 *
 * @param hdrbl    fnode header blob (empty ⇒ object missing ⇒ damage)
 * @param omap     fetched key/value pairs
 * @param complete true for a full fetch (we may mark the frag complete)
 * @param keys     the keys that were requested (keyed fetch only)
 * @param r        read result; only 0/-ENOENT/-ENODATA are tolerated
 */
void CDir::_omap_fetched(bufferlist& hdrbl, map<string, bufferlist>& omap,
			 bool complete, const std::set<string>& keys, int r)
{
  LogChannelRef clog = mdcache->mds->clog;
  dout(10) << "_fetched header " << hdrbl.length() << " bytes "
	   << omap.size() << " keys for " << *this << dendl;

  ceph_assert(r == 0 || r == -CEPHFS_ENOENT || r == -CEPHFS_ENODATA);
  ceph_assert(is_auth());
  ceph_assert(!is_frozen());

  if (hdrbl.length() == 0) {
    dout(0) << "_fetched missing object for " << *this << dendl;

    clog->error() << "dir " << dirfrag() << " object missing on disk; some "
                     "files may be lost (" << get_path() << ")";

    go_bad(complete);
    return;
  }

  fnode_t got_fnode;
  {
    auto p = hdrbl.cbegin();
    try {
      decode(got_fnode, p);
    } catch (const buffer::error &err) {
      derr << "Corrupt fnode in dirfrag " << dirfrag()
	   << ": " << err.what() << dendl;
      clog->warn() << "Corrupt fnode header in " << dirfrag() << ": "
		   << err.what() << " (" << get_path() << ")";
      go_bad(complete);
      return;
    }
    if (!p.end()) {
      // trailing garbage after the fnode is also treated as damage
      clog->warn() << "header buffer of dir " << dirfrag() << " has "
		   << hdrbl.length() - p.get_off() << " extra bytes ("
                   << get_path() << ")";
      go_bad(complete);
      return;
    }
  }

  dout(10) << "_fetched version " << got_fnode.version << dendl;

  // take the loaded fnode?
  // only if we are a fresh CDir* with no prior state.
  if (get_version() == 0) {
    set_fresh_fnode(allocate_fnode(got_fnode));
  }

  list<CInode*> undef_inodes;

  // purge stale snaps?
  // only if we have past snaps to purge to :)
  bool force_dirty = false;
  const set<snapid_t> *snaps = NULL;
  SnapRealm *realm = inode->find_snaprealm();
  if (fnode->snap_purged_thru < realm->get_last_destroyed()) {
    snaps = &realm->get_snaps();
    dout(10) << " snap_purged_thru " << fnode->snap_purged_thru
	     << " < " << realm->get_last_destroyed()
	     << ", snap purge based on " << *snaps << dendl;
    if (get_num_snap_items() == 0) {
      // no cached snap dentries to trim; just advance the watermark
      const_cast<snapid_t&>(fnode->snap_purged_thru) = realm->get_last_destroyed();
      force_dirty = true;
    }
  }

  // NOTE: the omap map and the waiting_on_dentry map sort differently
  // (raw key string vs string_snap_t), hence the reverse-iterator dance
  // below to walk them in lockstep.
  MDSContext::vec finished;
  std::vector<string_snap_t> null_keys;

  auto k_it = keys.rbegin();
  auto w_it = waiting_on_dentry.rbegin();
  std::string_view last_name = "";

  // complete any waiters registered for 'key'; returns true if there were any
  auto proc_waiters = [&](const string_snap_t& key) {
    bool touch = false;
    if (last_name < key.name) {
      // string_snap_t and key string are not in the same order
      w_it = decltype(w_it)(waiting_on_dentry.upper_bound(key));
    }
    while (w_it != waiting_on_dentry.rend()) {
      int cmp = w_it->first.compare(key);
      if (cmp < 0)
	break;
      if (cmp == 0) {
	touch = true;
	std::copy(w_it->second.begin(), w_it->second.end(),
		  std::back_inserter(finished));
	waiting_on_dentry.erase(std::next(w_it).base());
	if (waiting_on_dentry.empty())
	  put(PIN_DNWAITER);
	break;
      }
      ++w_it;
    }
    return touch;
  };
  // for a keyed fetch: any requested key greater than 'str_key' that was
  // not returned is a negative (null) dentry; record it and wake waiters
  auto proc_nulls_and_waiters = [&](const string& str_key, const string_snap_t& key) {
    bool touch = false;
    int count = 0;

    while (k_it != keys.rend()) {
      int cmp = k_it->compare(str_key);
      if (cmp < 0)
	break;
      if (cmp == 0) {
	touch = true;
	proc_waiters(key);
	++k_it;
	break;
      }
      string_snap_t n_key;
      dentry_key_t::decode_helper(*k_it, n_key.name, n_key.snapid);
      ceph_assert(n_key.snapid == CEPH_NOSNAP);
      proc_waiters(n_key);
      last_name = std::string_view(k_it->c_str(), n_key.name.length());
      null_keys.emplace_back(std::move(n_key));
      ++k_it;
      if (!(++count % mdcache->mds->heartbeat_reset_grace()))
	mdcache->mds->heartbeat_reset();
    }
    return touch;
  };

  int count = 0;
  unsigned pos = omap.size() - 1;  // only used when the loop runs, so empty omap is safe
  double rand_threshold = get_inode()->get_ephemeral_rand();
  for (auto p = omap.rbegin(); p != omap.rend(); ++p, --pos) {
    string_snap_t key;
    dentry_key_t::decode_helper(p->first, key.name, key.snapid);

    bool touch = false;
    if (key.snapid == CEPH_NOSNAP) {
      if (complete) {
	touch = proc_waiters(key);
      } else {
	touch = proc_nulls_and_waiters(p->first, key);
      }
      last_name = std::string_view(p->first.c_str(), key.name.length());
    }
    if (!(++count % mdcache->mds->heartbeat_reset_grace()))
      mdcache->mds->heartbeat_reset();

    CDentry *dn = nullptr;
    try {
      dn = _load_dentry(
            p->first, key.name, key.snapid, p->second, pos, snaps,
            rand_threshold, &force_dirty);
    } catch (const buffer::error &err) {
      mdcache->mds->clog->warn() << "Corrupt dentry '" << key.name << "' in "
                                    "dir frag " << dirfrag() << ": "
                                 << err.what() << "(" << get_path() << ")";

      // Remember that this dentry is damaged.  Subsequent operations
      // that try to act directly on it will get their CEPHFS_EIOs, but this
      // dirfrag as a whole will continue to look okay (minus the
      // mysteriously-missing dentry)
      go_bad_dentry(key.snapid, key.name);

      // Anyone who was WAIT_DENTRY for this guy will get kicked
      // to RetryRequest, and hit the DamageTable-interrogating path.
      // Stats will now be bogus because we will think we're complete,
      // but have 1 or more missing dentries.
      continue;
    }

    if (!dn)
      continue;

    if (touch) {
      dout(10) << " touching wanted dn " << *dn << dendl;
      mdcache->touch_dentry(dn);
    }

    CDentry::linkage_t *dnl = dn->get_linkage();
    if (dnl->is_primary() && dnl->get_inode()->state_test(CInode::STATE_REJOINUNDEF))
      undef_inodes.push_back(dnl->get_inode());
  }

  if (complete) {
    // flush any remaining dentry waiters; on a full fetch, any name with
    // no on-disk entry is definitively null
    if (!waiting_on_dentry.empty()) {
      for (auto &p : waiting_on_dentry) {
	std::copy(p.second.begin(), p.second.end(), std::back_inserter(finished));
	if (p.first.snapid == CEPH_NOSNAP)
	  null_keys.emplace_back(p.first);
      }
      waiting_on_dentry.clear();
      put(PIN_DNWAITER);
    }
  } else {
    // drain any requested keys beyond the last returned one
    proc_nulls_and_waiters("", string_snap_t());
  }

  if (!null_keys.empty()) {
    snapid_t first = mdcache->get_global_snaprealm()->get_newest_seq() + 1;
    for (auto& key : null_keys) {
      CDentry* dn = lookup(key.name, key.snapid);
      if (dn) {
	dout(12) << "_fetched got null for key " << key << ", have " << *dn << dendl;
      } else {
	dn = add_null_dentry(key.name, first, key.snapid);
	dout(12) << "_fetched got null for key " << key << ", added " << *dn << dendl;
      }
      mdcache->touch_dentry(dn);
      if (!(++count % mdcache->mds->heartbeat_reset_grace(2)))
	mdcache->mds->heartbeat_reset();
    }
  }

  //cache->mds->logger->inc("newin", num_new_inodes_loaded);

  // mark complete, !fetching
  if (complete) {
    mark_complete();
    state_clear(STATE_FETCHING);
    take_waiting(WAIT_COMPLETE, finished);
  }

  // open & force frags
  while (!undef_inodes.empty()) {
    CInode *in = undef_inodes.front();
    undef_inodes.pop_front();
    in->state_clear(CInode::STATE_REJOINUNDEF);
    mdcache->opened_undef_inode(in);
    if (!(++count % mdcache->mds->heartbeat_reset_grace()))
      mdcache->mds->heartbeat_reset();
  }

  // dirty myself to remove stale snap dentries
  if (force_dirty && !mdcache->is_readonly())
    log_mark_dirty();

  auth_unpin(this);  // matches the auth_pin taken in fetch()/fetch_keys()

  if (!finished.empty())
    mdcache->mds->queue_waiters(finished);
}
/**
 * Record a single damaged dentry in the damage table.  If damage policy
 * says this is fatal, the MDS marks itself damaged and respawns.
 */
void CDir::go_bad_dentry(snapid_t last, std::string_view dname)
{
  dout(10) << __func__ << " " << dname << dendl;
  std::string full_path(get_path());
  full_path.append("/").append(dname);
  const bool is_fatal = mdcache->mds->damage_table.notify_dentry(
      inode->ino(), frag, last, dname, full_path);
  if (is_fatal) {
    mdcache->mds->damaged();
    ceph_abort();  // unreachable, damaged() respawns us
  }
}
/**
 * Mark this whole dirfrag as damaged: notify the damage table (possibly
 * fatally), optionally force the frag into a complete-but-bad state so
 * lookups terminate, and fail any WAIT_COMPLETE waiters with EIO.
 */
void CDir::go_bad(bool complete)
{
  dout(10) << __func__ << " " << frag << dendl;

  const bool is_fatal = mdcache->mds->damage_table.notify_dirfrag(
      inode->ino(), frag, get_path());
  if (is_fatal) {
    mdcache->mds->damaged();
    ceph_abort();  // unreachable, damaged() respawns us
  }

  if (complete) {
    if (get_version() == 0) {
      // never loaded: install a fresh fnode so the frag is usable
      auto blank_fnode = allocate_fnode();
      blank_fnode->version = 1;
      reset_fnode(std::move(blank_fnode));
    }
    state_set(STATE_BADFRAG);
    mark_complete();
  }

  state_clear(STATE_FETCHING);
  auth_unpin(this);
  finish_waiting(WAIT_COMPLETE, -CEPHFS_EIO);
}
// -----------------------
// COMMIT
/**
* commit
*
* @param want - min version i want committed
* @param c - callback for completion
*/
void CDir::commit(version_t want, MDSContext *c, bool ignore_authpinnability, int op_prio)
{
  dout(10) << "commit want " << want << " on " << *this << dendl;
  if (want == 0)
    want = get_version();

  // preconditions
  ceph_assert(want <= get_version() || get_version() == 0);    // can't commit the future
  ceph_assert(want > committed_version); // the caller is stupid
  ceph_assert(is_auth());
  ceph_assert(ignore_authpinnability || can_auth_pin());

  // always register a waiter (a noop one if the caller gave none) so
  // that an auth_pin is held for the duration of the commit
  if (c == nullptr)
    c = new C_MDSInternalNoop;

  // the first waiter pins us; _committed() unpins when the queue drains
  if (waiting_for_commit.empty())
    auth_pin(this);
  waiting_for_commit[want].push_back(c);

  // ok.
  _commit(want, op_prio);
}
/**
 * IO completion for a dirfrag commit: funnels the result back into
 * CDir::_committed() together with the version that was written.
 */
class C_IO_Dir_Committed : public CDirIOContext {
  version_t version;  ///< dirfrag version covered by this commit
public:
  C_IO_Dir_Committed(CDir *d, version_t v) : CDirIOContext(d), version(v) { }
  void finish(int r) override {
    dir->_committed(r, version);
  }
  void print(ostream& out) const override {
    out << "dirfrag_committed(" << dir->dirfrag() << ")";
  }
};
/**
 * Hand-off context that moves the pre-serialized commit payload from
 * the mds_lock-holding thread to a finisher thread, where
 * CDir::_omap_commit_ops() builds and submits the actual omap
 * mutations.  Dirfrag state (version, is_new, pool) is snapshotted in
 * the constructor so the finisher needs no locks.
 */
class C_IO_Dir_Commit_Ops : public Context {
public:
  C_IO_Dir_Commit_Ops(CDir *d, int pr,
		      vector<CDir::dentry_commit_item> &&s, bufferlist &&bl,
		      vector<string> &&r,
		      mempool::mds_co::compact_set<mempool::mds_co::string> &&stales) :
    dir(d), op_prio(pr) {
    metapool = dir->mdcache->mds->get_metadata_pool();
    version = dir->get_version();
    is_new = dir->is_new();
    to_set.swap(s);
    dfts.swap(bl);
    to_remove.swap(r);
    stale_items.swap(stales);
  }

  void finish(int r) override {
    dir->_omap_commit_ops(r, op_prio, metapool, version, is_new, to_set, dfts,
			  to_remove, stale_items);
  }

private:
  CDir *dir;
  int op_prio;
  int64_t metapool;        // metadata pool id, captured under mds_lock
  version_t version;       // dirfrag version being committed
  bool is_new;             // object may not exist yet
  vector<CDir::dentry_commit_item> to_set;   // dentries to (re)write
  bufferlist dfts;         // concatenated dirfragtree blobs for to_set
  vector<string> to_remove;                  // keys to delete
  mempool::mds_co::compact_set<mempool::mds_co::string> stale_items;  // stale snap keys to delete
};
// This is doing the same thing with the InodeStoreBase::encode()
/**
 * Serialize a primary dentry's inode payload from a dentry_commit_item.
 * The field order and envelope version MUST stay in lockstep with
 * InodeStoreBase::encode(), since both produce the same on-disk format.
 *
 * @param item pre-captured inode state to encode
 * @param dfts shared buffer holding dirfragtree blobs; the first
 *             item.dft_len bytes are spliced out into bl
 * @param bl   output buffer
 */
void CDir::_encode_primary_inode_base(dentry_commit_item &item, bufferlist &dfts,
				      bufferlist &bl)
{
  ENCODE_START(6, 4, bl);
  encode(*item.inode, bl, item.features);

  if (!item.symlink.empty())
    encode(item.symlink, bl);

  // dirfragtree
  dfts.splice(0, item.dft_len, &bl);

  if (item.xattrs)
    encode(*item.xattrs, bl);
  else
    encode((__u32)0, bl);

  if (item.snaprealm) {
    bufferlist snapr_bl;
    encode(item.srnode, snapr_bl);
    encode(snapr_bl, bl);
  } else {
    // empty blob means "no snaprealm"
    encode(bufferlist(), bl);
  }

  if (item.old_inodes)
    encode(*item.old_inodes, bl, item.features);
  else
    encode((__u32)0, bl);

  encode(item.oldest_snap, bl);
  encode(item.damage_flags, bl);
  ENCODE_FINISH(bl);
}
// This is not locked by mds_lock
/**
 * Finisher-thread half of a dirfrag commit.  Builds one or more omap
 * mutations from the pre-serialized items, splitting whenever a batch
 * would exceed max_dir_commit_size, and submits them via the objecter.
 * The fnode header rides on the *last* op (see comment inside) and
 * C_IO_Dir_Committed fires once all sub-ops complete.
 *
 * @param r        result of the scheduling step; <0 aborts with a write error
 * @param op_prio  osd op priority
 * @param metapool metadata pool id (captured under mds_lock)
 * @param version  dirfrag version being committed
 * @param _new     true if the dirfrag object may not exist yet (skip stat)
 * @param to_set   dentries to (re)write
 * @param dfts     concatenated dirfragtree blobs referenced by to_set
 * @param to_remove keys to delete
 * @param stales   stale snap-dentry keys to delete
 */
void CDir::_omap_commit_ops(int r, int op_prio, int64_t metapool, version_t version, bool _new,
			    vector<dentry_commit_item> &to_set, bufferlist &dfts,
			    vector<string>& to_remove,
			    mempool::mds_co::compact_set<mempool::mds_co::string> &stales)
{
  dout(10) << __func__ << dendl;

  if (r < 0) {
    mdcache->mds->handle_write_error_with_lock(r);
    return;
  }

  C_GatherBuilder gather(g_ceph_context,
                         new C_OnFinisher(new C_IO_Dir_Committed(this, version),
					  mdcache->mds->finisher));

  SnapContext snapc;
  object_t oid = get_ondisk_object();
  object_locator_t oloc(metapool);

  map<string, bufferlist> _set;
  set<string> _rm;

  unsigned max_write_size = mdcache->max_dir_commit_size;
  unsigned write_size = 0;

  // flush the currently batched sets/removes as one osd op
  auto commit_one = [&](bool header=false) {
    ObjectOperation op;

    /*
     * Shouldn't submit empty op to Rados, which could cause
     * the cephfs to become readonly.
     */
    ceph_assert(header || !_set.empty() || !_rm.empty());

    // don't create new dirfrag blindly
    if (!_new)
      op.stat(nullptr, nullptr, nullptr);

    /*
     * save the header at the last moment.. If we were to send it off before
     * other updates, but die before sending them all, we'd think that the
     * on-disk state was fully committed even though it wasn't! However, since
     * the messages are strictly ordered between the MDS and the OSD, and
     * since messages to a given PG are strictly ordered, if we simply send
     * the message containing the header off last, we cannot get our header
     * into an incorrect state.
     */
    if (header) {
      // NB: this bufferlist intentionally shadows the lambda's bool
      // 'header' parameter
      bufferlist header;
      encode(*fnode, header);
      op.omap_set_header(header);
    }

    op.priority = op_prio;
    if (!_set.empty())
      op.omap_set(_set);
    if (!_rm.empty())
      op.omap_rm_keys(_rm);
    mdcache->mds->objecter->mutate(oid, oloc, op, snapc,
                                   ceph::real_clock::now(),
                                   0, gather.new_sub());
    write_size = 0;
    _set.clear();
    _rm.clear();
  };

  int count = 0;
  for (auto &key : stales) {
    unsigned size = key.length() + sizeof(__u32);
    if (write_size > 0 && write_size + size > max_write_size)
      commit_one();

    write_size += size;
    _rm.emplace(key);

    if (!(++count % mdcache->mds->heartbeat_reset_grace(2)))
      mdcache->mds->heartbeat_reset();
  }

  for (auto &key : to_remove) {
    unsigned size = key.length() + sizeof(__u32);
    if (write_size > 0 && write_size + size > max_write_size)
      commit_one();

    write_size += size;
    _rm.emplace(std::move(key));

    if (!(++count % mdcache->mds->heartbeat_reset_grace(2)))
      mdcache->mds->heartbeat_reset();
  }

  bufferlist bl;
  using ceph::encode;
  for (auto &item : to_set) {
    encode(item.first, bl);
    if (item.is_remote) {
      // remote link
      CDentry::encode_remote(item.ino, item.d_type, item.alternate_name, bl);
    } else {
      // marker, name, inode, [symlink string]
      bl.append('i');         // inode

      ENCODE_START(2, 1, bl);
      encode(item.alternate_name, bl);
      _encode_primary_inode_base(item, dfts, bl);
      ENCODE_FINISH(bl);
    }

    unsigned size = item.key.length() + bl.length() + 2 * sizeof(__u32);
    if (write_size > 0 && write_size + size > max_write_size)
      commit_one();

    write_size += size;
    _set[std::move(item.key)].swap(bl);

    if (!(++count % mdcache->mds->heartbeat_reset_grace()))
      mdcache->mds->heartbeat_reset();
  }

  // final batch carries the header (see the ordering comment above)
  commit_one(true);
  gather.activate();
}
/**
 * Flush the modified dentries in this dirfrag to its omap object.
 *
 * Serializes the dirty (or, for a fresh fragmenting dirfrag, all)
 * dentries under mds_lock into dentry_commit_items, then hands them to
 * a finisher thread (C_IO_Dir_Commit_Ops), which batches them below
 * max_dir_commit_size per osd op.
 *
 * @param op_prio osd op priority; <0 selects CEPH_MSG_PRIO_DEFAULT
 */
void CDir::_omap_commit(int op_prio)
{
  dout(10) << __func__ << dendl;

  if (op_prio < 0)
    op_prio = CEPH_MSG_PRIO_DEFAULT;

  // snap purge?
  const set<snapid_t> *snaps = NULL;
  SnapRealm *realm = inode->find_snaprealm();
  if (fnode->snap_purged_thru < realm->get_last_destroyed()) {
    snaps = &realm->get_snaps();
    dout(10) << " snap_purged_thru " << fnode->snap_purged_thru
	     << " < " << realm->get_last_destroyed()
	     << ", snap purge based on " << *snaps << dendl;
    // fnode.snap_purged_thru = realm->get_last_destroyed();
  }

  size_t items_count = 0;
  if (state_test(CDir::STATE_FRAGMENTING) && is_new()) {
    items_count = get_num_head_items() + get_num_snap_items();
  } else {
    for (elist<CDentry*>::iterator it = dirty_dentries.begin(); !it.end(); ++it)
      ++items_count;
  }

  vector<string> to_remove;
  // reserve enough memory; may be larger than actually needed
  to_remove.reserve(items_count);
  vector<dentry_commit_item> to_set;
  // reserve enough memory; may be larger than actually needed
  to_set.reserve(items_count);

  // for dir fragtrees
  bufferlist dfts(CEPH_PAGE_SIZE);

  // classify one dentry as a removal (null/stale) or a (re)write
  auto write_one = [&](CDentry *dn) {
    string key;
    dn->key().encode(key);

    if (!dn->corrupt_first_loaded) {
      dn->check_corruption(false);
    }

    if (snaps && try_trim_snap_dentry(dn, *snaps)) {
      dout(10) << " rm " << key << dendl;
      to_remove.emplace_back(std::move(key));
      return;
    }

    if (dn->get_linkage()->is_null()) {
      dout(10) << " rm " << dn->get_name() << " " << *dn << dendl;
      to_remove.emplace_back(std::move(key));
    } else {
      dout(10) << " set " << dn->get_name() << " " << *dn << dendl;

      uint64_t off = dfts.length();
      // try to reserve new size if there has less
      // than 1/8 page space
      uint64_t left = CEPH_PAGE_SIZE - off % CEPH_PAGE_SIZE;
      if (left < CEPH_PAGE_SIZE / 8)
	dfts.reserve(left + CEPH_PAGE_SIZE);

      auto& item = to_set.emplace_back();
      item.key = std::move(key);
      _parse_dentry(dn, item, snaps, dfts);
      item.dft_len = dfts.length() - off;
    }
  };

  int count = 0;
  if (state_test(CDir::STATE_FRAGMENTING) && is_new()) {
    // a brand-new post-fragmentation dirfrag: every non-null dentry must
    // be written, not just the dirty ones
    ceph_assert(committed_version == 0);
    for (auto p = items.begin(); p != items.end(); ) {
      CDentry *dn = p->second;
      ++p;
      if (dn->get_linkage()->is_null())
	continue;
      write_one(dn);

      if (!(++count % mdcache->mds->heartbeat_reset_grace()))
	mdcache->mds->heartbeat_reset();
    }
  } else {
    for (auto p = dirty_dentries.begin(); !p.end(); ) {
      CDentry *dn = *p;
      ++p;
      write_one(dn);

      if (!(++count % mdcache->mds->heartbeat_reset_grace()))
	mdcache->mds->heartbeat_reset();
    }
  }

  auto c = new C_IO_Dir_Commit_Ops(this, op_prio, std::move(to_set), std::move(dfts),
				   std::move(to_remove), std::move(stale_items));
  stale_items.clear();
  mdcache->mds->finisher->queue(c);
}
/**
 * Capture a dentry's commit payload into a dentry_commit_item so it can
 * be serialized later without holding mds_lock.
 *
 * @param dn    dentry to capture (remote or primary; never null linkage)
 * @param item  output item
 * @param snaps realm snaps for purging stale snap data, or nullptr
 * @param bl    shared buffer; the primary inode's dirfragtree is encoded
 *              here (its length is recorded as item.dft_len by the caller)
 */
void CDir::_parse_dentry(CDentry *dn, dentry_commit_item &item,
			 const set<snapid_t> *snaps, bufferlist &bl)
{
  // clear dentry NEW flag, if any.  we can no longer silently drop it.
  dn->clear_new();

  item.first = dn->first;

  // primary or remote?
  auto& linkage = dn->linkage;
  item.alternate_name = dn->get_alternate_name();
  if (linkage.is_remote()) {
    item.is_remote = true;
    item.ino = linkage.get_remote_ino();
    item.d_type = linkage.get_remote_d_type();
    dout(14) << " dn '" << dn->get_name() << "' remote ino " << item.ino << dendl;
  } else if (linkage.is_primary()) {
    // primary link
    CInode *in = linkage.get_inode();
    ceph_assert(in);

    dout(14) << " dn '" << dn->get_name() << "' inode " << *in << dendl;

    if (in->is_multiversion()) {
      if (!in->snaprealm) {
	if (snaps)
	  in->purge_stale_snap_data(*snaps);
      } else {
	in->purge_stale_snap_data(in->snaprealm->get_snaps());
      }
    }

    if (in->snaprealm) {
      item.snaprealm = true;
      item.srnode = in->snaprealm->srnode;
    }
    item.features = mdcache->mds->mdsmap->get_up_features();
    item.inode = in->inode;
    if (in->inode->is_symlink())
      item.symlink = in->symlink;
    using ceph::encode;
    encode(in->dirfragtree, bl);  // caller records the encoded length as dft_len
    item.xattrs = in->xattrs;
    item.old_inodes = in->old_inodes;
    item.oldest_snap = in->oldest_snap;
    item.damage_flags = in->damage_flags;
  } else {
    ceph_assert(!linkage.is_null());
  }
}
void CDir::_commit(version_t want, int op_prio)
{
  dout(10) << "_commit want " << want << " on " << *this << dendl;

  // we can't commit things in the future.
  // (even the projected future.)
  ceph_assert(want <= get_version() || get_version() == 0);

  // check pre+postconditions.
  ceph_assert(is_auth());

  if (committed_version >= want) {
    // nothing to do
    dout(10) << "already committed " << committed_version << " >= " << want << dendl;
    return;
  }

  if (committing_version >= want) {
    // a commit covering this version is already in flight
    dout(10) << "already committing " << committing_version << " >= " << want << dendl;
    ceph_assert(state_test(STATE_COMMITTING));
    return;
  }

  if (committing_version > committed_version) {
    // an older commit is still in flight; its completion will restart us
    dout(10) << "already committing older " << committing_version << ", waiting for that to finish" << dendl;
    return;
  }

  // kick off a commit of the current version
  committing_version = get_version();

  // mark committing (if not already)
  if (!state_test(STATE_COMMITTING)) {
    dout(10) << "marking committing" << dendl;
    state_set(STATE_COMMITTING);
  }

  if (mdcache->mds->logger) mdcache->mds->logger->inc(l_mds_dir_commit);

  mdcache->mds->balancer->hit_dir(this, META_POP_STORE);

  _omap_commit(op_prio);
}
/**
 * _committed
 *
 * Completion for a dirfrag commit: mark the dirfrag (and any dentries
 * and inodes it covers) clean up to the committed version, wake commit
 * waiters, and re-issue a commit if anyone is waiting on a newer one.
 *
 * @param r result of the commit (0 on success)
 * @param v version i just committed
 */
void CDir::_committed(int r, version_t v)
{
  if (r < 0) {
    // the directory could be partly purged during MDS failover
    if (r == -CEPHFS_ENOENT && committed_version == 0 &&
	!inode->is_base() && get_parent_dir()->inode->is_stray()) {
      r = 0;  // tolerate: stray dirfrag object already gone
      if (inode->snaprealm)
	inode->state_set(CInode::STATE_MISSINGOBJS);
    }
    if (r < 0) {
      dout(1) << "commit error " << r << " v " << v << dendl;
      mdcache->mds->clog->error() << "failed to commit dir " << dirfrag() << " object,"
				  << " errno " << r;
      mdcache->mds->handle_write_error(r);
      return;
    }
  }

  dout(10) << "_committed v " << v << " on " << *this << dendl;
  ceph_assert(is_auth());

  bool stray = inode->is_stray();

  // take note.
  ceph_assert(v > committed_version);
  ceph_assert(v <= committing_version);
  committed_version = v;

  // _all_ commits done?
  if (committing_version == committed_version)
    state_clear(CDir::STATE_COMMITTING);

  // _any_ commit, even if we've been redirtied, means we're no longer new.
  item_new.remove_myself();

  // dir clean?
  if (committed_version == get_version())
    mark_clean();

  int count = 0;

  // dentries clean?
  for (auto p = dirty_dentries.begin(); !p.end(); ) {
    CDentry *dn = *p;
    ++p;  // advance first: mark_clean()/remove_dentry() unlink dn from this list

    // inode?
    if (dn->linkage.is_primary()) {
      CInode *in = dn->linkage.get_inode();
      ceph_assert(in);
      ceph_assert(in->is_auth());

      if (committed_version >= in->get_version()) {
	if (in->is_dirty()) {
	  dout(15) << " dir " << committed_version << " >= inode " << in->get_version() << " now clean " << *in << dendl;
	  in->mark_clean();
	}
      } else {
	dout(15) << " dir " << committed_version << " < inode " << in->get_version() << " still dirty " << *in << dendl;
	ceph_assert(in->is_dirty() || in->last < CEPH_NOSNAP);  // special case for cow snap items (not predirtied)
      }
    }

    // dentry
    if (committed_version >= dn->get_version()) {
      dout(15) << " dir " << committed_version << " >= dn " << dn->get_version() << " now clean " << *dn << dendl;
      dn->mark_clean();

      // drop clean null stray dentries immediately
      if (stray &&
	  dn->get_num_ref() == 0 &&
	  !dn->is_projected() &&
	  dn->get_linkage()->is_null())
	remove_dentry(dn);
    } else {
      dout(15) << " dir " << committed_version << " < dn " << dn->get_version() << " still dirty " << *dn << dendl;
      ceph_assert(dn->is_dirty());
    }

    if (!(++count % mdcache->mds->heartbeat_reset_grace()))
      mdcache->mds->heartbeat_reset();
  }

  // finishers?
  bool were_waiters = !waiting_for_commit.empty();

  auto it = waiting_for_commit.begin();
  while (it != waiting_for_commit.end()) {
    auto _it = it;
    ++_it;
    if (it->first > committed_version) {
      // someone wants a newer version than we just wrote: commit again
      dout(10) << " there are waiters for " << it->first << ", committing again" << dendl;
      _commit(it->first, -1);
      break;
    }
    MDSContext::vec t;
    for (const auto &waiter : it->second)
      t.push_back(waiter);
    mdcache->mds->queue_waiters(t);
    waiting_for_commit.erase(it);
    it = _it;

    if (!(++count % mdcache->mds->heartbeat_reset_grace()))
      mdcache->mds->heartbeat_reset();
  }

  // try drop dentries in this dirfrag if it's about to be purged
  if (!inode->is_base() && get_parent_dir()->inode->is_stray() &&
      inode->snaprealm)
    mdcache->maybe_eval_stray(inode, true);

  // unpin if we kicked the last waiter.
  if (were_waiters &&
      waiting_for_commit.empty())
    auth_unpin(this);
}
// IMPORT/EXPORT
/**
 * Resolve this dirfrag's effective export pin, mapping the ephemeral
 * pseudo-ranks (distributed / random) to a concrete rank by hashing.
 */
mds_rank_t CDir::get_export_pin(bool inherit) const
{
  mds_rank_t pin = inode->get_export_pin(inherit);
  if (pin == MDS_RANK_EPHEMERAL_DIST)
    return mdcache->hash_into_rank_bucket(ino(), get_frag());
  if (pin == MDS_RANK_EPHEMERAL_RAND)
    return mdcache->hash_into_rank_bucket(ino());
  return pin;
}
// A dirfrag may be exported to `dest` when it is pinned to that rank,
// or when it carries no pin at all (export_pin < 0).
bool CDir::is_exportable(mds_rank_t dest) const
{
  const mds_rank_t pin = get_export_pin();
  return pin == dest || pin < 0;
}
// Serialize this dirfrag's migration state for export to another MDS.
// Field order must match decode_import() exactly.  Takes
// PIN_TEMPEXPORTING, which finish_export() releases.
void CDir::encode_export(bufferlist& bl)
{
  ENCODE_START(1, 1, bl);
  ceph_assert(!is_projected());  // all projected fnodes must be flushed first
  encode(first, bl);
  encode(*fnode, bl);
  encode(dirty_old_rstat, bl);
  encode(committed_version, bl);
  encode(state, bl);
  encode(dir_rep, bl);
  encode(pop_me, bl);
  encode(pop_auth_subtree, bl);
  encode(dir_rep_by, bl);
  encode(get_replicas(), bl);
  // keep the frag pinned until the export completes or aborts
  get(PIN_TEMPEXPORTING);
  ENCODE_FINISH(bl);
}
// Complete an export: shed all non-kept state bits, remove this
// subtree's load contribution from the nested popularity counters, and
// drop the temp pin taken by encode_export().
void CDir::finish_export()
{
  state &= MASK_STATE_EXPORT_KEPT;
  pop_nested.sub(pop_auth_subtree);
  pop_auth_subtree_nested.sub(pop_auth_subtree);
  pop_me.zero();
  pop_auth_subtree.zero();
  put(PIN_TEMPEXPORTING);
  dirty_old_rstat.clear();
}
// Rebuild this dirfrag from the stream produced by encode_export() on
// the exporting MDS.  Field order must match encode_export() exactly.
// We become auth for the frag; any imported dirty scatterlock state is
// registered against the given log segment.
void CDir::decode_import(bufferlist::const_iterator& blp, LogSegment *ls)
{
  DECODE_START(1, blp);
  decode(first, blp);
  {
    // decode into a fresh fnode and install it wholesale
    auto _fnode = allocate_fnode();
    decode(*_fnode, blp);
    reset_fnode(std::move(_fnode));
  }
  update_projected_version();
  decode(dirty_old_rstat, blp);
  decode(committed_version, blp);
  committing_version = committed_version;
  unsigned s;
  decode(s, blp);
  // keep only import-safe local state, then adopt the exporter's
  // exported bits; we are the auth now
  state &= MASK_STATE_IMPORT_KEPT;
  state_set(STATE_AUTH | (s & MASK_STATE_EXPORTED));
  if (is_dirty()) {
    get(PIN_DIRTY);
    _mark_dirty(ls);
  }
  decode(dir_rep, blp);
  decode(pop_me, blp);
  decode(pop_auth_subtree, blp);
  // fold the imported subtree load into our nested counters
  pop_nested.add(pop_auth_subtree);
  pop_auth_subtree_nested.add(pop_auth_subtree);
  decode(dir_rep_by, blp);
  decode(get_replicas(), blp);
  if (is_replicated()) get(PIN_REPLICATED);
  replica_nonce = 0;  // no longer defined
  // did we import some dirty scatterlock data?
  if (dirty_old_rstat.size() ||
      !(fnode->rstat == fnode->accounted_rstat)) {
    mdcache->mds->locker->mark_updated_scatterlock(&inode->nestlock);
    ls->dirty_dirfrag_nest.push_back(&inode->item_dirty_dirfrag_nest);
  }
  if (!(fnode->fragstat == fnode->accounted_fragstat)) {
    mdcache->mds->locker->mark_updated_scatterlock(&inode->filelock);
    ls->dirty_dirfrag_dir.push_back(&inode->item_dirty_dirfrag_dir);
  }
  if (is_dirty_dft()) {
    if (inode->dirfragtreelock.get_state() != LOCK_MIX &&
	inode->dirfragtreelock.is_stable()) {
      // clear stale dirtydft
      state_clear(STATE_DIRTYDFT);
    } else {
      mdcache->mds->locker->mark_updated_scatterlock(&inode->dirfragtreelock);
      ls->dirty_dirfrag_dirfragtree.push_back(&inode->item_dirty_dirfrag_dirfragtree);
    }
  }
  DECODE_FINISH(blp);
}
// Undo a partially-completed import: give up auth, discard replica
// bookkeeping and dirty state, and remove the imported subtree load
// from the nested popularity counters.
void CDir::abort_import()
{
  ceph_assert(is_auth());
  state_clear(CDir::STATE_AUTH);
  remove_bloom();
  clear_replica_map();
  set_replica_nonce(CDir::EXPORT_NONCE);
  if (is_dirty())
    mark_clean();
  pop_nested.sub(pop_auth_subtree);
  pop_auth_subtree_nested.sub(pop_auth_subtree);
  pop_me.zero();
  pop_auth_subtree.zero();
}
// Encode a DirStat for a client reply.  The payload is the same either
// way; clients with CEPHFS_FEATURE_REPLY_ENCODING get it wrapped in a
// versioned envelope.
void CDir::encode_dirstat(bufferlist& bl, const session_info_t& info, const DirStat& ds) {
  auto encode_fields = [&bl, &ds] {
    encode(ds.frag, bl);
    encode(ds.auth, bl);
    encode(ds.dist, bl);
  };
  if (info.has_feature(CEPHFS_FEATURE_REPLY_ENCODING)) {
    ENCODE_START(1, 1, bl);
    encode_fields();
    ENCODE_FINISH(bl);
  } else {
    encode_fields();
  }
}
/********************************
* AUTHORITY
*/
/*
* if dir_auth.first == parent, auth is same as inode.
* unless .second != unknown, in which case that sticks.
*/
// A subtree root carries its own authority delegation; any other frag
// inherits authority from its inode.
mds_authority_t CDir::authority() const
{
  return is_subtree_root() ? dir_auth : inode->authority();
}
/** is_subtree_root()
* true if this is an auth delegation point.
* that is, dir_auth != default (parent,unknown)
*
* some key observations:
* if i am auth:
* - any region bound will be an export, or frozen.
*
* note that this DOES heed dir_auth.pending
*/
/*
bool CDir::is_subtree_root()
{
if (dir_auth == CDIR_AUTH_DEFAULT) {
//dout(10) << "is_subtree_root false " << dir_auth << " != " << CDIR_AUTH_DEFAULT
//<< " on " << ino() << dendl;
return false;
} else {
//dout(10) << "is_subtree_root true " << dir_auth << " != " << CDIR_AUTH_DEFAULT
//<< " on " << ino() << dendl;
return true;
}
}
*/
/** contains(x)
* true if we are x, or an ancestor of x
*/
// True if we are x, or an ancestor of x: walk the (projected) parent
// chain upward from x looking for this dirfrag.
bool CDir::contains(CDir *x)
{
  for (CDir *cur = x; cur != nullptr;
       cur = cur->get_inode()->get_projected_parent_dir()) {
    if (cur == this)
      return true;
  }
  return false;
}
// May this frag gain another replica?  Always true if it is not yet
// replicated; otherwise only while some active MDS besides the auth
// still lacks a replica.
bool CDir::can_rep() const
{
  if (!is_rep())
    return true;
  unsigned actives = mdcache->mds->get_mds_map()->get_num_mds(MDSMap::STATE_ACTIVE);
  return (actives - 1) > get_replicas().size();
}
/** set_dir_auth
*/
// Install a new authority pair for this dirfrag and fix up the side
// effects of becoming / ceasing to be a subtree root: the subtree-root
// count on the inode, the auth_pin the parent inode holds for a frozen
// child, and any WAIT_SINGLEAUTH waiters once ambiguity resolves.
void CDir::set_dir_auth(const mds_authority_t &a)
{
  dout(10) << "setting dir_auth=" << a
	   << " from " << dir_auth
	   << " on " << *this << dendl;
  bool was_subtree = is_subtree_root();
  bool was_ambiguous = dir_auth.second >= 0;
  // set it.
  dir_auth = a;
  // new subtree root?
  if (!was_subtree && is_subtree_root()) {
    dout(10) << " new subtree root, adjusting auth_pins" << dendl;
    if (freeze_tree_state) {
      // only by CDir::_freeze_tree()
      ceph_assert(is_freezing_tree_root());
    }
    inode->num_subtree_roots++;
    // unpin parent of frozen dir/tree?
    if (inode->is_auth()) {
      ceph_assert(!is_frozen_tree_root());
      if (is_frozen_dir())
	inode->auth_unpin(this);
    }
  }
  if (was_subtree && !is_subtree_root()) {
    dout(10) << " old subtree root, adjusting auth_pins" << dendl;
    inode->num_subtree_roots--;
    // pin parent of frozen dir/tree?
    if (inode->is_auth()) {
      ceph_assert(!is_frozen_tree_root());
      if (is_frozen_dir())
	inode->auth_pin(this);
    }
  }
  // newly single auth?  wake anyone waiting for the ambiguity to clear.
  if (was_ambiguous && dir_auth.second == CDIR_AUTH_UNKNOWN) {
    MDSContext::vec ls;
    take_waiting(WAIT_SINGLEAUTH, ls);
    mdcache->mds->queue_waiters(ls);
  }
}
/*****************************************
* AUTH PINS and FREEZING
*
* the basic plan is that auth_pins only exist in auth regions, and they
* prevent a freeze (and subsequent auth change).
*
* however, we also need to prevent a parent from freezing if a child is frozen.
* for that reason, the parent inode of a frozen directory is auth_pinned.
*
* the oddity is when the frozen directory is a subtree root. if that's the case,
* the parent inode isn't frozen. which means that when subtree authority is adjusted
* at the bounds, inodes for any frozen bound directories need to get auth_pins at that
* time.
*
*/
// Take an auth_pin on this frag for `by`.  The first pin also takes a
// cache reference (PIN_AUTHPIN); a pending tree freeze sees the pin via
// the shared freeze_tree_state counter.
void CDir::auth_pin(void *by)
{
  if (auth_pins == 0)
    get(PIN_AUTHPIN);
  auth_pins++;

#ifdef MDS_AUTHPIN_SET
  auth_pin_set.insert(by);
#endif

  dout(10) << "auth_pin by " << by << " on " << *this << " count now " << auth_pins << dendl;

  if (freeze_tree_state)
    freeze_tree_state->auth_pins += 1;
}
// Release an auth_pin previously taken by `by`.  Dropping the last pin
// releases PIN_AUTHPIN, and a pending freeze may complete once the
// shared counter drains (maybe_finish_freeze).
void CDir::auth_unpin(void *by)
{
  auth_pins--;

#ifdef MDS_AUTHPIN_SET
  {
    auto it = auth_pin_set.find(by);
    ceph_assert(it != auth_pin_set.end());
    auth_pin_set.erase(it);
  }
#endif

  if (auth_pins == 0)
    put(PIN_AUTHPIN);

  dout(10) << "auth_unpin by " << by << " on " << *this << " count now " << auth_pins << dendl;
  ceph_assert(auth_pins >= 0);

  if (freeze_tree_state)
    freeze_tree_state->auth_pins -= 1;

  maybe_finish_freeze();  // pending freeze?
}
// Propagate auth_pin count changes from descendant dirfrags (dirinc may
// be negative).  A decrease may allow a pending freeze to complete.
void CDir::adjust_nested_auth_pins(int dirinc, void *by)
{
  ceph_assert(dirinc);
  dir_auth_pins += dirinc;

  dout(15) << __func__ << " " << dirinc << " on " << *this
	   << " by " << by << " count now "
	   << auth_pins << "/" << dir_auth_pins << dendl;
  ceph_assert(dir_auth_pins >= 0);

  if (freeze_tree_state)
    freeze_tree_state->auth_pins += dirinc;

  if (dirinc < 0)
    maybe_finish_freeze();  // pending freeze?
}
#ifdef MDS_VERIFY_FRAGSTAT
// Debug-only: recount subdirs/files from the in-memory dentries and
// abort if the tally disagrees with the stored fragstat.  Requires a
// complete frag; stray dirs are skipped.
void CDir::verify_fragstat()
{
  ceph_assert(is_complete());
  if (inode->is_stray())
    return;

  frag_info_t c;
  memset(&c, 0, sizeof(c));

  for (auto it = items.begin();
       it != items.end();
       ++it) {
    CDentry *dn = it->second;
    if (dn->is_null())
      continue;

    dout(10) << " " << *dn << dendl;
    if (dn->is_primary())
      dout(10) << "     " << *dn->inode << dendl;

    if (dn->is_primary()) {
      if (dn->inode->is_dir())
	c.nsubdirs++;
      else
	c.nfiles++;
    }
    if (dn->is_remote()) {
      if (dn->get_remote_d_type() == DT_DIR)
	c.nsubdirs++;
      else
	c.nfiles++;
    }
  }

  if (c.nsubdirs != fnode->fragstat.nsubdirs ||
      c.nfiles != fnode->fragstat.nfiles) {
    dout(0) << "verify_fragstat failed " << fnode->fragstat << " on " << *this << dendl;
    dout(0) << "               i count " << c << dendl;
    ceph_abort();
  } else {
    dout(0) << "verify_fragstat ok " << fnode->fragstat << " on " << *this << dendl;
  }
}
#endif
/*****************************************************************************
* FREEZING
*/
// FREEZE TREE
// Breadth-first walk over all nested dirfrags below this one (via
// primary-linked directory inodes).  The callback's return value
// controls whether the walk descends into that frag's children.
void CDir::_walk_tree(std::function<bool(CDir*)> callback)
{
  deque<CDir*> dfq;
  dfq.push_back(this);

  while (!dfq.empty()) {
    CDir *dir = dfq.front();
    dfq.pop_front();

    for (auto& p : *dir) {
      CDentry *dn = p.second;
      if (!dn->get_linkage()->is_primary())
	continue;
      CInode *in = dn->get_linkage()->get_inode();
      if (!in->is_dir())
	continue;

      auto&& dfv = in->get_nested_dirfrags();
      for (auto& dir : dfv) {
	auto ret = callback(dir);
	if (ret)
	  dfq.push_back(dir);
      }
    }
  }
}
// Begin freezing the subtree rooted here.  Returns true if the tree
// froze immediately, false if we must wait for outstanding auth pins
// to drain (STATE_FREEZINGTREE is set and maybe_finish_freeze() will
// complete the freeze later).
bool CDir::freeze_tree()
{
  ceph_assert(!is_frozen());
  ceph_assert(!is_freezing());
  ceph_assert(!freeze_tree_state);

  auth_pin(this);

  // Traverse the subtree to mark dirfrags as 'freezing' (set freeze_tree_state)
  // and accumulate their auth pins into freeze_tree_state's total count.
  // When a 'freezing' object is auth-unpinned, the counter in freeze_tree_state
  // is decreased as well.  The subtree becomes 'frozen' once the counter
  // reaches zero.
  freeze_tree_state = std::make_shared<freeze_tree_state_t>(this);
  freeze_tree_state->auth_pins += get_auth_pins() + get_dir_auth_pins();
  if (!lock_caches_with_auth_pins.empty())
    mdcache->mds->locker->invalidate_lock_caches(this);

  _walk_tree([this](CDir *dir) {
      if (dir->freeze_tree_state)
	return false;
      dir->freeze_tree_state = freeze_tree_state;
      freeze_tree_state->auth_pins += dir->get_auth_pins() + dir->get_dir_auth_pins();
      if (!dir->lock_caches_with_auth_pins.empty())
	mdcache->mds->locker->invalidate_lock_caches(dir);
      return true;
    }
  );

  if (is_freezeable(true)) {
    _freeze_tree();
    auth_unpin(this);
    return true;
  } else {
    state_set(STATE_FREEZINGTREE);
    ++num_freezing_trees;
    dout(10) << "freeze_tree waiting " << *this << dendl;
    return false;
  }
}
// Actually mark the tree frozen.  On the auth side this also carves the
// frozen region out as its own unambiguous subtree; on the replica side
// (import) it just tags descendant frags with the freeze state.
void CDir::_freeze_tree()
{
  dout(10) << __func__ << " " << *this << dendl;
  ceph_assert(is_freezeable(true));

  if (freeze_tree_state) {
    ceph_assert(is_auth());
  } else {
    ceph_assert(!is_auth());
    freeze_tree_state = std::make_shared<freeze_tree_state_t>(this);
  }
  freeze_tree_state->frozen = true;

  if (is_auth()) {
    mds_authority_t auth;
    bool was_subtree = is_subtree_root();
    if (was_subtree) {
      auth = get_dir_auth();
    } else {
      // temporarily prevent parent subtree from becoming frozen.
      inode->auth_pin(this);
      // create new subtree
      auth = authority();
    }

    // any nested frag that is not part of this freeze must be split off
    // into its own subtree before we pin the authority below.
    _walk_tree([this, &auth] (CDir *dir) {
	if (dir->freeze_tree_state != freeze_tree_state) {
	  mdcache->adjust_subtree_auth(dir, auth);
	  return false;
	}
	return true;
      }
    );

    ceph_assert(auth.first >= 0);
    ceph_assert(auth.second == CDIR_AUTH_UNKNOWN);
    auth.second = auth.first;
    mdcache->adjust_subtree_auth(this, auth);
    if (!was_subtree)
      inode->auth_unpin(this);
  } else {
    // importing subtree ?
    _walk_tree([this] (CDir *dir) {
	ceph_assert(!dir->freeze_tree_state);
	dir->freeze_tree_state = freeze_tree_state;
	return true;
      }
    );
  }

  // twiddle state
  if (state_test(STATE_FREEZINGTREE)) {
    state_clear(STATE_FREEZINGTREE);
    --num_freezing_trees;
  }
  state_set(STATE_FROZENTREE);
  ++num_frozen_trees;
  get(PIN_FROZEN);
}
// Undo a tree freeze, whether it had completed (FROZENTREE) or was
// still pending (FREEZINGTREE).  Collects WAIT_UNFREEZE waiters from
// the whole frozen region before waking them.
void CDir::unfreeze_tree()
{
  dout(10) << __func__ << " " << *this << dendl;

  MDSContext::vec unfreeze_waiters;
  take_waiting(WAIT_UNFREEZE, unfreeze_waiters);

  if (freeze_tree_state) {
    // detach the shared freeze state from every frag it tagged
    _walk_tree([this, &unfreeze_waiters](CDir *dir) {
	if (dir->freeze_tree_state != freeze_tree_state)
	  return false;
	dir->freeze_tree_state.reset();
	dir->take_waiting(WAIT_UNFREEZE, unfreeze_waiters);
	return true;
      }
    );
  }

  if (state_test(STATE_FROZENTREE)) {
    // frozen.  unfreeze.
    state_clear(STATE_FROZENTREE);
    --num_frozen_trees;

    put(PIN_FROZEN);

    if (is_auth()) {
      // must be subtree
      ceph_assert(is_subtree_root());
      // for debug purpose, caller should ensure 'dir_auth.second == dir_auth.first'
      mds_authority_t auth = get_dir_auth();
      ceph_assert(auth.first >= 0);
      ceph_assert(auth.second == auth.first);
      auth.second = CDIR_AUTH_UNKNOWN;
      mdcache->adjust_subtree_auth(this, auth);
    }
    freeze_tree_state.reset();
  } else {
    ceph_assert(state_test(STATE_FREEZINGTREE));

    // freezing.  stop it.
    state_clear(STATE_FREEZINGTREE);
    --num_freezing_trees;
    freeze_tree_state.reset();

    finish_waiting(WAIT_FROZEN, -1);
    auth_unpin(this);
  }

  mdcache->mds->queue_waiters(unfreeze_waiters);
}
// After a rename moves `dir` out of this freezing tree, detach `dir`
// (and its whole nested subtree) from the shared freeze state and
// subtract their auth pins from the pending-freeze counter.
void CDir::adjust_freeze_after_rename(CDir *dir)
{
  if (!freeze_tree_state || dir->freeze_tree_state != freeze_tree_state)
    return;
  CDir *newdir = dir->get_inode()->get_parent_dir();
  if (newdir == this || newdir->freeze_tree_state == freeze_tree_state)
    return;  // still inside the freezing region; nothing to do

  ceph_assert(!freeze_tree_state->frozen);
  ceph_assert(get_dir_auth_pins() > 0);

  MDSContext::vec unfreeze_waiters;

  auto unfreeze = [this, &unfreeze_waiters](CDir *dir) {
    if (dir->freeze_tree_state != freeze_tree_state)
      return false;
    int dec = dir->get_auth_pins() + dir->get_dir_auth_pins();
    // shouldn't become zero because srcdn of rename was auth pinned
    ceph_assert(freeze_tree_state->auth_pins > dec);
    freeze_tree_state->auth_pins -= dec;
    dir->freeze_tree_state.reset();
    dir->take_waiting(WAIT_UNFREEZE, unfreeze_waiters);
    return true;
  };

  unfreeze(dir);
  dir->_walk_tree(unfreeze);

  mdcache->mds->queue_waiters(unfreeze_waiters);
}
// May a new auth_pin be taken on this frag?  On failure the reason is
// optionally reported through err_ret (ERR_NOT_AUTH / ERR_FRAGMENTING_DIR /
// ERR_EXPORTING_TREE).
bool CDir::can_auth_pin(int *err_ret) const
{
  int err = 0;
  if (!is_auth()) {
    err = ERR_NOT_AUTH;
  } else if (is_freezing_dir() || is_frozen_dir()) {
    err = ERR_FRAGMENTING_DIR;
  } else {
    auto [freezing, frozen] = is_freezing_or_frozen_tree();
    if (freezing || frozen)
      err = ERR_EXPORTING_TREE;
  }
  if (err && err_ret)
    *err_ret = err;
  return err == 0;
}
// Context that releases the auth_pin which CDir::maybe_finish_freeze()
// takes on the parent inode while waiting for that inode to unfreeze.
class C_Dir_AuthUnpin : public CDirContext {
  public:
  explicit C_Dir_AuthUnpin(CDir *d) : CDirContext(d) {}
  void finish(int r) override {
    dir->auth_unpin(dir->get_inode());
  }
};
// Called whenever auth pins drain; completes a pending dir or tree
// freeze once only our own bookkeeping pin (count == 1) remains.
void CDir::maybe_finish_freeze()
{
  if (dir_auth_pins != 0)
    return;

  // we can freeze the _dir_ even with nested pins...
  if (state_test(STATE_FREEZINGDIR)) {
    if (auth_pins == 1) {
      _freeze_dir();
      auth_unpin(this);
      finish_waiting(WAIT_FROZEN);
    }
  }

  if (freeze_tree_state) {
    if (freeze_tree_state->frozen ||
	freeze_tree_state->auth_pins != 1)
      return;

    if (freeze_tree_state->dir != this) {
      // we are a nested frag; let the freeze root re-check
      freeze_tree_state->dir->maybe_finish_freeze();
      return;
    }

    ceph_assert(state_test(STATE_FREEZINGTREE));

    if (!is_subtree_root() && inode->is_frozen()) {
      dout(10) << __func__ << " !subtree root and frozen inode, waiting for unfreeze on " << inode << dendl;
      // retake an auth_pin...
      auth_pin(inode);
      // and release it when the parent inode unfreezes
      inode->add_waiter(WAIT_UNFREEZE, new C_Dir_AuthUnpin(this));
      return;
    }

    _freeze_tree();
    auth_unpin(this);
    finish_waiting(WAIT_FROZEN);
  }
}
// FREEZE DIR
// Begin freezing just this dirfrag (not the subtree).  Returns true if
// it froze immediately, false if we wait for auth pins to drain
// (STATE_FREEZINGDIR set; maybe_finish_freeze() completes it later).
bool CDir::freeze_dir()
{
  ceph_assert(!is_frozen());
  ceph_assert(!is_freezing());

  auth_pin(this);
  if (is_freezeable_dir(true)) {
    _freeze_dir();
    auth_unpin(this);
    return true;
  } else {
    state_set(STATE_FREEZINGDIR);
    if (!lock_caches_with_auth_pins.empty())
      mdcache->mds->locker->invalidate_lock_caches(this);
    dout(10) << "freeze_dir + wait " << *this << dendl;
    return false;
  }
}
// Flip this frag from freezing to frozen, pinning the parent inode for
// the duration (unless we are a subtree root).
void CDir::_freeze_dir()
{
  dout(10) << __func__ << " " << *this << dendl;
  //assert(is_freezeable_dir(true));
  // not always true during split because the original fragment may have frozen a while
  // ago and we're just now getting around to breaking it up.

  state_clear(STATE_FREEZINGDIR);
  state_set(STATE_FROZENDIR);
  get(PIN_FROZEN);

  if (is_auth() && !is_subtree_root())
    inode->auth_pin(this);  // auth_pin for duration of freeze
}
// Undo a dir freeze, whether completed (FROZENDIR) or still pending
// (FREEZINGDIR), waking the appropriate waiters in each case.
void CDir::unfreeze_dir()
{
  dout(10) << __func__ << " " << *this << dendl;

  if (state_test(STATE_FROZENDIR)) {
    state_clear(STATE_FROZENDIR);
    put(PIN_FROZEN);

    // unpin  (may => FREEZEABLE)   FIXME: is this order good?
    if (is_auth() && !is_subtree_root())
      inode->auth_unpin(this);

    finish_waiting(WAIT_UNFREEZE);
  } else {
    finish_waiting(WAIT_FROZEN, -1);

    // still freezing. stop.
    ceph_assert(state_test(STATE_FREEZINGDIR));
    state_clear(STATE_FREEZINGDIR);
    auth_unpin(this);

    finish_waiting(WAIT_UNFREEZE);
  }
}
// Drop one suppression of inode freezing in this dir; when the count
// reaches zero, let every inode that was waiting to freeze try to
// finish.  (The iterator is advanced before the callback because
// maybe_finish_freeze_inode() may remove the inode from the list.)
void CDir::enable_frozen_inode()
{
  ceph_assert(frozen_inode_suppressed > 0);
  if (--frozen_inode_suppressed == 0) {
    for (auto p = freezing_inodes.begin(); !p.end(); ) {
      CInode *in = *p;
      ++p;
      ceph_assert(in->is_freezing_inode());
      in->maybe_finish_freeze_inode();
    }
  }
}
/**
* Slightly less complete than operator<<, because this is intended
* for identifying a directory and its state rather than for dumping
* debug output.
*/
// Emit this dirfrag's identity and state into the Formatter; the
// DUMP_* bits in `flags` select which sections are produced (see the
// DUMP_* constants in CDir.h).
void CDir::dump(Formatter *f, int flags) const
{
  ceph_assert(f != NULL);
  if (flags & DUMP_PATH) {
    f->dump_stream("path") << get_path();
  }
  if (flags & DUMP_DIRFRAG) {
    f->dump_stream("dirfrag") << dirfrag();
  }
  if (flags & DUMP_SNAPID_FIRST) {
    f->dump_int("snapid_first", first);
  }
  if (flags & DUMP_VERSIONS) {
    f->dump_stream("projected_version") << get_projected_version();
    f->dump_stream("version") << get_version();
    f->dump_stream("committing_version") << get_committing_version();
    f->dump_stream("committed_version") << get_committed_version();
  }
  if (flags & DUMP_REP) {
    f->dump_bool("is_rep", is_rep());
  }
  if (flags & DUMP_DIR_AUTH) {
    // unambiguous auth is printed as a single rank; the empty string
    // means default (inherited) authority
    if (get_dir_auth() != CDIR_AUTH_DEFAULT) {
      if (get_dir_auth().second == CDIR_AUTH_UNKNOWN) {
        f->dump_stream("dir_auth") << get_dir_auth().first;
      } else {
        f->dump_stream("dir_auth") << get_dir_auth();
      }
    } else {
      f->dump_string("dir_auth", "");
    }
  }
  if (flags & DUMP_STATES) {
    f->open_array_section("states");
    MDSCacheObject::dump_states(f);
    if (state_test(CDir::STATE_COMPLETE)) f->dump_string("state", "complete");
    if (state_test(CDir::STATE_FREEZINGTREE)) f->dump_string("state", "freezingtree");
    if (state_test(CDir::STATE_FROZENTREE)) f->dump_string("state", "frozentree");
    if (state_test(CDir::STATE_FROZENDIR)) f->dump_string("state", "frozendir");
    if (state_test(CDir::STATE_FREEZINGDIR)) f->dump_string("state", "freezingdir");
    if (state_test(CDir::STATE_EXPORTBOUND)) f->dump_string("state", "exportbound");
    if (state_test(CDir::STATE_IMPORTBOUND)) f->dump_string("state", "importbound");
    if (state_test(CDir::STATE_BADFRAG)) f->dump_string("state", "badfrag");
    f->close_section();
  }
  if (flags & DUMP_MDS_CACHE_OBJECT) {
    MDSCacheObject::dump(f);
  }
  if (flags & DUMP_ITEMS) {
    f->open_array_section("dentries");
    for (auto &p : items) {
      CDentry *dn = p.second;
      f->open_object_section("dentry");
      dn->dump(f);
      f->close_section();
    }
    f->close_section();
  }
}
// Dump this frag's identity plus its four popularity/load vectors,
// each as its own named section.
void CDir::dump_load(Formatter *f)
{
  f->dump_stream("path") << get_path();
  f->dump_stream("dirfrag") << dirfrag();

  auto dump_section = [f](const char *name, auto& load) {
    f->open_object_section(name);
    load.dump(f);
    f->close_section();
  };
  dump_section("pop_me", pop_me);
  dump_section("pop_nested", pop_nested);
  dump_section("pop_auth_subtree", pop_auth_subtree);
  dump_section("pop_auth_subtree_nested", pop_auth_subtree_nested);
}
/****** Scrub Stuff *******/
void CDir::scrub_info_create() const
{
ceph_assert(!scrub_infop);
// break out of const-land to set up implicit initial state
CDir *me = const_cast<CDir*>(this);
const auto& pf = me->get_projected_fnode();
std::unique_ptr<scrub_info_t> si(new scrub_info_t());
si->last_recursive.version = pf->recursive_scrub_version;
si->last_recursive.time = pf->recursive_scrub_stamp;
si->last_local.version = pf->localized_scrub_version;
si->last_local.time = pf->localized_scrub_stamp;
me->scrub_infop.swap(si);
}
// Attach a scrub header to this frag and mark scrubbing in progress.
void CDir::scrub_initialize(const ScrubHeaderRef& header)
{
  ceph_assert(header);
  // ensure scrub_infop exists (scrub_info() creates it lazily)
  // FIXME: weird implicit construction, is someone else meant
  // to be calling scrub_info_create first?
  scrub_info();
  scrub_infop->header = header;
  scrub_infop->directory_scrubbing = true;
  header->inc_num_pending();
}
// Abort an in-progress scrub: nothing is persisted, all scrub state
// is discarded.
void CDir::scrub_aborted() {
  dout(20) << __func__ << dendl;
  ceph_assert(scrub_is_in_progress());

  scrub_infop->directory_scrubbing = false;
  scrub_infop->last_scrub_dirty = false;
  scrub_infop->header->dec_num_pending();
  scrub_infop.reset();
}
// Record completion of a scrub: stamp the local result (and the
// recursive result too, if this was a recursive scrub) and leave the
// stamps dirty so they get flushed to the fnode later.
void CDir::scrub_finished()
{
  dout(20) << __func__ << dendl;
  ceph_assert(scrub_is_in_progress());

  auto& stamps = scrub_infop->last_local;
  stamps.time = ceph_clock_now();
  stamps.version = get_version();
  if (scrub_infop->header->get_recursive())
    scrub_infop->last_recursive = stamps;

  scrub_infop->last_scrub_dirty = true;
  scrub_infop->directory_scrubbing = false;
  scrub_infop->header->dec_num_pending();
}
// Drop the scrub state once it is neither active nor pending a flush.
void CDir::scrub_maybe_delete_info()
{
  if (!scrub_infop)
    return;
  if (scrub_infop->directory_scrubbing || scrub_infop->last_scrub_dirty)
    return;
  scrub_infop.reset();
}
// Verify this (complete) frag's rstats against its contents.  On
// mismatch, repair and report success if the scrub header asked for
// repair; otherwise report the failure.
bool CDir::scrub_local()
{
  ceph_assert(is_complete());
  if (check_rstats(true))
    return true;

  if (scrub_infop->header->get_repair()) {
    mdcache->repair_dirfrag_stats(this);
    scrub_infop->header->set_repaired();
    return true;
  }
  return false;
}
// Render the full filesystem path of this dirfrag's inode.
std::string CDir::get_path() const
{
  std::string result;
  get_inode()->make_path_string(result, true);
  return result;
}
// Decide whether this frag is so far over the split threshold that it
// should be split immediately (without waiting for the balancer).
// Returns true iff the number of effective (non-null) dentries exceeds
// mds_bal_split_size * mds_bal_fragment_fast_factor.
bool CDir::should_split_fast() const
{
  // Max size a fragment can be before trigger fast splitting
  int fast_limit = g_conf()->mds_bal_split_size * g_conf()->mds_bal_fragment_fast_factor;

  // Fast path: the sum of accounted size and null dentries does not
  // exceed threshold: we definitely are not over it.
  if (get_frag_size() + get_num_head_null() <= fast_limit) {
    return false;
  }

  // Fast path: the accounted size of the frag exceeds threshold: we
  // definitely are over it
  if (get_frag_size() > fast_limit) {
    return true;
  }

  // Slow path: count non-null dentries one by one, but bail out as soon
  // as the tally exceeds the limit -- no need to scan the rest of a
  // (possibly huge) fragment once the answer is known.
  int64_t effective_size = 0;
  for (const auto &p : items) {
    const CDentry *dn = p.second;
    if (!dn->get_projected_linkage()->is_null()) {
      if (++effective_size > fast_limit)
        return true;
    }
  }
  return false;
}
// Decide whether this frag has shrunk enough to merge with its
// siblings.  The unfragmented root frag can't merge further, and a
// distributed ephemeral pin imposes a minimum fragmentation depth.
bool CDir::should_merge() const
{
  if (get_frag() == frag_t())
    return false;

  if (inode->is_ephemeral_dist()) {
    unsigned min_frag_bits = mdcache->get_ephemeral_dist_frag_bits();
    if (min_frag_bits > 0 && get_frag().bits() < min_frag_bits + 1)
      return false;
  }

  int total = (int)get_frag_size() + (int)get_num_snap_items();
  return total < g_conf()->mds_bal_merge_size;
}
MEMPOOL_DEFINE_OBJECT_FACTORY(CDir, co_dir, mds_co);
MEMPOOL_DEFINE_OBJECT_FACTORY(CDir::scrub_info_t, scrub_info_t, mds_co)
| 105,683 | 26.731304 | 158 | cc |
null | ceph-main/src/mds/CDir.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CDIR_H
#define CEPH_CDIR_H
#include <iosfwd>
#include <list>
#include <map>
#include <set>
#include <string>
#include <string_view>
#include "common/bloom_filter.hpp"
#include "common/config.h"
#include "include/buffer_fwd.h"
#include "include/counter.h"
#include "include/types.h"
#include "CInode.h"
#include "MDSCacheObject.h"
#include "MDSContext.h"
#include "cephfs_features.h"
#include "SessionMap.h"
#include "messages/MClientReply.h"
class CDentry;
class MDCache;
std::ostream& operator<<(std::ostream& out, const class CDir& dir);
class CDir : public MDSCacheObject, public Counter<CDir> {
public:
MEMPOOL_CLASS_HELPERS();
typedef mempool::mds_co::map<dentry_key_t, CDentry*> dentry_key_map;
typedef mempool::mds_co::set<dentry_key_t> dentry_key_set;
using fnode_ptr = std::shared_ptr<fnode_t>;
using fnode_const_ptr = std::shared_ptr<const fnode_t>;
template <typename ...Args>
static fnode_ptr allocate_fnode(Args && ...args) {
static mempool::mds_co::pool_allocator<fnode_t> allocator;
return std::allocate_shared<fnode_t>(allocator, std::forward<Args>(args)...);
}
struct dentry_commit_item {
std::string key;
snapid_t first;
bool is_remote = false;
inodeno_t ino;
unsigned char d_type;
mempool::mds_co::string alternate_name;
bool snaprealm = false;
sr_t srnode;
mempool::mds_co::string symlink;
uint64_t features;
uint64_t dft_len;
CInode::inode_const_ptr inode;
CInode::xattr_map_const_ptr xattrs;
CInode::old_inode_map_const_ptr old_inodes;
snapid_t oldest_snap;
damage_flags_t damage_flags;
};
// -- freezing --
struct freeze_tree_state_t {
CDir *dir; // freezing/frozen tree root
int auth_pins = 0;
bool frozen = false;
freeze_tree_state_t(CDir *d) : dir(d) {}
};
class scrub_info_t {
public:
MEMPOOL_CLASS_HELPERS();
struct scrub_stamps {
version_t version = 0;
utime_t time;
};
scrub_info_t() {}
scrub_stamps last_recursive; // when we last finished a recursive scrub
scrub_stamps last_local; // when we last did a local scrub
bool directory_scrubbing = false; /// safety check
bool last_scrub_dirty = false; /// is scrub info dirty or is it flushed to fnode?
ScrubHeaderRef header;
};
// -- pins --
static const int PIN_DNWAITER = 1;
static const int PIN_INOWAITER = 2;
static const int PIN_CHILD = 3;
static const int PIN_FROZEN = 4;
static const int PIN_SUBTREE = 5;
static const int PIN_IMPORTING = 7;
static const int PIN_IMPORTBOUND = 9;
static const int PIN_EXPORTBOUND = 10;
static const int PIN_STICKY = 11;
static const int PIN_SUBTREETEMP = 12; // used by MDCache::trim_non_auth()
// -- state --
static const unsigned STATE_COMPLETE = (1<< 0); // the complete contents are in cache
static const unsigned STATE_FROZENTREE = (1<< 1); // root of tree (bounded by exports)
static const unsigned STATE_FREEZINGTREE = (1<< 2); // in process of freezing
static const unsigned STATE_FROZENDIR = (1<< 3);
static const unsigned STATE_FREEZINGDIR = (1<< 4);
static const unsigned STATE_COMMITTING = (1<< 5); // mid-commit
  static const unsigned STATE_FETCHING =       (1<< 6);   // currently fetching
static const unsigned STATE_CREATING = (1<< 7);
static const unsigned STATE_IMPORTBOUND = (1<< 8);
static const unsigned STATE_EXPORTBOUND = (1<< 9);
static const unsigned STATE_EXPORTING = (1<<10);
static const unsigned STATE_IMPORTING = (1<<11);
static const unsigned STATE_FRAGMENTING = (1<<12);
static const unsigned STATE_STICKY = (1<<13); // sticky pin due to inode stickydirs
static const unsigned STATE_DNPINNEDFRAG = (1<<14); // dir is refragmenting
static const unsigned STATE_ASSIMRSTAT = (1<<15); // assimilating inode->frag rstats
static const unsigned STATE_DIRTYDFT = (1<<16); // dirty dirfragtree
static const unsigned STATE_BADFRAG = (1<<17); // bad dirfrag
static const unsigned STATE_TRACKEDBYOFT = (1<<18); // tracked by open file table
static const unsigned STATE_AUXSUBTREE = (1<<19); // no subtree merge
// common states
static const unsigned STATE_CLEAN = 0;
// these state bits are preserved by an import/export
// ...except if the directory is hashed, in which case none of them are!
static const unsigned MASK_STATE_EXPORTED =
(STATE_COMPLETE|STATE_DIRTY|STATE_DIRTYDFT|STATE_BADFRAG);
static const unsigned MASK_STATE_IMPORT_KEPT =
(
STATE_IMPORTING |
STATE_IMPORTBOUND |
STATE_EXPORTBOUND |
STATE_FROZENTREE |
STATE_STICKY |
STATE_TRACKEDBYOFT);
static const unsigned MASK_STATE_EXPORT_KEPT =
(STATE_EXPORTING |
STATE_IMPORTBOUND |
STATE_EXPORTBOUND |
STATE_FROZENTREE |
STATE_FROZENDIR |
STATE_STICKY |
STATE_TRACKEDBYOFT);
static const unsigned MASK_STATE_FRAGMENT_KEPT =
(STATE_DIRTY |
STATE_EXPORTBOUND |
STATE_IMPORTBOUND |
STATE_AUXSUBTREE |
STATE_REJOINUNDEF);
// -- rep spec --
static const int REP_NONE = 0;
static const int REP_ALL = 1;
static const int REP_LIST = 2;
static const unsigned EXPORT_NONCE = 1;
// -- wait masks --
static const uint64_t WAIT_DENTRY = (1<<0); // wait for item to be in cache
static const uint64_t WAIT_COMPLETE = (1<<1); // wait for complete dir contents
static const uint64_t WAIT_FROZEN = (1<<2); // auth pins removed
static const uint64_t WAIT_CREATED = (1<<3); // new dirfrag is logged
static const int WAIT_DNLOCK_OFFSET = 4;
static const uint64_t WAIT_ANY_MASK = (uint64_t)(-1);
static const uint64_t WAIT_ATSUBTREEROOT = (WAIT_SINGLEAUTH);
// -- dump flags --
static const int DUMP_PATH = (1 << 0);
static const int DUMP_DIRFRAG = (1 << 1);
static const int DUMP_SNAPID_FIRST = (1 << 2);
static const int DUMP_VERSIONS = (1 << 3);
static const int DUMP_REP = (1 << 4);
static const int DUMP_DIR_AUTH = (1 << 5);
static const int DUMP_STATES = (1 << 6);
static const int DUMP_MDS_CACHE_OBJECT = (1 << 7);
static const int DUMP_ITEMS = (1 << 8);
static const int DUMP_ALL = (-1);
static const int DUMP_DEFAULT = DUMP_ALL & (~DUMP_ITEMS);
CDir(CInode *in, frag_t fg, MDCache *mdc, bool auth);
std::string_view pin_name(int p) const override {
switch (p) {
case PIN_DNWAITER: return "dnwaiter";
case PIN_INOWAITER: return "inowaiter";
case PIN_CHILD: return "child";
case PIN_FROZEN: return "frozen";
case PIN_SUBTREE: return "subtree";
case PIN_IMPORTING: return "importing";
case PIN_IMPORTBOUND: return "importbound";
case PIN_EXPORTBOUND: return "exportbound";
case PIN_STICKY: return "sticky";
case PIN_SUBTREETEMP: return "subtreetemp";
default: return generic_pin_name(p);
}
}
bool is_lt(const MDSCacheObject *r) const override {
return dirfrag() < (static_cast<const CDir*>(r))->dirfrag();
}
void resync_accounted_fragstat();
void resync_accounted_rstat();
void assimilate_dirty_rstat_inodes(MutationRef& mut);
void assimilate_dirty_rstat_inodes_finish(EMetaBlob *blob);
void mark_exporting() {
state_set(CDir::STATE_EXPORTING);
inode->num_exporting_dirs++;
}
void clear_exporting() {
state_clear(CDir::STATE_EXPORTING);
inode->num_exporting_dirs--;
}
version_t get_version() const { return fnode->version; }
void update_projected_version() {
ceph_assert(projected_fnode.empty());
projected_version = fnode->version;
}
version_t get_projected_version() const { return projected_version; }
void reset_fnode(fnode_const_ptr&& ptr) {
fnode = std::move(ptr);
}
void set_fresh_fnode(fnode_const_ptr&& ptr);
const fnode_const_ptr& get_fnode() const {
return fnode;
}
// only used for updating newly allocated CDir
fnode_t* _get_fnode() {
if (fnode == empty_fnode)
reset_fnode(allocate_fnode());
return const_cast<fnode_t*>(fnode.get());
}
const fnode_const_ptr& get_projected_fnode() const {
if (projected_fnode.empty())
return fnode;
else
return projected_fnode.back();
}
// fnode should have already been projected in caller's context
fnode_t* _get_projected_fnode() {
ceph_assert(!projected_fnode.empty());
return const_cast<fnode_t*>(projected_fnode.back().get());
}
fnode_ptr project_fnode(const MutationRef& mut);
void pop_and_dirty_projected_fnode(LogSegment *ls, const MutationRef& mut);
bool is_projected() const { return !projected_fnode.empty(); }
version_t pre_dirty(version_t min=0);
void _mark_dirty(LogSegment *ls);
void _set_dirty_flag() {
if (!state_test(STATE_DIRTY)) {
state_set(STATE_DIRTY);
get(PIN_DIRTY);
}
}
void mark_dirty(LogSegment *ls, version_t pv=0);
void mark_clean();
bool is_new() { return item_new.is_on_list(); }
void mark_new(LogSegment *ls);
bool is_bad() { return state_test(STATE_BADFRAG); }
/**
* Call to start this CDir on a new scrub.
* @pre It is not currently scrubbing
* @pre The CDir is marked complete.
* @post It has set up its internal scrubbing state.
*/
void scrub_initialize(const ScrubHeaderRef& header);
const ScrubHeaderRef& get_scrub_header() {
static const ScrubHeaderRef nullref;
return scrub_infop ? scrub_infop->header : nullref;
}
bool scrub_is_in_progress() const {
return (scrub_infop && scrub_infop->directory_scrubbing);
}
/**
* Call this once all CDentries have been scrubbed, according to
* scrub_dentry_next's listing. It finalizes the scrub statistics.
*/
void scrub_finished();
void scrub_aborted();
/**
* Tell the CDir to do a local scrub of itself.
* @pre The CDir is_complete().
* @returns true if the rstats and directory contents match, false otherwise.
*/
bool scrub_local();
/**
* Go bad due to a damaged dentry (register with damagetable and go BADFRAG)
*/
void go_bad_dentry(snapid_t last, std::string_view dname);
const scrub_info_t *scrub_info() const {
if (!scrub_infop)
scrub_info_create();
return scrub_infop.get();
}
// -- accessors --
inodeno_t ino() const { return inode->ino(); } // deprecate me?
frag_t get_frag() const { return frag; }
dirfrag_t dirfrag() const { return dirfrag_t(inode->ino(), frag); }
CInode *get_inode() { return inode; }
const CInode *get_inode() const { return inode; }
CDir *get_parent_dir() { return inode->get_parent_dir(); }
dentry_key_map::iterator begin() { return items.begin(); }
dentry_key_map::iterator end() { return items.end(); }
dentry_key_map::iterator lower_bound(dentry_key_t key) { return items.lower_bound(key); }
unsigned get_num_head_items() const { return num_head_items; }
unsigned get_num_head_null() const { return num_head_null; }
unsigned get_num_snap_items() const { return num_snap_items; }
unsigned get_num_snap_null() const { return num_snap_null; }
unsigned get_num_any() const { return num_head_items + num_head_null + num_snap_items + num_snap_null; }
bool check_rstats(bool scrub=false);
void inc_num_dirty() { num_dirty++; }
void dec_num_dirty() {
ceph_assert(num_dirty > 0);
num_dirty--;
}
int get_num_dirty() const {
return num_dirty;
}
void adjust_num_inodes_with_caps(int d);
int64_t get_frag_size() const {
return get_projected_fnode()->fragstat.size();
}
// -- dentries and inodes --
CDentry* lookup_exact_snap(std::string_view dname, snapid_t last);
CDentry* lookup(std::string_view n, snapid_t snap=CEPH_NOSNAP);
void adjust_dentry_lru(CDentry *dn);
CDentry* add_null_dentry(std::string_view dname,
snapid_t first=2, snapid_t last=CEPH_NOSNAP);
CDentry* add_primary_dentry(std::string_view dname, CInode *in, mempool::mds_co::string alternate_name,
snapid_t first=2, snapid_t last=CEPH_NOSNAP);
CDentry* add_remote_dentry(std::string_view dname, inodeno_t ino, unsigned char d_type,
mempool::mds_co::string alternate_name,
snapid_t first=2, snapid_t last=CEPH_NOSNAP);
void remove_dentry( CDentry *dn ); // delete dentry
void link_remote_inode( CDentry *dn, inodeno_t ino, unsigned char d_type);
void link_remote_inode( CDentry *dn, CInode *in );
void link_primary_inode( CDentry *dn, CInode *in );
void unlink_inode(CDentry *dn, bool adjust_lru=true);
void try_remove_unlinked_dn(CDentry *dn);
void add_to_bloom(CDentry *dn);
bool is_in_bloom(std::string_view name);
bool has_bloom() { return (bloom ? true : false); }
void remove_bloom() {
bloom.reset();
}
void try_remove_dentries_for_stray();
bool try_trim_snap_dentry(CDentry *dn, const std::set<snapid_t>& snaps);
void split(int bits, std::vector<CDir*>* subs, MDSContext::vec& waiters, bool replay);
void merge(const std::vector<CDir*>& subs, MDSContext::vec& waiters, bool replay);
  // True when head + snap entry count exceeds the configured split
  // threshold; a threshold of 0 (or negative) disables splitting.
  bool should_split() const {
    return g_conf()->mds_bal_split_size > 0 &&
           ((int)get_frag_size() + (int)get_num_snap_items()) > g_conf()->mds_bal_split_size;
  }
bool should_split_fast() const;
bool should_merge() const;
mds_authority_t authority() const override;
mds_authority_t get_dir_auth() const { return dir_auth; }
void set_dir_auth(const mds_authority_t &a);
void set_dir_auth(mds_rank_t a) { set_dir_auth(mds_authority_t(a, CDIR_AUTH_UNKNOWN)); }
bool is_ambiguous_dir_auth() const {
return dir_auth.second != CDIR_AUTH_UNKNOWN;
}
bool is_full_dir_auth() const {
return is_auth() && !is_ambiguous_dir_auth();
}
bool is_full_dir_nonauth() const {
return !is_auth() && !is_ambiguous_dir_auth();
}
bool is_subtree_root() const {
return dir_auth != CDIR_AUTH_DEFAULT;
}
bool contains(CDir *x); // true if we are x or an ancestor of x
// for giving to clients
  // Fill `ls` with the ranks replicating this dirfrag (for client dirstat
  // replies). Only populated on the auth MDS; when any replicas exist the
  // auth rank itself is added too.
  void get_dist_spec(std::set<mds_rank_t>& ls, mds_rank_t auth) {
    if (is_auth()) {
      list_replicas(ls);
      if (!ls.empty())
	ls.insert(auth);
    }
  }
static void encode_dirstat(ceph::buffer::list& bl, const session_info_t& info, const DirStat& ds);
  // Encode the basic dirfrag state (first snapid, fnode, replication
  // policy) with a version envelope. Must stay in sync with _decode_base().
  void _encode_base(ceph::buffer::list& bl) {
    ENCODE_START(1, 1, bl);
    encode(first, bl);
    encode(*fnode, bl);
    encode(dir_rep, bl);
    encode(dir_rep_by, bl);
    ENCODE_FINISH(bl);
  }
  // Mirror of _encode_base(): decode first/fnode/dir_rep/dir_rep_by in the
  // same order. The fnode is decoded into a fresh object and swapped in via
  // reset_fnode() because the live fnode is a shared const pointer (RCU-style).
  void _decode_base(ceph::buffer::list::const_iterator& p) {
    DECODE_START(1, p);
    decode(first, p);
    {
      auto _fnode = allocate_fnode();
      decode(*_fnode, p);
      reset_fnode(std::move(_fnode));
    }
    decode(dir_rep, p);
    decode(dir_rep_by, p);
    DECODE_FINISH(p);
  }
// -- state --
bool is_complete() { return state & STATE_COMPLETE; }
bool is_exporting() { return state & STATE_EXPORTING; }
bool is_importing() { return state & STATE_IMPORTING; }
bool is_dirty_dft() { return state & STATE_DIRTYDFT; }
int get_dir_rep() const { return dir_rep; }
bool is_rep() const {
if (dir_rep == REP_NONE) return false;
return true;
}
bool can_rep() const;
// -- fetch --
object_t get_ondisk_object() {
return file_object_t(ino(), frag);
}
void fetch(std::string_view dname, snapid_t last,
MDSContext *c, bool ignore_authpinnability=false);
void fetch(MDSContext *c, bool ignore_authpinnability=false) {
fetch("", CEPH_NOSNAP, c, ignore_authpinnability);
}
void fetch_keys(const std::vector<dentry_key_t>& keys, MDSContext *c);
#if 0 // unused?
void wait_for_commit(Context *c, version_t v=0);
#endif
void commit_to(version_t want);
void commit(version_t want, MDSContext *c,
bool ignore_authpinnability=false, int op_prio=-1);
// -- dirtyness --
version_t get_committing_version() const { return committing_version; }
version_t get_committed_version() const { return committed_version; }
void set_committed_version(version_t v) { committed_version = v; }
void mark_complete();
// -- reference counting --
void first_get() override;
void last_put() override;
bool is_waiting_for_dentry(std::string_view dname, snapid_t snap) {
return waiting_on_dentry.count(string_snap_t(dname, snap));
}
void add_dentry_waiter(std::string_view dentry, snapid_t snap, MDSContext *c);
void take_dentry_waiting(std::string_view dentry, snapid_t first, snapid_t last, MDSContext::vec& ls);
void add_waiter(uint64_t mask, MDSContext *c) override;
void take_waiting(uint64_t mask, MDSContext::vec& ls) override; // may include dentry waiters
void finish_waiting(uint64_t mask, int result = 0); // ditto
// -- import/export --
mds_rank_t get_export_pin(bool inherit=true) const;
bool is_exportable(mds_rank_t dest) const;
void encode_export(ceph::buffer::list& bl);
void finish_export();
void abort_export() {
put(PIN_TEMPEXPORTING);
}
void decode_import(ceph::buffer::list::const_iterator& blp, LogSegment *ls);
void abort_import();
// -- auth pins --
bool can_auth_pin(int *err_ret=nullptr) const override;
int get_auth_pins() const { return auth_pins; }
int get_dir_auth_pins() const { return dir_auth_pins; }
void auth_pin(void *who) override;
void auth_unpin(void *who) override;
void adjust_nested_auth_pins(int dirinc, void *by);
void verify_fragstat();
void _walk_tree(std::function<bool(CDir*)> cb);
bool freeze_tree();
void _freeze_tree();
void unfreeze_tree();
void adjust_freeze_after_rename(CDir *dir);
bool freeze_dir();
void _freeze_dir();
void unfreeze_dir();
void maybe_finish_freeze();
std::pair<bool,bool> is_freezing_or_frozen_tree() const {
if (freeze_tree_state) {
if (freeze_tree_state->frozen)
return std::make_pair(false, true);
return std::make_pair(true, false);
}
return std::make_pair(false, false);
}
bool is_freezing() const override { return is_freezing_dir() || is_freezing_tree(); }
bool is_freezing_tree() const {
if (!num_freezing_trees)
return false;
return is_freezing_or_frozen_tree().first;
}
bool is_freezing_tree_root() const { return state & STATE_FREEZINGTREE; }
bool is_freezing_dir() const { return state & STATE_FREEZINGDIR; }
bool is_frozen() const override { return is_frozen_dir() || is_frozen_tree(); }
bool is_frozen_tree() const {
if (!num_frozen_trees)
return false;
return is_freezing_or_frozen_tree().second;
}
bool is_frozen_tree_root() const { return state & STATE_FROZENTREE; }
bool is_frozen_dir() const { return state & STATE_FROZENDIR; }
  // Can this dirfrag freeze (as part of a tree)?
  //  * No outstanding auth pins beyond the single pin the freezer itself
  //    holds (discounted when `freezing` is true), and the tree-wide pin
  //    count must match this dirfrag's own — NOTE(review): assumes
  //    freeze_tree_state->auth_pins aggregates the whole tree; confirm.
  //  * The inode must not be frozen unless we are a subtree root.
  bool is_freezeable(bool freezing=false) const {
    // no nested auth pins.
    if (auth_pins - (freezing ? 1 : 0) > 0 ||
	(freeze_tree_state && freeze_tree_state->auth_pins != auth_pins))
      return false;
    // inode must not be frozen.
    if (!is_subtree_root() && inode->is_frozen())
      return false;
    return true;
  }
  // Can this single dirfrag freeze? Like is_freezeable(), but also requires
  // no dir_auth_pins. `freezing` (bool) is used as 0/1 to discount the
  // freezer's own pin. A frozen-dir inode is tolerated for non-subtree roots.
  bool is_freezeable_dir(bool freezing=false) const {
    if ((auth_pins - freezing) > 0 || dir_auth_pins > 0)
      return false;
    // if not subtree root, inode must not be frozen (tree--frozen_dir is okay).
    if (!is_subtree_root() && inode->is_frozen() && !inode->is_frozen_dir())
      return false;
    return true;
  }
bool is_any_freezing_or_frozen_inode() const {
return num_frozen_inodes || !freezing_inodes.empty();
}
bool is_auth_pinned_by_lock_cache() const {
return frozen_inode_suppressed;
}
void disable_frozen_inode() {
ceph_assert(num_frozen_inodes == 0);
frozen_inode_suppressed++;
}
void enable_frozen_inode();
std::ostream& print_db_line_prefix(std::ostream& out) override;
void print(std::ostream& out) override;
void dump(ceph::Formatter *f, int flags = DUMP_DEFAULT) const;
void dump_load(ceph::Formatter *f);
// context
MDCache *mdcache;
CInode *inode; // my inode
frag_t frag; // my frag
snapid_t first = 2;
mempool::mds_co::compact_map<snapid_t,old_rstat_t> dirty_old_rstat; // [value.first,key]
// my inodes with dirty rstat data
elist<CInode*> dirty_rstat_inodes;
elist<CDentry*> dirty_dentries;
elist<CDir*>::item item_dirty, item_new;
// lock caches that auth-pin me
elist<MDLockCache::DirItem*> lock_caches_with_auth_pins;
// all dirfrags within freezing/frozen tree reference the 'state'
std::shared_ptr<freeze_tree_state_t> freeze_tree_state;
protected:
// friends
friend class Migrator;
friend class CInode;
friend class MDCache;
friend class MDiscover;
friend class MDBalancer;
friend class CDirDiscover;
friend class CDirExport;
friend class C_IO_Dir_TMAP_Fetched;
friend class C_IO_Dir_OMAP_Fetched;
friend class C_IO_Dir_OMAP_FetchedMore;
friend class C_IO_Dir_Committed;
friend class C_IO_Dir_Commit_Ops;
void _omap_fetch(std::set<std::string> *keys, MDSContext *fin=nullptr);
void _omap_fetch_more(version_t omap_version, bufferlist& hdrbl,
std::map<std::string, bufferlist>& omap, MDSContext *fin);
CDentry *_load_dentry(
std::string_view key,
std::string_view dname,
snapid_t last,
ceph::buffer::list &bl,
int pos,
const std::set<snapid_t> *snaps,
double rand_threshold,
bool *force_dirty);
/**
* Go bad due to a damaged header (register with damagetable and go BADFRAG)
*/
void go_bad(bool complete);
void _omap_fetched(ceph::buffer::list& hdrbl, std::map<std::string, ceph::buffer::list>& omap,
bool complete, const std::set<std::string>& keys, int r);
// -- commit --
void _commit(version_t want, int op_prio);
void _omap_commit_ops(int r, int op_prio, int64_t metapool, version_t version, bool _new,
std::vector<dentry_commit_item> &to_set, bufferlist &dfts,
std::vector<std::string> &to_remove,
mempool::mds_co::compact_set<mempool::mds_co::string> &_stale);
void _encode_primary_inode_base(dentry_commit_item &item, bufferlist &dfts,
bufferlist &bl);
void _omap_commit(int op_prio);
void _parse_dentry(CDentry *dn, dentry_commit_item &item,
const std::set<snapid_t> *snaps, bufferlist &bl);
void _committed(int r, version_t v);
static fnode_const_ptr empty_fnode;
// fnode is a pointer to constant fnode_t, the constant fnode_t can be shared
// by CDir and log events. To update fnode, read-copy-update should be used.
fnode_const_ptr fnode = empty_fnode;
version_t projected_version = 0;
mempool::mds_co::list<fnode_const_ptr> projected_fnode;
std::unique_ptr<scrub_info_t> scrub_infop;
// contents of this directory
dentry_key_map items; // non-null AND null
unsigned num_head_items = 0;
unsigned num_head_null = 0;
unsigned num_snap_items = 0;
unsigned num_snap_null = 0;
int num_dirty = 0;
int num_inodes_with_caps = 0;
// state
version_t committing_version = 0;
version_t committed_version = 0;
mempool::mds_co::compact_set<mempool::mds_co::string> stale_items;
// lock nesting, freeze
static int num_frozen_trees;
static int num_freezing_trees;
// freezing/frozen inodes in this dirfrag
int num_frozen_inodes = 0;
int frozen_inode_suppressed = 0;
elist<CInode*> freezing_inodes;
int dir_auth_pins = 0;
// cache control (defined for authority; hints for replicas)
__s32 dir_rep;
mempool::mds_co::compact_set<__s32> dir_rep_by; // if dir_rep == REP_LIST
// popularity
dirfrag_load_vec_t pop_me;
dirfrag_load_vec_t pop_nested;
dirfrag_load_vec_t pop_auth_subtree;
dirfrag_load_vec_t pop_auth_subtree_nested;
ceph::coarse_mono_time last_popularity_sample = ceph::coarse_mono_clock::zero();
elist<CInode*> pop_lru_subdirs;
std::unique_ptr<bloom_filter> bloom; // XXX not part of mempool::mds_co
/* If you set up the bloom filter, you must keep it accurate!
* It's deleted when you mark_complete() and is deliberately not serialized.*/
mempool::mds_co::compact_map<version_t, MDSContext::vec_alloc<mempool::mds_co::pool_allocator> > waiting_for_commit;
// -- waiters --
mempool::mds_co::map< string_snap_t, MDSContext::vec_alloc<mempool::mds_co::pool_allocator> > waiting_on_dentry; // FIXME string_snap_t not in mempool
private:
friend std::ostream& operator<<(std::ostream& out, const class CDir& dir);
void log_mark_dirty();
/**
* Create a scrub_info_t struct for the scrub_infop pointer.
*/
void scrub_info_create() const;
/**
* Delete the scrub_infop if it's not got any useful data.
*/
void scrub_maybe_delete_info();
void link_inode_work( CDentry *dn, CInode *in );
void unlink_inode_work( CDentry *dn );
void remove_null_dentries();
void prepare_new_fragment(bool replay);
void prepare_old_fragment(std::map<string_snap_t, MDSContext::vec >& dentry_waiters, bool replay);
void steal_dentry(CDentry *dn); // from another dir. used by merge/split.
void finish_old_fragment(MDSContext::vec& waiters, bool replay);
void init_fragment_pins();
std::string get_path() const;
// -- authority --
/*
* normal: <parent,unknown> !subtree_root
* delegation: <mds,unknown> subtree_root
* ambiguous: <mds1,mds2> subtree_root
* <parent,mds2> subtree_root
*/
mds_authority_t dir_auth;
};
#endif
| 25,776 | 31.711929 | 152 | h |
null | ceph-main/src/mds/CInode.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/int_types.h"
#include "common/errno.h"
#include <string>
#include "CInode.h"
#include "CDir.h"
#include "CDentry.h"
#include "MDSRank.h"
#include "MDCache.h"
#include "MDLog.h"
#include "Locker.h"
#include "Mutation.h"
#include "events/EUpdate.h"
#include "osdc/Objecter.h"
#include "snap.h"
#include "LogSegment.h"
#include "common/Clock.h"
#include "common/config.h"
#include "global/global_context.h"
#include "include/ceph_assert.h"
#include "mds/MDSContinuation.h"
#include "mds/InoTable.h"
#include "cephfs_features.h"
#include "osdc/Objecter.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << ino() << ") "
using namespace std;
// Populate `op` with the xattr writes that persist the backtrace `bt` on
// the inode's backing object: always "parent"; additionally "layout" and
// (for symlinks) "symlink" unless this targets an old pool
// (update_layout_symlink == false).
void CInodeCommitOperation::update(ObjectOperation &op, inode_backtrace_t &bt) {
  using ceph::encode;
  op.priority = priority;
  // create(false): ok if the object already exists.
  op.create(false);
  bufferlist parent_bl;
  encode(bt, parent_bl);
  op.setxattr("parent", parent_bl);
  // for the old pool there is no need to update the layout and symlink
  if (!update_layout_symlink)
    return;
  bufferlist layout_bl;
  encode(_layout, layout_bl, _features);
  op.setxattr("layout", layout_bl);
  if (!_symlink.empty()) {
    bufferlist symlink_bl;
    encode(_symlink, symlink_bl);
    op.setxattr("symlink", symlink_bl);
  }
}
// Base class for IO completions tied to a CInode: stashes the inode and
// resolves the owning MDSRank through its mdcache.
class CInodeIOContext : public MDSIOContextBase
{
protected:
  CInode *in;
  MDSRank *get_mds() override {return in->mdcache->mds;}
public:
  explicit CInodeIOContext(CInode *in_) : in(in_) {
    ceph_assert(in != NULL);
  }
};
// Sentinel meaning "this projection carries no srnode change" — distinct
// from a null srnode, which (see pop_projected_snaprealm) means the
// snaprealm is being removed.
sr_t* const CInode::projected_inode::UNDEF_SRNODE = (sr_t*)(unsigned long)-1;
// Shared per-class LockType descriptors, one per inode lock.
LockType CInode::versionlock_type(CEPH_LOCK_IVERSION);
LockType CInode::authlock_type(CEPH_LOCK_IAUTH);
LockType CInode::linklock_type(CEPH_LOCK_ILINK);
LockType CInode::dirfragtreelock_type(CEPH_LOCK_IDFT);
LockType CInode::filelock_type(CEPH_LOCK_IFILE);
LockType CInode::xattrlock_type(CEPH_LOCK_IXATTR);
LockType CInode::snaplock_type(CEPH_LOCK_ISNAP);
LockType CInode::nestlock_type(CEPH_LOCK_INEST);
LockType CInode::flocklock_type(CEPH_LOCK_IFLOCK);
LockType CInode::policylock_type(CEPH_LOCK_IPOLICY);
// Human-readable name for a CInode-specific pin id; ids not listed here
// fall through to MDSCacheObject::generic_pin_name().
std::string_view CInode::pin_name(int p) const
{
  switch (p) {
    case PIN_DIRFRAG: return "dirfrag";
    case PIN_CAPS: return "caps";
    case PIN_IMPORTING: return "importing";
    case PIN_OPENINGDIR: return "openingdir";
    case PIN_REMOTEPARENT: return "remoteparent";
    case PIN_BATCHOPENJOURNAL: return "batchopenjournal";
    case PIN_SCATTERED: return "scattered";
    case PIN_STICKYDIRS: return "stickydirs";
    //case PIN_PURGING: return "purging";
    case PIN_FREEZING: return "freezing";
    case PIN_FROZEN: return "frozen";
    case PIN_IMPORTINGCAPS: return "importingcaps";
    case PIN_EXPORTINGCAPS: return "exportingcaps";
    case PIN_PASTSNAPPARENT: return "pastsnapparent";
    case PIN_OPENINGSNAPPARENTS: return "openingsnapparents";
    case PIN_TRUNCATING: return "truncating";
    case PIN_STRAY: return "stray";
    case PIN_NEEDSNAPFLUSH: return "needsnapflush";
    case PIN_DIRTYRSTAT: return "dirtyrstat";
    case PIN_DIRTYPARENT: return "dirtyparent";
    case PIN_DIRWAITER: return "dirwaiter";
    default: return generic_pin_name(p);
  }
}
//int cinode_pins[CINODE_NUM_PINS]; // counts
// Debug-line prefix: wall-clock timestamp, mds rank, and inode number.
ostream& CInode::print_db_line_prefix(ostream& out)
{
  return out << ceph_clock_now() << " mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << ino() << ") ";
}
/*
 * write caps and lock ids
 *
 * Table mapping each inode lock class to the client capability bits it
 * governs; num_cinode_locks is its element count.
 */
struct cinode_lock_info_t cinode_lock_info[] = {
  { CEPH_LOCK_IFILE, CEPH_CAP_ANY_FILE_WR },
  { CEPH_LOCK_IAUTH, CEPH_CAP_AUTH_EXCL },
  { CEPH_LOCK_ILINK, CEPH_CAP_LINK_EXCL },
  { CEPH_LOCK_IXATTR, CEPH_CAP_XATTR_EXCL },
};
int num_cinode_locks = sizeof(cinode_lock_info) / sizeof(cinode_lock_info[0]);
// One-line debug dump of a CInode: path, auth/replica info, versions,
// pins, state flags, size/stat info, lock states, client caps, mds cap
// wants, ref pins and export-pin state. The format is parsed by humans
// and log tooling — keep it stable.
ostream& operator<<(ostream& out, const CInode& in)
{
  string path;
  in.make_path_string(path, true);
  out << "[inode " << in.ino();
  out << " ["
      << (in.is_multiversion() ? "...":"")
      << in.first << "," << in.last << "]";
  out << " " << path << (in.is_dir() ? "/":"");
  if (in.is_auth()) {
    out << " auth";
    if (in.is_replicated())
      out << in.get_replicas();
  } else {
    mds_authority_t a = in.authority();
    out << " rep@" << a.first;
    if (a.second != CDIR_AUTH_UNKNOWN)
      out << "," << a.second;
    out << "." << in.get_replica_nonce();
  }
  if (in.is_symlink())
    out << " symlink='" << in.symlink << "'";
  if (in.is_dir() && !in.dirfragtree.empty())
    out << " " << in.dirfragtree;
  out << " v" << in.get_version();
  if (in.get_projected_version() > in.get_version())
    out << " pv" << in.get_projected_version();
  if (in.get_num_auth_pins()) {
    out << " ap=" << in.get_num_auth_pins();
#ifdef MDS_AUTHPIN_SET
    in.print_authpin_set(out);
#endif
  }
  if (in.snaprealm)
    out << " snaprealm=" << in.snaprealm;
  if (in.state_test(CInode::STATE_AMBIGUOUSAUTH)) out << " AMBIGAUTH";
  if (in.state_test(CInode::STATE_NEEDSRECOVER)) out << " NEEDSRECOVER";
  if (in.state_test(CInode::STATE_RECOVERING)) out << " RECOVERING";
  if (in.state_test(CInode::STATE_DIRTYPARENT)) out << " DIRTYPARENT";
  if (in.state_test(CInode::STATE_MISSINGOBJS)) out << " MISSINGOBJS";
  if (in.is_ephemeral_dist()) out << " DISTEPHEMERALPIN";
  if (in.is_ephemeral_rand()) out << " RANDEPHEMERALPIN";
  if (in.is_freezing_inode()) out << " FREEZING=" << in.auth_pin_freeze_allowance;
  if (in.is_frozen_inode()) out << " FROZEN";
  if (in.is_frozen_auth_pin()) out << " FROZEN_AUTHPIN";
  const auto& pi = in.get_projected_inode();
  if (pi->is_truncating())
    out << " truncating(" << pi->truncate_from << " to " << pi->truncate_size << ")";
  if (in.is_dir()) {
    out << " " << in.get_inode()->dirstat;
    if (g_conf()->mds_debug_scatterstat && in.is_projected()) {
      out << "->" << pi->dirstat;
    }
  } else {
    out << " s=" << in.get_inode()->size;
    if (in.get_inode()->nlink != 1)
      out << " nl=" << in.get_inode()->nlink;
  }
  // rstat
  out << " " << in.get_inode()->rstat;
  if (!(in.get_inode()->rstat == in.get_inode()->accounted_rstat))
    out << "/" << in.get_inode()->accounted_rstat;
  if (g_conf()->mds_debug_scatterstat && in.is_projected()) {
    out << "->" << pi->rstat;
    if (!(pi->rstat == pi->accounted_rstat))
      out << "/" << pi->accounted_rstat;
  }
  if (in.is_any_old_inodes()) {
    out << " old_inodes=" << in.get_old_inodes()->size();
  }
  if (!in.client_need_snapflush.empty())
    out << " need_snapflush=" << in.client_need_snapflush;
  // locks
  if (!in.authlock.is_sync_and_unlocked())
    out << " " << in.authlock;
  if (!in.linklock.is_sync_and_unlocked())
    out << " " << in.linklock;
  if (in.get_inode()->is_dir()) {
    if (!in.dirfragtreelock.is_sync_and_unlocked())
      out << " " << in.dirfragtreelock;
    if (!in.snaplock.is_sync_and_unlocked())
      out << " " << in.snaplock;
    if (!in.nestlock.is_sync_and_unlocked())
      out << " " << in.nestlock;
    if (!in.policylock.is_sync_and_unlocked())
      out << " " << in.policylock;
  } else {
    if (!in.flocklock.is_sync_and_unlocked())
      out << " " << in.flocklock;
  }
  if (!in.filelock.is_sync_and_unlocked())
    out << " " << in.filelock;
  if (!in.xattrlock.is_sync_and_unlocked())
    out << " " << in.xattrlock;
  if (!in.versionlock.is_sync_and_unlocked())
    out << " " << in.versionlock;
  // hack: spit out crap on which clients have caps
  if (in.get_inode()->client_ranges.size())
    out << " cr=" << in.get_inode()->client_ranges;
  if (!in.get_client_caps().empty()) {
    out << " caps={";
    bool first = true;
    for (const auto &p : in.get_client_caps()) {
      if (!first) out << ",";
      out << p.first << "="
	  << ccap_string(p.second.pending());
      if (p.second.issued() != p.second.pending())
	out << "/" << ccap_string(p.second.issued());
      out << "/" << ccap_string(p.second.wanted())
	  << "@" << p.second.get_last_seq();
      first = false;
    }
    out << "}";
    if (in.get_loner() >= 0 || in.get_wanted_loner() >= 0) {
      out << ",l=" << in.get_loner();
      if (in.get_loner() != in.get_wanted_loner())
	out << "(" << in.get_wanted_loner() << ")";
    }
  }
  if (!in.get_mds_caps_wanted().empty()) {
    out << " mcw={";
    bool first = true;
    for (const auto &p : in.get_mds_caps_wanted()) {
      if (!first)
	out << ',';
      out << p.first << '=' << ccap_string(p.second);
      first = false;
    }
    out << '}';
  }
  if (in.get_num_ref()) {
    out << " |";
    in.print_pin_set(out);
  }
  if (in.get_inode()->export_pin != MDS_RANK_NONE) {
    out << " export_pin=" << in.get_inode()->export_pin;
  }
  if (in.state_test(CInode::STATE_DISTEPHEMERALPIN)) {
    out << " distepin";
  }
  if (in.state_test(CInode::STATE_RANDEPHEMERALPIN)) {
    out << " randepin";
  }
  out << " " << &in;
  out << "]";
  return out;
}
// Construct a CInode covering snapids [f, l]; `auth` marks this MDS as
// authoritative. elist items anchor to `this`; each lock binds to its
// shared per-class LockType descriptor.
CInode::CInode(MDCache *c, bool auth, snapid_t f, snapid_t l) :
  mdcache(c), first(f), last(l),
  item_dirty(this),
  item_caps(this),
  item_open_file(this),
  item_dirty_parent(this),
  item_dirty_dirfrag_dir(this),
  item_dirty_dirfrag_nest(this),
  item_dirty_dirfrag_dirfragtree(this),
  pop(c->decayrate),
  versionlock(this, &versionlock_type),
  authlock(this, &authlock_type),
  linklock(this, &linklock_type),
  dirfragtreelock(this, &dirfragtreelock_type),
  filelock(this, &filelock_type),
  xattrlock(this, &xattrlock_type),
  snaplock(this, &snaplock_type),
  nestlock(this, &nestlock_type),
  flocklock(this, &flocklock_type),
  policylock(this, &policylock_type)
{
  if (auth)
    state_set(STATE_AUTH);
}
// Delegate to operator<< for the canonical one-line dump.
void CInode::print(ostream& out)
{
  out << *this;
}
// Record that `client` still owes a snap flush for `snapid`, whose data
// lives on `snapin` (a snapped/old inode, possibly this one). The first
// entry overall pins+auth-pins the head inode; the first client for a
// given snapid auth-pins `snapin`.
void CInode::add_need_snapflush(CInode *snapin, snapid_t snapid, client_t client)
{
  dout(10) << __func__ << " client." << client << " snapid " << snapid << " on " << snapin << dendl;
  if (client_need_snapflush.empty()) {
    get(CInode::PIN_NEEDSNAPFLUSH);
    // FIXME: this is non-optimal, as we'll block freezes/migrations for potentially
    // long periods waiting for clients to flush their snaps.
    auth_pin(this); // pin head get_inode()->..
  }
  auto &clients = client_need_snapflush[snapid];
  if (clients.empty())
    snapin->auth_pin(this); // ...and pin snapped/old inode!
  clients.insert(client);
}
// Undo add_need_snapflush() for one (snapid, client) pair: when the
// snapid's client set empties, drop snapin's pin; when no snapids remain,
// drop the head pin and auth-pin. Unknown snapid/client is logged and
// ignored.
void CInode::remove_need_snapflush(CInode *snapin, snapid_t snapid, client_t client)
{
  dout(10) << __func__ << " client." << client << " snapid " << snapid << " on " << snapin << dendl;
  auto it = client_need_snapflush.find(snapid);
  if (it == client_need_snapflush.end()) {
    dout(10) << " snapid not found" << dendl;
    return;
  }
  size_t n = it->second.erase(client);
  if (n == 0) {
    dout(10) << " client not found" << dendl;
    return;
  }
  if (it->second.empty()) {
    client_need_snapflush.erase(it);
    snapin->auth_unpin(this);
    if (client_need_snapflush.empty()) {
      put(CInode::PIN_NEEDSNAPFLUSH);
      auth_unpin(this);
    }
  }
}
// After a copy-on-write split produced `cowin` covering snapids before
// `in->first`, migrate the pending-snapflush auth pins: for every tracked
// snapid older than in->first, release in's pin and either re-pin cowin
// (snapid within cowin's range) or drop the entry entirely.
// Returns (cowin still needs a flush, `in` still needs a flush).
pair<bool,bool> CInode::split_need_snapflush(CInode *cowin, CInode *in)
{
  dout(10) << __func__ << " [" << cowin->first << "," << cowin->last << "] for " << *cowin << dendl;
  bool cowin_need_flush = false;
  bool orig_need_flush = false;
  auto it = client_need_snapflush.lower_bound(cowin->first);
  while (it != client_need_snapflush.end() && it->first < in->first) {
    ceph_assert(!it->second.empty());
    if (cowin->last >= it->first) {
      // snapid falls inside cowin's range: transfer the pin to cowin
      cowin->auth_pin(this);
      cowin_need_flush = true;
      ++it;
    } else {
      // snapid covered by neither inode any more: drop the bookkeeping
      it = client_need_snapflush.erase(it);
    }
    in->auth_unpin(this);
  }
  if (it != client_need_snapflush.end() && it->first <= in->last)
    orig_need_flush = true;
  return make_pair(cowin_need_flush, orig_need_flush);
}
// Idempotently flag this inode's rstat as dirty and queue it on the
// parent dirfrag's dirty_rstat_inodes list so the parent's nestlock
// scatter-gather propagates it. A non-auth parent dentry is only legal
// mid cross-MDS rename (asserted).
void CInode::mark_dirty_rstat()
{
  if (!state_test(STATE_DIRTYRSTAT)) {
    dout(10) << __func__ << dendl;
    state_set(STATE_DIRTYRSTAT);
    get(PIN_DIRTYRSTAT);
    CDentry *pdn = get_projected_parent_dn();
    if (pdn->is_auth()) {
      CDir *pdir = pdn->dir;
      pdir->dirty_rstat_inodes.push_back(&dirty_rstat_item);
      mdcache->mds->locker->mark_updated_scatterlock(&pdir->inode->nestlock);
    } else {
      // under cross-MDS rename.
      // DIRTYRSTAT flag will get cleared when rename finishes
      ceph_assert(state_test(STATE_AMBIGUOUSAUTH));
    }
  }
}
// Inverse of mark_dirty_rstat(): clear the flag, drop the pin, and unlink
// from the parent dirfrag's dirty_rstat_inodes list. Idempotent.
void CInode::clear_dirty_rstat()
{
  if (state_test(STATE_DIRTYRSTAT)) {
    dout(10) << __func__ << dendl;
    state_clear(STATE_DIRTYRSTAT);
    put(PIN_DIRTYRSTAT);
    dirty_rstat_item.remove_myself();
  }
}
// Begin (or join) a projected — not yet journaled — update of this inode.
// Returns writable copies: a cloned inode, plus a cloned xattr map when
// xattr=true and a fresh srnode when snap=true. If `mut` already holds a
// projection of this inode it is reused, in which case requesting
// xattr/snap again is a bug (asserted).
CInode::projected_inode CInode::project_inode(const MutationRef& mut,
					      bool xattr, bool snap)
{
  if (mut && mut->is_projected(this)) {
    ceph_assert(!xattr && !snap);
    auto _inode = std::const_pointer_cast<mempool_inode>(projected_nodes.back().inode);
    return projected_inode(std::move(_inode), xattr_map_ptr());
  }
  auto pi = allocate_inode(*get_projected_inode());
  // fold any completed scrub results into this projection so they are
  // journaled along with the update
  if (scrub_infop && scrub_infop->last_scrub_dirty) {
    pi->last_scrub_stamp = scrub_infop->last_scrub_stamp;
    pi->last_scrub_version = scrub_infop->last_scrub_version;
    scrub_infop->last_scrub_dirty = false;
    scrub_maybe_delete_info();
  }
  const auto& ox = get_projected_xattrs();
  xattr_map_ptr px;
  if (xattr) {
    px = allocate_xattr_map();
    if (ox)
      *px = *ox;
  }
  // UNDEF_SRNODE means "no srnode change carried by this projection"
  sr_t* ps = projected_inode::UNDEF_SRNODE;
  if (snap) {
    ps = prepare_new_srnode(0);
    ++num_projected_srnodes;
  }
  projected_nodes.emplace_back(pi, xattr ? px : ox , ps);
  if (mut)
    mut->add_projected_node(this);
  dout(15) << __func__ << " " << pi->ino << dendl;
  return projected_inode(std::move(pi), std::move(px), ps);
}
// Commit the oldest projection: install its inode/xattrs/srnode as the
// live state and mark the inode dirty in `ls`. Also watches for a data
// pool change (backtrace must be rewritten, including the old pool) and
// an export-pin change (re-evaluate pinning).
void CInode::pop_and_dirty_projected_inode(LogSegment *ls, const MutationRef& mut)
{
  ceph_assert(!projected_nodes.empty());
  auto front = std::move(projected_nodes.front());
  dout(15) << __func__ << " v" << front.inode->version << dendl;
  projected_nodes.pop_front();
  if (mut)
    mut->remove_projected_node(this);
  // detect changes before overwriting the live inode
  bool pool_updated = get_inode()->layout.pool_id != front.inode->layout.pool_id;
  bool pin_updated = (get_inode()->export_pin != front.inode->export_pin) ||
		     (get_inode()->export_ephemeral_distributed_pin !=
		      front.inode->export_ephemeral_distributed_pin);
  reset_inode(std::move(front.inode));
  if (front.xattrs != get_xattrs())
    reset_xattrs(std::move(front.xattrs));
  // UNDEF_SRNODE means this projection carried no srnode change
  if (front.snapnode != projected_inode::UNDEF_SRNODE) {
    --num_projected_srnodes;
    pop_projected_snaprealm(front.snapnode, false);
  }
  mark_dirty(ls);
  if (get_inode()->is_backtrace_updated())
    mark_dirty_parent(ls, pool_updated);
  if (pin_updated)
    maybe_export_pin(true);
}
// Build an srnode for projection: clone the current projected one when it
// exists; otherwise create a fresh node seeded at `snapid` (or at the
// global snaprealm's newest seq when snapid == 0), inheriting change_attr
// from the containing realm.
sr_t *CInode::prepare_new_srnode(snapid_t snapid)
{
  const sr_t *cur_srnode = get_projected_srnode();
  sr_t *new_srnode;
  if (cur_srnode) {
    new_srnode = new sr_t(*cur_srnode);
  } else {
    if (snapid == 0)
      snapid = mdcache->get_global_snaprealm()->get_newest_seq();
    new_srnode = new sr_t();
    new_srnode->seq = snapid;
    new_srnode->created = snapid;
    new_srnode->current_parent_since = get_oldest_snap();
    SnapRealm *sr = find_snaprealm();
    dout(20) << __func__ << ": inheriting change_attr from " << *sr
             << dendl;
    new_srnode->change_attr = sr->srnode.change_attr;
  }
  return new_srnode;
}
// Return the newest srnode among the pending projections (scanning from
// newest to oldest, skipping projections that carry no srnode change),
// falling back to the live snaprealm's srnode, or null when there is none.
const sr_t *CInode::get_projected_srnode() const {
  if (num_projected_srnodes > 0) {
    for (auto it = projected_nodes.rbegin(); it != projected_nodes.rend(); ++it) {
      if (it->snapnode != projected_inode::UNDEF_SRNODE)
	return it->snapnode;
    }
  }
  return snaprealm ? &snaprealm->srnode : nullptr;
}
// Attach `new_srnode` to the most recent projection, which must not
// already carry one (asserted against the UNDEF_SRNODE sentinel).
void CInode::project_snaprealm(sr_t *new_srnode)
{
  dout(10) << __func__ << " " << new_srnode << dendl;
  ceph_assert(projected_nodes.back().snapnode == projected_inode::UNDEF_SRNODE);
  projected_nodes.back().snapnode = new_srnode;
  ++num_projected_srnodes;
}
// Flag a non-directory inode's (projected) srnode as a global parent
// realm. The original current_parent_since is preserved in last_destroyed
// (repurposed field) so clear_snaprealm_global() can restore it.
void CInode::mark_snaprealm_global(sr_t *new_srnode)
{
  ceph_assert(!is_dir());
  // 'last_destroyed' is no longer used, use it to store origin 'current_parent_since'
  new_srnode->last_destroyed = new_srnode->current_parent_since;
  new_srnode->current_parent_since = mdcache->get_global_snaprealm()->get_newest_seq() + 1;
  new_srnode->mark_parent_global();
}
// Inverse of mark_snaprealm_global(): restore current_parent_since from
// the repurposed last_destroyed field, bump seq to the global realm's
// newest, and drop the parent-global flag.
void CInode::clear_snaprealm_global(sr_t *new_srnode)
{
  // restore 'current_parent_since'
  new_srnode->current_parent_since = new_srnode->last_destroyed;
  new_srnode->last_destroyed = 0;
  new_srnode->seq = mdcache->get_global_snaprealm()->get_newest_seq();
  new_srnode->clear_parent_global();
}
bool CInode::is_projected_snaprealm_global() const
{
const sr_t *srnode = get_projected_srnode();
if (srnode && srnode->is_parent_global())
return true;
return false;
}
// Project a fresh srnode and record the (possibly changed) parent realm's
// snaps in it.
void CInode::project_snaprealm_past_parent(SnapRealm *newparent)
{
  record_snaprealm_past_parent(project_snaprealm(), newparent);
}
/* if newparent != parent, add parent to past_parents
if parent DNE, we need to find what the parent actually is and fill that in */
// If the parent realm is changing, capture the old parent's snaps taken
// since current_parent_since into new_snap->past_parent_snaps, bump seq if
// the old parent was newer, and restart current_parent_since from the
// global realm's next seq. Not valid for parent-global srnodes (asserted).
void CInode::record_snaprealm_past_parent(sr_t *new_snap, SnapRealm *newparent)
{
  ceph_assert(!new_snap->is_parent_global());
  // resolve the realm we are leaving
  SnapRealm *oldparent;
  if (!snaprealm) {
    oldparent = find_snaprealm();
  } else {
    oldparent = snaprealm->parent;
  }
  if (newparent != oldparent) {
    snapid_t oldparentseq = oldparent->get_newest_seq();
    if (oldparentseq + 1 > new_snap->current_parent_since) {
      // copy old parent's snaps
      const set<snapid_t>& snaps = oldparent->get_snaps();
      auto p = snaps.lower_bound(new_snap->current_parent_since);
      if (p != snaps.end())
	new_snap->past_parent_snaps.insert(p, snaps.end());
      if (oldparentseq > new_snap->seq)
	new_snap->seq = oldparentseq;
    }
    new_snap->current_parent_since = mdcache->get_global_snaprealm()->get_newest_seq() + 1;
  }
}
// For a parent-global srnode, record the snaps inherited through the
// dentry `dn` from `oldparent` (resolved from the dentry's dir if null).
// Remote links collect snaps since dn->first; for the primary dentry the
// repurposed last_destroyed acts as current_parent_since and is advanced.
void CInode::record_snaprealm_parent_dentry(sr_t *new_snap, SnapRealm *oldparent,
					    CDentry *dn, bool primary_dn)
{
  ceph_assert(new_snap->is_parent_global());
  if (!oldparent)
    oldparent = dn->get_dir()->inode->find_snaprealm();
  auto& snaps = oldparent->get_snaps();
  if (!primary_dn) {
    auto p = snaps.lower_bound(dn->first);
    if (p != snaps.end())
      new_snap->past_parent_snaps.insert(p, snaps.end());
  } else {
    // 'last_destroyed' is used as 'current_parent_since'
    auto p = snaps.lower_bound(new_snap->last_destroyed);
    if (p != snaps.end())
      new_snap->past_parent_snaps.insert(p, snaps.end());
    new_snap->last_destroyed = mdcache->get_global_snaprealm()->get_newest_seq() + 1;
  }
}
// Apply the front projection's srnode ahead of the rest of that
// projection (which pop_and_dirty_projected_inode() will apply later),
// leaving the sentinel behind so it is not applied twice.
// NOTE(review): presumably the realm change must become visible before
// the remaining projected metadata — confirm against callers.
void CInode::early_pop_projected_snaprealm()
{
  ceph_assert(!projected_nodes.empty());
  if (projected_nodes.front().snapnode != projected_inode::UNDEF_SRNODE) {
    pop_projected_snaprealm(projected_nodes.front().snapnode, true);
    projected_nodes.front().snapnode = projected_inode::UNDEF_SRNODE;
    --num_projected_srnodes;
  }
}
// Install `next_snaprealm` as the live srnode, opening a realm if this
// inode had none, and re-resolve the realm parent when the PARENT_GLOBAL
// flag toggled. A null argument means the realm is being removed: merge
// its contents into the parent realm.
void CInode::pop_projected_snaprealm(sr_t *next_snaprealm, bool early)
{
  if (next_snaprealm) {
    dout(10) << __func__ << (early ? " (early) " : " ")
	     << next_snaprealm << " seq " << next_snaprealm->seq << dendl;
    if (!snaprealm)
      open_snaprealm();
    auto old_flags = snaprealm->srnode.flags;
    snaprealm->srnode = *next_snaprealm;
    delete next_snaprealm;
    if ((snaprealm->srnode.flags ^ old_flags) & sr_t::PARENT_GLOBAL) {
      // parent-global bit flipped: the realm's parent linkage changes
      snaprealm->adjust_parent();
    }
    if (snaprealm->parent)
      dout(10) << " realm " << *snaprealm << " parent " << *snaprealm->parent << dendl;
  } else {
    dout(10) << __func__ << (early ? " (early) null" : " null") << dendl;
    ceph_assert(snaprealm);
    snaprealm->merge_to(NULL);
  }
}
// ====== CInode =======
// dirfrags
InodeStoreBase::inode_const_ptr InodeStoreBase::empty_inode = InodeStoreBase::allocate_inode();
// Hash a dentry name with the directory's configured hash algorithm,
// falling back to the linux hash when none is set.
__u32 InodeStoreBase::hash_dentry_name(std::string_view dn)
{
  int alg = inode->dir_layout.dl_dir_hash;
  if (alg == 0)
    alg = CEPH_STR_HASH_LINUX;
  ceph_assert(ceph_str_hash_valid(alg));
  return ceph_str_hash(alg, dn.data(), dn.length());
}
// Pick the dirfrag a dentry name belongs to by hashing it into the
// fragment tree.
frag_t InodeStoreBase::pick_dirfrag(std::string_view dn)
{
  // unfragmented directory: everything lives in the root frag, skip hashing
  if (dirfragtree.empty())
    return frag_t();
  return dirfragtree[hash_dentry_name(dn)];
}
// Collect the open dirfrags that fall under frag 'fg'.  Returns {all, dirs}:
// 'all' is true when the open dirfrags fully cover 'fg' (either an exact or
// enclosing frag is open, or the open sub-frags sum to the whole of 'fg').
std::pair<bool, std::vector<CDir*>> CInode::get_dirfrags_under(frag_t fg)
{
  std::pair<bool, std::vector<CDir*>> result;
  auto& all = result.first;
  auto& dirs = result.second;
  all = false;
  // exact match: trivially complete coverage
  if (auto it = dirfrags.find(fg); it != dirfrags.end()){
    all = true;
    dirs.push_back(it->second);
    return result;
  }
  int total = 0;
  for(auto &[_fg, _dir] : dirfrags){
    // frag_t.bits() can indicate the depth of the partition in the directory tree
    // e.g.
    // 01* : bit = 2, on the second floor
    // *
    // 0* 1*
    // 00* 01* 10* 11* -- > level 2, bit = 2
    // so fragA.bits > fragB.bits means fragA is deeper than fragB
    if (fg.bits() >= _fg.bits()) {
      if (_fg.contains(fg)) {
	all = true;
	return result;
      }
    } else {
      if (fg.contains(_fg)) {
	dirs.push_back(_dir);
	// we can calculate how many sub slices a slice can be divided into
	// frag_t(*) can be divided into two frags belonging to the first layer(0* 1*)
	// or 2^2 frags belonging to the second layer(00* 01* 10* 11*)
	// or (1 << (24 - frag_t(*).bits)) frags belonging to the 24th level
	total += 1 << (24 - _fg.bits());
      }
    }
  }
  // we convert all the frags into the frags of 24th layer to calculate whether all the frags are included in the memory cache
  all = ((1<<(24-fg.bits())) == total);
  return result;
}
// Assert that every open dirfrag is a leaf of the fragment tree.
void CInode::verify_dirfrags()
{
  bool mismatch = false;
  for (const auto &[fg, dir] : dirfrags) {
    if (dirfragtree.is_leaf(fg))
      continue;
    dout(0) << "have open dirfrag " << fg << " but not leaf in " << dirfragtree
	    << ": " << *dir << dendl;
    mismatch = true;
  }
  ceph_assert(!mismatch);
}
// If any open dirfrag does not match the fragment tree, force-open a dirfrag
// for every leaf of the tree, then re-verify.
void CInode::force_dirfrags()
{
  bool bad = false;
  for (auto &p : dirfrags) {
    if (!dirfragtree.is_leaf(p.first)) {
      dout(0) << "have open dirfrag " << p.first << " but not leaf in " << dirfragtree
	      << ": " << *p.second << dendl;
      bad = true;
    }
  }
  if (bad) {
    frag_vec_t leaves;
    dirfragtree.get_leaves(leaves);
    for (const auto& leaf : leaves) {
      mdcache->get_force_dirfrag(dirfrag_t(ino(), leaf), true);
    }
  }
  verify_dirfrags();
}
// Find a dirfrag "close to" fg: exact match first, then any open descendant,
// then the nearest open ancestor.  Returns NULL when nothing is open.
CDir *CInode::get_approx_dirfrag(frag_t fg)
{
  if (CDir *exact = get_dirfrag(fg))
    return exact;
  // any open frag underneath fg will do
  auto&& under = get_dirfrags_under(fg);
  if (!under.second.empty())
    return under.second.front();
  // walk up through the ancestors
  while (fg.bits() > 0) {
    fg = fg.parent();
    if (CDir *anc = get_dirfrag(fg))
      return anc;
  }
  return NULL;
}
// Return the dirfrag for fg, creating and registering it if not yet open.
// Only the auth MDS (or any MDS during replay) may create dirfrags.
CDir *CInode::get_or_open_dirfrag(MDCache *mdcache, frag_t fg)
{
  ceph_assert(is_dir());
  // have it?
  CDir *dir = get_dirfrag(fg);
  if (!dir) {
    // create it.
    ceph_assert(is_auth() || mdcache->mds->is_any_replay());
    dir = new CDir(this, fg, mdcache, is_auth());
    add_dirfrag(dir);
  }
  return dir;
}
// Register a newly created dirfrag; the frag must not already be present.
// Inherits the sticky pin when the inode currently holds sticky refs.
CDir *CInode::add_dirfrag(CDir *dir)
{
  auto em = dirfrags.emplace(std::piecewise_construct, std::forward_as_tuple(dir->dirfrag().frag), std::forward_as_tuple(dir));
  ceph_assert(em.second);
  if (stickydir_ref > 0) {
    dir->state_set(CDir::STATE_STICKY);
    dir->get(CDir::PIN_STICKY);
  }
  maybe_export_pin();
  return dir;
}
// Tear down and delete the open dirfrag for fg.  The frag must be open and
// the CDir must be unreferenced (aside from state we clear here).
void CInode::close_dirfrag(frag_t fg)
{
  dout(14) << __func__ << " " << fg << dendl;
  // single lookup instead of count() + operator[] (two traversals)
  auto it = dirfrags.find(fg);
  ceph_assert(it != dirfrags.end());
  CDir *dir = it->second;
  dir->remove_null_dentries();
  // clear dirty flag
  if (dir->is_dirty())
    dir->mark_clean();
  if (stickydir_ref > 0) {
    dir->state_clear(CDir::STATE_STICKY);
    dir->put(CDir::PIN_STICKY);
  }
  if (dir->is_subtree_root())
    num_subtree_roots--;
  // dump any remaining dentries, for debugging purposes
  for (const auto &p : dir->items)
    dout(14) << __func__ << " LEFTOVER dn " << *p.second << dendl;
  ceph_assert(dir->get_num_ref() == 0);
  delete dir;
  dirfrags.erase(fg);
}
// Close every open dirfrag; close_dirfrag() erases from the map, so loop
// until it drains rather than iterating.
void CInode::close_dirfrags()
{
  while (!dirfrags.empty())
    close_dirfrag(dirfrags.begin()->first);
}
// Does any open dirfrag act as a subtree root?  With auth == -1 any subtree
// root counts; otherwise the root's authority must match 'auth'.
bool CInode::has_subtree_root_dirfrag(int auth)
{
  if (num_subtree_roots == 0)
    return false;
  if (auth == -1)
    return true;
  for (const auto &[fg, dir] : dirfrags) {
    if (dir->is_subtree_root() && dir->dir_auth.first == auth)
      return true;
  }
  return false;
}
// True when this inode holds a subtree root dirfrag or a dirfrag that is
// currently being exported.
bool CInode::has_subtree_or_exporting_dirfrag()
{
  return num_subtree_roots > 0 || num_exporting_dirs > 0;
}
// Take a sticky-dirs reference; on the 0->1 transition, pin the inode and
// mark/pin every open dirfrag sticky so they stay in cache.
void CInode::get_stickydirs()
{
  if (stickydir_ref == 0) {
    get(PIN_STICKYDIRS);
    for (const auto &p : dirfrags) {
      p.second->state_set(CDir::STATE_STICKY);
      p.second->get(CDir::PIN_STICKY);
    }
  }
  stickydir_ref++;
}
// Drop a sticky-dirs reference; on the 1->0 transition, unpin the inode and
// clear/unpin the sticky state on every open dirfrag.
void CInode::put_stickydirs()
{
  ceph_assert(stickydir_ref > 0);
  stickydir_ref--;
  if (stickydir_ref == 0) {
    put(PIN_STICKYDIRS);
    for (const auto &p : dirfrags) {
      p.second->state_clear(CDir::STATE_STICKY);
      p.second->put(CDir::PIN_STICKY);
    }
  }
}
// pins
// Ref-count hook: the first reference on the inode pins its primary dentry.
void CInode::first_get()
{
  // pin my dentry?
  if (parent)
    parent->get(CDentry::PIN_INODEPIN);
}
// Ref-count hook: dropping the last reference unpins the primary dentry.
void CInode::last_put()
{
  // unpin my dentry?
  if (parent)
    parent->put(CDentry::PIN_INODEPIN);
}
// Ref-count hook: when only the dirty/dirty-parent pins remain, give the
// stray-reintegration logic a chance to evaluate this inode.
void CInode::_put()
{
  if (get_num_ref() == (int)is_dirty() + (int)is_dirty_parent())
    mdcache->maybe_eval_stray(this, true);
}
// Track a remote (hard-link) parent dentry; the first one pins the inode.
void CInode::add_remote_parent(CDentry *p)
{
  const bool was_empty = remote_parents.empty();
  remote_parents.insert(p);
  if (was_empty)
    get(PIN_REMOTEPARENT);
}
// Forget a remote (hard-link) parent dentry; removing the last one drops
// the corresponding pin.
void CInode::remove_remote_parent(CDentry *p)
{
  remote_parents.erase(p);
  if (remote_parents.empty())
    put(PIN_REMOTEPARENT);
}
// Directory containing the primary dentry, or NULL for a base inode.
CDir *CInode::get_parent_dir()
{
  return parent ? parent->dir : NULL;
}
// Directory containing the projected primary dentry, or NULL if none.
CDir *CInode::get_projected_parent_dir()
{
  CDentry *pdn = get_projected_parent_dn();
  return pdn ? pdn->dir : NULL;
}
// Inode of the directory holding the primary dentry, or NULL for a base inode.
CInode *CInode::get_parent_inode()
{
  return parent ? parent->dir->inode : NULL;
}
bool CInode::is_ancestor_of(const CInode *other) const
{
while (other) {
if (other == this)
return true;
const CDentry *pdn = other->get_oldest_parent_dn();
if (!pdn) {
ceph_assert(other->is_base());
break;
}
other = pdn->get_dir()->get_inode();
}
return false;
}
bool CInode::is_projected_ancestor_of(const CInode *other) const
{
while (other) {
if (other == this)
return true;
const CDentry *pdn = other->get_projected_parent_dn();
if (!pdn) {
ceph_assert(other->is_base());
break;
}
other = pdn->get_dir()->get_inode();
}
return false;
}
/*
 * Because a non-directory inode may have multiple links, the use_parent
 * argument allows selecting which parent to use for path construction. This
 * argument is only meaningful for the final component (i.e. the first of the
 * nested calls) because directories cannot have multiple hard links. If
 * use_parent is NULL and projected is true, the primary parent's projected
 * inode is used all the way up the path chain. Otherwise the primary parent
 * stable inode is used.
 */
void CInode::make_path_string(string& s, bool projected, const CDentry *use_parent) const
{
  if (!use_parent) {
    use_parent = projected ? get_projected_parent_dn() : parent;
  }
  if (use_parent) {
    use_parent->make_path_string(s, projected);
  } else if (is_root()) {
    s = "";
  } else if (is_mdsdir()) {
    // per-MDS dir: render as ~mds<rank>
    char t[40];
    uint64_t eino(ino());
    eino -= MDS_INO_MDSDIR_OFFSET;
    snprintf(t, sizeof(t), "~mds%" PRId64, eino);
    s = t;
  } else {
    // orphan/unlinked: fall back to the raw inode number
    char n[40];
    uint64_t eino(ino());
    snprintf(n, sizeof(n), "#%" PRIx64, eino);
    s += n;
  }
}
// Build a filepath for this inode by recursing up the (projected or stable)
// parent chain; a base inode is represented by its bare inode number.
void CInode::make_path(filepath& fp, bool projected) const
{
  const CDentry *pdn = projected ? get_projected_parent_dn() : parent;
  if (!pdn) {
    fp = filepath(ino());
    return;
  }
  ceph_assert(!is_base());
  pdn->make_path(fp, projected);
}
// Stray-directory dentries are named after the inode number in lowercase hex.
void CInode::name_stray_dentry(string& dname)
{
  char buf[20];
  snprintf(buf, sizeof(buf), "%llx", (unsigned long long)ino().val);
  dname = buf;
}
// Reserve the next projected version for this inode, delegating to the
// projected parent dentry when one exists (base inodes bump locally).
// Returns the projected version the caller should journal against.
version_t CInode::pre_dirty()
{
  version_t pv;
  CDentry* _cdentry = get_projected_parent_dn();
  if (_cdentry) {
    pv = _cdentry->pre_dirty(get_projected_version());
    dout(10) << "pre_dirty " << pv << " (current v " << get_inode()->version << ")" << dendl;
  } else {
    ceph_assert(is_base());
    pv = get_projected_version() + 1;
  }
  // force update backtrace for old format inode (see mempool_inode::decode)
  if (get_inode()->backtrace_version == 0 && !projected_nodes.empty()) {
    auto pi = _get_projected_inode();
    if (pi->backtrace_version == 0)
      pi->update_backtrace(pv);
  }
  return pv;
}
// Set the DIRTY state/pin (idempotent) and (re)attach this inode to the
// given log segment's dirty list.
void CInode::_mark_dirty(LogSegment *ls)
{
  if (!state_test(STATE_DIRTY)) {
    state_set(STATE_DIRTY);
    get(PIN_DIRTY);
    ceph_assert(ls);
  }
  // move myself to this segment's dirty list
  if (ls)
    ls->dirty_inodes.push_back(&item_dirty);
}
// Mark this inode dirty against a log segment, and propagate dirtiness to
// the primary dentry so the containing dirfrag gets flushed too.
void CInode::mark_dirty(LogSegment *ls) {
  dout(10) << __func__ << " " << *this << dendl;
  /*
    NOTE: I may already be dirty, but this fn _still_ needs to be called so that
    the directory is (perhaps newly) dirtied, and so that parent_dir_version is
    updated below.
  */
  // only auth can get dirty. "dirty" async data in replicas is relative to
  // filelock state, not the dirty flag.
  ceph_assert(is_auth());
  // touch my private version
  _mark_dirty(ls);
  // mark dentry too
  if (parent)
    parent->mark_dirty(get_version(), ls);
}
// Clear the DIRTY state, drop its pin, and detach from the log segment's
// dirty-inode list.  No-op when already clean.
void CInode::mark_clean()
{
  dout(10) << __func__ << " " << *this << dendl;
  if (!state_test(STATE_DIRTY))
    return;
  state_clear(STATE_DIRTY);
  put(PIN_DIRTY);
  // remove myself from ls dirty list
  item_dirty.remove_myself();
}
// --------------
// per-inode storage
// (currently for root inode only)
// Completion for CInode::store(): forwards the RADOS write result (plus the
// version that was written) back to CInode::_stored().
struct C_IO_Inode_Stored : public CInodeIOContext {
  version_t version;
  Context *fin;
  C_IO_Inode_Stored(CInode *i, version_t v, Context *f) : CInodeIOContext(i), version(v), fin(f) {}
  void finish(int r) override {
    in->_stored(r, version, fin);
  }
  void print(ostream& out) const override {
    out << "inode_store(" << in->ino() << ")";
  }
};
// Build the RADOS object name "<ino>.<frag><suffix>" (both halves in hex);
// the assert guarantees the suffix still fits in the fixed buffer.
object_t InodeStoreBase::get_object_name(inodeno_t ino, frag_t fg, std::string_view suffix)
{
  char n[60];
  snprintf(n, sizeof(n), "%llx.%08llx", (long long unsigned)ino, (long long unsigned)fg);
  ceph_assert(strlen(n) + suffix.size() < sizeof n);
  strncat(n, suffix.data(), suffix.size());
  return object_t(n);
}
// Persist a base inode (root / mdsdir) as a full-object write of
// "<ino>.00000000.inode" in the metadata pool; _stored() runs on completion.
void CInode::store(MDSContext *fin)
{
  dout(10) << __func__ << " " << get_version() << dendl;
  ceph_assert(is_base());
  if (snaprealm)
    purge_stale_snap_data(snaprealm->get_snaps());
  // encode
  bufferlist bl;
  string magic = CEPH_FS_ONDISK_MAGIC;
  using ceph::encode;
  encode(magic, bl);
  encode_store(bl, mdcache->mds->mdsmap->get_up_features());
  // write it.
  SnapContext snapc;
  ObjectOperation m;
  m.write_full(bl);
  object_t oid = CInode::get_object_name(ino(), frag_t(), ".inode");
  object_locator_t oloc(mdcache->mds->get_metadata_pool());
  Context *newfin =
    new C_OnFinisher(new C_IO_Inode_Stored(this, get_version(), fin),
		     mdcache->mds->finisher);
  mdcache->mds->objecter->mutate(oid, oloc, m, snapc,
				 ceph::real_clock::now(), 0,
				 newfin);
}
// Completion of store(): on error, log/report and propagate r; on success,
// mark the inode clean only if no newer version was projected meanwhile.
void CInode::_stored(int r, version_t v, Context *fin)
{
  if (r < 0) {
    dout(1) << "store error " << r << " v " << v << " on " << *this << dendl;
    mdcache->mds->clog->error() << "failed to store inode " << ino()
                                << " object: " << cpp_strerror(r);
    mdcache->mds->handle_write_error(r);
    fin->complete(r);
    return;
  }
  dout(10) << __func__ << " " << v << " on " << *this << dendl;
  if (v == get_projected_version())
    mark_clean();
  fin->complete(0);
}
// Flush this inode's dirty state: its backtrace if dirty-parent, and either
// the base-inode object or the containing dirfrag if dirty.  'fin' fires when
// all issued writes complete (immediately if nothing was dirty).
void CInode::flush(MDSContext *fin)
{
  dout(10) << __func__ << " " << *this << dendl;
  ceph_assert(is_auth() && can_auth_pin());
  MDSGatherBuilder gather(g_ceph_context);
  if (is_dirty_parent()) {
    store_backtrace(gather.new_sub());
  }
  if (is_dirty()) {
    if (is_base()) {
      store(gather.new_sub());
    } else {
      parent->dir->commit(0, gather.new_sub());
    }
  }
  if (gather.has_subs()) {
    gather.set_finisher(fin);
    gather.activate();
  } else {
    fin->complete(0);
  }
}
// Completion for CInode::fetch(): collects both the legacy (xattr) and the
// current (.inode object) payloads and hands them to CInode::_fetched().
struct C_IO_Inode_Fetched : public CInodeIOContext {
  bufferlist bl, bl2;
  Context *fin;
  C_IO_Inode_Fetched(CInode *i, Context *f) : CInodeIOContext(i), fin(f) {}
  void finish(int r) override {
    // Ignore 'r', because we fetch from two places, so r is usually CEPHFS_ENOENT
    in->_fetched(bl, bl2, fin);
  }
  void print(ostream& out) const override {
    out << "inode_fetch(" << in->ino() << ")";
  }
};
// Load the inode from the metadata pool, issuing both the legacy read (an
// "inode" xattr on the dirfrag object) and the current read (a ".inode"
// object) in parallel; _fetched() decides which payload to use.
void CInode::fetch(MDSContext *fin)
{
  dout(10) << __func__ << dendl;
  C_IO_Inode_Fetched *c = new C_IO_Inode_Fetched(this, fin);
  C_GatherBuilder gather(g_ceph_context, new C_OnFinisher(c, mdcache->mds->finisher));
  object_t oid = CInode::get_object_name(ino(), frag_t(), "");
  object_locator_t oloc(mdcache->mds->get_metadata_pool());
  // Old on-disk format: inode stored in xattr of a dirfrag
  ObjectOperation rd;
  rd.getxattr("inode", &c->bl, NULL);
  mdcache->mds->objecter->read(oid, oloc, rd, CEPH_NOSNAP, (bufferlist*)NULL, 0, gather.new_sub());
  // Current on-disk format: inode stored in a .inode object
  object_t oid2 = CInode::get_object_name(ino(), frag_t(), ".inode");
  mdcache->mds->objecter->read(oid2, oloc, 0, 0, CEPH_NOSNAP, &c->bl2, 0, gather.new_sub());
  gather.activate();
}
// Completion of fetch(): prefer the new-format payload (bl2) over the legacy
// one (bl), verify the on-disk magic, then decode the inode.  Completes 'fin'
// with 0, -CEPHFS_ENOENT (no data) or -CEPHFS_EINVAL (bad magic / corrupt).
void CInode::_fetched(bufferlist& bl, bufferlist& bl2, Context *fin)
{
  dout(10) << __func__ << " got " << bl.length() << " and " << bl2.length() << dendl;
  bufferlist::const_iterator p;
  if (bl2.length()) {
    p = bl2.cbegin();
  } else if (bl.length()) {
    p = bl.cbegin();
  } else {
    derr << "No data while reading inode " << ino() << dendl;
    fin->complete(-CEPHFS_ENOENT);
    return;
  }
  using ceph::decode;
  // Attempt decode
  try {
    string magic;
    decode(magic, p);
    dout(10) << " magic is '" << magic << "' (expecting '"
	     << CEPH_FS_ONDISK_MAGIC << "')" << dendl;
    if (magic != CEPH_FS_ONDISK_MAGIC) {
      dout(0) << "on disk magic '" << magic << "' != my magic '" << CEPH_FS_ONDISK_MAGIC
              << "'" << dendl;
      fin->complete(-CEPHFS_EINVAL);
    } else {
      decode_store(p);
      dout(10) << "_fetched " << *this << dendl;
      fin->complete(0);
    }
  } catch (buffer::error &err) {
    derr << "Corrupt inode " << ino() << ": " << err.what() << dendl;
    fin->complete(-CEPHFS_EINVAL);
    return;
  }
}
// Fill 'bt' with this inode's ancestry (dirino/dname/version per ancestor,
// walking primary parent dentries up to the root) plus its current pool and
// any old data pools it previously lived in.
void CInode::build_backtrace(int64_t pool, inode_backtrace_t& bt)
{
  bt.ino = ino();
  bt.ancestors.clear();
  bt.pool = pool;
  CInode *in = this;
  CDentry *pdn = get_parent_dn();
  while (pdn) {
    CInode *diri = pdn->get_dir()->get_inode();
    bt.ancestors.push_back(inode_backpointer_t(diri->ino(), pdn->get_name(), in->get_inode()->version));
    in = diri;
    pdn = in->get_parent_dn();
  }
  bt.old_pools.reserve(get_inode()->old_pools.size());
  for (auto &p : get_inode()->old_pools) {
    // don't add our own pool id to old_pools to avoid looping (e.g. setlayout 0, 1, 0)
    if (p != pool)
      bt.old_pools.push_back(p);
  }
}
// Completion for backtrace writes: forwards the result and the backtrace
// version that was written to CInode::_stored_backtrace().
struct C_IO_Inode_StoredBacktrace : public CInodeIOContext {
  version_t version;
  Context *fin;
  C_IO_Inode_StoredBacktrace(CInode *i, version_t v, Context *f) : CInodeIOContext(i), version(v), fin(f) {}
  void finish(int r) override {
    in->_stored_backtrace(r, version, fin);
  }
  void print(ostream& out) const override {
    out << "backtrace_store(" << in->ino() << ")";
  }
};
// Issue one RADOS mutate per prepared backtrace operation (one per target
// pool), attaching each to the gather builder.  A negative 'r' from an
// earlier stage aborts and reports the write error instead.
void CInode::_commit_ops(int r, C_GatherBuilder &gather_bld,
                         std::vector<CInodeCommitOperation> &ops_vec,
                         inode_backtrace_t &bt)
{
  dout(10) << __func__ << dendl;
  if (r < 0) {
    mdcache->mds->handle_write_error_with_lock(r);
    return;
  }
  SnapContext snapc;
  object_t oid = get_object_name(ino(), frag_t(), "");
  for (auto &op : ops_vec) {
    ObjectOperation obj_op;
    object_locator_t oloc(op.get_pool());
    op.update(obj_op, bt);
    mdcache->mds->objecter->mutate(oid, oloc, obj_op, snapc,
                                   ceph::real_clock::now(),
                                   0, gather_bld.new_sub());
  }
}
// Prepare the backtrace update operations: one for the current backtrace
// pool, and — when DIRTYPOOL is set — one per old data pool so stale copies
// point readers at the new pool.  Takes an auth pin that
// _stored_backtrace() later releases.
void CInode::_store_backtrace(std::vector<CInodeCommitOperation> &ops_vec,
                              inode_backtrace_t &bt, int op_prio)
{
  dout(10) << __func__ << " on " << *this << dendl;
  ceph_assert(is_dirty_parent());
  if (op_prio < 0)
    op_prio = CEPH_MSG_PRIO_DEFAULT;
  auth_pin(this);
  const int64_t pool = get_backtrace_pool();
  build_backtrace(pool, bt);
  std::string_view slink = "";
  if (is_symlink() && mdcache->get_symlink_recovery()) {
    slink = symlink;
  }
  ops_vec.emplace_back(op_prio, pool, get_inode()->layout,
                       mdcache->mds->mdsmap->get_up_features(), slink);
  if (!state_test(STATE_DIRTYPOOL) || get_inode()->old_pools.empty()) {
    dout(20) << __func__ << ": no dirtypool or no old pools" << dendl;
    return;
  }
  // In the case where DIRTYPOOL is set, we update all old pools backtraces
  // such that anyone reading them will see the new pool ID in
  // inode_backtrace_t::pool and go read everything else from there.
  for (const auto &p : get_inode()->old_pools) {
    if (p == pool)
      continue;
    dout(20) << __func__ << ": updating old pool " << p << dendl;
    ops_vec.emplace_back(op_prio, p);
  }
}
// Synchronous-style entry point: prepare the backtrace ops and submit them
// immediately, completing 'fin' once all pool writes finish.
void CInode::store_backtrace(MDSContext *fin, int op_prio)
{
  std::vector<CInodeCommitOperation> ops_vec;
  inode_backtrace_t bt;
  auto version = get_inode()->backtrace_version;
  _store_backtrace(ops_vec, bt, op_prio);
  C_GatherBuilder gather(g_ceph_context,
			 new C_OnFinisher(
			   new C_IO_Inode_StoredBacktrace(this, version, fin),
			   mdcache->mds->finisher));
  _commit_ops(0, gather, ops_vec, bt);
  ceph_assert(gather.has_subs());
  gather.activate();
}
// Deferred-submit variant: fill 'op' with the prepared operations so the
// caller can batch/submit them later.
void CInode::store_backtrace(CInodeCommitOperations &op, int op_prio)
{
  op.version = get_inode()->backtrace_version;
  op.in = this;
  _store_backtrace(op.ops_vec, op.bt, op_prio);
}
// Completion of a backtrace write.  A CEPHFS_ENOENT caused by the target
// pool having been deleted is downgraded to success (the backtrace can never
// be written, so don't wedge the caller).  Releases the auth pin taken in
// _store_backtrace() and clears dirty-parent if no newer version raced in.
void CInode::_stored_backtrace(int r, version_t v, Context *fin)
{
  if (r == -CEPHFS_ENOENT) {
    const int64_t pool = get_backtrace_pool();
    bool exists = mdcache->mds->objecter->with_osdmap(
        [pool](const OSDMap &osd_map) {
          return osd_map.have_pg_pool(pool);
        });
    // This CEPHFS_ENOENT is because the pool doesn't exist (the user deleted it
    // out from under us), so the backtrace can never be written, so pretend
    // to succeed so that the user can proceed to e.g. delete the file.
    if (!exists) {
      dout(4) << __func__ << " got CEPHFS_ENOENT: a data pool was deleted "
                 "beneath us!" << dendl;
      r = 0;
    }
  }
  if (r < 0) {
    dout(1) << "store backtrace error " << r << " v " << v << dendl;
    mdcache->mds->clog->error() << "failed to store backtrace on ino "
				<< ino() << " object"
                                << ", pool " << get_backtrace_pool()
                                << ", errno " << r;
    mdcache->mds->handle_write_error(r);
    if (fin)
      fin->complete(r);
    return;
  }
  dout(10) << __func__ << " v " << v <<  dendl;
  auth_unpin(this);
  if (v == get_inode()->backtrace_version)
    clear_dirty_parent();
  if (fin)
    fin->complete(0);
}
// Thin wrapper: read this inode's backtrace object from its backtrace pool
// into *backtrace via the cache's fetch helper.
void CInode::fetch_backtrace(Context *fin, bufferlist *backtrace)
{
  mdcache->fetch_backtrace(ino(), get_backtrace_pool(), *backtrace, fin);
}
// Flag the backtrace as needing a rewrite (DIRTYPARENT), optionally also
// flagging a data-pool change (DIRTYPOOL), and attach to the log segment's
// dirty-parent list.
void CInode::mark_dirty_parent(LogSegment *ls, bool dirty_pool)
{
  if (!state_test(STATE_DIRTYPARENT)) {
    dout(10) << __func__ << dendl;
    state_set(STATE_DIRTYPARENT);
    get(PIN_DIRTYPARENT);
    ceph_assert(ls);
  }
  if (dirty_pool)
    state_set(STATE_DIRTYPOOL);
  if (ls)
    ls->dirty_parent_inodes.push_back(&item_dirty_parent);
}
// Clear the dirty-parent (and dirty-pool) state after a successful backtrace
// write, dropping the pin and log-segment membership.  No-op when clean.
void CInode::clear_dirty_parent()
{
  if (!state_test(STATE_DIRTYPARENT))
    return;
  dout(10) << __func__ << dendl;
  state_clear(STATE_DIRTYPARENT);
  state_clear(STATE_DIRTYPOOL);
  put(PIN_DIRTYPARENT);
  item_dirty_parent.remove_myself();
}
// Sanity-check a directory inode's on-disk backtrace against its in-memory
// parent dentry; on mismatch (or read error) report it, optionally assert
// (mds_verify_backtrace > 1), and schedule a backtrace rewrite.
void CInode::verify_diri_backtrace(bufferlist &bl, int err)
{
  // skip inodes whose backtrace is meaningless or already queued for rewrite
  if (is_base() || is_dirty_parent() || !is_auth())
    return;
  dout(10) << __func__ << dendl;
  if (err == 0) {
    inode_backtrace_t backtrace;
    using ceph::decode;
    decode(backtrace, bl);
    CDentry *pdn = get_parent_dn();
    if (backtrace.ancestors.empty() ||
	backtrace.ancestors[0].dname != pdn->get_name() ||
	backtrace.ancestors[0].dirino != pdn->get_dir()->ino())
      err = -CEPHFS_EINVAL;
  }
  if (err) {
    MDSRank *mds = mdcache->mds;
    mds->clog->error() << "bad backtrace on directory inode " << ino();
    ceph_assert(!"bad backtrace" == (g_conf()->mds_verify_backtrace > 1));
    mark_dirty_parent(mds->mdlog->get_current_segment(), false);
    mds->mdlog->flush();
  }
}
// ------------------
// parent dir
// Encode the xattr map; a null pointer is encoded as an empty map
// (a zero element count) so decoders see a consistent format.
void InodeStoreBase::encode_xattrs(bufferlist &bl) const {
  using ceph::encode;
  if (!xattrs) {
    encode((__u32)0, bl);
    return;
  }
  encode(*xattrs, bl);
}
// Decode the xattr map; an empty map is normalized to a null pointer so the
// in-memory representation matches what encode_xattrs() expects.
void InodeStoreBase::decode_xattrs(bufferlist::const_iterator &p) {
  using ceph::decode;
  mempool_xattr_map tmp;
  decode_noshare(tmp, p);
  if (tmp.empty()) {
    reset_xattrs(xattr_map_ptr());
  } else {
    reset_xattrs(allocate_xattr_map(std::move(tmp)));
  }
}
// Encode the old-inode (snapshotted) map; a null pointer is encoded as an
// empty map (a zero element count).
void InodeStoreBase::encode_old_inodes(bufferlist &bl, uint64_t features) const {
  using ceph::encode;
  if (!old_inodes) {
    encode((__u32)0, bl);
    return;
  }
  encode(*old_inodes, bl, features);
}
// Decode the old-inode map; an empty map is normalized to a null pointer,
// mirroring encode_old_inodes().
void InodeStoreBase::decode_old_inodes(bufferlist::const_iterator &p) {
  using ceph::decode;
  mempool_old_inode_map tmp;
  decode(tmp, p);
  if (tmp.empty()) {
    reset_old_inodes(old_inode_map_ptr());
  } else {
    reset_old_inodes(allocate_old_inode_map(std::move(tmp)));
  }
}
// Encode the unversioned inode-store payload.  Field order here IS the
// on-disk/wire format and must match decode_bare() exactly.
void InodeStoreBase::encode_bare(bufferlist &bl, uint64_t features,
				 const bufferlist *snap_blob) const
{
  using ceph::encode;
  encode(*inode, bl, features);
  if (inode->is_symlink())
    encode(symlink, bl);
  encode(dirfragtree, bl);
  encode_xattrs(bl);
  // always emit a (possibly empty) snap blob so the decoder's field order holds
  if (snap_blob)
    encode(*snap_blob, bl);
  else
    encode(bufferlist(), bl);
  encode_old_inodes(bl, features);
  encode(oldest_snap, bl);
  encode(damage_flags, bl);
}
// Versioned wrapper around encode_bare() (current struct version 6,
// compatible back to 4).
void InodeStoreBase::encode(bufferlist &bl, uint64_t features,
			    const bufferlist *snap_blob) const
{
  ENCODE_START(6, 4, bl);
  encode_bare(bl, features, snap_blob);
  ENCODE_FINISH(bl);
}
// Encode this inode (plus its snaprealm blob) for storage.
//
// Fix: the 'features' parameter was silently ignored — the body re-queried
// mdcache->mds->mdsmap->get_up_features() instead.  Honor the caller's
// feature set; the caller in CInode::store() passes exactly that value, so
// behavior there is unchanged.
void CInode::encode_store(bufferlist& bl, uint64_t features)
{
  bufferlist snap_blob;
  encode_snap_blob(snap_blob);
  InodeStoreBase::encode(bl, features, &snap_blob);
}
// Decode the unversioned inode-store payload; field order must mirror
// encode_bare().  Handles two legacy quirks: struct_v==2 directories carried
// an embedded default layout, and struct_v>=5 trailing fields are optional
// (consumed only if bytes remain).
void InodeStoreBase::decode_bare(bufferlist::const_iterator &bl,
			      bufferlist& snap_blob, __u8 struct_v)
{
  using ceph::decode;
  auto _inode = allocate_inode();
  decode(*_inode, bl);
  if (_inode->is_symlink()) {
    std::string tmp;
    decode(tmp, bl);
    symlink = std::string_view(tmp);
  }
  decode(dirfragtree, bl);
  decode_xattrs(bl);
  decode(snap_blob, bl);
  decode_old_inodes(bl);
  if (struct_v == 2 && _inode->is_dir()) {
    bool default_layout_exists;
    decode(default_layout_exists, bl);
    if (default_layout_exists) {
      decode(struct_v, bl); // this was a default_file_layout
      decode(_inode->layout, bl); // but we only care about the layout portion
    }
  }
  if (struct_v >= 5) {
    // InodeStore is embedded in dentries without proper versioning, so
    // we consume up to the end of the buffer
    if (!bl.end()) {
      decode(oldest_snap, bl);
    }
    if (!bl.end()) {
      decode(damage_flags, bl);
    }
  }
  reset_inode(std::move(_inode));
}
// Versioned wrapper around decode_bare(); tolerates legacy length-prefixed
// encodings down to version 4.
void InodeStoreBase::decode(bufferlist::const_iterator &bl, bufferlist& snap_blob)
{
  DECODE_START_LEGACY_COMPAT_LEN(5, 4, 4, bl);
  decode_bare(bl, snap_blob, struct_v);
  DECODE_FINISH(bl);
}
// Decode the stored inode and then apply its embedded snaprealm blob.
void CInode::decode_store(bufferlist::const_iterator& bl)
{
  bufferlist snap_blob;
  InodeStoreBase::decode(bl, snap_blob);
  decode_snap_blob(snap_blob);
}
// ------------------
// locking
// Map a CEPH_LOCK_I* lock type onto the corresponding lock member;
// unknown types yield nullptr.
SimpleLock* CInode::get_lock(int type)
{
  switch (type) {
  case CEPH_LOCK_IAUTH:    return &authlock;
  case CEPH_LOCK_ILINK:    return &linklock;
  case CEPH_LOCK_IDFT:     return &dirfragtreelock;
  case CEPH_LOCK_IFILE:    return &filelock;
  case CEPH_LOCK_IVERSION: return &versionlock;
  case CEPH_LOCK_IXATTR:   return &xattrlock;
  case CEPH_LOCK_ISNAP:    return &snaplock;
  case CEPH_LOCK_INEST:    return &nestlock;
  case CEPH_LOCK_IFLOCK:   return &flocklock;
  case CEPH_LOCK_IPOLICY:  return &policylock;
  default:                 return nullptr;
  }
}
// Identify this cache object (for lock messages) by inode number and snapid.
void CInode::set_object_info(MDSCacheObjectInfo &info)
{
  info.ino = ino();
  info.snapid = last;
}
// Encode the auth-lock protected fields (mode/uid/gid, plus fscrypt auth
// since v2).  Field order must match decode_lock_iauth().
void CInode::encode_lock_iauth(bufferlist& bl)
{
  ENCODE_START(2, 1, bl);
  encode(get_inode()->version, bl);
  encode(get_inode()->ctime, bl);
  encode(get_inode()->mode, bl);
  encode(get_inode()->uid, bl);
  encode(get_inode()->gid, bl);
  encode(get_inode()->fscrypt_auth, bl);
  ENCODE_FINISH(bl);
}
// Replica-side decode of the auth-lock fields; ctime only moves forward.
void CInode::decode_lock_iauth(bufferlist::const_iterator& p)
{
  ceph_assert(!is_auth());
  auto _inode = allocate_inode(*get_inode());
  DECODE_START(2, p);
  decode(_inode->version, p);
  utime_t tm;
  decode(tm, p);
  if (_inode->ctime < tm) _inode->ctime = tm;
  decode(_inode->mode, p);
  decode(_inode->uid, p);
  decode(_inode->gid, p);
  if (struct_v >= 2)
    decode(_inode->fscrypt_auth, p);
  DECODE_FINISH(p);
  reset_inode(std::move(_inode));
}
// Encode the link-lock protected fields (nlink).  Order must match
// decode_lock_ilink().
void CInode::encode_lock_ilink(bufferlist& bl)
{
  ENCODE_START(1, 1, bl);
  encode(get_inode()->version, bl);
  encode(get_inode()->ctime, bl);
  encode(get_inode()->nlink, bl);
  ENCODE_FINISH(bl);
}
// Replica-side decode of the link-lock fields; ctime only moves forward.
void CInode::decode_lock_ilink(bufferlist::const_iterator& p)
{
  ceph_assert(!is_auth());
  auto _inode = allocate_inode(*get_inode());
  DECODE_START(1, p);
  decode(_inode->version, p);
  utime_t tm;
  decode(tm, p);
  if (_inode->ctime < tm) _inode->ctime = tm;
  decode(_inode->nlink, p);
  DECODE_FINISH(p);
  reset_inode(std::move(_inode));
}
// Encode dirfragtree-lock state.  Auth side sends its version; a replica
// instead reports whether its scatterlock is dirty/flushing.  Both sides
// then send the fragment tree and the set of frags they are auth for.
void CInode::encode_lock_idft(bufferlist& bl)
{
  ENCODE_START(1, 1, bl);
  if (is_auth()) {
    encode(get_inode()->version, bl);
  } else {
    // treat flushing as dirty when rejoining cache
    bool dirty = dirfragtreelock.is_dirty_or_flushing();
    encode(dirty, bl);
  }
  {
    // encode the raw tree
    encode(dirfragtree, bl);
    // also specify which frags are mine
    set<frag_t> myfrags;
    auto&& dfls = get_dirfrags();
    for (const auto& dir : dfls) {
      if (dir->is_auth()) {
	frag_t fg = dir->get_frag();
	myfrags.insert(fg);
      }
    }
    encode(myfrags, bl);
  }
  ENCODE_FINISH(bl);
}
// Decode dirfragtree-lock state (counterpart of encode_lock_idft).  Auth
// absorbs the replica's dirty flag and forces its auth frags to be leaves;
// a replica adopts the sent tree while keeping its open dirfrags as leaves.
void CInode::decode_lock_idft(bufferlist::const_iterator& p)
{
  inode_ptr _inode;
  DECODE_START(1, p);
  if (is_auth()) {
    bool replica_dirty;
    decode(replica_dirty, p);
    if (replica_dirty) {
      dout(10) << __func__ << " setting dftlock dirty flag" << dendl;
      dirfragtreelock.mark_dirty();  // ok bc we're auth and caller will handle
    }
  } else {
    _inode = allocate_inode(*get_inode());
    decode(_inode->version, p);
  }
  {
    fragtree_t temp;
    decode(temp, p);
    set<frag_t> authfrags;
    decode(authfrags, p);
    if (is_auth()) {
      // auth.  believe replica's auth frags only.
      for (auto fg : authfrags) {
        if (!dirfragtree.is_leaf(fg)) {
          dout(10) << " forcing frag " << fg << " to leaf (split|merge)" << dendl;
          dirfragtree.force_to_leaf(g_ceph_context, fg);
          dirfragtreelock.mark_dirty();  // ok bc we're auth and caller will handle
        }
      }
    } else {
      // replica.  take the new tree, BUT make sure any open
      //  dirfrags remain leaves (they may have split _after_ this
      //  dft was scattered, or we may still be be waiting on the
      //  notify from the auth)
      dirfragtree.swap(temp);
      for (const auto &p : dirfrags) {
        if (!dirfragtree.is_leaf(p.first)) {
          dout(10) << " forcing open dirfrag " << p.first << " to leaf (racing with split|merge)" << dendl;
          dirfragtree.force_to_leaf(g_ceph_context, p.first);
        }
	if (p.second->is_auth())
	  p.second->state_clear(CDir::STATE_DIRTYDFT);
      }
    }
    if (g_conf()->mds_debug_frag)
      verify_dirfrags();
  }
  DECODE_FINISH(p);
  if (_inode)
    reset_inode(std::move(_inode));
}
// Encode filelock state: auth sends size/time/layout fields, a replica
// sends its dirty flag; both sides then send the inode dirstat and the
// per-dirfrag fragstats they are authoritative for (counted, then appended).
void CInode::encode_lock_ifile(bufferlist& bl)
{
  ENCODE_START(2, 1, bl);
  if (is_auth()) {
    encode(get_inode()->version, bl);
    encode(get_inode()->ctime, bl);
    encode(get_inode()->mtime, bl);
    encode(get_inode()->atime, bl);
    encode(get_inode()->time_warp_seq, bl);
    if (!is_dir()) {
      encode(get_inode()->layout, bl, mdcache->mds->mdsmap->get_up_features());
      encode(get_inode()->size, bl);
      encode(get_inode()->truncate_seq, bl);
      encode(get_inode()->truncate_size, bl);
      encode(get_inode()->client_ranges, bl);
      encode(get_inode()->inline_data, bl);
    }
  } else {
    // treat flushing as dirty when rejoining cache
    bool dirty = filelock.is_dirty_or_flushing();
    encode(dirty, bl);
  }
  dout(15) << __func__ << " inode.dirstat is " << get_inode()->dirstat << dendl;
  encode(get_inode()->dirstat, bl);  // only meaningful if i am auth.
  bufferlist tmp;
  __u32 n = 0;
  for (const auto &p : dirfrags) {
    frag_t fg = p.first;
    CDir *dir = p.second;
    if (is_auth() || dir->is_auth()) {
      const auto& pf = dir->get_projected_fnode();
      dout(15) << fg << " " << *dir << dendl;
      dout(20) << fg << "    fragstat " << pf->fragstat << dendl;
      dout(20) << fg << " accounted_fragstat " << pf->accounted_fragstat << dendl;
      encode(fg, tmp);
      encode(dir->first, tmp);
      encode(pf->fragstat, tmp);
      encode(pf->accounted_fragstat, tmp);
      n++;
    }
  }
  encode(n, bl);
  bl.claim_append(tmp);
  if (is_auth())
    encode(get_inode()->fscrypt_file, bl);
  ENCODE_FINISH(bl);
}
// Decode filelock state (counterpart of encode_lock_ifile).  A replica
// adopts the auth's size/time fields and dirstat; auth absorbs per-frag
// fragstats from the replica and dirties the lock on any mismatch.
void CInode::decode_lock_ifile(bufferlist::const_iterator& p)
{
  inode_ptr _inode;
  DECODE_START(2, p);
  if (!is_auth()) {
    _inode = allocate_inode(*get_inode());
    decode(_inode->version, p);
    utime_t tm;
    decode(tm, p);
    if (_inode->ctime < tm) _inode->ctime = tm;
    decode(_inode->mtime, p);
    decode(_inode->atime, p);
    decode(_inode->time_warp_seq, p);
    if (!is_dir()) {
      decode(_inode->layout, p);
      decode(_inode->size, p);
      decode(_inode->truncate_seq, p);
      decode(_inode->truncate_size, p);
      decode(_inode->client_ranges, p);
      decode(_inode->inline_data, p);
    }
  } else {
    bool replica_dirty;
    decode(replica_dirty, p);
    if (replica_dirty) {
      dout(10) << __func__ << " setting filelock dirty flag" << dendl;
      filelock.mark_dirty();  // ok bc we're auth and caller will handle
    }
  }
  frag_info_t dirstat;
  decode(dirstat, p);
  if (!is_auth()) {
    dout(10) << " taking inode dirstat " << dirstat << " for " << *this << dendl;
    _inode->dirstat = dirstat;    // take inode summation if replica
  }
  __u32 n;
  decode(n, p);
  dout(10) << " ...got " << n << " fragstats on " << *this << dendl;
  while (n--) {
    frag_t fg;
    snapid_t fgfirst;
    frag_info_t fragstat;
    frag_info_t accounted_fragstat;
    decode(fg, p);
    decode(fgfirst, p);
    decode(fragstat, p);
    decode(accounted_fragstat, p);
    dout(10) << fg << " [" << fgfirst << ",head] " << dendl;
    dout(10) << fg << "        fragstat " << fragstat << dendl;
    dout(20) << fg << " accounted_fragstat " << accounted_fragstat << dendl;
    CDir *dir = get_dirfrag(fg);
    if (is_auth()) {
      ceph_assert(dir);                // i am auth; i had better have this dir open
      dout(10) << fg << " first " << dir->first << " -> " << fgfirst
	       << " on " << *dir << dendl;
      dir->first = fgfirst;
      auto _fnode = CDir::allocate_fnode(*dir->get_fnode());
      _fnode->fragstat = fragstat;
      _fnode->accounted_fragstat = accounted_fragstat;
      dir->reset_fnode(std::move(_fnode));
      if (!(fragstat == accounted_fragstat)) {
	dout(10) << fg << " setting filelock updated flag" << dendl;
	filelock.mark_dirty();  // ok bc we're auth and caller will handle
      }
    } else {
      if (dir && dir->is_auth()) {
	dout(10) << fg << " first " << dir->first << " -> " << fgfirst
		 << " on " << *dir << dendl;
	dir->first = fgfirst;
	const auto& pf = dir->get_projected_fnode();
	finish_scatter_update(&filelock, dir,
			      _inode->dirstat.version, pf->accounted_fragstat.version);
      }
    }
  }
  if (!is_auth() && struct_v >= 2)
    decode(_inode->fscrypt_file, p);
  DECODE_FINISH(p);
  if (_inode)
    reset_inode(std::move(_inode));
}
// Encode nestlock state: auth sends its version, a replica its dirty flag;
// both then send the inode rstat and the per-dirfrag rstats they are
// authoritative for (counted, then appended).
//
// Fix: the "accounted_rstat" debug line printed pf->rstat instead of
// pf->accounted_rstat, making the two log lines identical and hiding any
// rstat/accounted_rstat divergence.  Encoded bytes are unchanged.
void CInode::encode_lock_inest(bufferlist& bl)
{
  ENCODE_START(1, 1, bl);
  if (is_auth()) {
    encode(get_inode()->version, bl);
  } else {
    // treat flushing as dirty when rejoining cache
    bool dirty = nestlock.is_dirty_or_flushing();
    encode(dirty, bl);
  }
  dout(15) << __func__ << " inode.rstat is " << get_inode()->rstat << dendl;
  encode(get_inode()->rstat, bl);  // only meaningful if i am auth.
  bufferlist tmp;
  __u32 n = 0;
  for (const auto &p : dirfrags) {
    frag_t fg = p.first;
    CDir *dir = p.second;
    if (is_auth() || dir->is_auth()) {
      const auto& pf = dir->get_projected_fnode();
      dout(10) << __func__ << " " << fg << " dir " << *dir << dendl;
      dout(10) << __func__ << " " << fg << " rstat " << pf->rstat << dendl;
      dout(10) << __func__ << " " << fg << " accounted_rstat " << pf->accounted_rstat << dendl;
      dout(10) << __func__ << " " << fg << " dirty_old_rstat " << dir->dirty_old_rstat << dendl;
      encode(fg, tmp);
      encode(dir->first, tmp);
      encode(pf->rstat, tmp);
      encode(pf->accounted_rstat, tmp);
      encode(dir->dirty_old_rstat, tmp);
      n++;
    }
  }
  encode(n, bl);
  bl.claim_append(tmp);
  ENCODE_FINISH(bl);
}
// Decode nestlock state (counterpart of encode_lock_inest).  A replica
// adopts the auth's rstat; auth absorbs per-frag rstats and dirties the
// lock when rstat != accounted_rstat or dirty_old_rstat is non-empty.
void CInode::decode_lock_inest(bufferlist::const_iterator& p)
{
  inode_ptr _inode;
  DECODE_START(1, p);
  if (is_auth()) {
    bool replica_dirty;
    decode(replica_dirty, p);
    if (replica_dirty) {
      dout(10) << __func__ << " setting nestlock dirty flag" << dendl;
      nestlock.mark_dirty();  // ok bc we're auth and caller will handle
    }
  } else {
    _inode = allocate_inode(*get_inode());
    decode(_inode->version, p);
  }
  nest_info_t rstat;
  decode(rstat, p);
  if (!is_auth()) {
    dout(10) << __func__ << " taking inode rstat " << rstat << " for " << *this << dendl;
    _inode->rstat = rstat;    // take inode summation if replica
  }
  __u32 n;
  decode(n, p);
  while (n--) {
    frag_t fg;
    snapid_t fgfirst;
    nest_info_t rstat;
    nest_info_t accounted_rstat;
    decltype(CDir::dirty_old_rstat) dirty_old_rstat;
    decode(fg, p);
    decode(fgfirst, p);
    decode(rstat, p);
    decode(accounted_rstat, p);
    decode(dirty_old_rstat, p);
    dout(10) << __func__ << " " << fg << " [" << fgfirst << ",head]" << dendl;
    dout(10) << __func__ << " " << fg << " rstat " << rstat << dendl;
    dout(10) << __func__ << " " << fg << " accounted_rstat " << accounted_rstat << dendl;
    dout(10) << __func__ << " " << fg << " dirty_old_rstat " << dirty_old_rstat << dendl;
    CDir *dir = get_dirfrag(fg);
    if (is_auth()) {
      ceph_assert(dir);                // i am auth; i had better have this dir open
      dout(10) << fg << " first " << dir->first << " -> " << fgfirst
	       << " on " << *dir << dendl;
      dir->first = fgfirst;
      auto _fnode = CDir::allocate_fnode(*dir->get_fnode());
      _fnode->rstat = rstat;
      _fnode->accounted_rstat = accounted_rstat;
      dir->reset_fnode(std::move(_fnode));
      dir->dirty_old_rstat.swap(dirty_old_rstat);
      if (!(rstat == accounted_rstat) || !dir->dirty_old_rstat.empty()) {
	dout(10) << fg << " setting nestlock updated flag" << dendl;
	nestlock.mark_dirty();  // ok bc we're auth and caller will handle
      }
    } else {
      if (dir && dir->is_auth()) {
	dout(10) << fg << " first " << dir->first << " -> " << fgfirst
		 << " on " << *dir << dendl;
	dir->first = fgfirst;
	const auto& pf = dir->get_projected_fnode();
	finish_scatter_update(&nestlock, dir,
			      _inode->rstat.version, pf->accounted_rstat.version);
      }
    }
  }
  DECODE_FINISH(p);
  if (_inode)
    reset_inode(std::move(_inode));
}
/* Serialize the ixattr lock payload: version, ctime, xattr map and
 * xattr_version (struct_v 2).  Mirror of decode_lock_ixattr(). */
void CInode::encode_lock_ixattr(bufferlist& bl)
{
  ENCODE_START(2, 1, bl);
  encode(get_inode()->version, bl);
  encode(get_inode()->ctime, bl);
  encode_xattrs(bl);
  encode(get_inode()->xattr_version, bl);
  ENCODE_FINISH(bl);
}
/* Replica-side mirror of encode_lock_ixattr(): adopt the auth's version,
 * ctime (monotonically merged), xattrs, and (struct_v >= 2) xattr_version. */
void CInode::decode_lock_ixattr(bufferlist::const_iterator& p)
{
  ceph_assert(!is_auth());
  auto _inode = allocate_inode(*get_inode());
  DECODE_START(2, p);
  decode(_inode->version, p);
  utime_t tm;
  decode(tm, p);
  // ctime only ever moves forward
  if (_inode->ctime < tm)
    _inode->ctime = tm;
  decode_xattrs(p);
  if (struct_v >= 2) {
    decode(_inode->xattr_version, p);
  }
  DECODE_FINISH(p);
  reset_inode(std::move(_inode));
}
/* Serialize the isnap lock payload: version, ctime and the snap blob.
 * Mirror of decode_lock_isnap(). */
void CInode::encode_lock_isnap(bufferlist& bl)
{
  ENCODE_START(1, 1, bl);
  encode(get_inode()->version, bl);
  encode(get_inode()->ctime, bl);
  encode_snap(bl);
  ENCODE_FINISH(bl);
}
/* Replica-side mirror of encode_lock_isnap(). */
void CInode::decode_lock_isnap(bufferlist::const_iterator& p)
{
  ceph_assert(!is_auth());
  auto _inode = allocate_inode(*get_inode());
  DECODE_START(1, p);
  decode(_inode->version, p);
  utime_t tm;
  decode(tm, p);
  // ctime only ever moves forward
  if (_inode->ctime < tm) _inode->ctime = tm;
  decode_snap(p);
  DECODE_FINISH(p);
  reset_inode(std::move(_inode));
}
/* Serialize the iflock (file locking) payload: version plus the file-lock
 * state via _encode_file_locks().  Mirror of decode_lock_iflock(). */
void CInode::encode_lock_iflock(bufferlist& bl)
{
  ENCODE_START(1, 1, bl);
  encode(get_inode()->version, bl);
  _encode_file_locks(bl);
  ENCODE_FINISH(bl);
}
/* Replica-side mirror of encode_lock_iflock(). */
void CInode::decode_lock_iflock(bufferlist::const_iterator& p)
{
  ceph_assert(!is_auth());
  auto _inode = allocate_inode(*get_inode());
  DECODE_START(1, p);
  decode(_inode->version, p);
  _decode_file_locks(p);
  DECODE_FINISH(p);
  reset_inode(std::move(_inode));
}
/* Serialize the ipolicy lock payload.  Only directories carry policy state
 * (layout, quota, export pins); for non-dirs the payload is empty.
 * struct_v 2 added the ephemeral export pins.  Mirror of
 * decode_lock_ipolicy(). */
void CInode::encode_lock_ipolicy(bufferlist& bl)
{
  ENCODE_START(2, 1, bl);
  if (is_dir()) {
    encode(get_inode()->version, bl);
    encode(get_inode()->ctime, bl);
    encode(get_inode()->layout, bl, mdcache->mds->mdsmap->get_up_features());
    encode(get_inode()->quota, bl);
    encode(get_inode()->export_pin, bl);
    encode(get_inode()->export_ephemeral_distributed_pin, bl);
    encode(get_inode()->export_ephemeral_random_pin, bl);
  }
  ENCODE_FINISH(bl);
}
/* Replica-side mirror of encode_lock_ipolicy().  After applying the new
 * policy, re-evaluate export pinning if any pin value changed. */
void CInode::decode_lock_ipolicy(bufferlist::const_iterator& p)
{
  ceph_assert(!is_auth());
  auto _inode = allocate_inode(*get_inode());
  DECODE_START(1, p);
  if (is_dir()) {
    decode(_inode->version, p);
    utime_t tm;
    decode(tm, p);
    // ctime only ever moves forward
    if (_inode->ctime < tm)
      _inode->ctime = tm;
    decode(_inode->layout, p);
    decode(_inode->quota, p);
    decode(_inode->export_pin, p);
    if (struct_v >= 2) {
      decode(_inode->export_ephemeral_distributed_pin, p);
      decode(_inode->export_ephemeral_random_pin, p);
    }
  }
  DECODE_FINISH(p);
  // compare old vs new pins before the new inode is installed
  bool pin_updated = (get_inode()->export_pin != _inode->export_pin) ||
                     (get_inode()->export_ephemeral_distributed_pin !=
                      _inode->export_ephemeral_distributed_pin);
  reset_inode(std::move(_inode));
  maybe_export_pin(pin_updated);
}
/* Serialize the state guarded by one inode lock for transmission to peers.
 * Always emits [first, parent-dentry first (unless base inode)] followed by
 * a per-lock-type payload.  decode_lock_state() is the exact mirror. */
void CInode::encode_lock_state(int type, bufferlist& bl)
{
  ENCODE_START(1, 1, bl);
  encode(first, bl);
  if (!is_base())
    encode(parent->first, bl);

  switch (type) {
  case CEPH_LOCK_IAUTH:
    encode_lock_iauth(bl);
    break;

  case CEPH_LOCK_ILINK:
    encode_lock_ilink(bl);
    break;

  case CEPH_LOCK_IDFT:
    encode_lock_idft(bl);
    break;

  case CEPH_LOCK_IFILE:
    encode_lock_ifile(bl);
    break;

  case CEPH_LOCK_INEST:
    encode_lock_inest(bl);
    break;

  case CEPH_LOCK_IXATTR:
    encode_lock_ixattr(bl);
    break;

  case CEPH_LOCK_ISNAP:
    encode_lock_isnap(bl);
    break;

  case CEPH_LOCK_IFLOCK:
    encode_lock_iflock(bl);
    break;

  case CEPH_LOCK_IPOLICY:
    encode_lock_ipolicy(bl);
    break;

  default:
    ceph_abort();
  }
  ENCODE_FINISH(bl);
}
/* for more info on scatterlocks, see comments by Locker::scatter_writebehind */
/* Apply serialized lock state produced by encode_lock_state().  Replicas
 * adopt the auth's `first` for the inode and parent dentry before the
 * lock-type-specific payload is decoded. */
void CInode::decode_lock_state(int type, const bufferlist& bl)
{
  auto p = bl.cbegin();

  DECODE_START(1, p);
  snapid_t newfirst;
  using ceph::decode;
  decode(newfirst, p);
  if (!is_auth() && newfirst != first) {
    dout(10) << __func__ << " first " << first << " -> " << newfirst << dendl;
    first = newfirst;
  }
  if (!is_base()) {
    decode(newfirst, p);
    if (!parent->is_auth() && newfirst != parent->first) {
      dout(10) << __func__ << " parent first " << first << " -> " << newfirst << dendl;
      parent->first = newfirst;
    }
  }

  switch (type) {
  case CEPH_LOCK_IAUTH:
    decode_lock_iauth(p);
    break;

  case CEPH_LOCK_ILINK:
    decode_lock_ilink(p);
    break;

  case CEPH_LOCK_IDFT:
    decode_lock_idft(p);
    break;

  case CEPH_LOCK_IFILE:
    decode_lock_ifile(p);
    break;

  case CEPH_LOCK_INEST:
    decode_lock_inest(p);
    break;

  case CEPH_LOCK_IXATTR:
    decode_lock_ixattr(p);
    break;

  case CEPH_LOCK_ISNAP:
    decode_lock_isnap(p);
    break;

  case CEPH_LOCK_IFLOCK:
    decode_lock_iflock(p);
    break;

  case CEPH_LOCK_IPOLICY:
    decode_lock_ipolicy(p);
    break;

  default:
    ceph_abort();
  }
  DECODE_FINISH(p);
}
// True if any of the three scatterlocks still carries unflushed state.
bool CInode::is_dirty_scattered()
{
  if (filelock.is_dirty_or_flushing())
    return true;
  if (nestlock.is_dirty_or_flushing())
    return true;
  return dirfragtreelock.is_dirty_or_flushing();
}
void CInode::clear_scatter_dirty()
{
filelock.remove_dirty();
nestlock.remove_dirty();
dirfragtreelock.remove_dirty();
}
// Detach this (directory) inode from the log segment's dirty-dirfrag list
// for the given scatterlock type.
void CInode::clear_dirty_scattered(int type)
{
  dout(10) << __func__ << " " << type << " on " << *this << dendl;
  ceph_assert(is_dir());
  if (type == CEPH_LOCK_IFILE) {
    item_dirty_dirfrag_dir.remove_myself();
  } else if (type == CEPH_LOCK_INEST) {
    item_dirty_dirfrag_nest.remove_myself();
  } else if (type == CEPH_LOCK_IDFT) {
    item_dirty_dirfrag_dirfragtree.remove_myself();
  } else {
    ceph_abort();
  }
}
/*
* when we initially scatter a lock, we need to check if any of the dirfrags
* have out of date accounted_rstat/fragstat. if so, mark the lock stale.
*/
/* for more info on scatterlocks, see comments by Locker::scatter_writebehind */
/* Begin scattering a lock: for every auth dirfrag, check whether its
 * accounted stat version lags the projected inode's, and if so journal an
 * update (finish_scatter_update) or, for IDFT, clear the dirty-dft flag. */
void CInode::start_scatter(ScatterLock *lock)
{
  dout(10) << __func__ << " " << *lock << " on " << *this << dendl;
  ceph_assert(is_auth());
  const auto& pi = get_projected_inode();

  for (const auto &p : dirfrags) {
    frag_t fg = p.first;
    CDir *dir = p.second;
    const auto& pf = dir->get_projected_fnode();
    dout(20) << fg << " " << *dir << dendl;

    if (!dir->is_auth())
      continue;

    switch (lock->get_type()) {
    case CEPH_LOCK_IFILE:
      finish_scatter_update(lock, dir, pi->dirstat.version, pf->accounted_fragstat.version);
      break;

    case CEPH_LOCK_INEST:
      finish_scatter_update(lock, dir, pi->rstat.version, pf->accounted_rstat.version);
      break;

    case CEPH_LOCK_IDFT:
      dir->state_clear(CDir::STATE_DIRTYDFT);
      break;
    }
  }
}
/* Journal-commit callback used by CInode::finish_scatter_update(): once the
 * EUpdate is durable, apply the mutation via _finish_frag_update(). */
class C_Inode_FragUpdate : public MDSLogContextBase {
protected:
  CInode *in;
  CDir *dir;
  MutationRef mut;
  MDSRank *get_mds() override {return in->mdcache->mds;}
  void finish(int r) override {
    in->_finish_frag_update(dir, mut);
  }

public:
  C_Inode_FragUpdate(CInode *i, CDir *d, MutationRef& m) : in(i), dir(d), mut(m) {}
};
/* If this dirfrag's accounted scatter stat lags the inode's version,
 * journal an update that rolls accounted_fragstat/accounted_rstat forward.
 * Frozen or unloaded frags are skipped (the lock stays stale for them).
 * The journaled mutation is applied later via C_Inode_FragUpdate. */
void CInode::finish_scatter_update(ScatterLock *lock, CDir *dir,
				   version_t inode_version, version_t dir_accounted_version)
{
  frag_t fg = dir->get_frag();
  ceph_assert(dir->is_auth());

  if (dir->is_frozen()) {
    dout(10) << __func__ << " " << fg << " frozen, marking " << *lock << " stale " << *dir << dendl;
  } else if (dir->get_version() == 0) {
    dout(10) << __func__ << " " << fg << " not loaded, marking " << *lock << " stale " << *dir << dendl;
  } else {
    if (dir_accounted_version != inode_version) {
      dout(10) << __func__ << " " << fg << " journaling accounted scatterstat update v" << inode_version << dendl;

      MDLog *mdlog = mdcache->mds->mdlog;
      MutationRef mut(new MutationImpl());
      mut->ls = mdlog->get_current_segment();

      auto pf = dir->project_fnode(mut);

      std::string_view ename;
      switch (lock->get_type()) {
      case CEPH_LOCK_IFILE:
	pf->fragstat.version = inode_version;
	pf->accounted_fragstat = pf->fragstat;
	ename = "lock ifile accounted scatter stat update";
	break;
      case CEPH_LOCK_INEST:
	pf->rstat.version = inode_version;
	pf->accounted_rstat = pf->rstat;
	ename = "lock inest accounted scatter stat update";

	if (!is_auth() && lock->get_state() == LOCK_MIX) {
	  dout(10) << __func__ << " try to assimilate dirty rstat on "
		   << *dir << dendl;
	  dir->assimilate_dirty_rstat_inodes(mut);
	}

	break;
      default:
	ceph_abort();
      }

      EUpdate *le = new EUpdate(mdlog, ename);
      mdlog->start_entry(le);
      le->metablob.add_dir_context(dir);
      le->metablob.add_dir(dir, true);

      ceph_assert(!dir->is_frozen());
      mut->auth_pin(dir);

      if (lock->get_type() == CEPH_LOCK_INEST &&
	  !is_auth() && lock->get_state() == LOCK_MIX) {
	dout(10) << __func__ << " finish assimilating dirty rstat on "
		 << *dir << dendl;
	dir->assimilate_dirty_rstat_inodes_finish(&le->metablob);

	// rstat still unaccounted: force-wrlock and flag the nestlock
	if (!(pf->rstat == pf->accounted_rstat)) {
	  if (!mut->is_wrlocked(&nestlock)) {
	    mdcache->mds->locker->wrlock_force(&nestlock, mut);
	  }

	  mdcache->mds->locker->mark_updated_scatterlock(&nestlock);
	  mut->ls->dirty_dirfrag_nest.push_back(&item_dirty_dirfrag_nest);
	}
      }

      pf->version = dir->pre_dirty();

      mdlog->submit_entry(le, new C_Inode_FragUpdate(this, dir, mut));
    } else {
      dout(10) << __func__ << " " << fg << " accounted " << *lock
	       << " scatter stat unchanged at v" << dir_accounted_version << dendl;
    }
  }
}
/* Journal-commit continuation for finish_scatter_update(): apply the
 * projected fnode, release the mutation's locks, and clean it up. */
void CInode::_finish_frag_update(CDir *dir, MutationRef& mut)
{
  dout(10) << __func__ << " on " << *dir << dendl;
  mut->apply();
  mdcache->mds->locker->drop_locks(mut.get());
  mut->cleanup();
}
/*
* when we gather a lock, we need to assimilate dirfrag changes into the inode
* state. it's possible we can't update the dirfrag accounted_rstat/fragstat
* because the frag is auth and frozen, or that the replica couldn't for the same
* reason. hopefully it will get updated the next time the lock cycles.
*
* we have two dimensions of behavior:
* - we may be (auth and !frozen), and able to update, or not.
* - the frag may be stale, or not.
*
* if the frag is non-stale, we want to assimilate the diff into the
* inode, regardless of whether it's auth or updateable.
*
* if we update the frag, we want to set accounted_fragstat = frag,
* both if we took the diff or it was stale and we are making it
* un-stale.
*/
/* for more info on scatterlocks, see comments by Locker::scatter_writebehind */
/* Gather a scatterlock: fold each dirfrag's (fragstat or rstat) delta back
 * into the projected inode.  Frags that are unloaded are skipped (and make
 * the cross-check invalid); frags that are non-auth/frozen contribute their
 * stats but are not updated.  When all frags were visible, a sanity
 * cross-check compares the summed frag stats against the inode's and, on
 * mismatch, logs an error (or quietly repairs under REPAIRSTATS) and trusts
 * the dirfrag side. */
void CInode::finish_scatter_gather_update(int type, MutationRef& mut)
{
  LogChannelRef clog = mdcache->mds->clog;

  dout(10) << __func__ << " " << type << " on " << *this << dendl;
  ceph_assert(is_auth());

  switch (type) {
  case CEPH_LOCK_IFILE:
    {
      fragtree_t tmpdft = dirfragtree;
      struct frag_info_t dirstat;
      bool dirstat_valid = true;

      // adjust summation
      ceph_assert(is_auth());
      auto pi = _get_projected_inode();

      bool touched_mtime = false, touched_chattr = false;
      dout(20) << "  orig dirstat " << pi->dirstat << dendl;
      pi->dirstat.version++;
      for (const auto &p : dirfrags) {
	frag_t fg = p.first;
	CDir *dir = p.second;
	dout(20) << fg << " " << *dir << dendl;

	bool update;
	if (dir->get_version() != 0) {
	  update = dir->is_auth() && !dir->is_frozen();
	} else {
	  update = false;
	  dirstat_valid = false;
	}

	CDir::fnode_const_ptr pf;
	if (update) {
	  mut->auth_pin(dir);
	  pf = dir->project_fnode(mut);
	} else {
	  pf = dir->get_projected_fnode();
	}

	if (pf->accounted_fragstat.version == pi->dirstat.version - 1) {
	  dout(20) << fg << "           fragstat " << pf->fragstat << dendl;
	  dout(20) << fg << " accounted_fragstat " << pf->accounted_fragstat << dendl;
	  pi->dirstat.add_delta(pf->fragstat, pf->accounted_fragstat, &touched_mtime, &touched_chattr);
	} else {
	  dout(20) << fg << " skipping STALE accounted_fragstat " << pf->accounted_fragstat << dendl;
	}

	if (pf->fragstat.nfiles < 0 ||
	    pf->fragstat.nsubdirs < 0) {
	  clog->error() << "bad/negative dir size on "
			<< dir->dirfrag() << " " << pf->fragstat;
	  ceph_assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);

	  // clamp negative counts to zero rather than propagating them
	  auto _pf = const_cast<fnode_t*>(pf.get());
	  if (pf->fragstat.nfiles < 0)
	    _pf->fragstat.nfiles = 0;
	  if (pf->fragstat.nsubdirs < 0)
	    _pf->fragstat.nsubdirs = 0;
	}

	if (update) {
	  auto _pf = const_cast<fnode_t*>(pf.get());
	  _pf->accounted_fragstat = _pf->fragstat;
	  _pf->fragstat.version = _pf->accounted_fragstat.version = pi->dirstat.version;
	  _pf->version = dir->pre_dirty();
	  dout(10) << fg << " updated accounted_fragstat " << pf->fragstat << " on " << *dir << dendl;
	}

	tmpdft.force_to_leaf(g_ceph_context, fg);
	dirstat.add(pf->fragstat);
      }
      if (touched_mtime)
	pi->mtime = pi->ctime = pi->dirstat.mtime;
      if (touched_chattr)
	pi->change_attr++;
      dout(20) << " final dirstat " << pi->dirstat << dendl;

      if (dirstat_valid && !dirstat.same_sums(pi->dirstat)) {
	frag_vec_t leaves;
	tmpdft.get_leaves_under(frag_t(), leaves);
	// cross-check is only meaningful if every leaf frag was present
	for (const auto& leaf : leaves) {
	  if (!dirfrags.count(leaf)) {
	    dirstat_valid = false;
	    break;
	  }
	}
	if (dirstat_valid) {
	  if (state_test(CInode::STATE_REPAIRSTATS)) {
	    dout(20) << " dirstat mismatch, fixing" << dendl;
	  } else {
	    clog->error() << "unmatched fragstat on " << ino() << ", inode has "
			  << pi->dirstat << ", dirfrags have " << dirstat;
	    ceph_assert(!"unmatched fragstat" == g_conf()->mds_verify_scatter);
	  }
	  // trust the dirfrags for now
	  version_t v = pi->dirstat.version;
	  if (pi->dirstat.mtime > dirstat.mtime)
	    dirstat.mtime = pi->dirstat.mtime;
	  if (pi->dirstat.change_attr > dirstat.change_attr)
	    dirstat.change_attr = pi->dirstat.change_attr;
	  pi->dirstat = dirstat;
	  pi->dirstat.version = v;
	}
      }

      if (pi->dirstat.nfiles < 0 || pi->dirstat.nsubdirs < 0) {
	std::string path;
	make_path_string(path);
	clog->error() << "Inconsistent statistics detected: fragstat on inode "
		      << ino() << " (" << path << "), inode has " << pi->dirstat;
	ceph_assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);

	if (pi->dirstat.nfiles < 0)
	  pi->dirstat.nfiles = 0;
	if (pi->dirstat.nsubdirs < 0)
	  pi->dirstat.nsubdirs = 0;
      }
    }
    break;

  case CEPH_LOCK_INEST:
    {
      // adjust summation
      ceph_assert(is_auth());

      fragtree_t tmpdft = dirfragtree;
      nest_info_t rstat;
      bool rstat_valid = true;

      rstat.rsubdirs = 1;
      if (const sr_t *srnode = get_projected_srnode(); srnode)
	rstat.rsnaps = srnode->snaps.size();

      auto pi = _get_projected_inode();
      dout(20) << "  orig rstat " << pi->rstat << dendl;
      pi->rstat.version++;
      for (const auto &p : dirfrags) {
	frag_t fg = p.first;
	CDir *dir = p.second;
	dout(20) << fg << " " << *dir << dendl;

	bool update;
	if (dir->get_version() != 0) {
	  update = dir->is_auth() && !dir->is_frozen();
	} else {
	  update = false;
	  rstat_valid = false;
	}

	CDir::fnode_const_ptr pf;
	if (update) {
	  mut->auth_pin(dir);
	  pf = dir->project_fnode(mut);
	} else {
	  pf = dir->get_projected_fnode();
	}

	if (pf->accounted_rstat.version == pi->rstat.version-1) {
	  // only pull this frag's dirty rstat inodes into the frag if
	  // the frag is non-stale and updateable.  if it's stale,
	  // that info will just get thrown out!
	  if (update)
	    dir->assimilate_dirty_rstat_inodes(mut);

	  dout(20) << fg << "           rstat " << pf->rstat << dendl;
	  dout(20) << fg << " accounted_rstat " << pf->accounted_rstat << dendl;
	  dout(20) << fg << " dirty_old_rstat " << dir->dirty_old_rstat << dendl;
	  mdcache->project_rstat_frag_to_inode(pf->rstat, pf->accounted_rstat,
					       dir->first, CEPH_NOSNAP, this, true);
	  for (auto &p : dir->dirty_old_rstat) {
	    mdcache->project_rstat_frag_to_inode(p.second.rstat, p.second.accounted_rstat,
						 p.second.first, p.first, this, true);
	  }
	  if (update)  // dir contents not valid if frozen or non-auth
	    dir->check_rstats();
	} else {
	  dout(20) << fg << " skipping STALE accounted_rstat " << pf->accounted_rstat << dendl;
	}
	if (update) {
	  auto _pf = const_cast<fnode_t*>(pf.get());
	  _pf->accounted_rstat = pf->rstat;
	  _pf->rstat.version = _pf->accounted_rstat.version = pi->rstat.version;
	  _pf->version = dir->pre_dirty();
	  dir->dirty_old_rstat.clear();
	  dir->check_rstats();
	  dout(10) << fg << " updated accounted_rstat " << pf->rstat << " on " << *dir << dendl;
	}

	tmpdft.force_to_leaf(g_ceph_context, fg);
	rstat.add(pf->rstat);
      }
      dout(20) << " final rstat " << pi->rstat << dendl;

      if (rstat_valid && !rstat.same_sums(pi->rstat)) {
	frag_vec_t leaves;
	tmpdft.get_leaves_under(frag_t(), leaves);
	// cross-check is only meaningful if every leaf frag was present
	for (const auto& leaf : leaves) {
	  if (!dirfrags.count(leaf)) {
	    rstat_valid = false;
	    break;
	  }
	}
	if (rstat_valid) {
	  if (state_test(CInode::STATE_REPAIRSTATS)) {
	    dout(20) << " rstat mismatch, fixing" << dendl;
	  } else {
	    clog->error() << "inconsistent rstat on inode " << ino()
			  << ", inode has " << pi->rstat
			  << ", directory fragments have " << rstat;
	    ceph_assert(!"unmatched rstat" == g_conf()->mds_verify_scatter);
	  }
	  // trust the dirfrag for now
	  version_t v = pi->rstat.version;
	  if (pi->rstat.rctime > rstat.rctime)
	    rstat.rctime = pi->rstat.rctime;
	  pi->rstat = rstat;
	  pi->rstat.version = v;
	}
      }

      mdcache->broadcast_quota_to_client(this);
    }
    break;

  case CEPH_LOCK_IDFT:
    break;

  default:
    ceph_abort();
  }
}
/* Second phase of the scatter gather: journal each updateable dirfrag's
 * projected fnode (and, for INEST, the assimilated dirty rstat inodes)
 * into the given metablob. */
void CInode::finish_scatter_gather_update_accounted(int type, EMetaBlob *metablob)
{
  dout(10) << __func__ << " " << type << " on " << *this << dendl;
  ceph_assert(is_auth());

  for (const auto &p : dirfrags) {
    CDir *dir = p.second;
    // only frags we could actually update in phase one
    if (!dir->is_auth() || dir->get_version() == 0 || dir->is_frozen())
      continue;
    
    if (type == CEPH_LOCK_IDFT)
      continue;  // nothing to do.

    if (type == CEPH_LOCK_INEST)
      dir->assimilate_dirty_rstat_inodes_finish(metablob);

    dout(10) << " journaling updated frag accounted_ on " << *dir << dendl;
    ceph_assert(dir->is_projected());
    metablob->add_dir(dir, true);
  }
}
// waiting
// Frozen either directly, or indirectly via a frozen containing dirfrag.
bool CInode::is_frozen() const
{
  return is_frozen_inode() || (parent && parent->dir->is_frozen());
}
// True when the containing dirfrag (if any) is a frozen dir.
bool CInode::is_frozen_dir() const
{
  return parent && parent->dir->is_frozen_dir();
}
// Freezing either directly, or indirectly via a freezing containing dirfrag.
bool CInode::is_freezing() const
{
  return is_freezing_inode() || (parent && parent->dir->is_freezing());
}
// Queue a context waiting for the given dirfrag to become available.
// The first waiter takes a PIN_DIRWAITER reference on the inode.
void CInode::add_dir_waiter(frag_t fg, MDSContext *c)
{
  bool first_waiter = waiting_on_dir.empty();
  if (first_waiter)
    get(PIN_DIRWAITER);
  waiting_on_dir[fg].push_back(c);
  dout(10) << __func__ << " frag " << fg << " " << c << " on " << *this << dendl;
}
// Move all waiters registered on dirfrag `fg` into `ls`, dropping the
// PIN_DIRWAITER reference once no dirfrag waiters remain.
void CInode::take_dir_waiting(frag_t fg, MDSContext::vec& ls)
{
  if (waiting_on_dir.empty())
    return;

  auto found = waiting_on_dir.find(fg);
  if (found == waiting_on_dir.end())
    return;

  dout(10) << __func__ << " frag " << fg << " on " << *this << dendl;
  for (auto& ctx : found->second)
    ls.push_back(ctx);
  waiting_on_dir.erase(found);

  if (waiting_on_dir.empty())
    put(PIN_DIRWAITER);
}
// Register a waiter.  If the condition is really about the containing
// directory (i.e. this inode itself is not ambiguous/freezing/frozen),
// hand the waiter up to the parent dirfrag instead.
void CInode::add_waiter(uint64_t tag, MDSContext *c)
{
  dout(10) << __func__ << " tag " << std::hex << tag << std::dec << " " << c
	   << " !ambig " << !state_test(STATE_AMBIGUOUSAUTH)
	   << " !frozen " << !is_frozen_inode()
	   << " !freezing " << !is_freezing_inode()
	   << dendl;
  // wait on the directory?
  //  make sure its not the inode that is explicitly ambiguous|freezing|frozen
  bool singleauth_up = (tag & WAIT_SINGLEAUTH) && !state_test(STATE_AMBIGUOUSAUTH);
  bool unfreeze_up = (tag & WAIT_UNFREEZE) &&
    !is_frozen_inode() && !is_freezing_inode() && !is_frozen_auth_pin();
  if (singleauth_up || unfreeze_up) {
    dout(15) << "passing waiter up tree" << dendl;
    parent->dir->add_waiter(tag, c);
    return;
  }
  dout(15) << "taking waiter here" << dendl;
  MDSCacheObject::add_waiter(tag, c);
}
// Collect waiters matching `mask`.  WAIT_DIR drains every per-dirfrag
// waiter list (and releases PIN_DIRWAITER); generic waiters are taken by
// the base class.
void CInode::take_waiting(uint64_t mask, MDSContext::vec& ls)
{
  if ((mask & WAIT_DIR) && !waiting_on_dir.empty()) {
    // take all dentry waiters
    for (auto it = waiting_on_dir.begin(); it != waiting_on_dir.end(); ) {
      dout(10) << __func__ << " dirfrag " << it->first << " on " << *this << dendl;
      for (auto& ctx : it->second)
	ls.push_back(ctx);
      it = waiting_on_dir.erase(it);
    }
    put(PIN_DIRWAITER);
  }

  // waiting
  MDSCacheObject::take_waiting(mask, ls);
}
/* Complete an in-progress inode freeze once the auth-pin count has dropped
 * to the allowed level and the parent dirfrag no longer suppresses frozen
 * inodes: swap the FREEZING pin/state for FROZEN and wake WAIT_FROZEN
 * waiters. */
void CInode::maybe_finish_freeze_inode()
{
  CDir *dir = get_parent_dir();
  if (auth_pins > auth_pin_freeze_allowance || dir->frozen_inode_suppressed)
    return;

  dout(10) << "maybe_finish_freeze_inode - frozen" << dendl;
  ceph_assert(auth_pins == auth_pin_freeze_allowance);
  get(PIN_FROZEN);
  put(PIN_FREEZING);
  state_clear(STATE_FREEZING);
  state_set(STATE_FROZEN);
  item_freezing_inode.remove_myself();
  dir->num_frozen_inodes++;

  finish_waiting(WAIT_FROZEN);
}
/* Try to freeze this inode while tolerating `auth_pin_allowance` pins.
 * Returns true if the inode is frozen on return; otherwise it enters the
 * FREEZING state and completes later via maybe_finish_freeze_inode().
 * Lock caches touching this inode or its parent dirfrag are invalidated
 * since they could hold the pins we are waiting on. */
bool CInode::freeze_inode(int auth_pin_allowance)
{
  CDir *dir = get_parent_dir();
  ceph_assert(dir);

  ceph_assert(auth_pin_allowance > 0);  // otherwise we need to adjust parent's nested_auth_pins
  ceph_assert(auth_pins >= auth_pin_allowance);
  if (auth_pins == auth_pin_allowance && !dir->frozen_inode_suppressed) {
    dout(10) << "freeze_inode - frozen" << dendl;
    if (!state_test(STATE_FROZEN)) {
      get(PIN_FROZEN);
      state_set(STATE_FROZEN);
      dir->num_frozen_inodes++;
    }
    return true;
  }

  dout(10) << "freeze_inode - waiting for auth_pins to drop to " << auth_pin_allowance << dendl;
  auth_pin_freeze_allowance = auth_pin_allowance;
  dir->freezing_inodes.push_back(&item_freezing_inode);

  get(PIN_FREEZING);
  state_set(STATE_FREEZING);

  if (!dir->lock_caches_with_auth_pins.empty())
    mdcache->mds->locker->invalidate_lock_caches(dir);

  const static int lock_types[] = {
    CEPH_LOCK_IVERSION, CEPH_LOCK_IFILE, CEPH_LOCK_IAUTH, CEPH_LOCK_ILINK, CEPH_LOCK_IDFT,
    CEPH_LOCK_IXATTR, CEPH_LOCK_ISNAP, CEPH_LOCK_INEST, CEPH_LOCK_IFLOCK, CEPH_LOCK_IPOLICY, 0
  };
  for (int i = 0; lock_types[i]; ++i) {
    auto lock = get_lock(lock_types[i]);
    if (lock->is_cached())
      mdcache->mds->locker->invalidate_lock_caches(lock);
  }
  // invalidate_lock_caches() may decrease dir->frozen_inode_suppressed
  // and finish freezing the inode
  return state_test(STATE_FROZEN);
}
// Undo a freeze (whether still FREEZING or fully FROZEN) and collect the
// WAIT_UNFREEZE waiters into `finished`.
void CInode::unfreeze_inode(MDSContext::vec& finished)
{
  dout(10) << __func__ << dendl;
  if (state_test(STATE_FREEZING)) {
    // freeze never completed; just cancel it
    state_clear(STATE_FREEZING);
    put(PIN_FREEZING);
    item_freezing_inode.remove_myself();
  } else if (state_test(STATE_FROZEN)) {
    state_clear(STATE_FROZEN);
    put(PIN_FROZEN);
    get_parent_dir()->num_frozen_inodes--;
  } else {
    ceph_abort();
  }
  take_waiting(WAIT_UNFREEZE, finished);
}
void CInode::unfreeze_inode()
{
MDSContext::vec finished;
unfreeze_inode(finished);
mdcache->mds->queue_waiters(finished);
}
/* Freeze the auth-pin count of an already-FROZEN inode; counted in the
 * parent dirfrag's num_frozen_inodes. */
void CInode::freeze_auth_pin()
{
  ceph_assert(state_test(CInode::STATE_FROZEN));
  state_set(CInode::STATE_FROZENAUTHPIN);
  get_parent_dir()->num_frozen_inodes++;
}
void CInode::unfreeze_auth_pin()
{
ceph_assert(state_test(CInode::STATE_FROZENAUTHPIN));
state_clear(CInode::STATE_FROZENAUTHPIN);
get_parent_dir()->num_frozen_inodes--;
if (!state_test(STATE_FREEZING|STATE_FROZEN)) {
MDSContext::vec finished;
take_waiting(WAIT_UNFREEZE, finished);
mdcache->mds->queue_waiters(finished);
}
}
/* Clear the ambiguous-auth flag and collect WAIT_SINGLEAUTH waiters. */
void CInode::clear_ambiguous_auth(MDSContext::vec& finished)
{
  ceph_assert(state_test(CInode::STATE_AMBIGUOUSAUTH));
  state_clear(CInode::STATE_AMBIGUOUSAUTH);
  take_waiting(CInode::WAIT_SINGLEAUTH, finished);
}
void CInode::clear_ambiguous_auth()
{
MDSContext::vec finished;
clear_ambiguous_auth(finished);
mdcache->mds->queue_waiters(finished);
}
// auth_pins
// An inode can be auth-pinned when it is auth, not being exported
// (freezing/frozen), and its whole parent chain also allows pinning.
// On refusal the reason is stored in *err_ret (if non-null).
bool CInode::can_auth_pin(int *err_ret) const {
  int reason = 0;
  if (!is_auth()) {
    reason = ERR_NOT_AUTH;
  } else if (is_freezing_inode() || is_frozen_inode() || is_frozen_auth_pin()) {
    reason = ERR_EXPORTING_INODE;
  } else if (parent) {
    // delegate the final decision to the parent dentry/dirfrag chain
    return parent->can_auth_pin(err_ret);
  }
  if (reason && err_ret)
    *err_ret = reason;
  return reason == 0;
}
/* Take an auth pin on this inode; `by` identifies the pinner (tracked only
 * with MDS_AUTHPIN_SET debugging).  The first pin takes PIN_AUTHPIN and
 * nested pin counts propagate to the parent dentry. */
void CInode::auth_pin(void *by) 
{
  if (auth_pins == 0)
    get(PIN_AUTHPIN);
  auth_pins++;

#ifdef MDS_AUTHPIN_SET
  auth_pin_set.insert(by);
#endif

  dout(10) << "auth_pin by " << by << " on " << *this << " now " << auth_pins << dendl;

  if (parent)
    parent->adjust_nested_auth_pins(1, this);
}
/* Drop an auth pin taken by auth_pin().  The last pin releases PIN_AUTHPIN;
 * if a freeze is pending, this may be the pin it was waiting for. */
void CInode::auth_unpin(void *by) 
{
  auth_pins--;

#ifdef MDS_AUTHPIN_SET
  {
    auto it = auth_pin_set.find(by);
    ceph_assert(it != auth_pin_set.end());
    auth_pin_set.erase(it);
  }
#endif

  if (auth_pins == 0)
    put(PIN_AUTHPIN);
  
  dout(10) << "auth_unpin by " << by << " on " << *this << " now " << auth_pins << dendl;
  
  ceph_assert(auth_pins >= 0);

  if (parent)
    parent->adjust_nested_auth_pins(-1, by);

  // dropping a pin may allow a pending freeze to complete
  if (is_freezing_inode())
    maybe_finish_freeze_inode();
}
// authority
/* Resolve this inode's authority: an explicit inode_auth wins, otherwise
 * inherit from the (projected) parent dirfrag. */
mds_authority_t CInode::authority() const
{
  if (inode_auth.first >= 0) 
    return inode_auth;

  if (parent)
    return parent->dir->authority();

  // new items that are not yet linked in (in the committed plane) belong
  // to their first parent.
  if (!projected_parent.empty())
    return projected_parent.front()->dir->authority();

  return CDIR_AUTH_UNDEF;
}
// SNAP
// The oldest snapid covered by this inode: either the head inode's first,
// or the first of the oldest archived old_inode, capped by oldest_snap.
snapid_t CInode::get_oldest_snap()
{
  snapid_t candidate = is_any_old_inodes()
    ? get_old_inodes()->begin()->second.first
    : first;
  return std::min(candidate, oldest_snap);
}
/* Copy-on-write the current (or previous-projected) inode + xattrs into an
 * old_inode entry covering [first, follows], then advance first past
 * `follows`.  Entries whose rstat is not yet accounted are remembered in
 * dirty_old_rstats (when mds_snap_rstat is enabled). */
const CInode::mempool_old_inode& CInode::cow_old_inode(snapid_t follows, bool cow_head)
{
  ceph_assert(follows >= first);

  const auto& pi = cow_head ? get_projected_inode() : get_previous_projected_inode();
  const auto& px = cow_head ? get_projected_xattrs() : get_previous_projected_xattrs();

  // clone the old_inode map so the new entry can be added
  auto _old_inodes = allocate_old_inode_map();
  if (old_inodes)
    *_old_inodes = *old_inodes;

  mempool_old_inode &old = (*_old_inodes)[follows];
  old.first = first;
  old.inode = *pi;
  if (px) {
    dout(10) << " " << px->size() << " xattrs cowed, " << *px << dendl;
    old.xattrs = *px;
  }

  if (first < oldest_snap)
    oldest_snap = first;

  old.inode.trim_client_ranges(follows);

  if (g_conf()->mds_snap_rstat &&
      !(old.inode.rstat == old.inode.accounted_rstat))
    dirty_old_rstats.insert(follows);
  
  first = follows+1;

  dout(10) << __func__ << " " << (cow_head ? "head" : "previous_head" )
	   << " to [" << old.first << "," << follows << "] on "
	   << *this << dendl;

  reset_old_inodes(std::move(_old_inodes));
  return old;
}
void CInode::pre_cow_old_inode()
{
snapid_t follows = mdcache->get_global_snaprealm()->get_newest_seq();
dout(20) << __func__ << " follows " << follows << " on " << *this << dendl;
if (first <= follows)
cow_old_inode(follows, true);
}
// Does any inode version (head or archived) cover `snapid`?
bool CInode::has_snap_data(snapid_t snapid)
{
  // head inode covers [first, last]
  if (snapid >= first && snapid <= last)
    return true;
  if (!is_any_old_inodes())
    return false;
  // otherwise look for an old_inode whose [second.first, key] range
  // contains snapid
  auto p = old_inodes->lower_bound(snapid);
  if (p == old_inodes->end())
    return false;
  if (p->second.first > snapid && p != old_inodes->begin())
    --p;
  return p->second.first <= snapid && snapid <= p->first;
}
/* Drop old_inode entries whose snapid range [entry.first, key] no longer
 * contains any live snapshot from `snaps`.  If every entry is stale the
 * whole map is released; otherwise a pruned copy is installed. */
void CInode::purge_stale_snap_data(const set<snapid_t>& snaps)
{
  dout(10) << __func__ << " " << snaps << dendl;

  if (!get_old_inodes())
    return;

  std::vector<snapid_t> to_remove;
  // iterate by const reference: mempool_old_inode embeds a full inode plus
  // an xattr map, so iterating by value copied every entry per pass
  for (const auto& p : *get_old_inodes()) {
    const snapid_t &id = p.first;
    const auto s = snaps.lower_bound(p.second.first);
    if (s == snaps.end() || *s > id) {
      // no live snapshot inside [p.second.first, id] -> stale
      dout(10) << " purging old_inode [" << p.second.first << "," << id << "]" << dendl;
      to_remove.push_back(id);
    }
  }

  if (to_remove.size() == get_old_inodes()->size()) {
    reset_old_inodes(old_inode_map_ptr());
  } else if (!to_remove.empty()) {
    auto _old_inodes = allocate_old_inode_map(*get_old_inodes());
    for (auto id : to_remove)
      _old_inodes->erase(id);
    reset_old_inodes(std::move(_old_inodes));
  }
}
/*
* pick/create an old_inode
*/
/* Return the old_inode key whose range covers `snap`, or 0 if the head (or
 * nothing) covers it. */
snapid_t CInode::pick_old_inode(snapid_t snap) const
{
  if (is_any_old_inodes()) {
    auto it = old_inodes->lower_bound(snap);  // p is first key >= to snap
    if (it != old_inodes->end() && it->second.first <= snap) {
      dout(10) << __func__ << " snap " << snap << " -> [" << it->second.first << "," << it->first << "]" << dendl;
      return it->first;
    }
  }
  dout(10) << __func__ << " snap " << snap << " -> nothing" << dendl;
  return 0;
}
/* Create this inode's SnapRealm (if absent) and link it under the nearest
 * ancestor realm; unless `nosplit`, the parent realm is split so inodes now
 * under the new realm move over. */
void CInode::open_snaprealm(bool nosplit)
{
  if (!snaprealm) {
    SnapRealm *parent = find_snaprealm();
    snaprealm = new SnapRealm(mdcache, this);
    if (parent) {
      dout(10) << __func__ << " " << snaprealm
	       << " parent is " << parent
	       << dendl;
      dout(30) << " siblings are " << parent->open_children << dendl;
      snaprealm->parent = parent;
      if (!nosplit)
	parent->split_at(snaprealm);
      parent->open_children.insert(snaprealm);
    }
  }
}
// Tear down this inode's SnapRealm, unlinking it from its parent realm.
void CInode::close_snaprealm(bool nojoin)
{
  if (!snaprealm)
    return;

  dout(15) << __func__ << " " << *snaprealm << dendl;
  if (snaprealm->parent) {
    snaprealm->parent->open_children.erase(snaprealm);
    //if (!nojoin)
    //snaprealm->parent->join(snaprealm);
  }
  delete snaprealm;
  snaprealm = 0;
}
// Walk up via the oldest parent dentry until an inode with its own
// snaprealm is found (or the top of the tree is reached).
SnapRealm *CInode::find_snaprealm() const
{
  const CInode *in = this;
  while (!in->snaprealm) {
    const CDentry *dn = in->get_oldest_parent_dn();
    if (!dn)
      break;
    in = dn->get_dir()->get_inode();
  }
  return in->snaprealm;
}
// Serialize this inode's srnode into `snapbl`; the blob stays empty when
// there is no snaprealm.
void CInode::encode_snap_blob(bufferlist &snapbl)
{
  if (!snaprealm)
    return;
  using ceph::encode;
  encode(snaprealm->srnode, snapbl);
  dout(20) << __func__ << " " << *snaprealm << dendl;
}
/* Apply a snap blob produced by encode_snap_blob().  A non-empty blob
 * (re)opens the realm and decodes the srnode; an empty blob on an inode
 * that has a realm means the realm was deleted and is merged away (only
 * legal during replay). */
void CInode::decode_snap_blob(const bufferlist& snapbl)
{
  using ceph::decode;
  if (snapbl.length()) {
    open_snaprealm();
    auto old_flags = snaprealm->srnode.flags;
    auto p = snapbl.cbegin();
    decode(snaprealm->srnode, p);
    if (!is_base()) {
      // a PARENT_GLOBAL flag flip changes which realm is our parent
      if ((snaprealm->srnode.flags ^ old_flags) & sr_t::PARENT_GLOBAL) {
	snaprealm->adjust_parent();
      }
    }
    dout(20) << __func__ << " " << *snaprealm << dendl;
  } else if (snaprealm &&
	     !is_root() && !is_mdsdir()) { // see https://tracker.ceph.com/issues/42675
    ceph_assert(mdcache->mds->is_any_replay());
    snaprealm->merge_to(NULL);
  }
}
/* Versioned wrapper around encode_snap_blob(): snap blob + oldest_snap. */
void CInode::encode_snap(bufferlist& bl)
{
  ENCODE_START(1, 1, bl);
  bufferlist snapbl;
  encode_snap_blob(snapbl);
  encode(snapbl, bl);
  encode(oldest_snap, bl);
  ENCODE_FINISH(bl);
}
/* Mirror of encode_snap(): oldest_snap is decoded before the blob is
 * applied so decode_snap_blob() sees the updated value. */
void CInode::decode_snap(bufferlist::const_iterator& p)
{
  DECODE_START(1, p);
  bufferlist snapbl;
  decode(snapbl, p);
  decode(oldest_snap, p);
  decode_snap_blob(snapbl);
  DECODE_FINISH(p);
}
// =============================================
// Pick the single client that could be granted loner (exclusive) caps:
// exactly one non-stale client must want write/read caps (or, for a dir,
// the dir must have no subtree/exporting frags).  Returns -1 when there is
// no unique candidate, when any MDS wants caps, or in readonly mode.
client_t CInode::calc_ideal_loner()
{
  if (mdcache->is_readonly())
    return -1;
  if (!get_mds_caps_wanted().empty())
    return -1;

  int matches = 0;
  client_t candidate = -1;
  for (const auto &[client, cap] : client_caps) {
    if (cap.is_stale())
      continue;
    bool interested = is_dir()
      ? !has_subtree_or_exporting_dirfrag()
      : bool(cap.wanted() & (CEPH_CAP_ANY_WR|CEPH_CAP_FILE_RD));
    if (!interested)
      continue;
    if (matches)
      return -1;  // more than one -> no loner
    matches++;
    candidate = client;
  }
  return candidate;
}
/* Recompute want_loner_cap and move loner_cap toward it: demote a stale
 * loner (bailing out if it cannot be dropped yet) and promote the new one.
 * Returns true iff the loner assignment changed. */
bool CInode::choose_ideal_loner()
{
  want_loner_cap = calc_ideal_loner();
  // bool, not int: this tracks a true/false fact and is the return value
  bool changed = false;
  if (loner_cap >= 0 && loner_cap != want_loner_cap) {
    if (!try_drop_loner())
      return false;
    changed = true;
  }

  if (want_loner_cap >= 0) {
    if (loner_cap < 0) {
      set_loner_cap(want_loner_cap);
      changed = true;
    } else
      ceph_assert(loner_cap == want_loner_cap);
  }
  return changed;
}
/* Promote want_loner_cap to loner_cap; fails only if a different client
 * currently holds the loner role. */
bool CInode::try_set_loner()
{
  ceph_assert(want_loner_cap >= 0);
  if (loner_cap >= 0 && loner_cap != want_loner_cap)
    return false;
  set_loner_cap(want_loner_cap);
  return true;
}
/* Record the loner client and propagate it to every lock that can issue
 * exclusive caps. */
void CInode::set_loner_cap(client_t l)
{
  loner_cap = l;
  authlock.set_excl_client(loner_cap);
  filelock.set_excl_client(loner_cap);
  linklock.set_excl_client(loner_cap);
  xattrlock.set_excl_client(loner_cap);
}
// Demote the current loner if possible.  The role can only be dropped when
// the loner holds no caps beyond what any client would be allowed anyway.
bool CInode::try_drop_loner()
{
  if (loner_cap < 0)
    return true;

  int other_allowed = get_caps_allowed_by_type(CAP_ANY);
  Capability *cap = get_client_cap(loner_cap);
  if (cap && (cap->issued() & ~other_allowed))
    return false;  // loner still holds exclusive-only caps

  set_loner_cap(-1);
  return true;
}
// choose new lock state during recovery, based on issued caps
/* During recovery, pick a lock state consistent with the caps currently
 * issued for this lock's cap bits (EXCL/BUFFER -> EXCL, WR -> EXCL or MIX,
 * dirty -> MIX/LOCK, otherwise SYNC).  Replicas keep their rejoin state. */
void CInode::choose_lock_state(SimpleLock *lock, int allissued)
{
  int shift = lock->get_cap_shift();
  int issued = (allissued >> shift) & lock->get_cap_mask();
  if (is_auth()) {
    if (lock->is_xlocked()) {
      // do nothing here
    } else if (lock->get_state() != LOCK_MIX) {
      if (issued & (CEPH_CAP_GEXCL | CEPH_CAP_GBUFFER))
	lock->set_state(LOCK_EXCL);
      else if (issued & CEPH_CAP_GWR) {
	if (issued & (CEPH_CAP_GCACHE | CEPH_CAP_GSHARED))
	  lock->set_state(LOCK_EXCL);
	else
	  lock->set_state(LOCK_MIX);
      } else if (lock->is_dirty()) {
	if (is_replicated())
	  lock->set_state(LOCK_MIX);
	else
	  lock->set_state(LOCK_LOCK);
      } else
	lock->set_state(LOCK_SYNC);
    }
  } else {
    // our states have already been chosen during rejoin.
    if (lock->is_xlocked())
      ceph_assert(lock->get_state() == LOCK_LOCK);
  }
}
// Recovery helper: derive lock states for all cap-bearing locks from the
// issued caps (dirty caps count as issued for this purpose).
void CInode::choose_lock_states(int dirty_caps)
{
  int combined = get_caps_issued() | dirty_caps;

  if (is_auth() && (combined & (CEPH_CAP_ANY_EXCL|CEPH_CAP_ANY_WR)))
    choose_ideal_loner();

  choose_lock_state(&filelock, combined);
  choose_lock_state(&nestlock, combined);
  choose_lock_state(&dirfragtreelock, combined);
  choose_lock_state(&authlock, combined);
  choose_lock_state(&xattrlock, combined);
  choose_lock_state(&linklock, combined);
}
// Number of client caps that are not stale.
int CInode::count_nonstale_caps()
{
  int count = 0;
  for (const auto &[client, cap] : client_caps) {
    if (!cap.is_stale())
      ++count;
  }
  return count;
}
// True when at least two clients hold non-stale caps (early-exits on the
// second match).
bool CInode::multiple_nonstale_caps()
{
  bool seen_one = false;
  for (const auto &p : client_caps) {
    if (p.second.is_stale())
      continue;
    if (seen_one)
      return true;
    seen_one = true;
  }
  return false;
}
void CInode::set_mds_caps_wanted(mempool::mds_co::compact_map<int32_t,int32_t>& m)
{
  // Swap in the new wanted map; if its emptiness changed, keep the
  // notable-caps accounting in sync.
  const bool was_empty = mds_caps_wanted.empty();
  mds_caps_wanted.swap(m);
  const bool now_empty = mds_caps_wanted.empty();
  if (was_empty && !now_empty)
    adjust_num_caps_notable(1);
  else if (!was_empty && now_empty)
    adjust_num_caps_notable(-1);
}
void CInode::set_mds_caps_wanted(mds_rank_t mds, int32_t wanted)
{
  const bool was_empty = mds_caps_wanted.empty();
  if (wanted != 0) {
    // Record (or refresh) this rank's wanted mask; the first entry
    // makes the inode notable.
    mds_caps_wanted[mds] = wanted;
    if (was_empty)
      adjust_num_caps_notable(1);
    return;
  }
  if (was_empty)
    return;  // nothing recorded, nothing to erase
  mds_caps_wanted.erase(mds);
  if (mds_caps_wanted.empty())
    adjust_num_caps_notable(-1);
}
Capability *CInode::add_client_cap(client_t client, Session *session,
				   SnapRealm *conrealm, bool new_inode)
{
  // Caps are only tracked on the head (live) inode, never on snapshots.
  ceph_assert(last == CEPH_NOSNAP);
  if (client_caps.empty()) {
    // First cap on this inode: pin it and attach it to a snap realm
    // (the caller-supplied one, or the nearest containing realm).
    get(PIN_CAPS);
    if (conrealm)
      containing_realm = conrealm;
    else
      containing_realm = find_snaprealm();
    containing_realm->inodes_with_caps.push_back(&item_caps);
    dout(10) << __func__ << " first cap, joining realm " << *containing_realm << dendl;
    mdcache->num_inodes_with_caps++;
    if (parent)
      parent->dir->adjust_num_inodes_with_caps(1);
  }
  // A brand-new inode can use cap id 1; otherwise allocate a fresh
  // globally-unique id from the cache.
  uint64_t cap_id = new_inode ? 1 : ++mdcache->last_cap_id;
  auto ret = client_caps.emplace(std::piecewise_construct, std::forward_as_tuple(client),
                                 std::forward_as_tuple(this, session, cap_id));
  ceph_assert(ret.second == true);
  Capability *cap = &ret.first->second;
  // The new cap follows snaps strictly before this inode's first snapid.
  cap->client_follows = first-1;
  containing_realm->add_cap(client, cap);
  return cap;
}
void CInode::remove_client_cap(client_t client)
{
  auto it = client_caps.find(client);
  ceph_assert(it != client_caps.end());
  Capability *cap = &it->second;
  // Unlink the cap from every list it may be on before destroying it.
  cap->item_session_caps.remove_myself();
  cap->item_revoking_caps.remove_myself();
  cap->item_client_revoking_caps.remove_myself();
  containing_realm->remove_cap(client, cap);
  // Losing the loner's cap clears the loner designation.
  if (client == loner_cap)
    loner_cap = -1;
  if (cap->is_wanted_notable())
    adjust_num_caps_notable(-1);
  client_caps.erase(it);
  if (client_caps.empty()) {
    // Last cap gone: unpin the inode and detach it from the realm.
    dout(10) << __func__ << " last cap, leaving realm " << *containing_realm << dendl;
    put(PIN_CAPS);
    item_caps.remove_myself();
    containing_realm = NULL;
    mdcache->num_inodes_with_caps--;
    if (parent)
      parent->dir->adjust_num_inodes_with_caps(-1);
  }
  //clean up advisory locks
  bool fcntl_removed = fcntl_locks ? fcntl_locks->remove_all_from(client) : false;
  bool flock_removed = flock_locks ? flock_locks->remove_all_from(client) : false;
  if (fcntl_removed || flock_removed) {
    // Dropping this client's file locks may unblock other waiters.
    MDSContext::vec waiters;
    take_waiting(CInode::WAIT_FLOCK, waiters);
    mdcache->mds->queue_waiters(waiters);
  }
}
void CInode::move_to_realm(SnapRealm *realm)
{
  dout(10) << __func__ << " joining realm " << *realm
	   << ", leaving realm " << *containing_realm << dendl;
  // Re-register every client cap under the destination realm.
  for (auto it = client_caps.begin(); it != client_caps.end(); ++it) {
    containing_realm->remove_cap(it->first, &it->second);
    realm->add_cap(it->first, &it->second);
  }
  // Move the inode itself onto the new realm's caps list.
  item_caps.remove_myself();
  realm->inodes_with_caps.push_back(&item_caps);
  containing_realm = realm;
}
Capability *CInode::reconnect_cap(client_t client, const cap_reconnect_t& icr, Session *session)
{
  Capability *cap = get_client_cap(client);
  if (cap) {
    // FIXME?
    cap->merge(icr.capinfo.wanted, icr.capinfo.issued);
  } else {
    // No existing cap: recreate one from the client's reconnect info,
    // reusing the cap id the client remembers.
    cap = add_client_cap(client, session);
    cap->set_cap_id(icr.capinfo.cap_id);
    cap->set_wanted(icr.capinfo.wanted);
    cap->issue_norevoke(icr.capinfo.issued);
    cap->reset_seq();
  }
  cap->set_last_issue_stamp(ceph_clock_now());
  return cap;
}
void CInode::clear_client_caps_after_export()
{
while (!client_caps.empty())
remove_client_cap(client_caps.begin()->first);
loner_cap = -1;
want_loner_cap = -1;
if (!get_mds_caps_wanted().empty()) {
mempool::mds_co::compact_map<int32_t,int32_t> empty;
set_mds_caps_wanted(empty);
}
}
void CInode::export_client_caps(map<client_t,Capability::Export>& cl)
{
  // Snapshot each client cap into its export (wire) representation.
  for (auto it = client_caps.begin(); it != client_caps.end(); ++it)
    cl[it->first] = it->second.make_export();
}
// caps allowed
int CInode::get_caps_liked() const
{
  // Regular files take everything except LAZYIO; directories never
  // take file data caps.
  if (!is_dir())
    return CEPH_CAP_ANY & ~CEPH_CAP_FILE_LAZYIO;
  return CEPH_CAP_PIN | CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_SHARED; // but not, say, FILE_RD|WR|WRBUFFER
}
int CInode::get_caps_allowed_ever() const
{
  // Start from what this inode type could ever take...
  int allowed = is_dir() ?
    (CEPH_CAP_PIN | CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_SHARED) :
    CEPH_CAP_ANY;
  // ...then intersect with what the individual locks can ever grant,
  // each shifted into its own cap field.
  int lock_mask = CEPH_CAP_PIN;
  lock_mask |= filelock.gcaps_allowed_ever() << filelock.get_cap_shift();
  lock_mask |= authlock.gcaps_allowed_ever() << authlock.get_cap_shift();
  lock_mask |= xattrlock.gcaps_allowed_ever() << xattrlock.get_cap_shift();
  lock_mask |= linklock.gcaps_allowed_ever() << linklock.get_cap_shift();
  return allowed & lock_mask;
}
int CInode::get_caps_allowed_by_type(int type) const
{
  // PIN is always allowed; each lock contributes the caps its current
  // state permits for the given holder type, shifted into place.
  int caps = CEPH_CAP_PIN;
  caps |= filelock.gcaps_allowed(type) << filelock.get_cap_shift();
  caps |= authlock.gcaps_allowed(type) << authlock.get_cap_shift();
  caps |= xattrlock.gcaps_allowed(type) << xattrlock.get_cap_shift();
  caps |= linklock.gcaps_allowed(type) << linklock.get_cap_shift();
  return caps;
}
int CInode::get_caps_careful() const
{
  // Union of each lock's "careful" cap bits, shifted into that lock's
  // cap field.
  int caps = 0;
  caps |= filelock.gcaps_careful() << filelock.get_cap_shift();
  caps |= authlock.gcaps_careful() << authlock.get_cap_shift();
  caps |= xattrlock.gcaps_careful() << xattrlock.get_cap_shift();
  caps |= linklock.gcaps_careful() << linklock.get_cap_shift();
  return caps;
}
int CInode::get_xlocker_mask(client_t client) const
{
  // Caps this client may hold specifically by virtue of xlocking one
  // of the cap-bearing locks.
  int mask = 0;
  mask |= filelock.gcaps_xlocker_mask(client) << filelock.get_cap_shift();
  mask |= authlock.gcaps_xlocker_mask(client) << authlock.get_cap_shift();
  mask |= xattrlock.gcaps_xlocker_mask(client) << xattrlock.get_cap_shift();
  mask |= linklock.gcaps_xlocker_mask(client) << linklock.get_cap_shift();
  return mask;
}
int CInode::get_caps_allowed_for_client(Session *session, Capability *cap,
					const mempool_inode *file_i) const
{
  client_t client = session->get_client();
  int allowed;
  if (client == get_loner()) {
    // as the loner, we get the loner_caps AND any xlocker_caps for things we have xlocked
    allowed =
      get_caps_allowed_by_type(CAP_LONER) |
      (get_caps_allowed_by_type(CAP_XLOCKER) & get_xlocker_mask(client));
  } else {
    allowed = get_caps_allowed_by_type(CAP_ANY);
  }
  if (is_dir()) {
    // Directories never get file data caps; a lock cache may extend
    // what an Fx-holding cap is allowed.
    allowed &= ~CEPH_CAP_ANY_DIR_OPS;
    if (cap && (allowed & CEPH_CAP_FILE_EXCL))
      allowed |= cap->get_lock_cache_allowed();
  } else {
    // Withhold file RD/WR caps from clients that cannot understand
    // inline data or namespaced pool layouts.
    if (file_i->inline_data.version == CEPH_INLINE_NONE &&
	file_i->layout.pool_ns.empty()) {
      // noop
    } else if (cap) {
      if ((file_i->inline_data.version != CEPH_INLINE_NONE &&
	   cap->is_noinline()) ||
	  (!file_i->layout.pool_ns.empty() &&
	   cap->is_nopoolns()))
	allowed &= ~(CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR);
    } else {
      // No cap yet: fall back to the connection's feature bits.
      auto& conn = session->get_connection();
      if ((file_i->inline_data.version != CEPH_INLINE_NONE &&
	   !conn->has_feature(CEPH_FEATURE_MDS_INLINE_DATA)) ||
	  (!file_i->layout.pool_ns.empty() &&
	   !conn->has_feature(CEPH_FEATURE_FS_FILE_LAYOUT_V2)))
	allowed &= ~(CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR);
    }
  }
  return allowed;
}
// caps issued, wanted
int CInode::get_caps_issued(int *ploner, int *pother, int *pxlocker,
			    int shift, int mask)
{
  int c = 0;
  int loner = 0, other = 0, xlocker = 0;
  if (!is_auth()) {
    // Only the auth MDS tracks a loner; clear it on replicas.
    loner_cap = -1;
  }
  // Partition issued caps into loner / other / xlocker buckets.
  for (const auto &p : client_caps) {
    int i = p.second.issued();
    c |= i;
    if (p.first == loner_cap)
      loner |= i;
    else
      other |= i;
    xlocker |= get_xlocker_mask(p.first) & i;
  }
  // Report each bucket narrowed to the caller's shift/mask window.
  if (ploner) *ploner = (loner >> shift) & mask;
  if (pother) *pother = (other >> shift) & mask;
  if (pxlocker) *pxlocker = (xlocker >> shift) & mask;
  return (c >> shift) & mask;
}
bool CInode::is_any_caps_wanted() const
{
  // True if at least one client still wants caps on this inode.
  for (auto it = client_caps.begin(); it != client_caps.end(); ++it) {
    if (it->second.wanted() != 0)
      return true;
  }
  return false;
}
int CInode::get_caps_wanted(int *ploner, int *pother, int shift, int mask) const
{
  int w = 0;
  int loner = 0, other = 0;
  // Collect wanted caps from non-stale client caps, split by whether
  // the client is the loner.
  for (const auto &p : client_caps) {
    if (!p.second.is_stale()) {
      int t = p.second.wanted();
      w |= t;
      if (p.first == loner_cap)
	loner |= t;
      else
	other |= t;
    }
    //cout << " get_caps_wanted client " << it->first << " " << cap_string(it->second.wanted()) << endl;
  }
  // On the auth, other MDS ranks' wanted caps count as "other".
  if (is_auth())
    for (const auto &p : mds_caps_wanted) {
      w |= p.second;
      other |= p.second;
      //cout << " get_caps_wanted mds " << it->first << " " << cap_string(it->second) << endl;
    }
  // Report narrowed to the caller's shift/mask window.
  if (ploner) *ploner = (loner >> shift) & mask;
  if (pother) *pother = (other >> shift) & mask;
  return (w >> shift) & mask;
}
bool CInode::issued_caps_need_gather(SimpleLock *lock)
{
  // Compare what is actually issued (per holder class) against what
  // the lock state allows; any excess means caps must be gathered.
  int loner_issued, other_issued, xlocker_issued;
  get_caps_issued(&loner_issued, &other_issued, &xlocker_issued,
		  lock->get_cap_shift(), lock->get_cap_mask());
  return (loner_issued & ~lock->gcaps_allowed(CAP_LONER)) ||
	 (other_issued & ~lock->gcaps_allowed(CAP_ANY)) ||
	 (xlocker_issued & ~lock->gcaps_allowed(CAP_XLOCKER));
}
void CInode::adjust_num_caps_notable(int d)
{
  // The open file table tracks inodes that are either client-writeable
  // or have notable caps; only toggle membership when the writeable
  // flag isn't already keeping the inode in the table.
  if (!is_clientwriteable()) {
    if (!num_caps_notable && d > 0)
      mdcache->open_file_table.add_inode(this);
    else if (num_caps_notable > 0 && num_caps_notable == -d)
      // This delta takes the count to exactly zero.
      mdcache->open_file_table.remove_inode(this);
  }
  num_caps_notable +=d;
  ceph_assert(num_caps_notable >= 0);
}
void CInode::mark_clientwriteable()
{
  // Only head inodes can be client-writeable.
  if (last != CEPH_NOSNAP)
    return;
  if (state_test(STATE_CLIENTWRITEABLE))
    return;  // already marked
  // Becoming writeable puts the inode in the open file table unless
  // notable caps already keep it there.
  if (num_caps_notable == 0)
    mdcache->open_file_table.add_inode(this);
  state_set(STATE_CLIENTWRITEABLE);
}
void CInode::clear_clientwriteable()
{
  if (!state_test(STATE_CLIENTWRITEABLE))
    return;  // nothing to clear
  // Drop from the open file table unless notable caps still pin the
  // inode there.
  if (num_caps_notable == 0)
    mdcache->open_file_table.remove_inode(this);
  state_clear(STATE_CLIENTWRITEABLE);
}
// =============================================
/*
 * Encode an InodeStat reply payload for this inode into 'bl', and (for
 * live, non-snapped inodes) issue/update the client's cap as a side
 * effect. 'snapid' selects which version of the inode to report;
 * 'max_bytes' (if nonzero) bounds the encoded size; 'getattr_caps'
 * carries cap bits the client explicitly asked for. Returns 'valid'
 * (nonzero if the reported data is authoritative) or -CEPHFS_ENOSPC if
 * the encoding would exceed max_bytes.
 */
int CInode::encode_inodestat(bufferlist& bl, Session *session,
			     SnapRealm *dir_realm,
			     snapid_t snapid,
			     unsigned max_bytes,
			     int getattr_caps)
{
  client_t client = session->get_client();
  ceph_assert(snapid);
  bool valid = true;
  // pick a version!
  const mempool_inode *oi = get_inode().get();
  const mempool_inode *pi = get_projected_inode().get();
  const mempool_xattr_map *pxattrs = nullptr;
  if (snapid != CEPH_NOSNAP) {
    // for now at least, old_inodes is only defined/valid on the auth
    if (!is_auth())
      valid = false;
    if (is_any_old_inodes()) {
      // Find the old_inode interval [second.first, first] that covers
      // the requested snapid.
      auto it = old_inodes->lower_bound(snapid);
      if (it != old_inodes->end()) {
	if (it->second.first > snapid) {
	  if (it != old_inodes->begin())
	    --it;
	}
	if (it->second.first <= snapid && snapid <= it->first) {
	  dout(15) << __func__ << " snapid " << snapid
		   << " to old_inode [" << it->second.first << "," << it->first << "]"
		   << " " << it->second.inode.rstat
		   << dendl;
	  pi = oi = &it->second.inode;
	  pxattrs = &it->second.xattrs;
	} else {
	  // snapshoted remote dentry can result this
	  dout(0) << __func__ << " old_inode for snapid " << snapid
		  << " not found" << dendl;
	}
      }
    } else if (snapid < first || snapid > last) {
      // snapshoted remote dentry can result this
      dout(0) << __func__ << " [" << first << "," << last << "]"
	      << " not match snapid " << snapid << dendl;
    }
  }
  utime_t snap_btime;
  std::map<std::string, std::string> snap_metadata;
  SnapRealm *realm = find_snaprealm();
  if (snapid != CEPH_NOSNAP && realm) {
    // add snapshot timestamp vxattr
    map<snapid_t,const SnapInfo*> infomap;
    realm->get_snap_info(infomap,
                         snapid,  // min
                         snapid); // max
    if (!infomap.empty()) {
      ceph_assert(infomap.size() == 1);
      const SnapInfo *si = infomap.begin()->second;
      snap_btime = si->stamp;
      snap_metadata = si->metadata;
    }
  }
  // Decide whether any caps at all may be issued with this reply.
  bool no_caps = !valid ||
		 session->is_stale() ||
		 (dir_realm && realm != dir_realm) ||
		 is_frozen() ||
		 state_test(CInode::STATE_EXPORTINGCAPS);
  if (no_caps)
    dout(20) << __func__ << " no caps"
	     << (!valid?", !valid":"")
	     << (session->is_stale()?", session stale ":"")
	     << ((dir_realm && realm != dir_realm)?", snaprealm differs ":"")
	     << (is_frozen()?", frozen inode":"")
	     << (state_test(CInode::STATE_EXPORTINGCAPS)?", exporting caps":"")
	     << dendl;
  // "fake" a version that is odd (stable) version, +1 if projected.
  version_t version = (oi->version * 2) + is_projected();
  Capability *cap = get_client_cap(client);
  // For each lock class, report projected values if this client holds
  // the xlock or is the loner (it can see its own in-flight changes).
  bool pfile = filelock.is_xlocked_by_client(client) || get_loner() == client;
  //(cap && (cap->issued() & CEPH_CAP_FILE_EXCL));
  bool pauth = authlock.is_xlocked_by_client(client) || get_loner() == client;
  bool plink = linklock.is_xlocked_by_client(client) || get_loner() == client;
  bool pxattr = xattrlock.is_xlocked_by_client(client) || get_loner() == client;
  bool plocal = versionlock.get_last_wrlock_client() == client;
  bool ppolicy = policylock.is_xlocked_by_client(client) || get_loner()==client;
  const mempool_inode *any_i = (pfile|pauth|plink|pxattr|plocal) ? pi : oi;
  dout(20) << " pfile " << pfile << " pauth " << pauth
	   << " plink " << plink << " pxattr " << pxattr
	   << " plocal " << plocal
	   << " mtime " << any_i->mtime
	   << " ctime " << any_i->ctime
	   << " change_attr " << any_i->change_attr
	   << " valid=" << valid << dendl;
  // file
  const mempool_inode *file_i = pfile ? pi:oi;
  file_layout_t layout;
  if (is_dir()) {
    layout = (ppolicy ? pi : oi)->layout;
  } else {
    layout = file_i->layout;
  }
  // max_size is min of projected, actual
  uint64_t max_size =
    std::min(oi->get_client_range(client),
	     pi->get_client_range(client));
  // inline data
  version_t inline_version = 0;
  bufferlist inline_data;
  if (file_i->inline_data.version == CEPH_INLINE_NONE) {
    inline_version = CEPH_INLINE_NONE;
  } else if ((!cap && !no_caps) ||
	     (cap && cap->client_inline_version < file_i->inline_data.version) ||
	     (getattr_caps & CEPH_CAP_FILE_RD)) { // client requests inline data
    inline_version = file_i->inline_data.version;
    if (file_i->inline_data.length() > 0)
      file_i->inline_data.get_data(inline_data);
  }
  // nest (do same as file... :/)
  if (cap) {
    cap->last_rbytes = file_i->rstat.rbytes;
    cap->last_rsize = file_i->rstat.rsize();
  }
  // auth
  const mempool_inode *auth_i = pauth ? pi:oi;
  // link
  const mempool_inode *link_i = plink ? pi:oi;
  // xattr
  const mempool_inode *xattr_i = pxattr ? pi:oi;
  using ceph::encode;
  // xattr
  version_t xattr_version;
  if ((!cap && !no_caps) ||
      (cap && cap->client_xattr_version < xattr_i->xattr_version) ||
      (getattr_caps & CEPH_CAP_XATTR_SHARED)) { // client requests xattrs
    if (!pxattrs)
      pxattrs = pxattr ? get_projected_xattrs().get() : get_xattrs().get();
    xattr_version = xattr_i->xattr_version;
  } else {
    xattr_version = 0;
  }
  // do we have room?
  if (max_bytes) {
    // Conservative estimate of the encoded size; bail out early if it
    // would not fit.
    unsigned bytes =
      8 + 8 + 4 + 8 + 8 + sizeof(ceph_mds_reply_cap) +
      sizeof(struct ceph_file_layout) +
      sizeof(struct ceph_timespec) * 3 + 4 + // ctime ~ time_warp_seq
      8 + 8 + 8 + 4 + 4 + 4 + 4 + 4 + // size ~ nlink
      8 + 8 + 8 + 8 + 8 + sizeof(struct ceph_timespec) + // dirstat.nfiles ~ rstat.rctime
      sizeof(__u32) + sizeof(__u32) * 2 * dirfragtree._splits.size() + // dirfragtree
      sizeof(__u32) + symlink.length() + // symlink
      sizeof(struct ceph_dir_layout); // dir_layout
    if (xattr_version) {
      bytes += sizeof(__u32) + sizeof(__u32); // xattr buffer len + number entries
      if (pxattrs) {
	for (const auto &p : *pxattrs)
	  bytes += sizeof(__u32) * 2 + p.first.length() + p.second.length();
      }
    } else {
      bytes += sizeof(__u32); // xattr buffer len
    }
    bytes +=
      sizeof(version_t) + sizeof(__u32) + inline_data.length() + // inline data
      1 + 1 + 8 + 8 + 4 + // quota
      4 + layout.pool_ns.size() + // pool ns
      sizeof(struct ceph_timespec) + 8; // btime + change_attr
    if (bytes > max_bytes)
      return -CEPHFS_ENOSPC;
  }
  // encode caps
  struct ceph_mds_reply_cap ecap;
  if (snapid != CEPH_NOSNAP) {
    /*
     * snapped inodes (files or dirs) only get read-only caps.  always
     * issue everything possible, since it is read only.
     *
     * if a snapped inode has caps, limit issued caps based on the
     * lock state.
     *
     * if it is a live inode, limit issued caps based on the lock
     * state.
     *
     * do NOT adjust cap issued state, because the client always
     * tracks caps per-snap and the mds does either per-interval or
     * multiversion.
     */
    ecap.caps = valid ? get_caps_allowed_by_type(CAP_ANY) : CEPH_STAT_CAP_INODE;
    if (last == CEPH_NOSNAP || is_any_caps())
      ecap.caps = ecap.caps & get_caps_allowed_for_client(session, nullptr, file_i);
    ecap.seq = 0;
    ecap.mseq = 0;
    ecap.realm = 0;
  } else {
    if (!no_caps && !cap) {
      // add a new cap
      cap = add_client_cap(client, session, realm);
      if (is_auth())
	choose_ideal_loner();
    }
    int issue = 0;
    if (!no_caps && cap) {
      int likes = get_caps_liked();
      int allowed = get_caps_allowed_for_client(session, cap, file_i);
      issue = (cap->wanted() | likes) & allowed;
      cap->issue_norevoke(issue, true);
      issue = cap->pending();
      dout(10) << "encode_inodestat issuing " << ccap_string(issue)
	       << " seq " << cap->get_last_seq() << dendl;
    } else if (cap && cap->is_new() && !dir_realm) {
      // alway issue new caps to client, otherwise the caps get lost
      ceph_assert(cap->is_stale());
      ceph_assert(!cap->pending());
      issue = CEPH_CAP_PIN;
      cap->issue_norevoke(issue, true);
      dout(10) << "encode_inodestat issuing " << ccap_string(issue)
	       << " seq " << cap->get_last_seq()
	       << "(stale&new caps)" << dendl;
    }
    if (issue) {
      cap->set_last_issue();
      cap->set_last_issue_stamp(ceph_clock_now());
      ecap.caps = issue;
      ecap.wanted = cap->wanted();
      ecap.cap_id = cap->get_cap_id();
      ecap.seq = cap->get_last_seq();
      ecap.mseq = cap->get_mseq();
      ecap.realm = realm->inode->ino();
    } else {
      ecap.cap_id = 0;
      ecap.caps = 0;
      ecap.seq = 0;
      ecap.mseq = 0;
      ecap.realm = 0;
      ecap.wanted = 0;
    }
  }
  ecap.flags = is_auth() ? CEPH_CAP_FLAG_AUTH : 0;
  dout(10) << "encode_inodestat caps " << ccap_string(ecap.caps)
	   << " seq " << ecap.seq << " mseq " << ecap.mseq
	   << " xattrv " << xattr_version << dendl;
  // Only send inline data if the client holds (or asked for) Fs.
  if (inline_data.length() && cap) {
    if ((cap->pending() | getattr_caps) & CEPH_CAP_FILE_SHARED) {
      dout(10) << "including inline version " << inline_version << dendl;
      cap->client_inline_version = inline_version;
    } else {
      dout(10) << "dropping inline version " << inline_version << dendl;
      inline_version = 0;
      inline_data.clear();
    }
  }
  // include those xattrs?
  if (xattr_version && cap) {
    if ((cap->pending() | getattr_caps) & CEPH_CAP_XATTR_SHARED) {
      dout(10) << "including xattrs version " << xattr_version << dendl;
      cap->client_xattr_version = xattr_version;
    } else {
      dout(10) << "dropping xattrs version " << xattr_version << dendl;
      xattr_version = 0;
    }
  }
  // The end result of encode_xattrs() is equivalent to:
  // {
  //   bufferlist xbl;
  //   if (xattr_version) {
  //     if (pxattrs)
  //       encode(*pxattrs, bl);
  //     else
  //       encode((__u32)0, bl);
  //   }
  //   encode(xbl, bl);
  // }
  //
  // But encoding xattrs into the 'xbl' requires a memory allocation.
  // The 'bl' should have enough pre-allocated memory in most cases.
  // Encoding xattrs directly into it can avoid the extra allocation.
  auto encode_xattrs = [xattr_version, pxattrs, &bl]() {
    using ceph::encode;
    if (xattr_version) {
      ceph_le32 xbl_len;
      auto filler = bl.append_hole(sizeof(xbl_len));
      const auto starting_bl_len = bl.length();
      if (pxattrs)
	encode(*pxattrs, bl);
      else
	encode((__u32)0, bl);
      xbl_len = bl.length() - starting_bl_len;
      filler.copy_in(sizeof(xbl_len), (char *)&xbl_len);
    } else {
      encode((__u32)0, bl);
    }
  };
  /*
   * note: encoding matches MClientReply::InodeStat
   */
  if (session->info.has_feature(CEPHFS_FEATURE_REPLY_ENCODING)) {
    // New-style versioned encoding.
    ENCODE_START(7, 1, bl);
    encode(oi->ino, bl);
    encode(snapid, bl);
    encode(oi->rdev, bl);
    encode(version, bl);
    encode(xattr_version, bl);
    encode(ecap, bl);
    {
      ceph_file_layout legacy_layout;
      layout.to_legacy(&legacy_layout);
      encode(legacy_layout, bl);
    }
    encode(any_i->ctime, bl);
    encode(file_i->mtime, bl);
    encode(file_i->atime, bl);
    encode(file_i->time_warp_seq, bl);
    encode(file_i->size, bl);
    encode(max_size, bl);
    encode(file_i->truncate_size, bl);
    encode(file_i->truncate_seq, bl);
    encode(auth_i->mode, bl);
    encode((uint32_t)auth_i->uid, bl);
    encode((uint32_t)auth_i->gid, bl);
    encode(link_i->nlink, bl);
    encode(file_i->dirstat.nfiles, bl);
    encode(file_i->dirstat.nsubdirs, bl);
    encode(file_i->rstat.rbytes, bl);
    encode(file_i->rstat.rfiles, bl);
    encode(file_i->rstat.rsubdirs, bl);
    encode(file_i->rstat.rctime, bl);
    dirfragtree.encode(bl);
    encode(symlink, bl);
    encode(file_i->dir_layout, bl);
    encode_xattrs();
    encode(inline_version, bl);
    encode(inline_data, bl);
    const mempool_inode *policy_i = ppolicy ? pi : oi;
    encode(policy_i->quota, bl);
    encode(layout.pool_ns, bl);
    encode(any_i->btime, bl);
    encode(any_i->change_attr, bl);
    encode(file_i->export_pin, bl);
    encode(snap_btime, bl);
    encode(file_i->rstat.rsnaps, bl);
    encode(snap_metadata, bl);
    encode(!file_i->fscrypt_auth.empty(), bl);
    encode(file_i->fscrypt_auth, bl);
    encode(file_i->fscrypt_file, bl);
    ENCODE_FINISH(bl);
  }
  else {
    // Legacy encoding: optional fields are gated by per-connection
    // feature bits rather than a version header.
    ceph_assert(session->get_connection());
    encode(oi->ino, bl);
    encode(snapid, bl);
    encode(oi->rdev, bl);
    encode(version, bl);
    encode(xattr_version, bl);
    encode(ecap, bl);
    {
      ceph_file_layout legacy_layout;
      layout.to_legacy(&legacy_layout);
      encode(legacy_layout, bl);
    }
    encode(any_i->ctime, bl);
    encode(file_i->mtime, bl);
    encode(file_i->atime, bl);
    encode(file_i->time_warp_seq, bl);
    encode(file_i->size, bl);
    encode(max_size, bl);
    encode(file_i->truncate_size, bl);
    encode(file_i->truncate_seq, bl);
    encode(auth_i->mode, bl);
    encode((uint32_t)auth_i->uid, bl);
    encode((uint32_t)auth_i->gid, bl);
    encode(link_i->nlink, bl);
    encode(file_i->dirstat.nfiles, bl);
    encode(file_i->dirstat.nsubdirs, bl);
    encode(file_i->rstat.rbytes, bl);
    encode(file_i->rstat.rfiles, bl);
    encode(file_i->rstat.rsubdirs, bl);
    encode(file_i->rstat.rctime, bl);
    dirfragtree.encode(bl);
    encode(symlink, bl);
    auto& conn = session->get_connection();
    if (conn->has_feature(CEPH_FEATURE_DIRLAYOUTHASH)) {
      encode(file_i->dir_layout, bl);
    }
    encode_xattrs();
    if (conn->has_feature(CEPH_FEATURE_MDS_INLINE_DATA)) {
      encode(inline_version, bl);
      encode(inline_data, bl);
    }
    if (conn->has_feature(CEPH_FEATURE_MDS_QUOTA)) {
      const mempool_inode *policy_i = ppolicy ? pi : oi;
      encode(policy_i->quota, bl);
    }
    if (conn->has_feature(CEPH_FEATURE_FS_FILE_LAYOUT_V2)) {
      encode(layout.pool_ns, bl);
    }
    if (conn->has_feature(CEPH_FEATURE_FS_BTIME)) {
      encode(any_i->btime, bl);
      encode(any_i->change_attr, bl);
    }
  }
  return valid;
}
/*
 * Fill an MClientCaps message with this inode's metadata for the cap's
 * client. For each lock class, projected values are used if the client
 * can already observe in-flight changes (xlock holder, or Fx issued for
 * the file fields).
 */
void CInode::encode_cap_message(const ref_t<MClientCaps> &m, Capability *cap)
{
  ceph_assert(cap);
  client_t client = cap->get_client();
  bool pfile = filelock.is_xlocked_by_client(client) || (cap->issued() & CEPH_CAP_FILE_EXCL);
  bool pauth = authlock.is_xlocked_by_client(client);
  bool plink = linklock.is_xlocked_by_client(client);
  bool pxattr = xattrlock.is_xlocked_by_client(client);
  const mempool_inode *oi = get_inode().get();
  const mempool_inode *pi = get_projected_inode().get();
  const mempool_inode *i = (pfile|pauth|plink|pxattr) ? pi : oi;
  dout(20) << __func__ << " pfile " << pfile
	   << " pauth " << pauth << " plink " << plink << " pxattr " << pxattr
	   << " mtime " << i->mtime << " ctime " << i->ctime << " change_attr " << i->change_attr << dendl;
  // File-lock-governed fields.
  i = pfile ? pi:oi;
  m->set_layout(i->layout);
  m->size = i->size;
  m->truncate_seq = i->truncate_seq;
  m->truncate_size = i->truncate_size;
  m->fscrypt_file = i->fscrypt_file;
  m->fscrypt_auth = i->fscrypt_auth;
  m->mtime = i->mtime;
  m->atime = i->atime;
  m->ctime = i->ctime;
  m->btime = i->btime;
  m->change_attr = i->change_attr;
  m->time_warp_seq = i->time_warp_seq;
  m->nfiles = i->dirstat.nfiles;
  m->nsubdirs = i->dirstat.nsubdirs;
  // Only ship inline data the client hasn't seen yet.
  if (cap->client_inline_version < i->inline_data.version) {
    m->inline_version = cap->client_inline_version = i->inline_data.version;
    if (i->inline_data.length() > 0)
      i->inline_data.get_data(m->inline_data);
  } else {
    m->inline_version = 0;
  }
  // max_size is min of projected, actual.
  uint64_t oldms = oi->get_client_range(client);
  uint64_t newms = pi->get_client_range(client);
  m->max_size = std::min(oldms, newms);
  // Auth-lock-governed fields.
  i = pauth ? pi:oi;
  m->head.mode = i->mode;
  m->head.uid = i->uid;
  m->head.gid = i->gid;
  // Link-lock-governed fields.
  i = plink ? pi:oi;
  m->head.nlink = i->nlink;
  using ceph::encode;
  // Xattr-lock-governed fields; only include the xattr map if the
  // client holds Xs and is behind the current xattr version.
  i = pxattr ? pi:oi;
  const auto& ix = pxattr ? get_projected_xattrs() : get_xattrs();
  if ((cap->pending() & CEPH_CAP_XATTR_SHARED) &&
      i->xattr_version > cap->client_xattr_version) {
    dout(10) << " including xattrs v " << i->xattr_version << dendl;
    if (ix)
      encode(*ix, m->xattrbl);
    else
      encode((__u32)0, m->xattrbl);
    m->head.xattr_version = i->xattr_version;
    cap->client_xattr_version = i->xattr_version;
  }
}
// Encode the inode's "base" payload: the inode itself plus metadata
// that always travels with it (symlink target, fragtree, xattrs, old
// inodes, damage flags, snap data). The order must stay in sync with
// _decode_base().
void CInode::_encode_base(bufferlist& bl, uint64_t features)
{
  ENCODE_START(1, 1, bl);
  encode(first, bl);
  encode(*get_inode(), bl, features);
  encode(symlink, bl);
  encode(dirfragtree, bl);
  encode_xattrs(bl);
  encode_old_inodes(bl, features);
  encode(damage_flags, bl);
  encode_snap(bl);
  ENCODE_FINISH(bl);
}
// Decode the payload produced by _encode_base(), replacing this
// inode's state in place. Field order must match _encode_base().
void CInode::_decode_base(bufferlist::const_iterator& p)
{
  DECODE_START(1, p);
  decode(first, p);
  {
    // Decode into a fresh inode object and swap it in atomically.
    auto _inode = allocate_inode();
    decode(*_inode, p);
    reset_inode(std::move(_inode));
  }
  {
    // symlink is a mempool string; round-trip through std::string.
    std::string tmp;
    decode(tmp, p);
    symlink = std::string_view(tmp);
  }
  decode(dirfragtree, p);
  decode_xattrs(p);
  decode_old_inodes(p);
  decode(damage_flags, p);
  decode_snap(p);
  DECODE_FINISH(p);
}
// Encode the full state of every inode lock (plus the loner client).
// Used for import/export; order must match _decode_locks_full().
void CInode::_encode_locks_full(bufferlist& bl)
{
  using ceph::encode;
  encode(authlock, bl);
  encode(linklock, bl);
  encode(dirfragtreelock, bl);
  encode(filelock, bl);
  encode(xattrlock, bl);
  encode(snaplock, bl);
  encode(nestlock, bl);
  encode(flocklock, bl);
  encode(policylock, bl);
  encode(loner_cap, bl);
}
// Decode the payload written by _encode_locks_full(); order must match.
void CInode::_decode_locks_full(bufferlist::const_iterator& p)
{
  using ceph::decode;
  decode(authlock, p);
  decode(linklock, p);
  decode(dirfragtreelock, p);
  decode(filelock, p);
  decode(xattrlock, p);
  decode(snaplock, p);
  decode(nestlock, p);
  decode(flocklock, p);
  decode(policylock, p);
  decode(loner_cap, p);
  // Re-apply the loner so each lock's excl_client is consistent.
  set_loner_cap(loner_cap);
  want_loner_cap = loner_cap;  // for now, we'll eval() shortly.
}
// Encode the replica-visible state of every inode lock, plus a flag
// telling the replica whether the auth is still recovering. Order
// must match _decode_locks_state_for_replica().
void CInode::_encode_locks_state_for_replica(bufferlist& bl, bool need_recover)
{
  ENCODE_START(1, 1, bl);
  authlock.encode_state_for_replica(bl);
  linklock.encode_state_for_replica(bl);
  dirfragtreelock.encode_state_for_replica(bl);
  filelock.encode_state_for_replica(bl);
  nestlock.encode_state_for_replica(bl);
  xattrlock.encode_state_for_replica(bl);
  snaplock.encode_state_for_replica(bl);
  flocklock.encode_state_for_replica(bl);
  policylock.encode_state_for_replica(bl);
  encode(need_recover, bl);
  ENCODE_FINISH(bl);
}
// Encode lock state for cache rejoin with replica 'rep'. The scatter
// locks use the rejoin variant (which accounts for the replica's
// gather state); the rest use the plain replica encoding. No version
// header here; order must match _decode_locks_rejoin().
void CInode::_encode_locks_state_for_rejoin(bufferlist& bl, int rep)
{
  authlock.encode_state_for_replica(bl);
  linklock.encode_state_for_replica(bl);
  dirfragtreelock.encode_state_for_rejoin(bl, rep);
  filelock.encode_state_for_rejoin(bl, rep);
  nestlock.encode_state_for_rejoin(bl, rep);
  xattrlock.encode_state_for_replica(bl);
  snaplock.encode_state_for_replica(bl);
  flocklock.encode_state_for_replica(bl);
  policylock.encode_state_for_replica(bl);
}
// Decode the payload written by _encode_locks_state_for_replica().
// 'is_new' indicates this replica was just created by the message.
void CInode::_decode_locks_state_for_replica(bufferlist::const_iterator& p, bool is_new)
{
  DECODE_START(1, p);
  authlock.decode_state(p, is_new);
  linklock.decode_state(p, is_new);
  dirfragtreelock.decode_state(p, is_new);
  filelock.decode_state(p, is_new);
  nestlock.decode_state(p, is_new);
  xattrlock.decode_state(p, is_new);
  snaplock.decode_state(p, is_new);
  flocklock.decode_state(p, is_new);
  policylock.decode_state(p, is_new);
  bool need_recover;
  decode(need_recover, p);
  if (need_recover && is_new) {
    // Auth mds replicated this inode while it's recovering. Auth mds may take xlock on the lock
    // and change the object when replaying unsafe requests.
    authlock.mark_need_recover();
    linklock.mark_need_recover();
    dirfragtreelock.mark_need_recover();
    filelock.mark_need_recover();
    nestlock.mark_need_recover();
    xattrlock.mark_need_recover();
    snaplock.mark_need_recover();
    flocklock.mark_need_recover();
    policylock.mark_need_recover();
  }
  DECODE_FINISH(p);
}
// Decode the payload written by _encode_locks_state_for_rejoin().
// Collects contexts to wake in 'waiters' and appends any scatter
// locks that now need evaluation to 'eval_locks'. 'survivor' is true
// when this MDS survived the failure (vs. is itself recovering).
void CInode::_decode_locks_rejoin(bufferlist::const_iterator& p, MDSContext::vec& waiters,
				  list<SimpleLock*>& eval_locks, bool survivor)
{
  authlock.decode_state_rejoin(p, waiters, survivor);
  linklock.decode_state_rejoin(p, waiters, survivor);
  dirfragtreelock.decode_state_rejoin(p, waiters, survivor);
  filelock.decode_state_rejoin(p, waiters, survivor);
  nestlock.decode_state_rejoin(p, waiters, survivor);
  xattrlock.decode_state_rejoin(p, waiters, survivor);
  snaplock.decode_state_rejoin(p, waiters, survivor);
  flocklock.decode_state_rejoin(p, waiters, survivor);
  policylock.decode_state_rejoin(p, waiters, survivor);
  // Unstable, un-wrlocked scatter locks must be re-evaluated.
  if (!dirfragtreelock.is_stable() && !dirfragtreelock.is_wrlocked())
    eval_locks.push_back(&dirfragtreelock);
  if (!filelock.is_stable() && !filelock.is_wrlocked())
    eval_locks.push_back(&filelock);
  if (!nestlock.is_stable() && !nestlock.is_wrlocked())
    eval_locks.push_back(&nestlock);
}
// IMPORT/EXPORT
// Serialize this inode for migration to another MDS. Counterpart of
// decode_import(); pins the inode until finish_export() runs.
void CInode::encode_export(bufferlist& bl)
{
  ENCODE_START(5, 4, bl);
  _encode_base(bl, mdcache->mds->mdsmap->get_up_features());
  encode(state, bl);
  encode(pop, bl);
  encode(get_replicas(), bl);
  // include scatterlock info for any bounding CDirs
  bufferlist bounding;
  if (get_inode()->is_dir())
    for (const auto &p : dirfrags) {
      CDir *dir = p.second;
      if (dir->state_test(CDir::STATE_EXPORTBOUND)) {
	encode(p.first, bounding);
	encode(dir->get_fnode()->fragstat, bounding);
	encode(dir->get_fnode()->accounted_fragstat, bounding);
	encode(dir->get_fnode()->rstat, bounding);
	encode(dir->get_fnode()->accounted_rstat, bounding);
	dout(10) << " encoded fragstat/rstat info for " << *dir << dendl;
      }
    }
  encode(bounding, bl);
  _encode_locks_full(bl);
  _encode_file_locks(bl);
  ENCODE_FINISH(bl);
  // Pin until the export completes (released in finish_export()).
  get(PIN_TEMPEXPORTING);
}
void CInode::finish_export()
{
  // Retain only the state bits that survive an export, forget the
  // loner, reset popularity, and drop the export pin taken in
  // encode_export().
  state &= MASK_STATE_EXPORT_KEPT;
  loner_cap = -1;
  pop.zero();
  put(PIN_TEMPEXPORTING);
}
// Rebuild this inode from the payload written by encode_export(),
// marking it auth and journaling dirty state into 'ls'.
void CInode::decode_import(bufferlist::const_iterator& p,
			   LogSegment *ls)
{
  DECODE_START(5, p);
  _decode_base(p);
  {
    // Adopt only the state bits that are meant to be exported, and
    // re-derive the ephemeral pin flags from them.
    unsigned s;
    decode(s, p);
    s &= MASK_STATE_EXPORTED;
    set_ephemeral_pin((s & STATE_DISTEPHEMERALPIN),
		      (s & STATE_RANDEPHEMERALPIN));
    state_set(STATE_AUTH | s);
  }
  if (is_dirty()) {
    get(PIN_DIRTY);
    _mark_dirty(ls);
  }
  if (is_dirty_parent()) {
    get(PIN_DIRTYPARENT);
    mark_dirty_parent(ls);
  }
  decode(pop, p);
  decode(get_replicas(), p);
  if (is_replicated())
    get(PIN_REPLICATED);
  replica_nonce = 0;
  // decode fragstat info on bounding cdirs
  bufferlist bounding;
  decode(bounding, p);
  auto q = bounding.cbegin();
  while (!q.end()) {
    frag_t fg;
    decode(fg, q);
    CDir *dir = get_dirfrag(fg);
    ceph_assert(dir);  // we should have all bounds open
    // Only take the remote's fragstat/rstat if we are non-auth for
    // this dirfrag AND the lock is NOT in a scattered (MIX) state.
    // We know lock is stable, and MIX is the only state in which
    // the inode auth (who sent us this data) may not have the best
    // info.
    // HMM: Are there cases where dir->is_auth() is an insufficient
    // check because the dirfrag is under migration?  That implies
    // it is frozen (and in a SYNC or LOCK state).  FIXME.
    auto _fnode = CDir::allocate_fnode(*dir->get_fnode());
    if (dir->is_auth() ||
        filelock.get_state() == LOCK_MIX) {
      dout(10) << " skipped fragstat info for " << *dir << dendl;
      // Still must consume the two encoded frag_info_t entries.
      frag_info_t f;
      decode(f, q);
      decode(f, q);
    } else {
      decode(_fnode->fragstat, q);
      decode(_fnode->accounted_fragstat, q);
      dout(10) << " took fragstat info for " << *dir << dendl;
    }
    if (dir->is_auth() ||
        nestlock.get_state() == LOCK_MIX) {
      dout(10) << " skipped rstat info for " << *dir << dendl;
      // Still must consume the two encoded nest_info_t entries.
      nest_info_t n;
      decode(n, q);
      decode(n, q);
    } else {
      decode(_fnode->rstat, q);
      decode(_fnode->accounted_rstat, q);
      dout(10) << " took rstat info for " << *dir << dendl;
    }
    dir->reset_fnode(std::move(_fnode));
  }
  _decode_locks_full(p);
  _decode_file_locks(p);
  DECODE_FINISH(p);
}
// Dump the stored inode and its attached metadata (xattrs, fragtree,
// old inodes) to a Formatter for introspection/admin-socket output.
void InodeStoreBase::dump(Formatter *f) const
{
  inode->dump(f);
  f->dump_string("symlink", symlink);
  f->open_array_section("xattrs");
  if (xattrs) {
    for (const auto& [key, val] : *xattrs) {
      f->open_object_section("xattr");
      f->dump_string("key", key);
      // Xattr values are raw buffers; copy into a string for output.
      std::string v(val.c_str(), val.length());
      f->dump_string("val", v);
      f->close_section();
    }
  }
  f->close_section();
  f->open_object_section("dirfragtree");
  dirfragtree.dump(f);
  f->close_section(); // dirfragtree
  f->open_array_section("old_inodes");
  if (old_inodes) {
    for (const auto &p : *old_inodes) {
      f->open_object_section("old_inode");
      // The key is the last snapid, the first is in the mempool_old_inode
      f->dump_int("last", p.first);
      p.second.dump(f);
      f->close_section(); // old_inode
    }
  }
  f->close_section(); // old_inodes
  f->dump_unsigned("oldest_snap", oldest_snap);
  f->dump_unsigned("damage_flags", damage_flags);
}
// JSON-decode specialization for mempool-backed strings: copy the
// node's raw data into a freshly allocated mempool string.
template <>
void decode_json_obj(mempool::mds_co::string& t, JSONObj *obj){
  t = mempool::mds_co::string(std::string_view(obj->get_data()));
}
// Rebuild an InodeStoreBase from JSON (inverse of dump()). Several
// fields (dirfragtree, old_inodes, snap blob) are intentionally not
// decoded yet — see the commented-out lines below.
void InodeStoreBase::decode_json(JSONObj *obj)
{
  {
    auto _inode = allocate_inode();
    _inode->decode_json(obj);
    reset_inode(std::move(_inode));
  }
  JSONDecoder::decode_json("symlink", symlink, obj, true);
  // JSONDecoder::decode_json("dirfragtree", dirfragtree, obj, true); // cann't decode it now
  //
  //
  {
    // Decode into a temporary map; keep a null pointer when empty so
    // empty-xattr inodes carry no allocation.
    mempool_xattr_map tmp;
    JSONDecoder::decode_json("xattrs", tmp, xattrs_cb, obj, true);
    if (tmp.empty())
      reset_xattrs(xattr_map_ptr());
    else
      reset_xattrs(allocate_xattr_map(std::move(tmp)));
  }
  // JSONDecoder::decode_json("old_inodes", old_inodes, InodeStoreBase::old_indoes_cb, obj, true); // cann't decode old_inodes now
  JSONDecoder::decode_json("oldest_snap", oldest_snap.val, obj, true);
  JSONDecoder::decode_json("damage_flags", damage_flags, obj, true);
  //sr_t srnode;
  //JSONDecoder::decode_json("snap_blob", srnode, obj, true); // cann't decode it now
  //snap_blob = srnode;
}
// JSON decode callback for one xattr object: reads its "key"/"val" strings
// and stores the value as a freshly copied buffer in the xattr map.
void InodeStoreBase::xattrs_cb(InodeStoreBase::mempool_xattr_map& c, JSONObj *obj){
  string key;
  JSONDecoder::decode_json("key", key, obj, true);
  string val;
  JSONDecoder::decode_json("val", val, obj, true);
  c[key.c_str()] = buffer::copy(val.c_str(), val.size());
}
// JSON decode callback for one old_inodes entry.  Only the "last" snapid
// key is restored; the mempool_old_inode payload itself cannot be decoded
// from JSON yet (see the commented-out call below), so a default-constructed
// value is inserted.
void InodeStoreBase::old_indoes_cb(InodeStoreBase::mempool_old_inode_map& c, JSONObj *obj){
  snapid_t s;
  JSONDecoder::decode_json("last", s.val, obj, true);
  InodeStoreBase::mempool_old_inode i;
  // i.decode_json(obj); // can't decode now, simon
  c[s] = i;
}
// Produce a sample instance for encode/decode round-trip testing.
void InodeStore::generate_test_instances(std::list<InodeStore*> &ls)
{
  auto obj = new InodeStore;
  obj->get_inode()->ino = 0xdeadbeef;
  obj->symlink = "rhubarb";
  ls.push_back(obj);
}
// Produce a sample instance for bare (no ENCODE_START/FINISH) round-trip
// testing.
void InodeStoreBare::generate_test_instances(std::list<InodeStoreBare*> &ls)
{
  auto obj = new InodeStoreBare;
  obj->get_inode()->ino = 0xdeadbeef;
  obj->symlink = "rhubarb";
  ls.push_back(obj);
}
/**
 * Asynchronously cross-check this inode's in-memory state against the
 * backing store: the backtrace xattr, (for base directories) the stored
 * inode object, and the dirfrag fragstat/rstat sums.  Findings accumulate
 * in *results; `fin` is completed when the continuation finishes.
 */
void CInode::validate_disk_state(CInode::validated_data *results,
                                 MDSContext *fin)
{
  class ValidationContinuation : public MDSContinuation {
  public:
    MDSContext *fin;
    CInode *in;
    CInode::validated_data *results;
    bufferlist bl;
    CInode *shadow_in;
    // Continuation stages.  SNAPREALM currently has no registered callback.
    enum {
      START = 0,
      BACKTRACE,
      INODE,
      DIRFRAGS,
      SNAPREALM,
    };
    ValidationContinuation(CInode *i,
                           CInode::validated_data *data_r,
                           MDSContext *fin_) :
                             MDSContinuation(i->mdcache->mds->server),
                             fin(fin_),
                             in(i),
                             results(data_r),
                             shadow_in(NULL) {
      set_callback(START, static_cast<Continuation::stagePtr>(&ValidationContinuation::_start));
      set_callback(BACKTRACE, static_cast<Continuation::stagePtr>(&ValidationContinuation::_backtrace));
      set_callback(INODE, static_cast<Continuation::stagePtr>(&ValidationContinuation::_inode_disk));
      set_callback(DIRFRAGS, static_cast<Continuation::stagePtr>(&ValidationContinuation::_dirfrags));
    }
    ~ValidationContinuation() override {
      if (shadow_in) {
	delete shadow_in;
	in->mdcache->num_shadow_inodes--;
      }
    }
    /**
     * Fetch backtrace and set tag if tag is non-empty
     */
    void fetch_backtrace_and_tag(CInode *in,
                                 std::string_view tag, bool is_internal,
                                 Context *fin, int *bt_r, bufferlist *bt)
    {
      const int64_t pool = in->get_backtrace_pool();
      object_t oid = CInode::get_object_name(in->ino(), frag_t(), "");
      // read the "parent" xattr (the backtrace) off the object
      ObjectOperation fetch;
      fetch.getxattr("parent", bt, bt_r);
      in->mdcache->mds->objecter->read(oid, object_locator_t(pool), fetch, CEPH_NOSNAP,
				       NULL, 0, fin);
      if (in->mdcache->mds->logger) {
        in->mdcache->mds->logger->inc(l_mds_openino_backtrace_fetch);
        in->mdcache->mds->logger->inc(l_mds_scrub_backtrace_fetch);
      }
      using ceph::encode;
      // for non-internal scrubs, also stamp the object with the scrub tag
      // as a separate (fire-and-forget) mutation
      if (!is_internal) {
        ObjectOperation scrub_tag;
        bufferlist tag_bl;
        encode(tag, tag_bl);
        scrub_tag.setxattr("scrub_tag", tag_bl);
        SnapContext snapc;
        in->mdcache->mds->objecter->mutate(oid, object_locator_t(pool), scrub_tag, snapc,
                                           ceph::real_clock::now(),
                                           0, NULL);
        if (in->mdcache->mds->logger)
          in->mdcache->mds->logger->inc(l_mds_scrub_set_tag);
      }
    }
    // stage START: pin the inode and kick off the backtrace fetch
    bool _start(int rval) {
      ceph_assert(in->can_auth_pin());
      in->auth_pin(this);
      if (in->is_dirty()) {
	MDCache *mdcache = in->mdcache;  // For the benefit of dout
	auto ino = [this]() { return in->ino(); }; // For the benefit of dout
	dout(20) << "validating a dirty CInode; results will be inconclusive"
		 << dendl;
      }
      C_OnFinisher *conf = new C_OnFinisher(get_io_callback(BACKTRACE),
                                            in->mdcache->mds->finisher);
      std::string_view tag = in->scrub_infop->header->get_tag();
      bool is_internal = in->scrub_infop->header->is_internal_tag();
      // Rather than using the usual CInode::fetch_backtrace,
      // use a special variant that optionally writes a tag in the same
      // operation.
      fetch_backtrace_and_tag(in, tag, is_internal, conf, &results->backtrace.ondisk_read_retval, &bl);
      return false;
    }
    // stage BACKTRACE: compare the fetched on-disk backtrace with a
    // freshly built in-memory one, repair if requested, then fall through
    // to inode/dirfrag validation
    bool _backtrace(int rval) {
      // set up basic result reporting and make sure we got the data
      results->performed_validation = true; // at least, some of it!
      results->backtrace.checked = true;
      const int64_t pool = in->get_backtrace_pool();
      inode_backtrace_t& memory_backtrace = results->backtrace.memory_value;
      in->build_backtrace(pool, memory_backtrace);
      bool equivalent, divergent;
      int memory_newer;
      MDCache *mdcache = in->mdcache;  // For the benefit of dout
      auto ino = [this]() { return in->ino(); }; // For the benefit of dout
      // Ignore rval because it's the result of a FAILOK operation
      // from fetch_backtrace_and_tag: the real result is in
      // backtrace.ondisk_read_retval
      dout(20) << "ondisk_read_retval: " << results->backtrace.ondisk_read_retval << dendl;
      if (results->backtrace.ondisk_read_retval != 0) {
        results->backtrace.error_str << "failed to read off disk; see retval";
	// we probably have a new unwritten file!
	// so skip the backtrace scrub for this entry and say that all's well
	if (in->is_mdsdir()){
	  dout(20) << "forcing backtrace as passed since mdsdir actually doesn't have backtrace" << dendl;
	  results->backtrace.passed = true;
	}
	if (in->is_dirty_parent()) {
	  dout(20) << "forcing backtrace as passed since inode is dirty parent" << dendl;
	  results->backtrace.passed = true;
	}
        goto next;
      }
      // extract the backtrace, and compare it to a newly-constructed one
      try {
        auto p = bl.cbegin();
	using ceph::decode;
        decode(results->backtrace.ondisk_value, p);
	dout(10) << "decoded " << bl.length() << " bytes of backtrace successfully" << dendl;
      } catch (buffer::error&) {
        if (results->backtrace.ondisk_read_retval == 0 && rval != 0) {
          // Cases where something has clearly gone wrong with the overall
          // fetch op, though we didn't get a nonzero rc from the getxattr
          // operation.  e.g. object missing.
          results->backtrace.ondisk_read_retval = rval;
        }
        results->backtrace.error_str << "failed to decode on-disk backtrace ("
                                     << bl.length() << " bytes)!";
	// we probably have a new unwritten file!
	// so skip the backtrace scrub for this entry and say that all's well
	if (in->is_dirty_parent()) {
	  dout(20) << "decode failed; forcing backtrace as passed since "
		      "inode is dirty parent" << dendl;
	  results->backtrace.passed = true;
	}
        goto next;
      }
      memory_newer = memory_backtrace.compare(results->backtrace.ondisk_value,
					      &equivalent, &divergent);
      if (divergent || memory_newer < 0) {
        // we're divergent, or on-disk version is newer
        results->backtrace.error_str << "On-disk backtrace is divergent or newer";
        /* if the backtraces are divergent and the link count is 0, then
         * most likely its a stray entry that's being purged and things are
         * well and there's no reason for alarm
         */
        if (divergent && (in->is_dirty_parent() || in->get_inode()->nlink == 0)) {
          results->backtrace.passed = true;
          dout(20) << "divergent backtraces are acceptable when dn "
                      "is being purged or has been renamed or moved to a "
                      "different directory " << *in << dendl;
        }
      } else {
        results->backtrace.passed = true;
      }
next:
      // optionally rewrite a bad backtrace in place
      if (!results->backtrace.passed && in->scrub_infop->header->get_repair()) {
        std::string path;
        in->make_path_string(path);
        in->mdcache->mds->clog->warn() << "bad backtrace on inode " << in->ino()
                                       << "(" << path << "), rewriting it";
        in->mark_dirty_parent(in->mdcache->mds->mdlog->get_current_segment(),
                           false);
        // Flag that we repaired this BT so that it won't go into damagetable
        results->backtrace.repaired = true;
        if (in->mdcache->mds->logger)
          in->mdcache->mds->logger->inc(l_mds_scrub_backtrace_repaired);
      }
      // If the inode's number was free in the InoTable, fix that
      // (#15619)
      {
        InoTable *inotable = mdcache->mds->inotable;
        dout(10) << "scrub: inotable ino = " << in->ino() << dendl;
        dout(10) << "scrub: inotable free says "
          << inotable->is_marked_free(in->ino()) << dendl;
        if (inotable->is_marked_free(in->ino())) {
          LogChannelRef clog = in->mdcache->mds->clog;
          clog->error() << "scrub: inode wrongly marked free: " << in->ino();
          if (in->scrub_infop->header->get_repair()) {
            bool repaired = inotable->repair(in->ino());
            if (repaired) {
              clog->error() << "inode table repaired for inode: " << in->ino();
              inotable->save();
              if (in->mdcache->mds->logger)
                in->mdcache->mds->logger->inc(l_mds_scrub_inotable_repaired);
            } else {
              clog->error() << "Cannot repair inotable while other operations"
                " are in progress";
            }
          }
        }
      }
      if (in->is_dir()) {
	if (in->mdcache->mds->logger)
	  in->mdcache->mds->logger->inc(l_mds_scrub_dir_inodes);
	return validate_directory_data();
      } else {
	if (in->mdcache->mds->logger)
	  in->mdcache->mds->logger->inc(l_mds_scrub_file_inodes);
	// TODO: validate on-disk inode for normal files
	return true;
      }
    }
    // For base dirs, fetch the stored inode into a shadow CInode and go to
    // the INODE stage; otherwise skip straight to the dirfrag rstat check.
    bool validate_directory_data() {
      ceph_assert(in->is_dir());
      if (in->is_base()) {
	if (!shadow_in) {
	  shadow_in = new CInode(in->mdcache);
	  in->mdcache->create_unlinked_system_inode(shadow_in, in->ino(), in->get_inode()->mode);
	  in->mdcache->num_shadow_inodes++;
	}
        shadow_in->fetch(get_internal_callback(INODE));
	if (in->mdcache->mds->logger)
	  in->mdcache->mds->logger->inc(l_mds_scrub_dir_base_inodes);
        return false;
      } else {
	// TODO: validate on-disk inode for non-base directories
	if (in->mdcache->mds->logger)
	  in->mdcache->mds->logger->inc(l_mds_scrub_dirfrag_rstats);
	results->inode.passed = true;
        return check_dirfrag_rstats();
      }
    }
    // stage INODE: compare the shadow (on-disk) inode against memory
    bool _inode_disk(int rval) {
      const auto& si = shadow_in->get_inode();
      const auto& i = in->get_inode();
      results->inode.checked = true;
      results->inode.ondisk_read_retval = rval;
      results->inode.ondisk_value = *si;
      results->inode.memory_value = *i;
      if (si->version > i->version) {
        // uh, what?
        results->inode.error_str << "On-disk inode is newer than in-memory one; ";
        goto next;
      } else {
        bool divergent = false;
        int r = i->compare(*si, &divergent);
        results->inode.passed = !divergent && r >= 0;
        if (!results->inode.passed) {
          results->inode.error_str <<
              "On-disk inode is divergent or newer than in-memory one; ";
          goto next;
        }
      }
next:
      return check_dirfrag_rstats();
    }
    // Subtree-root dirfrags need their stats rdlocked before we can trust
    // them; otherwise we can check immediately.
    bool check_dirfrag_rstats() {
      if (in->has_subtree_root_dirfrag()) {
	in->mdcache->rdlock_dirfrags_stats(in, get_internal_callback(DIRFRAGS));
	return false;
      } else {
	return immediate(DIRFRAGS, 0);
      }
    }
    // stage DIRFRAGS: sum accounted stats across all dirfrags and compare
    // against the inode's dirstat/rstat; repair if requested
    bool _dirfrags(int rval) {
      // basic reporting setup
      results->raw_stats.checked = true;
      results->raw_stats.ondisk_read_retval = rval;
      results->raw_stats.memory_value.dirstat = in->get_inode()->dirstat;
      results->raw_stats.memory_value.rstat = in->get_inode()->rstat;
      frag_info_t& dir_info = results->raw_stats.ondisk_value.dirstat;
      nest_info_t& nest_info = results->raw_stats.ondisk_value.rstat;
      if (rval != 0) {
        results->raw_stats.error_str << "Failed to read dirfrags off disk";
        goto next;
      }
      // check each dirfrag...
      for (const auto &p : in->dirfrags) {
	CDir *dir = p.second;
	ceph_assert(dir->get_version() > 0);
	nest_info.add(dir->get_fnode()->accounted_rstat);
	dir_info.add(dir->get_fnode()->accounted_fragstat);
      }
      nest_info.rsubdirs++; // it gets one to account for self
      if (const sr_t *srnode = in->get_projected_srnode(); srnode)
	nest_info.rsnaps += srnode->snaps.size();
      // ...and that their sum matches our inode settings
      if (!dir_info.same_sums(in->get_inode()->dirstat) ||
	  !nest_info.same_sums(in->get_inode()->rstat)) {
	if (in->scrub_infop->header->get_repair()) {
	  results->raw_stats.error_str
	    << "freshly-calculated rstats don't match existing ones (will be fixed)";
	  in->mdcache->repair_inode_stats(in);
          results->raw_stats.repaired = true;
	} else {
	  results->raw_stats.error_str
	    << "freshly-calculated rstats don't match existing ones";
	}
	if (in->is_dirty()) {
	  MDCache *mdcache = in->mdcache; // for dout()
	  auto ino = [this]() { return in->ino(); }; // for dout()
	  dout(20) << "raw stats most likely wont match since inode is dirty; "
		      "please rerun scrub when system is stable; "
		      "assuming passed for now;" << dendl;
	  results->raw_stats.passed = true;
	}
        goto next;
      }
      results->raw_stats.passed = true;
      {
	MDCache *mdcache = in->mdcache; // for dout()
	auto ino = [this]() { return in->ino(); }; // for dout()
	dout(20) << "raw stats check passed on " << *in << dendl;
      }
next:
      return true;
    }
    // final stage: roll up pass/fail, flag repairs, complete the caller
    void _done() override {
      if ((!results->raw_stats.checked || results->raw_stats.passed) &&
	  (!results->backtrace.checked || results->backtrace.passed) &&
	  (!results->inode.checked || results->inode.passed))
	results->passed_validation = true;
      // Flag that we did some repair work so that our repair operation
      // can be flushed at end of scrub
      if (results->backtrace.repaired ||
	  results->inode.repaired ||
	  results->raw_stats.repaired)
	in->scrub_infop->header->set_repaired();
      if (fin)
	fin->complete(get_rval());
      in->auth_unpin(this);
    }
  };
  dout(10) << "scrub starting validate_disk_state on " << *this << dendl;
  ValidationContinuation *vc = new ValidationContinuation(this,
                                                          results,
                                                          fin);
  vc->begin();
}
// Emit scrub validation results for this inode.
// NOTE(review): the "inode" sub-result is not dumped as its own section,
// although its retval still feeds "return_code" below — confirm whether
// that omission is intentional.
void CInode::validated_data::dump(Formatter *f) const
{
  f->open_object_section("results");
  {
    f->dump_bool("performed_validation", performed_validation);
    f->dump_bool("passed_validation", passed_validation);
    f->open_object_section("backtrace");
    {
      f->dump_bool("checked", backtrace.checked);
      f->dump_bool("passed", backtrace.passed);
      f->dump_int("read_ret_val", backtrace.ondisk_read_retval);
      f->dump_stream("ondisk_value") << backtrace.ondisk_value;
      f->dump_stream("memoryvalue") << backtrace.memory_value;
      f->dump_string("error_str", backtrace.error_str.str());
    }
    f->close_section(); // backtrace
    f->open_object_section("raw_stats");
    {
      f->dump_bool("checked", raw_stats.checked);
      f->dump_bool("passed", raw_stats.passed);
      f->dump_int("read_ret_val", raw_stats.ondisk_read_retval);
      f->dump_stream("ondisk_value.dirstat") << raw_stats.ondisk_value.dirstat;
      f->dump_stream("ondisk_value.rstat") << raw_stats.ondisk_value.rstat;
      f->dump_stream("memory_value.dirstat") << raw_stats.memory_value.dirstat;
      f->dump_stream("memory_value.rstat") << raw_stats.memory_value.rstat;
      f->dump_string("error_str", raw_stats.error_str.str());
    }
    f->close_section(); // raw_stats
    // dump failure return code: the last nonzero read retval wins
    int rc = 0;
    if (backtrace.checked && backtrace.ondisk_read_retval)
      rc = backtrace.ondisk_read_retval;
    if (inode.checked && inode.ondisk_read_retval)
      rc = inode.ondisk_read_retval;
    if (raw_stats.checked && raw_stats.ondisk_read_retval)
      rc = raw_stats.ondisk_read_retval;
    f->dump_int("return_code", rc);
  }
  f->close_section(); // results
}
// True iff no checked sub-result failed without also being repaired,
// i.e. any damage that was found has been fixed.
bool CInode::validated_data::all_damage_repaired() const
{
  auto outstanding = [](const auto& field) {
    return field.checked && !field.passed && !field.repaired;
  };
  return !(outstanding(raw_stats) ||
           outstanding(backtrace) ||
           outstanding(inode));
}
// Dump this inode to a Formatter.  `flags` is a DUMP_* bitmask selecting
// which groups of information to emit (path, inode store, cache object,
// locks, state flags, client caps, dirfrags).
void CInode::dump(Formatter *f, int flags) const
{
  if (flags & DUMP_PATH) {
    std::string path;
    make_path_string(path, true);
    if (path.empty())
      path = "/";
    f->dump_string("path", path);
  }
  if (flags & DUMP_INODE_STORE_BASE)
    InodeStoreBase::dump(f);
  if (flags & DUMP_MDS_CACHE_OBJECT)
    MDSCacheObject::dump(f);
  if (flags & DUMP_LOCKS) {
    // one section per lock type on this inode
    f->open_object_section("versionlock");
    versionlock.dump(f);
    f->close_section();
    f->open_object_section("authlock");
    authlock.dump(f);
    f->close_section();
    f->open_object_section("linklock");
    linklock.dump(f);
    f->close_section();
    f->open_object_section("dirfragtreelock");
    dirfragtreelock.dump(f);
    f->close_section();
    f->open_object_section("filelock");
    filelock.dump(f);
    f->close_section();
    f->open_object_section("xattrlock");
    xattrlock.dump(f);
    f->close_section();
    f->open_object_section("snaplock");
    snaplock.dump(f);
    f->close_section();
    f->open_object_section("nestlock");
    nestlock.dump(f);
    f->close_section();
    f->open_object_section("flocklock");
    flocklock.dump(f);
    f->close_section();
    f->open_object_section("policylock");
    policylock.dump(f);
    f->close_section();
  }
  if (flags & DUMP_STATE) {
    // generic MDSCacheObject states first, then CInode-specific ones
    f->open_array_section("states");
    MDSCacheObject::dump_states(f);
    if (state_test(STATE_EXPORTING))
      f->dump_string("state", "exporting");
    if (state_test(STATE_OPENINGDIR))
      f->dump_string("state", "openingdir");
    if (state_test(STATE_FREEZING))
      f->dump_string("state", "freezing");
    if (state_test(STATE_FROZEN))
      f->dump_string("state", "frozen");
    if (state_test(STATE_AMBIGUOUSAUTH))
      f->dump_string("state", "ambiguousauth");
    if (state_test(STATE_EXPORTINGCAPS))
      f->dump_string("state", "exportingcaps");
    if (state_test(STATE_NEEDSRECOVER))
      f->dump_string("state", "needsrecover");
    if (state_test(STATE_PURGING))
      f->dump_string("state", "purging");
    if (state_test(STATE_DIRTYPARENT))
      f->dump_string("state", "dirtyparent");
    if (state_test(STATE_DIRTYRSTAT))
      f->dump_string("state", "dirtyrstat");
    if (state_test(STATE_STRAYPINNED))
      f->dump_string("state", "straypinned");
    if (state_test(STATE_FROZENAUTHPIN))
      f->dump_string("state", "frozenauthpin");
    if (state_test(STATE_DIRTYPOOL))
      f->dump_string("state", "dirtypool");
    if (state_test(STATE_ORPHAN))
      f->dump_string("state", "orphan");
    if (state_test(STATE_MISSINGOBJS))
      f->dump_string("state", "missingobjs");
    f->close_section();
  }
  if (flags & DUMP_CAPS) {
    f->open_array_section("client_caps");
    for (const auto &p : client_caps) {
      auto &client = p.first;
      auto cap = &p.second;
      f->open_object_section("client_cap");
      f->dump_int("client_id", client.v);
      f->dump_string("pending", ccap_string(cap->pending()));
      f->dump_string("issued", ccap_string(cap->issued()));
      f->dump_string("wanted", ccap_string(cap->wanted()));
      f->dump_int("last_sent", cap->get_last_seq());
      f->close_section();
    }
    f->close_section();
    f->dump_int("loner", loner_cap.v);
    f->dump_int("want_loner", want_loner_cap.v);
    f->open_array_section("mds_caps_wanted");
    for (const auto &p : mds_caps_wanted) {
      f->open_object_section("mds_cap_wanted");
      f->dump_int("rank", p.first);
      f->dump_string("cap", ccap_string(p.second));
      f->close_section();
    }
    f->close_section();
  }
  if (flags & DUMP_DIRFRAGS) {
    f->open_array_section("dirfrags");
    auto&& dfs = get_dirfrags();
    for(const auto &dir: dfs) {
      f->open_object_section("dir");
      dir->dump(f, CDir::DUMP_DEFAULT | CDir::DUMP_ITEMS);
      // also sanity-checks each dirfrag's rstats as a side effect
      dir->check_rstats();
      f->close_section();
    }
    f->close_section();
  }
}
/****** Scrub Stuff *****/
void CInode::scrub_info_create() const
{
dout(25) << __func__ << dendl;
ceph_assert(!scrub_infop);
// break out of const-land to set up implicit initial state
CInode *me = const_cast<CInode*>(this);
const auto& pi = me->get_projected_inode();
std::unique_ptr<scrub_info_t> si(new scrub_info_t());
si->last_scrub_stamp = pi->last_scrub_stamp;
si->last_scrub_version = pi->last_scrub_version;
me->scrub_infop.swap(si);
}
// Free scrub_infop once it is no longer needed: no scrub running and no
// last-scrub state left to persist.
void CInode::scrub_maybe_delete_info()
{
  if (!scrub_infop)
    return;
  if (scrub_infop->scrub_in_progress || scrub_infop->last_scrub_dirty)
    return;
  scrub_infop.reset();
}
// Begin a scrub pass on this inode: ensure scrub_infop exists, mark the
// scrub in progress, remember the header and bump its pending counter.
void CInode::scrub_initialize(ScrubHeaderRef& header)
{
  dout(20) << __func__ << " with scrub_version " << get_version() << dendl;
  scrub_info();   // creates scrub_infop if absent
  scrub_infop->scrub_in_progress = true;
  scrub_infop->queued_frags.clear();
  scrub_infop->header = header;
  header->inc_num_pending();
  // right now we don't handle remote inodes
}
// Abandon an in-progress scrub without recording any result; drops the
// header's pending count and frees scrub state if nothing else needs it.
void CInode::scrub_aborted() {
  dout(20) << __func__ << dendl;
  ceph_assert(scrub_is_in_progress());
  scrub_infop->scrub_in_progress = false;
  scrub_infop->header->dec_num_pending();
  scrub_maybe_delete_info();
}
// Record a completed scrub: remember the version/timestamp we scrubbed at
// and mark that state dirty so it gets persisted later.
void CInode::scrub_finished() {
  dout(20) << __func__ << dendl;
  ceph_assert(scrub_is_in_progress());
  scrub_infop->last_scrub_version = get_version();
  scrub_infop->last_scrub_stamp = ceph_clock_now();
  scrub_infop->last_scrub_dirty = true;
  scrub_infop->scrub_in_progress = false;
  scrub_infop->header->dec_num_pending();
}
// Pool where this inode's backtrace object lives: the metadata pool for
// directories, otherwise the pool named by the file's layout.
int64_t CInode::get_backtrace_pool() const
{
  if (!is_dir()) {
    // Files are required to have an explicit layout that specifies
    // a pool
    ceph_assert(get_inode()->layout.pool_id != -1);
    return get_inode()->layout.pool_id;
  }
  return mdcache->mds->get_metadata_pool();
}
// Inspect this inode's auth dirfrags and decide whether export work is
// needed to honor `export_pin` (a rank, MDS_RANK_EPHEMERAL_RAND/DIST, or
// MDS_RANK_NONE).  If any frag needs action, queue the inode on
// mdcache->export_pin_queue; idempotent via STATE_QUEUEDEXPORTPIN.
void CInode::queue_export_pin(mds_rank_t export_pin)
{
  if (state_test(CInode::STATE_QUEUEDEXPORTPIN))
    return;
  // resolve the pin to a concrete target rank where possible
  mds_rank_t target;
  if (export_pin >= 0)
    target = export_pin;
  else if (export_pin == MDS_RANK_EPHEMERAL_RAND)
    target = mdcache->hash_into_rank_bucket(ino());
  else
    target = MDS_RANK_NONE;
  unsigned min_frag_bits = mdcache->get_ephemeral_dist_frag_bits();
  bool queue = false;
  for (auto& p : dirfrags) {
    CDir *dir = p.second;
    if (!dir->is_auth())
      continue;
    if (export_pin == MDS_RANK_EPHEMERAL_DIST) {
      if (dir->get_frag().bits() < min_frag_bits) {
	// needs split
	queue = true;
	break;
      }
      // distributed pin hashes per-fragment
      target = mdcache->hash_into_rank_bucket(ino(), dir->get_frag());
    }
    if (target != MDS_RANK_NONE) {
      if (dir->is_subtree_root()) {
	// set auxsubtree bit or export it
	if (!dir->state_test(CDir::STATE_AUXSUBTREE) ||
	    target != dir->get_dir_auth().first)
	  queue = true;
      } else {
	// create aux subtree or export it
	queue = true;
      }
    } else {
      // clear aux subtrees ?
      queue = dir->state_test(CDir::STATE_AUXSUBTREE);
    }
    if (queue)
      break;
  }
  if (queue) {
    state_set(CInode::STATE_QUEUEDEXPORTPIN);
    mdcache->export_pin_queue.insert(this);
  }
}
void CInode::maybe_export_pin(bool update)
{
if (!g_conf()->mds_bal_export_pin)
return;
if (!is_dir() || !is_normal())
return;
dout(15) << __func__ << " update=" << update << " " << *this << dendl;
mds_rank_t export_pin = get_export_pin(false);
if (export_pin == MDS_RANK_NONE && !update)
return;
check_pin_policy(export_pin);
queue_export_pin(export_pin);
}
void CInode::set_ephemeral_pin(bool dist, bool rand)
{
unsigned state = 0;
if (dist)
state |= STATE_DISTEPHEMERALPIN;
if (rand)
state |= STATE_RANDEPHEMERALPIN;
if (!state)
return;
if (state_test(state) != state) {
dout(10) << "set ephemeral (" << (dist ? "dist" : "")
<< (rand ? " rand" : "") << ") pin on " << *this << dendl;
if (!is_ephemerally_pinned()) {
auto p = mdcache->export_ephemeral_pins.insert(this);
ceph_assert(p.second);
}
state_set(state);
}
}
// Clear the distributed and/or random ephemeral-pin state bits,
// deregistering the inode from mdcache->export_ephemeral_pins once no
// ephemeral pin bits remain.
void CInode::clear_ephemeral_pin(bool dist, bool rand)
{
  unsigned mask = 0;
  if (dist)
    mask |= STATE_DISTEPHEMERALPIN;
  if (rand)
    mask |= STATE_RANDEPHEMERALPIN;
  if (!state_test(mask))
    return;   // none of the requested bits are set
  dout(10) << "clear ephemeral (" << (dist ? "dist" : "")
           << (rand ? " rand" : "") << ") pin on " << *this << dendl;
  state_clear(mask);
  if (!is_ephemerally_pinned()) {
    auto removed = mdcache->export_ephemeral_pins.erase(this);
    ceph_assert(removed == 1);
  }
}
// Possibly apply a random ephemeral pin to this directory.  `threshold`
// may be precomputed by the caller; if negative it is derived from the
// inode's export_ephemeral_random_pin chain.  A uniform draw in [0,1]
// below the threshold pins the inode.
void CInode::maybe_ephemeral_rand(double threshold)
{
  if (!mdcache->get_export_ephemeral_random_config()) {
    dout(15) << __func__ << " config false: cannot ephemeral random pin " << *this << dendl;
    clear_ephemeral_pin(false, true);
    return;
  } else if (!is_dir() || !is_normal()) {
    dout(15) << __func__ << " !dir or !normal: cannot ephemeral random pin " << *this << dendl;
    clear_ephemeral_pin(false, true);
    return;
  } else if (get_inode()->nlink == 0) {
    dout(15) << __func__ << " unlinked directory: cannot ephemeral random pin " << *this << dendl;
    clear_ephemeral_pin(false, true);
    return;
  } else if (state_test(CInode::STATE_RANDEPHEMERALPIN)) {
    // already pinned: just make sure the export work is queued
    dout(10) << __func__ << " already ephemeral random pinned: requeueing " << *this << dendl;
    queue_export_pin(MDS_RANK_EPHEMERAL_RAND);
    return;
  }
  /* not precomputed? */
  if (threshold < 0.0) {
    threshold = get_ephemeral_rand();
  }
  if (threshold <= 0.0) {
    return;
  }
  double n = ceph::util::generate_random_number(0.0, 1.0);
  dout(15) << __func__ << " rand " << n << " <?= " << threshold
           << " " << *this << dendl;
  if (n <= threshold) {
    dout(10) << __func__ << " randomly export pinning " << *this << dendl;
    set_ephemeral_pin(false, true);
    queue_export_pin(MDS_RANK_EPHEMERAL_RAND);
  }
}
// Record a random ephemeral-pin probability on this directory's projected
// inode (presumably set via the ceph.dir.pin.random vxattr — TODO confirm
// against the caller).
void CInode::setxattr_ephemeral_rand(double probability)
{
  ceph_assert(is_dir());
  _get_projected_inode()->export_ephemeral_random_pin = probability;
}
// Enable/disable the distributed ephemeral pin flag on this directory's
// projected inode (presumably set via the ceph.dir.pin.distributed vxattr
// — TODO confirm against the caller).
void CInode::setxattr_ephemeral_dist(bool val)
{
  ceph_assert(is_dir());
  _get_projected_inode()->export_ephemeral_distributed_pin = val;
}
// Pin this directory to a specific rank (or clear the pin) on the
// projected inode, then re-evaluate export pinning.
void CInode::set_export_pin(mds_rank_t rank)
{
  ceph_assert(is_dir());
  _get_projected_inode()->export_pin = rank;
  maybe_export_pin(true);
}
// Compute the effective export pin for this inode, walking up the parent
// chain when `inherit` is true.  Returns a concrete rank, one of the
// MDS_RANK_EPHEMERAL_* sentinels, or MDS_RANK_NONE.
mds_rank_t CInode::get_export_pin(bool inherit) const
{
  if (!g_conf()->mds_bal_export_pin)
    return MDS_RANK_NONE;
  /* An inode that is export pinned may not necessarily be a subtree root, we
   * need to traverse the parents. A base or system inode cannot be pinned.
   * N.B. inodes not yet linked into a dir (i.e. anonymous inodes) will not
   * have a parent yet.
   */
  mds_rank_t r_target = MDS_RANK_NONE;
  const CInode *in = this;
  const CDir *dir = nullptr;
  while (true) {
    if (in->is_system())
      break;
    const CDentry *pdn = in->get_parent_dn();
    if (!pdn)
      break;
    if (in->get_inode()->nlink == 0) {
      // ignore export pin for unlinked directory
      break;
    }
    if (in->get_inode()->export_pin >= 0) {
      return in->get_inode()->export_pin;
    } else if (in->get_inode()->export_ephemeral_distributed_pin &&
	       mdcache->get_export_ephemeral_distributed_config()) {
      // a distributed-pinned ancestor hashes us by our frag within it
      if (in != this)
	return mdcache->hash_into_rank_bucket(in->ino(), dir->get_frag());
      return MDS_RANK_EPHEMERAL_DIST;
    } else if (r_target != MDS_RANK_NONE && in->get_inode()->export_ephemeral_random_pin > 0.0) {
      return r_target;
    } else if (r_target == MDS_RANK_NONE && in->is_ephemeral_rand() &&
	       mdcache->get_export_ephemeral_random_config()) {
      /* If a parent overrides a grandparent ephemeral pin policy with an export pin, we use that export pin instead. */
      if (!inherit)
	return MDS_RANK_EPHEMERAL_RAND;
      if (in == this)
	r_target = MDS_RANK_EPHEMERAL_RAND;
      else
	r_target = mdcache->hash_into_rank_bucket(in->ino());
    }
    if (!inherit)
      break;
    dir = pdn->get_dir();
    in = dir->inode;
  }
  return MDS_RANK_NONE;
}
// Reconcile the ephemeral pin state bits with the effective export pin
// just computed (see maybe_export_pin), requeueing an un-pin when an
// inherited explicit pin supersedes an ephemeral one.
void CInode::check_pin_policy(mds_rank_t export_pin)
{
  if (export_pin == MDS_RANK_EPHEMERAL_DIST) {
    set_ephemeral_pin(true, false);
    clear_ephemeral_pin(false, true);
  } else if (export_pin == MDS_RANK_EPHEMERAL_RAND) {
    set_ephemeral_pin(false, true);
    clear_ephemeral_pin(true, false);
  } else if (is_ephemerally_pinned()) {
    // export_pin >= 0 || export_pin == MDS_RANK_NONE
    clear_ephemeral_pin(true, true);
    if (export_pin != get_inode()->export_pin) // inherited export_pin
      queue_export_pin(MDS_RANK_NONE);
  }
}
// Effective random ephemeral-pin probability for this inode: the nearest
// ancestor's (or own) export_ephemeral_random_pin, capped at
// mdcache->export_ephemeral_random_max, or 0.0 if none applies.
double CInode::get_ephemeral_rand() const
{
  /* N.B. inodes not yet linked into a dir (i.e. anonymous inodes) will not
   * have a parent yet.
   */
  const CInode *in = this;
  double max = mdcache->export_ephemeral_random_max;
  while (true) {
    if (in->is_system())
      break;
    const CDentry *pdn = in->get_parent_dn();
    if (!pdn)
      break;
    // ignore export pin for unlinked directory
    if (in->get_inode()->nlink == 0)
      break;
    if (in->get_inode()->export_ephemeral_random_pin > 0.0)
      return std::min(in->get_inode()->export_ephemeral_random_pin, max);
    /* An export_pin overrides only if no closer parent (incl. this one) has a
     * random pin set.
     */
    if (in->get_inode()->export_pin >= 0 ||
        in->get_inode()->export_ephemeral_distributed_pin)
      return 0.0;
    in = pdn->get_dir()->inode;
  }
  return 0.0;
}
// Append every dirfrag that is NOT a subtree root to `v`.
void CInode::get_nested_dirfrags(std::vector<CDir*>& v) const
{
  for (const auto &entry : dirfrags) {
    CDir *dir = entry.second;
    if (!dir->is_subtree_root())
      v.push_back(dir);
  }
}
// Append every dirfrag that IS a subtree root to `v`.
void CInode::get_subtree_dirfrags(std::vector<CDir*>& v) const
{
  for (const auto &entry : dirfrags) {
    CDir *dir = entry.second;
    if (dir->is_subtree_root())
      v.push_back(dir);
  }
}
MEMPOOL_DEFINE_OBJECT_FACTORY(CInode, co_inode, mds_co);
| 158,891 | 27.748326 | 130 | cc |
null | ceph-main/src/mds/CInode.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CINODE_H
#define CEPH_CINODE_H
#include <list>
#include <map>
#include <set>
#include <string_view>
#include "common/config.h"
#include "common/RefCountedObj.h"
#include "include/compat.h"
#include "include/counter.h"
#include "include/elist.h"
#include "include/types.h"
#include "include/lru.h"
#include "include/compact_set.h"
#include "MDSCacheObject.h"
#include "MDSContext.h"
#include "flock.h"
#include "BatchOp.h"
#include "CDentry.h"
#include "SimpleLock.h"
#include "ScatterLock.h"
#include "LocalLockC.h"
#include "Capability.h"
#include "SnapRealm.h"
#include "Mutation.h"
#include "messages/MClientCaps.h"
#define dout_context g_ceph_context
class Context;
class CDir;
class CInode;
class MDCache;
class LogSegment;
struct SnapRealm;
class Session;
struct ObjectOperation;
class EMetaBlob;
// Pairs a lock type id with the cap bits writers of that lock need.
// NOTE(review): the table using this type is defined outside this view —
// confirm semantics there.
struct cinode_lock_info_t {
  int lock;     // lock type id (CEPH_LOCK_*)
  int wr_caps;  // caps required for write access under this lock
};
// One backtrace-commit operation against a single pool.  The extended
// constructor additionally requests that the object's layout/symlink
// xattrs be refreshed as part of the same update.
struct CInodeCommitOperation {
public:
  CInodeCommitOperation(int prio, int64_t po)
    : pool(po), priority(prio) {
  }
  CInodeCommitOperation(int prio, int64_t po, file_layout_t l, uint64_t f, std::string_view s)
    : pool(po), priority(prio), _layout(l), _features(f), _symlink(s) {
    update_layout_symlink = true;
  }
  // populate `op` with the writes for backtrace `bt`
  void update(ObjectOperation &op, inode_backtrace_t &bt);
  int64_t get_pool() { return pool; }
private:
  int64_t pool;     ///< pool id
  int priority;
  bool update_layout_symlink = false;
  file_layout_t _layout;
  uint64_t _features;
  // NOTE(review): non-owning view — the caller must keep the referenced
  // string alive until update() has run.
  std::string_view _symlink;
};
// Batch of per-pool commit operations for one inode's backtrace update.
struct CInodeCommitOperations {
  std::vector<CInodeCommitOperation> ops_vec;  // one entry per target pool
  inode_backtrace_t bt;                        // backtrace being written
  version_t version;                           // inode version being committed
  CInode *in;                                  // inode the commit is for
};
/**
* Base class for CInode, containing the backing store data and
* serialization methods. This exists so that we can read and
* handle CInodes from the backing store without hitting all
* the business logic in CInode proper.
*/
/**
 * Base class for CInode, containing the backing store data and
 * serialization methods. This exists so that we can read and
 * handle CInodes from the backing store without hitting all
 * the business logic in CInode proper.
 */
class InodeStoreBase {
public:
  using mempool_inode = inode_t<mempool::mds_co::pool_allocator>;
  using inode_ptr = std::shared_ptr<mempool_inode>;
  using inode_const_ptr = std::shared_ptr<const mempool_inode>;

  // allocate an inode out of the shared mds_co mempool allocator
  template <typename ...Args>
  static inode_ptr allocate_inode(Args && ...args) {
    static mempool::mds_co::pool_allocator<mempool_inode> allocator;
    return std::allocate_shared<mempool_inode>(allocator, std::forward<Args>(args)...);
  }

  using mempool_xattr_map = xattr_map<mempool::mds_co::pool_allocator>; // FIXME bufferptr not in mempool
  using xattr_map_ptr = std::shared_ptr<mempool_xattr_map>;
  using xattr_map_const_ptr = std::shared_ptr<const mempool_xattr_map>;

  // allocate an xattr map out of the shared mds_co mempool allocator
  template <typename ...Args>
  static xattr_map_ptr allocate_xattr_map(Args && ...args) {
    static mempool::mds_co::pool_allocator<mempool_xattr_map> allocator;
    return std::allocate_shared<mempool_xattr_map>(allocator, std::forward<Args>(args)...);
  }

  using mempool_old_inode = old_inode_t<mempool::mds_co::pool_allocator>;
  using mempool_old_inode_map = mempool::mds_co::map<snapid_t, mempool_old_inode>;
  using old_inode_map_ptr = std::shared_ptr<mempool_old_inode_map>;
  using old_inode_map_const_ptr = std::shared_ptr<const mempool_old_inode_map>;

  // allocate an old-inode map out of the shared mds_co mempool allocator
  template <typename ...Args>
  static old_inode_map_ptr allocate_old_inode_map(Args && ...args) {
    static mempool::mds_co::pool_allocator<mempool_old_inode_map> allocator;
    return std::allocate_shared<mempool_old_inode_map>(allocator, std::forward<Args>(args)...);
  }

  // Replace the shared (read-copy-update) pointers; see the comment on the
  // protected members below.
  void reset_inode(inode_const_ptr&& ptr) {
    inode = std::move(ptr);
  }

  void reset_xattrs(xattr_map_const_ptr&& ptr) {
    xattrs = std::move(ptr);
  }

  void reset_old_inodes(old_inode_map_const_ptr&& ptr) {
    old_inodes = std::move(ptr);
  }

  void encode_xattrs(bufferlist &bl) const;
  void decode_xattrs(bufferlist::const_iterator &p);
  void encode_old_inodes(bufferlist &bl, uint64_t features) const;
  void decode_old_inodes(bufferlist::const_iterator &p);

  /* Helpers */
  static object_t get_object_name(inodeno_t ino, frag_t fg, std::string_view suffix);

  /* Full serialization for use in ".inode" root inode objects */
  void encode(ceph::buffer::list &bl, uint64_t features, const ceph::buffer::list *snap_blob=NULL) const;
  void decode(ceph::buffer::list::const_iterator &bl, ceph::buffer::list& snap_blob);

  /* Serialization without ENCODE_START/FINISH blocks for use embedded in dentry */
  void encode_bare(ceph::buffer::list &bl, uint64_t features, const ceph::buffer::list *snap_blob=NULL) const;
  void decode_bare(ceph::buffer::list::const_iterator &bl, ceph::buffer::list &snap_blob, __u8 struct_v=5);

  /* For test/debug output */
  void dump(ceph::Formatter *f) const;
  void decode_json(JSONObj *obj);
  static void xattrs_cb(InodeStoreBase::mempool_xattr_map& c, JSONObj *obj);
  static void old_indoes_cb(InodeStoreBase::mempool_old_inode_map& c, JSONObj *obj);

  /* For use by offline tools */
  __u32 hash_dentry_name(std::string_view dn);
  frag_t pick_dirfrag(std::string_view dn);

  mempool::mds_co::string symlink;      // symlink dest, if symlink
  fragtree_t dirfragtree;  // dir frag tree, if any.  always consistent with our dirfrag map.
  snapid_t oldest_snap = CEPH_NOSNAP;
  damage_flags_t damage_flags = 0;

protected:
  static inode_const_ptr empty_inode;

  // Following members are pointers to constant data, the constant data can
  // be shared by CInode and log events. To update these members in CInode,
  // read-copy-update should be used.
  inode_const_ptr inode = empty_inode;
  xattr_map_const_ptr xattrs;
  old_inode_map_const_ptr old_inodes; // key = last, value.first = first
};
// Convenience overload: decode an xattr map using the mds_co pool
// allocator (forwards to the allocator-templated decode_noshare).
inline void decode_noshare(InodeStoreBase::mempool_xattr_map& xattrs,
                           ceph::buffer::list::const_iterator &p)
{
  decode_noshare<mempool::mds_co::pool_allocator>(xattrs, p);
}
// Storable inode state: extends InodeStoreBase with mutable accessors and
// carries the encoded snaprealm blob through (de)serialization.
class InodeStore : public InodeStoreBase {
public:
  // Mutable access to the inode; lazily replaces the shared empty
  // sentinel with a freshly allocated copy on first use.
  mempool_inode* get_inode() {
    if (inode == empty_inode)
      reset_inode(allocate_inode());
    return const_cast<mempool_inode*>(inode.get());
  }
  // Mutable access to the xattr map (may be null when never set).
  mempool_xattr_map* get_xattrs() { return const_cast<mempool_xattr_map*>(xattrs.get()); }
  // Full (versioned) encode/decode, threading snap_blob through the base.
  void encode(ceph::buffer::list &bl, uint64_t features) const {
    InodeStoreBase::encode(bl, features, &snap_blob);
  }
  void decode(ceph::buffer::list::const_iterator &bl) {
    InodeStoreBase::decode(bl, snap_blob);
  }
  // Bare encode/decode (no ENCODE_START/FINISH), for embedding in dentries.
  void encode_bare(ceph::buffer::list &bl, uint64_t features) const {
    InodeStoreBase::encode_bare(bl, features, &snap_blob);
  }
  void decode_bare(ceph::buffer::list::const_iterator &bl) {
    InodeStoreBase::decode_bare(bl, snap_blob);
  }
  static void generate_test_instances(std::list<InodeStore*>& ls);
  using InodeStoreBase::inode;
  using InodeStoreBase::xattrs;
  using InodeStoreBase::old_inodes;
  // FIXME bufferlist not part of mempool
  ceph::buffer::list snap_blob;  // Encoded copy of SnapRealm, because we can't
                                 // rehydrate it without full MDCache
};
WRITE_CLASS_ENCODER_FEATURES(InodeStore)
// just for ceph-dencoder: exposes the "bare" wire format through the
// standard encode/decode entry points so the dencoder tool can use it.
class InodeStoreBare : public InodeStore {
public:
  void encode(ceph::buffer::list &bl, uint64_t features) const {
    InodeStore::encode_bare(bl, features);
  }
  void decode(ceph::buffer::list::const_iterator &bl) {
    InodeStore::decode_bare(bl);
  }
  static void generate_test_instances(std::list<InodeStoreBare*>& ls);
};
WRITE_CLASS_ENCODER_FEATURES(InodeStoreBare)
// cached inode wrapper
class CInode : public MDSCacheObject, public InodeStoreBase, public Counter<CInode> {
public:
MEMPOOL_CLASS_HELPERS();
using mempool_cap_map = mempool::mds_co::map<client_t, Capability>;
/**
* @defgroup Scrubbing and fsck
*/
/**
* Report the results of validation against a particular inode.
* Each member is a pair of bools.
* <member>.first represents if validation was performed against the member.
   * <member>.second represents if the member passed validation.
* performed_validation is set to true if the validation was actually
* run. It might not be run if, for instance, the inode is marked as dirty.
* passed_validation is set to true if everything that was checked
* passed its validation.
*/
  // Results of validate_disk_state() for one inode; see the block comment
  // above for the checked/passed semantics.
  struct validated_data {
    // Per-member record: whether the check ran, passed, and was repaired,
    // the return code of the on-disk read, and the two values compared.
    template<typename T>struct member_status {
      bool checked = false;
      bool passed = false;
      bool repaired = false;
      int ondisk_read_retval = 0;
      T ondisk_value;
      T memory_value;
      std::stringstream error_str;
    };
    // Recursively accounted dir stats, compared against stored values
    struct raw_stats_t {
      frag_info_t dirstat;
      nest_info_t rstat;
    };
    validated_data() {}
    void dump(ceph::Formatter *f) const;
    // defined out of line; presumably true when every detected damage was
    // repaired — confirm against CInode.cc
    bool all_damage_repaired() const;
    bool performed_validation = false;  // did validation actually run?
    bool passed_validation = false;     // did everything checked pass?
    member_status<inode_backtrace_t> backtrace;
    member_status<mempool_inode> inode; // XXX should not be in mempool; wait for pmr
    member_status<raw_stats_t> raw_stats;
  };
// friends
friend class Server;
friend class Locker;
friend class Migrator;
friend class MDCache;
friend class StrayManager;
friend class CDir;
friend std::ostream& operator<<(std::ostream&, const CInode&);
  // Scrub bookkeeping; allocated on demand (see scrub_info()).
  class scrub_info_t {
  public:
    scrub_info_t() {}
    version_t last_scrub_version = 0;  // version recorded at last scrub
    utime_t last_scrub_stamp;          // timestamp of last scrub
    bool last_scrub_dirty = false; /// are our stamps dirty with respect to disk state?
    bool scrub_in_progress = false; /// are we currently scrubbing?
    fragset_t queued_frags;  // dirfrags queued for scrubbing
    ScrubHeaderRef header;
  };
// -- pins --
static const int PIN_DIRFRAG = -1;
static const int PIN_CAPS = 2; // client caps
static const int PIN_IMPORTING = -4; // importing
static const int PIN_OPENINGDIR = 7;
static const int PIN_REMOTEPARENT = 8;
static const int PIN_BATCHOPENJOURNAL = 9;
static const int PIN_SCATTERED = 10;
static const int PIN_STICKYDIRS = 11;
//static const int PIN_PURGING = -12;
static const int PIN_FREEZING = 13;
static const int PIN_FROZEN = 14;
static const int PIN_IMPORTINGCAPS = -15;
static const int PIN_PASTSNAPPARENT = -16;
static const int PIN_OPENINGSNAPPARENTS = 17;
static const int PIN_TRUNCATING = 18;
static const int PIN_STRAY = 19; // we pin our stray inode while active
static const int PIN_NEEDSNAPFLUSH = 20;
static const int PIN_DIRTYRSTAT = 21;
static const int PIN_EXPORTINGCAPS = 22;
static const int PIN_DIRTYPARENT = 23;
static const int PIN_DIRWAITER = 24;
// -- dump flags --
static const int DUMP_INODE_STORE_BASE = (1 << 0);
static const int DUMP_MDS_CACHE_OBJECT = (1 << 1);
static const int DUMP_LOCKS = (1 << 2);
static const int DUMP_STATE = (1 << 3);
static const int DUMP_CAPS = (1 << 4);
static const int DUMP_PATH = (1 << 5);
static const int DUMP_DIRFRAGS = (1 << 6);
static const int DUMP_ALL = (-1);
static const int DUMP_DEFAULT = DUMP_ALL & (~DUMP_PATH) & (~DUMP_DIRFRAGS);
// -- state --
static const int STATE_EXPORTING = (1<<0); // on nonauth bystander.
static const int STATE_OPENINGDIR = (1<<1);
static const int STATE_FREEZING = (1<<2);
static const int STATE_FROZEN = (1<<3);
static const int STATE_AMBIGUOUSAUTH = (1<<4);
static const int STATE_EXPORTINGCAPS = (1<<5);
static const int STATE_NEEDSRECOVER = (1<<6);
static const int STATE_RECOVERING = (1<<7);
static const int STATE_PURGING = (1<<8);
static const int STATE_DIRTYPARENT = (1<<9);
static const int STATE_DIRTYRSTAT = (1<<10);
static const int STATE_STRAYPINNED = (1<<11);
static const int STATE_FROZENAUTHPIN = (1<<12);
static const int STATE_DIRTYPOOL = (1<<13);
static const int STATE_REPAIRSTATS = (1<<14);
static const int STATE_MISSINGOBJS = (1<<15);
static const int STATE_EVALSTALECAPS = (1<<16);
static const int STATE_QUEUEDEXPORTPIN = (1<<17);
static const int STATE_TRACKEDBYOFT = (1<<18); // tracked by open file table
static const int STATE_DELAYEDEXPORTPIN = (1<<19);
static const int STATE_DISTEPHEMERALPIN = (1<<20);
static const int STATE_RANDEPHEMERALPIN = (1<<21);
static const int STATE_CLIENTWRITEABLE = (1<<22);
// orphan inode needs notification of releasing reference
static const int STATE_ORPHAN = STATE_NOTIFYREF;
static const int MASK_STATE_EXPORTED =
(STATE_DIRTY|STATE_NEEDSRECOVER|STATE_DIRTYPARENT|STATE_DIRTYPOOL|
STATE_DISTEPHEMERALPIN|STATE_RANDEPHEMERALPIN);
static const int MASK_STATE_EXPORT_KEPT =
(STATE_FROZEN|STATE_AMBIGUOUSAUTH|STATE_EXPORTINGCAPS|
STATE_QUEUEDEXPORTPIN|STATE_TRACKEDBYOFT|STATE_DELAYEDEXPORTPIN|
STATE_DISTEPHEMERALPIN|STATE_RANDEPHEMERALPIN);
/* These are for "permanent" state markers that are passed around between
* MDS. Nothing protects/updates it like a typical MDS lock.
*
* Currently, we just use this for REPLICATED inodes. The reason we need to
* replicate the random epin state is because the directory inode is still
* under the authority of the parent subtree. So it's not exported normally
* and we can't pass around the state that way. The importer of the dirfrags
* still needs to know that the inode is random pinned though otherwise it
* doesn't know that the dirfrags are pinned.
*/
static const int MASK_STATE_REPLICATED = STATE_RANDEPHEMERALPIN;
// -- waiters --
static const uint64_t WAIT_DIR = (1<<0);
static const uint64_t WAIT_FROZEN = (1<<1);
static const uint64_t WAIT_TRUNC = (1<<2);
static const uint64_t WAIT_FLOCK = (1<<3);
static const uint64_t WAIT_UNLINK = (1<<4);
static const uint64_t WAIT_ANY_MASK = (uint64_t)(-1);
// misc
static const unsigned EXPORT_NONCE = 1; // nonce given to replicas created by export
// ---------------------------
CInode() = delete;
CInode(MDCache *c, bool auth=true, snapid_t f=2, snapid_t l=CEPH_NOSNAP);
  ~CInode() override {
    // Tear down cached sub-structures; the asserts verify that all
    // external accounting was released before destruction.
    close_dirfrags();
    close_snaprealm();
    clear_file_locks();
    ceph_assert(num_projected_srnodes == 0);
    ceph_assert(num_caps_notable == 0);
    ceph_assert(num_subtree_roots == 0);
    ceph_assert(num_exporting_dirs == 0);
    ceph_assert(batch_ops.empty());
  }
std::map<int, std::unique_ptr<BatchOp>> batch_ops;
std::string_view pin_name(int p) const override;
std::ostream& print_db_line_prefix(std::ostream& out) override;
  // Lazily create and return the scrub bookkeeping struct.
  const scrub_info_t *scrub_info() const {
    if (!scrub_infop)
      scrub_info_create();
    return scrub_infop.get();
  }
  // Current scrub header, or a shared null ref when no scrub state exists.
  const ScrubHeaderRef& get_scrub_header() {
    static const ScrubHeaderRef nullref;
    return scrub_infop ? scrub_infop->header : nullref;
  }
bool scrub_is_in_progress() const {
return (scrub_infop && scrub_infop->scrub_in_progress);
}
/**
* Start scrubbing on this inode. That could be very short if it's
* a file, or take a long time if we're recursively scrubbing a directory.
* @pre It is not currently scrubbing
* @post it has set up internal scrubbing state
* @param scrub_version What version are we scrubbing at (usually, parent
* directory's get_projected_version())
*/
void scrub_initialize(ScrubHeaderRef& header);
/**
* Call this once the scrub has been completed, whether it's a full
* recursive scrub on a directory or simply the data on a file (or
* anything in between).
* @param c An out param which is filled in with a Context* that must
* be complete()ed.
*/
void scrub_finished();
void scrub_aborted();
  // Fragments queued for scrubbing; scrub state must already exist.
  fragset_t& scrub_queued_frags() {
    ceph_assert(scrub_infop);
    return scrub_infop->queued_frags;
  }
bool is_multiversion() const {
return snaprealm || // other snaprealms will link to me
get_inode()->is_dir() || // links to me in other snaps
get_inode()->nlink > 1 || // there are remote links, possibly snapped, that will need to find me
is_any_old_inodes(); // once multiversion, always multiversion. until old_inodes gets cleaned out.
}
snapid_t get_oldest_snap();
  // Is this inode carrying unpropagated rstat changes?
  bool is_dirty_rstat() {
    return state_test(STATE_DIRTYRSTAT);
  }
void mark_dirty_rstat();
void clear_dirty_rstat();
//bool hack_accessed = false;
//utime_t hack_load_stamp;
/**
* Projection methods, used to store inode changes until they have been journaled,
* at which point they are popped.
* Usage:
   * Call project_inode as needed. If you're changing xattrs or sr_t, pass
   * xattr=true and/or snap=true, then modify the returned xattrs/snapnode
   * member as needed. (Dirty exception: project_past_snaprealm_parent
   * allows you to project the snapnode after doing project_inode, i.e.
   * you don't need to pass snap=true.)
*
* Then, journal. Once journaling is done, pop_and_dirty_projected_inode.
* This function will take care of the inode itself, the xattrs, and the snaprealm.
*/
  // One pending projection of this inode: the new inode body, optionally
  // a new xattr map, and optionally a new snapnode (the snapnode pointer
  // defaults to nullptr when the snaprealm is not projected; UNDEF_SRNODE
  // is a sentinel defined out of line — confirm semantics in CInode.cc).
  struct projected_inode {
    static sr_t* const UNDEF_SRNODE;
    inode_ptr const inode;
    xattr_map_ptr const xattrs;
    sr_t* const snapnode;
    projected_inode() = delete;
    explicit projected_inode(inode_ptr&& i, xattr_map_ptr&& x, sr_t *s=nullptr) :
      inode(std::move(i)), xattrs(std::move(x)), snapnode(s) {}
  };
projected_inode project_inode(const MutationRef& mut,
bool xattr = false, bool snap = false);
void pop_and_dirty_projected_inode(LogSegment *ls, const MutationRef& mut);
version_t get_projected_version() const {
if (projected_nodes.empty())
return get_inode()->version;
else
return projected_nodes.back().inode->version;
}
  // Are there any pending (not yet applied) projections?
  bool is_projected() const {
    return !projected_nodes.empty();
  }
const inode_const_ptr& get_projected_inode() const {
if (projected_nodes.empty())
return get_inode();
else
return projected_nodes.back().inode;
}
  // inode should have already been projected in caller's context
  // Mutable access to the newest projection (asserts one exists).
  mempool_inode* _get_projected_inode() {
    ceph_assert(!projected_nodes.empty());
    return const_cast<mempool_inode*>(projected_nodes.back().inode.get());
  }
const inode_const_ptr& get_previous_projected_inode() const {
ceph_assert(!projected_nodes.empty());
auto it = projected_nodes.rbegin();
++it;
if (it != projected_nodes.rend())
return it->inode;
else
return get_inode();
}
const xattr_map_const_ptr& get_projected_xattrs() {
if (projected_nodes.empty())
return xattrs;
else
return projected_nodes.back().xattrs;
}
const xattr_map_const_ptr& get_previous_projected_xattrs() {
ceph_assert(!projected_nodes.empty());
auto it = projected_nodes.rbegin();
++it;
if (it != projected_nodes.rend())
return it->xattrs;
else
return xattrs;
}
  sr_t *prepare_new_srnode(snapid_t snapid);
  void project_snaprealm(sr_t *new_srnode);
  // Convenience: build a new srnode for 'snapid' and project it in one step.
  sr_t *project_snaprealm(snapid_t snapid=0) {
    sr_t* new_srnode = prepare_new_srnode(snapid);
    project_snaprealm(new_srnode);
    return new_srnode;
  }
  const sr_t *get_projected_srnode() const;
void mark_snaprealm_global(sr_t *new_srnode);
void clear_snaprealm_global(sr_t *new_srnode);
bool is_projected_snaprealm_global() const;
void record_snaprealm_past_parent(sr_t *new_snap, SnapRealm *newparent);
void record_snaprealm_parent_dentry(sr_t *new_snap, SnapRealm *newparent,
CDentry *dn, bool primary_dn);
void project_snaprealm_past_parent(SnapRealm *newparent);
void early_pop_projected_snaprealm();
const mempool_old_inode& cow_old_inode(snapid_t follows, bool cow_head);
void split_old_inode(snapid_t snap);
snapid_t pick_old_inode(snapid_t last) const;
void pre_cow_old_inode();
bool has_snap_data(snapid_t s);
void purge_stale_snap_data(const std::set<snapid_t>& snaps);
size_t get_num_dirfrags() const { return dirfrags.size(); }
CDir* get_dirfrag(frag_t fg) {
auto pi = dirfrags.find(fg);
if (pi != dirfrags.end()) {
//assert(g_conf()->debug_mds < 2 || dirfragtree.is_leaf(fg)); // performance hack FIXME
return pi->second;
}
return NULL;
}
std::pair<bool, std::vector<CDir*>> get_dirfrags_under(frag_t fg);
CDir* get_approx_dirfrag(frag_t fg);
  // Append all open dirfrags to 'ls'; reserves capacity up front when the
  // container is a std::vector.
  template<typename Container>
  void get_dirfrags(Container& ls) const {
    // all dirfrags
    if constexpr (std::is_same_v<Container, std::vector<CDir*>>)
      ls.reserve(ls.size() + dirfrags.size());
    for (const auto &p : dirfrags)
      ls.push_back(p.second);
  }
  // Convenience overload returning a freshly built vector.
  auto get_dirfrags() const {
    std::vector<CDir*> result;
    get_dirfrags(result);
    return result;
  }
  void get_nested_dirfrags(std::vector<CDir*>&) const;
  // Convenience wrapper returning the nested dirfrags by value.
  std::vector<CDir*> get_nested_dirfrags() const {
    std::vector<CDir*> v;
    get_nested_dirfrags(v);
    return v;
  }
  void get_subtree_dirfrags(std::vector<CDir*>&) const;
  // Convenience wrapper returning the subtree dirfrags by value.
  std::vector<CDir*> get_subtree_dirfrags() const {
    std::vector<CDir*> v;
    get_subtree_dirfrags(v);
    return v;
  }
  // Number of this inode's dirfrags that are subtree roots.
  int get_num_subtree_roots() const {
    return num_subtree_roots;
  }
CDir *get_or_open_dirfrag(MDCache *mdcache, frag_t fg);
CDir *add_dirfrag(CDir *dir);
void close_dirfrag(frag_t fg);
void close_dirfrags();
bool has_subtree_root_dirfrag(int auth=-1);
bool has_subtree_or_exporting_dirfrag();
void force_dirfrags();
void verify_dirfrags();
void get_stickydirs();
void put_stickydirs();
void add_need_snapflush(CInode *snapin, snapid_t snapid, client_t client);
void remove_need_snapflush(CInode *snapin, snapid_t snapid, client_t client);
std::pair<bool,bool> split_need_snapflush(CInode *cowin, CInode *in);
// -- accessors --
inodeno_t ino() const { return get_inode()->ino; }
vinodeno_t vino() const { return vinodeno_t(ino(), last); }
int d_type() const { return IFTODT(get_inode()->mode); }
bool is_root() const { return ino() == CEPH_INO_ROOT; }
bool is_stray() const { return MDS_INO_IS_STRAY(ino()); }
mds_rank_t get_stray_owner() const {
return (mds_rank_t)MDS_INO_STRAY_OWNER(ino());
}
bool is_mdsdir() const { return MDS_INO_IS_MDSDIR(ino()); }
bool is_base() const { return MDS_INO_IS_BASE(ino()); }
bool is_system() const { return ino() < MDS_INO_SYSTEM_BASE; }
bool is_lost_and_found() const { return ino() == CEPH_INO_LOST_AND_FOUND; }
bool is_normal() const { return !(is_base() || is_system() || is_stray()); }
bool is_file() const { return get_inode()->is_file(); }
bool is_symlink() const { return get_inode()->is_symlink(); }
bool is_dir() const { return get_inode()->is_dir(); }
bool is_head() const { return last == CEPH_NOSNAP; }
// note: this overloads MDSCacheObject
bool is_ambiguous_auth() const {
return state_test(STATE_AMBIGUOUSAUTH) ||
MDSCacheObject::is_ambiguous_auth();
}
void set_ambiguous_auth() {
state_set(STATE_AMBIGUOUSAUTH);
}
void clear_ambiguous_auth(MDSContext::vec& finished);
void clear_ambiguous_auth();
  // Shared read-only inode pointer (RCU-style; see InodeStoreBase).
  const inode_const_ptr& get_inode() const {
    return inode;
  }
  // only used for updating newly allocated CInode
  // Mutable access; lazily replaces the shared empty sentinel.
  mempool_inode* _get_inode() {
    if (inode == empty_inode)
      reset_inode(allocate_inode());
    return const_cast<mempool_inode*>(inode.get());
  }
const xattr_map_const_ptr& get_xattrs() const { return xattrs; }
bool is_any_old_inodes() const { return old_inodes && !old_inodes->empty(); }
const old_inode_map_const_ptr& get_old_inodes() const { return old_inodes; }
CDentry* get_parent_dn() { return parent; }
const CDentry* get_parent_dn() const { return parent; }
CDentry* get_projected_parent_dn() { return !projected_parent.empty() ? projected_parent.back() : parent; }
const CDentry* get_projected_parent_dn() const { return !projected_parent.empty() ? projected_parent.back() : parent; }
const CDentry* get_oldest_parent_dn() const {
if (parent)
return parent;
return !projected_parent.empty() ? projected_parent.front(): NULL;
}
CDir *get_parent_dir();
const CDir *get_projected_parent_dir() const;
CDir *get_projected_parent_dir();
CInode *get_parent_inode();
bool is_lt(const MDSCacheObject *r) const override {
const CInode *o = static_cast<const CInode*>(r);
return ino() < o->ino() ||
(ino() == o->ino() && last < o->last);
}
// -- misc --
bool is_ancestor_of(const CInode *other) const;
bool is_projected_ancestor_of(const CInode *other) const;
void make_path_string(std::string& s, bool projected=false, const CDentry *use_parent=NULL) const;
void make_path(filepath& s, bool projected=false) const;
void name_stray_dentry(std::string& dname);
// -- dirtyness --
version_t get_version() const { return get_inode()->version; }
version_t pre_dirty();
void _mark_dirty(LogSegment *ls);
void mark_dirty(LogSegment *ls);
void mark_clean();
void store(MDSContext *fin);
void _stored(int r, version_t cv, Context *fin);
/**
* Flush a CInode to disk. This includes the backtrace, the parent
* directory's link, and the Inode object itself (if a base directory).
* @pre is_auth() on both the inode and its containing directory
* @pre can_auth_pin()
* @param fin The Context to call when the flush is completed.
*/
void flush(MDSContext *fin);
void fetch(MDSContext *fin);
void _fetched(ceph::buffer::list& bl, ceph::buffer::list& bl2, Context *fin);
void _commit_ops(int r, C_GatherBuilder &gather_bld,
std::vector<CInodeCommitOperation> &ops_vec,
inode_backtrace_t &bt);
void build_backtrace(int64_t pool, inode_backtrace_t& bt);
void _store_backtrace(std::vector<CInodeCommitOperation> &ops_vec,
inode_backtrace_t &bt, int op_prio);
void store_backtrace(CInodeCommitOperations &op, int op_prio);
void store_backtrace(MDSContext *fin, int op_prio=-1);
void _stored_backtrace(int r, version_t v, Context *fin);
void fetch_backtrace(Context *fin, ceph::buffer::list *backtrace);
void mark_dirty_parent(LogSegment *ls, bool dirty_pool=false);
void clear_dirty_parent();
void verify_diri_backtrace(ceph::buffer::list &bl, int err);
bool is_dirty_parent() { return state_test(STATE_DIRTYPARENT); }
bool is_dirty_pool() { return state_test(STATE_DIRTYPOOL); }
void encode_snap_blob(ceph::buffer::list &bl);
void decode_snap_blob(const ceph::buffer::list &bl);
void encode_store(ceph::buffer::list& bl, uint64_t features);
void decode_store(ceph::buffer::list::const_iterator& bl);
void add_dir_waiter(frag_t fg, MDSContext *c);
void take_dir_waiting(frag_t fg, MDSContext::vec& ls);
bool is_waiting_for_dir(frag_t fg) {
return waiting_on_dir.count(fg);
}
void add_waiter(uint64_t tag, MDSContext *c) override;
void take_waiting(uint64_t tag, MDSContext::vec& ls) override;
// -- encode/decode helpers --
void _encode_base(ceph::buffer::list& bl, uint64_t features);
void _decode_base(ceph::buffer::list::const_iterator& p);
void _encode_locks_full(ceph::buffer::list& bl);
void _decode_locks_full(ceph::buffer::list::const_iterator& p);
void _encode_locks_state_for_replica(ceph::buffer::list& bl, bool need_recover);
void _encode_locks_state_for_rejoin(ceph::buffer::list& bl, int rep);
void _decode_locks_state_for_replica(ceph::buffer::list::const_iterator& p, bool is_new);
void _decode_locks_rejoin(ceph::buffer::list::const_iterator& p, MDSContext::vec& waiters,
std::list<SimpleLock*>& eval_locks, bool survivor);
// -- import/export --
void encode_export(ceph::buffer::list& bl);
void finish_export();
  // Roll back pinning taken during a cap export: drop the temporary
  // export pin and the exporting-caps state/pin.
  void abort_export() {
    put(PIN_TEMPEXPORTING);
    ceph_assert(state_test(STATE_EXPORTINGCAPS));
    state_clear(STATE_EXPORTINGCAPS);
    put(PIN_EXPORTINGCAPS);
  }
void decode_import(ceph::buffer::list::const_iterator& p, LogSegment *ls);
// for giving to clients
int encode_inodestat(ceph::buffer::list& bl, Session *session, SnapRealm *realm,
snapid_t snapid=CEPH_NOSNAP, unsigned max_bytes=0,
int getattr_wants=0);
void encode_cap_message(const ceph::ref_t<MClientCaps> &m, Capability *cap);
SimpleLock* get_lock(int type) override;
void set_object_info(MDSCacheObjectInfo &info) override;
void encode_lock_state(int type, ceph::buffer::list& bl) override;
void decode_lock_state(int type, const ceph::buffer::list& bl) override;
void encode_lock_iauth(ceph::buffer::list& bl);
void decode_lock_iauth(ceph::buffer::list::const_iterator& p);
void encode_lock_ilink(ceph::buffer::list& bl);
void decode_lock_ilink(ceph::buffer::list::const_iterator& p);
void encode_lock_idft(ceph::buffer::list& bl);
void decode_lock_idft(ceph::buffer::list::const_iterator& p);
void encode_lock_ifile(ceph::buffer::list& bl);
void decode_lock_ifile(ceph::buffer::list::const_iterator& p);
void encode_lock_inest(ceph::buffer::list& bl);
void decode_lock_inest(ceph::buffer::list::const_iterator& p);
void encode_lock_ixattr(ceph::buffer::list& bl);
void decode_lock_ixattr(ceph::buffer::list::const_iterator& p);
void encode_lock_isnap(ceph::buffer::list& bl);
void decode_lock_isnap(ceph::buffer::list::const_iterator& p);
void encode_lock_iflock(ceph::buffer::list& bl);
void decode_lock_iflock(ceph::buffer::list::const_iterator& p);
void encode_lock_ipolicy(ceph::buffer::list& bl);
void decode_lock_ipolicy(ceph::buffer::list::const_iterator& p);
void _finish_frag_update(CDir *dir, MutationRef& mut);
void clear_dirty_scattered(int type) override;
bool is_dirty_scattered();
void clear_scatter_dirty(); // on rejoin ack
void start_scatter(ScatterLock *lock);
void finish_scatter_update(ScatterLock *lock, CDir *dir,
version_t inode_version, version_t dir_accounted_version);
void finish_scatter_gather_update(int type, MutationRef& mut);
void finish_scatter_gather_update_accounted(int type, EMetaBlob *metablob);
// -- snap --
void open_snaprealm(bool no_split=false);
void close_snaprealm(bool no_join=false);
SnapRealm *find_snaprealm() const;
void encode_snap(ceph::buffer::list& bl);
void decode_snap(ceph::buffer::list::const_iterator& p);
client_t get_loner() const { return loner_cap; }
client_t get_wanted_loner() const { return want_loner_cap; }
// this is the loner state our locks should aim for
client_t get_target_loner() const {
if (loner_cap == want_loner_cap)
return loner_cap;
else
return -1;
}
client_t calc_ideal_loner();
void set_loner_cap(client_t l);
bool choose_ideal_loner();
bool try_set_loner();
bool try_drop_loner();
// choose new lock state during recovery, based on issued caps
void choose_lock_state(SimpleLock *lock, int allissued);
void choose_lock_states(int dirty_caps);
int count_nonstale_caps();
bool multiple_nonstale_caps();
bool is_any_caps() { return !client_caps.empty(); }
bool is_any_nonstale_caps() { return count_nonstale_caps(); }
const mempool::mds_co::compact_map<int32_t,int32_t>& get_mds_caps_wanted() const { return mds_caps_wanted; }
void set_mds_caps_wanted(mempool::mds_co::compact_map<int32_t,int32_t>& m);
void set_mds_caps_wanted(mds_rank_t mds, int32_t wanted);
const mempool_cap_map& get_client_caps() const { return client_caps; }
Capability *get_client_cap(client_t client) {
auto client_caps_entry = client_caps.find(client);
if (client_caps_entry != client_caps.end())
return &client_caps_entry->second;
return 0;
}
int get_client_cap_pending(client_t client) const {
auto client_caps_entry = client_caps.find(client);
if (client_caps_entry != client_caps.end()) {
return client_caps_entry->second.pending();
} else {
return 0;
}
}
int get_num_caps_notable() const { return num_caps_notable; }
void adjust_num_caps_notable(int d);
Capability *add_client_cap(client_t client, Session *session,
SnapRealm *conrealm=nullptr, bool new_inode=false);
void remove_client_cap(client_t client);
void move_to_realm(SnapRealm *realm);
Capability *reconnect_cap(client_t client, const cap_reconnect_t& icr, Session *session);
void clear_client_caps_after_export();
void export_client_caps(std::map<client_t,Capability::Export>& cl);
// caps allowed
int get_caps_liked() const;
int get_caps_allowed_ever() const;
int get_caps_allowed_by_type(int type) const;
int get_caps_careful() const;
int get_xlocker_mask(client_t client) const;
int get_caps_allowed_for_client(Session *s, Capability *cap,
const mempool_inode *file_i) const;
// caps issued, wanted
int get_caps_issued(int *ploner = 0, int *pother = 0, int *pxlocker = 0,
int shift = 0, int mask = -1);
bool is_any_caps_wanted() const;
int get_caps_wanted(int *ploner = 0, int *pother = 0, int shift = 0, int mask = -1) const;
bool issued_caps_need_gather(SimpleLock *lock);
// client writeable
bool is_clientwriteable() const { return state & STATE_CLIENTWRITEABLE; }
void mark_clientwriteable();
void clear_clientwriteable();
// -- authority --
mds_authority_t authority() const override;
// -- auth pins --
bool can_auth_pin(int *err_ret=nullptr) const override;
void auth_pin(void *by) override;
void auth_unpin(void *by) override;
// -- freeze --
bool is_freezing_inode() const { return state_test(STATE_FREEZING); }
bool is_frozen_inode() const { return state_test(STATE_FROZEN); }
bool is_frozen_auth_pin() const { return state_test(STATE_FROZENAUTHPIN); }
bool is_frozen() const override;
bool is_frozen_dir() const;
bool is_freezing() const override;
/* Freeze the inode. auth_pin_allowance lets the caller account for any
* auth_pins it is itself holding/responsible for. */
bool freeze_inode(int auth_pin_allowance=0);
void unfreeze_inode(MDSContext::vec& finished);
void unfreeze_inode();
void freeze_auth_pin();
void unfreeze_auth_pin();
  // -- reference counting --
  // Diagnostic hook for an unbalanced put(): log the pin and current ref
  // count, then assert the pin was actually held.
  void bad_put(int by) override {
    generic_dout(0) << " bad put " << *this << " by " << by << " " << pin_name(by) << " was " << ref
#ifdef MDS_REF_SET
                    << " (" << ref_map << ")"
#endif
                    << dendl;
#ifdef MDS_REF_SET
    ceph_assert(ref_map[by] > 0);
#endif
    ceph_assert(ref > 0);
  }
  // Diagnostic hook for a suspicious get(): log the pin and current ref
  // count; asserts the per-pin count is non-negative.
  void bad_get(int by) override {
    generic_dout(0) << " bad get " << *this << " by " << by << " " << pin_name(by) << " was " << ref
#ifdef MDS_REF_SET
                    << " (" << ref_map << ")"
#endif
                    << dendl;
#ifdef MDS_REF_SET
    ceph_assert(ref_map[by] >= 0);
#endif
  }
void first_get() override;
void last_put() override;
void _put() override;
  // -- hierarchy stuff --
  // Install the primary parent dentry; normally only allowed when unset
  // (the config hack permits loading otherwise-invalid metadata).
  void set_primary_parent(CDentry *p) {
    ceph_assert(parent == 0 ||
		g_conf().get_val<bool>("mds_hack_allow_loading_invalid_metadata"));
    parent = p;
  }
  void remove_primary_parent(CDentry *dn) {
    ceph_assert(dn == parent);
    parent = 0;
  }
  void add_remote_parent(CDentry *p);
  void remove_remote_parent(CDentry *p);
  int num_remote_parents() {
    return remote_parents.size();
  }
  // Projected parents track in-progress rename/(un)link operations.
  void push_projected_parent(CDentry *dn) {
    projected_parent.push_back(dn);
  }
  // Commit the oldest projected parent as the real parent.
  void pop_projected_parent() {
    ceph_assert(projected_parent.size());
    parent = projected_parent.front();
    projected_parent.pop_front();
  }
  bool is_parent_projected() const {
    return !projected_parent.empty();
  }
mds_rank_t get_export_pin(bool inherit=true) const;
void check_pin_policy(mds_rank_t target);
void set_export_pin(mds_rank_t rank);
void queue_export_pin(mds_rank_t target);
void maybe_export_pin(bool update=false);
void set_ephemeral_pin(bool dist, bool rand);
void clear_ephemeral_pin(bool dist, bool rand);
void setxattr_ephemeral_dist(bool val=false);
bool is_ephemeral_dist() const {
return state_test(STATE_DISTEPHEMERALPIN);
}
double get_ephemeral_rand() const;
void maybe_ephemeral_rand(double threshold=-1.0);
void setxattr_ephemeral_rand(double prob=0.0);
bool is_ephemeral_rand() const {
return state_test(STATE_RANDEPHEMERALPIN);
}
bool has_ephemeral_policy() const {
return get_inode()->export_ephemeral_random_pin > 0.0 ||
get_inode()->export_ephemeral_distributed_pin;
}
bool is_ephemerally_pinned() const {
return state_test(STATE_DISTEPHEMERALPIN) ||
state_test(STATE_RANDEPHEMERALPIN);
}
void print(std::ostream& out) override;
void dump(ceph::Formatter *f, int flags = DUMP_DEFAULT) const;
/**
* Validate that the on-disk state of an inode matches what
* we expect from our memory state. Currently this checks that:
* 1) The backtrace associated with the file data exists and is correct
* 2) For directories, the actual inode metadata matches our memory state,
* 3) For directories, the rstats match
*
* @param results A freshly-created validated_data struct, with values set
* as described in the struct documentation.
* @param mdr The request to be responeded upon the completion of the
* validation (or NULL)
* @param fin Context to call back on completion (or NULL)
*/
void validate_disk_state(validated_data *results,
MDSContext *fin);
static void dump_validation_results(const validated_data& results,
ceph::Formatter *f);
//bool hack_accessed = false;
//utime_t hack_load_stamp;
MDCache *mdcache;
SnapRealm *snaprealm = nullptr;
SnapRealm *containing_realm = nullptr;
snapid_t first, last;
mempool::mds_co::compact_set<snapid_t> dirty_old_rstats;
uint64_t last_journaled = 0; // log offset for the last time i was journaled
//loff_t last_open_journaled; // log offset for the last journaled EOpen
utime_t last_dirstat_prop;
// list item node for when we have unpropagated rstat data
elist<CInode*>::item dirty_rstat_item;
mempool::mds_co::set<client_t> client_snap_caps;
mempool::mds_co::compact_map<snapid_t, mempool::mds_co::set<client_t> > client_need_snapflush;
// LogSegment lists i (may) belong to
elist<CInode*>::item item_dirty;
elist<CInode*>::item item_caps;
elist<CInode*>::item item_open_file;
elist<CInode*>::item item_dirty_parent;
elist<CInode*>::item item_dirty_dirfrag_dir;
elist<CInode*>::item item_dirty_dirfrag_nest;
elist<CInode*>::item item_dirty_dirfrag_dirfragtree;
// also update RecoveryQueue::RecoveryQueue() if you change this
elist<CInode*>::item& item_recover_queue = item_dirty_dirfrag_dir;
elist<CInode*>::item& item_recover_queue_front = item_dirty_dirfrag_nest;
inode_load_vec_t pop;
elist<CInode*>::item item_pop_lru;
// -- locks --
static LockType versionlock_type;
static LockType authlock_type;
static LockType linklock_type;
static LockType dirfragtreelock_type;
static LockType filelock_type;
static LockType xattrlock_type;
static LockType snaplock_type;
static LockType nestlock_type;
static LockType flocklock_type;
static LockType policylock_type;
// FIXME not part of mempool
LocalLockC versionlock;
SimpleLock authlock;
SimpleLock linklock;
ScatterLock dirfragtreelock;
ScatterLock filelock;
SimpleLock xattrlock;
SimpleLock snaplock;
ScatterLock nestlock;
SimpleLock flocklock;
SimpleLock policylock;
// -- caps -- (new)
// client caps
client_t loner_cap = -1, want_loner_cap = -1;
protected:
ceph_lock_state_t *get_fcntl_lock_state() {
if (!fcntl_locks)
fcntl_locks = new ceph_lock_state_t(g_ceph_context, CEPH_LOCK_FCNTL);
return fcntl_locks;
}
void clear_fcntl_lock_state() {
delete fcntl_locks;
fcntl_locks = NULL;
}
ceph_lock_state_t *get_flock_lock_state() {
if (!flock_locks)
flock_locks = new ceph_lock_state_t(g_ceph_context, CEPH_LOCK_FLOCK);
return flock_locks;
}
void clear_flock_lock_state() {
delete flock_locks;
flock_locks = NULL;
}
  // Drop both kinds of file-lock state (fcntl and flock) at once.
  void clear_file_locks() {
    clear_fcntl_lock_state();
    clear_flock_lock_state();
  }
  // Serialize both lock tables. Each table is preceded by a bool so the
  // decoder knows whether a table follows; an unallocated or empty table
  // is encoded identically (flag=false, no payload). Field order must
  // stay in sync with _decode_file_locks().
  void _encode_file_locks(ceph::buffer::list& bl) const {
    using ceph::encode;
    bool has_fcntl_locks = fcntl_locks && !fcntl_locks->empty();
    encode(has_fcntl_locks, bl);
    if (has_fcntl_locks)
      encode(*fcntl_locks, bl);
    bool has_flock_locks = flock_locks && !flock_locks->empty();
    encode(has_flock_locks, bl);
    if (has_flock_locks)
      encode(*flock_locks, bl);
  }
  // Inverse of _encode_file_locks(): for each table, read the presence
  // flag, then either decode into a (lazily created) table or clear any
  // stale in-memory state. Field order must match the encoder exactly.
  void _decode_file_locks(ceph::buffer::list::const_iterator& p) {
    using ceph::decode;
    bool has_fcntl_locks;
    decode(has_fcntl_locks, p);
    if (has_fcntl_locks)
      decode(*get_fcntl_lock_state(), p);  // allocates the table if needed
    else
      clear_fcntl_lock_state();
    bool has_flock_locks;
    decode(has_flock_locks, p);
    if (has_flock_locks)
      decode(*get_flock_lock_state(), p);
    else
      clear_flock_lock_state();
  }
/**
* Return the pool ID where we currently write backtraces for
* this inode (in addition to inode.old_pools)
*
* @returns a pool ID >=0
*/
int64_t get_backtrace_pool() const;
// parent dentries in cache
CDentry *parent = nullptr; // primary link
mempool::mds_co::compact_set<CDentry*> remote_parents; // if hard linked
mempool::mds_co::list<CDentry*> projected_parent; // for in-progress rename, (un)link, etc.
mds_authority_t inode_auth = CDIR_AUTH_DEFAULT;
// -- distributed state --
// file capabilities
mempool_cap_map client_caps; // client -> caps
mempool::mds_co::compact_map<int32_t, int32_t> mds_caps_wanted; // [auth] mds -> caps wanted
int replica_caps_wanted = 0; // [replica] what i've requested from auth
int num_caps_notable = 0;
ceph_lock_state_t *fcntl_locks = nullptr;
ceph_lock_state_t *flock_locks = nullptr;
// -- waiting --
mempool::mds_co::compact_map<frag_t, MDSContext::vec > waiting_on_dir;
// -- freezing inode --
int auth_pin_freeze_allowance = 0;
elist<CInode*>::item item_freezing_inode;
void maybe_finish_freeze_inode();
private:
friend class ValidationContinuation;
/**
* Create a scrub_info_t struct for the scrub_infop pointer.
*/
void scrub_info_create() const;
/**
* Delete the scrub_info_t struct if it's not got any useful data
*/
void scrub_maybe_delete_info();
void pop_projected_snaprealm(sr_t *next_snaprealm, bool early);
bool _validate_disk_state(class ValidationContinuation *c,
int rval, int stage);
struct projected_const_node {
inode_const_ptr inode;
xattr_map_const_ptr xattrs;
sr_t *snapnode;
projected_const_node() = delete;
projected_const_node(projected_const_node&&) = default;
explicit projected_const_node(const inode_const_ptr& i, const xattr_map_const_ptr& x, sr_t *s) :
inode(i), xattrs(x), snapnode(s) {}
};
mempool::mds_co::list<projected_const_node> projected_nodes; // projected values (only defined while dirty)
size_t num_projected_srnodes = 0;
// -- cache infrastructure --
mempool::mds_co::compact_map<frag_t,CDir*> dirfrags; // cached dir fragments under this Inode
//for the purpose of quickly determining whether there's a subtree root or exporting dir
int num_subtree_roots = 0;
int num_exporting_dirs = 0;
int stickydir_ref = 0;
std::unique_ptr<scrub_info_t> scrub_infop;
/** @} Scrubbing and fsck */
};
std::ostream& operator<<(std::ostream& out, const CInode& in);
extern cinode_lock_info_t cinode_lock_info[];
extern int num_cinode_locks;
#undef dout_context
#endif
| 43,696 | 33.846093 | 121 | h |
null | ceph-main/src/mds/Capability.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "Capability.h"
#include "CInode.h"
#include "SessionMap.h"
#include "common/Formatter.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "Capability "
/*
* Capability::Export
*/
// Serialize cap state for export to another MDS rank.
// Encoding version 3 (compat 2); v3 added the trailing `state` field.
// Field order is the wire format — do not reorder.
void Capability::Export::encode(ceph::buffer::list &bl) const
{
  ENCODE_START(3, 2, bl);
  encode(cap_id, bl);
  encode(wanted, bl);
  encode(issued, bl);
  encode(pending, bl);
  encode(client_follows, bl);
  encode(seq, bl);
  encode(mseq, bl);
  encode(last_issue_stamp, bl);
  encode(state, bl);
  ENCODE_FINISH(bl);
}
// Deserialize an exported cap. Mirrors encode(); `state` only exists in
// v3+ streams, so older peers leave it at its default.
void Capability::Export::decode(ceph::buffer::list::const_iterator &p)
{
  DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, p);
  decode(cap_id, p);
  decode(wanted, p);
  decode(issued, p);
  decode(pending, p);
  decode(client_follows, p);
  decode(seq, p);
  decode(mseq, p);
  decode(last_issue_stamp, p);
  if (struct_v >= 3)
    decode(state, p);
  DECODE_FINISH(p);
}
// Emit a human-readable view of an exported cap (caps rendered via
// ccap_string). Note `state` is not dumped here.
void Capability::Export::dump(ceph::Formatter *f) const
{
  f->dump_unsigned("cap_id", cap_id);
  f->dump_stream("wanted") << ccap_string(wanted);
  f->dump_stream("issued") << ccap_string(issued);
  f->dump_stream("pending") << ccap_string(pending);
  f->dump_unsigned("client_follows", client_follows);
  f->dump_unsigned("seq", seq);
  f->dump_unsigned("migrate_seq", mseq);
  f->dump_stream("last_issue_stamp") << last_issue_stamp;
}
// Produce sample objects for encode/decode round-trip tests:
// one default-constructed, one with distinct non-default field values.
void Capability::Export::generate_test_instances(std::list<Capability::Export*>& ls)
{
  ls.push_back(new Export);
  ls.push_back(new Export);
  ls.back()->wanted = 1;
  ls.back()->issued = 2;
  ls.back()->pending = 3;
  ls.back()->client_follows = 4;
  ls.back()->mseq = 5;
  ls.back()->last_issue_stamp = utime_t(6, 7);
}
// Serialize the import-side acknowledgement of a cap migration (v1).
void Capability::Import::encode(ceph::buffer::list &bl) const
{
  ENCODE_START(1, 1, bl);
  encode(cap_id, bl);
  encode(issue_seq, bl);
  encode(mseq, bl);
  ENCODE_FINISH(bl);
}
// Deserialize an Import; mirrors encode() field order.
void Capability::Import::decode(ceph::buffer::list::const_iterator &bl)
{
  DECODE_START(1, bl);
  decode(cap_id, bl);
  decode(issue_seq, bl);
  decode(mseq, bl);
  DECODE_FINISH(bl);
}
// Emit a human-readable view of an Import.
void Capability::Import::dump(ceph::Formatter *f) const
{
  f->dump_unsigned("cap_id", cap_id);
  f->dump_unsigned("issue_seq", issue_seq);
  f->dump_unsigned("migrate_seq", mseq);
}
/*
* Capability::revoke_info
*/
// Serialize one pending-revocation record (v2, compat 2).
void Capability::revoke_info::encode(ceph::buffer::list& bl) const
{
  ENCODE_START(2, 2, bl)
  encode(before, bl);
  encode(seq, bl);
  encode(last_issue, bl);
  ENCODE_FINISH(bl);
}
// Deserialize a revoke_info; mirrors encode() field order.
void Capability::revoke_info::decode(ceph::buffer::list::const_iterator& bl)
{
  DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
  decode(before, bl);
  decode(seq, bl);
  decode(last_issue, bl);
  DECODE_FINISH(bl);
}
// Emit a human-readable view of a revoke_info.
void Capability::revoke_info::dump(ceph::Formatter *f) const
{
  f->dump_unsigned("before", before);
  f->dump_unsigned("seq", seq);
  f->dump_unsigned("last_issue", last_issue);
}
// Sample objects for encode/decode round-trip tests: one default,
// one with distinct non-default values.
void Capability::revoke_info::generate_test_instances(std::list<Capability::revoke_info*>& ls)
{
  ls.push_back(new revoke_info);
  ls.push_back(new revoke_info);
  ls.back()->before = 1;
  ls.back()->seq = 2;
  ls.back()->last_issue = 3;
}
/*
* Capability
*/
// Construct a capability for inode `i` held by `s` (either may be null,
// e.g. for decode targets). With a session, the cap is registered on the
// session's "bottom" (non-notable) list and inherits the session's cap
// generation; connection feature bits that the client lacks are latched
// into `state` so we never send it messages it cannot parse.
Capability::Capability(CInode *i, Session *s, uint64_t id) :
  item_session_caps(this), item_snaprealm_caps(this),
  item_revoking_caps(this), item_client_revoking_caps(this),
  lock_caches(member_offset(MDLockCache, item_cap_lock_cache)),
  inode(i), session(s), cap_id(id)
{
  if (session) {
    session->touch_cap_bottom(this);
    cap_gen = session->get_cap_gen();
    if (session->is_stale())
      --cap_gen; // not valid: force revalidation against the live gen
    auto& conn = session->get_connection();
    if (conn) {
      // record what this client's connection cannot understand
      if (!conn->has_feature(CEPH_FEATURE_MDS_INLINE_DATA))
        state |= STATE_NOINLINE;
      if (!conn->has_feature(CEPH_FEATURE_FS_FILE_LAYOUT_V2))
        state |= STATE_NOPOOLNS;
      if (!conn->has_feature(CEPH_FEATURE_MDS_QUOTA))
        state |= STATE_NOQUOTA;
    }
  } else {
    cap_gen = 0;
  }
}
// Return the owning client id, or -1 when the cap has no session
// (e.g. a freshly decoded or session-less capability).
client_t Capability::get_client() const
{
  if (session == nullptr) {
    return client_t(-1);
  }
  return session->get_client();
}
// Process a client ack carrying its retained caps at message `seq`.
// Updates _pending/_issued and the _revokes history accordingly and
// returns the bitmask of caps that were actually revoked by this ack.
int Capability::confirm_receipt(ceph_seq_t seq, unsigned caps) {
  int was_revoking = (_issued & ~_pending);
  if (seq == last_sent) {
    // ack matches our latest message: history is fully superseded
    _revokes.clear();
    _issued = caps;
    // don't add bits
    _pending &= caps;
    // if the revoking is not totally finished just add the
    // new revoking caps back.
    if (was_revoking && revoking()) {
      CInode *in = get_inode();
      dout(10) << "revocation is not totally finished yet on " << *in
               << ", the session " << *session << dendl;
      _revokes.emplace_back(_pending, last_sent, last_issue);
      calc_issued();
    }
  } else {
    // can i forget any revocations?
    while (!_revokes.empty() && _revokes.front().seq < seq)
      _revokes.pop_front();
    if (!_revokes.empty()) {
      if (_revokes.front().seq == seq)
        _revokes.begin()->before = caps;
      calc_issued();
    } else {
      // seq < last_sent
      _issued = caps | _pending;
    }
  }
  if (was_revoking && _issued == _pending) {
    // revocation fully complete: drop off the revoking lists
    item_revoking_caps.remove_myself();
    item_client_revoking_caps.remove_myself();
    maybe_clear_notable();
  }
  return was_revoking & ~_issued; // return revoked
}
// A cap is stale exactly when its session is stale; a session-less cap
// is never stale.
bool Capability::is_stale() const
{
  if (!session)
    return false;
  return session->is_stale();
}
// Valid when our cap generation matches the session's current one;
// a session-less cap is trivially valid.
bool Capability::is_valid() const
{
  if (!session)
    return true;
  return session->get_cap_gen() == cap_gen;
}
// Re-sync our cap generation with the session's if it has moved on.
// Only invoked usefully when a session exists (is_valid() returns true
// for session-less caps, so the update is skipped).
void Capability::revalidate()
{
  if (is_valid())
    return;
  cap_gen = session->get_cap_gen();
}
// Promote this cap to the session's "notable" list. Dereferences
// `session` unconditionally, so callers must hold a live session.
void Capability::mark_notable()
{
  state |= STATE_NOTABLE;
  session->touch_cap(this);
}
// Demote the cap back to the session's "bottom" list once nothing
// interesting remains: no in-flight revocation, not client-writeable,
// and no notable wanted bits. Asserts the cap was notable on entry.
void Capability::maybe_clear_notable()
{
  if ((_issued == _pending) &&
      !is_clientwriteable() &&
      !is_wanted_notable(_wanted)) {
    ceph_assert(is_notable());
    state &= ~STATE_NOTABLE;
    session->touch_cap_bottom(this);
  }
}
// Update the client's wanted mask, keeping the inode's notable-caps
// counter and this cap's notable status in step with transitions in or
// out of "wanted notable" (write/read bits).
void Capability::set_wanted(int w) {
  CInode *in = get_inode();
  if (in) {
    if (!is_wanted_notable(_wanted) && is_wanted_notable(w)) {
      // became notable
      in->adjust_num_caps_notable(1);
      if (!is_notable())
        mark_notable();
    } else if (is_wanted_notable(_wanted) && !is_wanted_notable(w)) {
      // no longer notable (other conditions may still keep it notable)
      in->adjust_num_caps_notable(-1);
      maybe_clear_notable();
    }
  }
  _wanted = w;
}
// Serialize the persistent cap state (v2, compat 2). _issued is not
// encoded; decode() reconstructs it from _pending and _revokes.
void Capability::encode(ceph::buffer::list& bl) const
{
  ENCODE_START(2, 2, bl)
  encode(last_sent, bl);
  encode(last_issue_stamp, bl);
  encode(_wanted, bl);
  encode(_pending, bl);
  encode(_revokes, bl);
  ENCODE_FINISH(bl);
}
// Deserialize persistent cap state; mirrors encode(). `wanted` goes
// through set_wanted() so notable bookkeeping stays consistent, and
// _issued is recomputed from _pending plus the revoke history.
void Capability::decode(ceph::buffer::list::const_iterator &bl)
{
  DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl)
  decode(last_sent, bl);
  decode(last_issue_stamp, bl);
  __u32 tmp_wanted;
  decode(tmp_wanted, bl);
  set_wanted(tmp_wanted);
  decode(_pending, bl);
  decode(_revokes, bl);
  DECODE_FINISH(bl);
  calc_issued();
}
// Emit a human-readable view of the cap, including its pending
// revocation history. `ino` is only present when the cap is attached
// to an inode.
void Capability::dump(ceph::Formatter *f) const
{
  if (inode)
    f->dump_stream("ino") << inode->ino();
  f->dump_unsigned("last_sent", last_sent);
  f->dump_stream("last_issue_stamp") << last_issue_stamp;
  f->dump_stream("wanted") << ccap_string(_wanted);
  f->dump_stream("pending") << ccap_string(_pending);
  f->open_array_section("revokes");
  for (const auto &r : _revokes) {
    f->open_object_section("revoke");
    r.dump(f);
    f->close_section();
  }
  f->close_section();
}
// Sample objects for encode/decode round-trip tests: one default,
// one populated with two revoke_info entries.
void Capability::generate_test_instances(std::list<Capability*>& ls)
{
  ls.push_back(new Capability);
  ls.push_back(new Capability);
  ls.back()->last_sent = 11;
  ls.back()->last_issue_stamp = utime_t(12, 13);
  ls.back()->set_wanted(14);
  ls.back()->_pending = 15;
  {
    auto &r = ls.back()->_revokes.emplace_back();
    r.before = 16;
    r.seq = 17;
    r.last_issue = 18;
  }
  {
    auto &r = ls.back()->_revokes.emplace_back();
    r.before = 19;
    r.seq = 20;
    r.last_issue = 21;
  }
}
MEMPOOL_DEFINE_OBJECT_FACTORY(Capability, co_cap, mds_co);
| 8,257 | 23.288235 | 94 | cc |
null | ceph-main/src/mds/Capability.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CAPABILITY_H
#define CEPH_CAPABILITY_H
#include "include/buffer_fwd.h"
#include "include/counter.h"
#include "include/mempool.h"
#include "include/xlist.h"
#include "include/elist.h"
#include "common/config.h"
#include "mdstypes.h"
/*
Capability protocol notes.
- two types of cap events from mds -> client:
- cap "issue" in a MClientReply, or an MClientCaps IMPORT op.
- cap "update" (revocation or grant) .. an MClientCaps message.
- if client has cap, the mds should have it too.
- if client has no dirty data, it can release it without waiting for an mds ack.
- client may thus get a cap _update_ and not have the cap. ignore it.
- mds should track seq of last issue. any release
attempt will only succeed if the client has seen the latest.
- a UPDATE updates the clients issued caps, wanted, etc. it may also flush dirty metadata.
- 'caps' are which caps the client retains.
- if 0, client wishes to release the cap
- 'wanted' is which caps the client wants.
- 'dirty' is which metadata is to be written.
- client gets a FLUSH_ACK with matching dirty flags indicating which caps were written.
- a FLUSH_ACK acks a FLUSH.
- 'dirty' is the _original_ FLUSH's dirty (i.e., which metadata was written back)
- 'seq' is the _original_ FLUSH's seq.
- 'caps' is the _original_ FLUSH's caps (not actually important)
- client can conclude that (dirty & ~caps) bits were successfully cleaned.
- a FLUSHSNAP flushes snapshot metadata.
- 'dirty' indicates which caps, were dirty, if any.
- mds writes metadata. if dirty!=0, replies with FLUSHSNAP_ACK.
*/
class CInode;
class Session;
class MDLockCache;
namespace ceph {
class Formatter;
}
class Capability : public Counter<Capability> {
public:
MEMPOOL_CLASS_HELPERS();
struct Export {
Export() {}
Export(int64_t id, int w, int i, int p, snapid_t cf,
ceph_seq_t s, ceph_seq_t m, utime_t lis, unsigned st) :
cap_id(id), wanted(w), issued(i), pending(p), client_follows(cf),
seq(s), mseq(m), last_issue_stamp(lis), state(st) {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<Export*>& ls);
int64_t cap_id = 0;
int32_t wanted = 0;
int32_t issued = 0;
int32_t pending = 0;
snapid_t client_follows;
ceph_seq_t seq = 0;
ceph_seq_t mseq = 0;
utime_t last_issue_stamp;
uint32_t state = 0;
};
struct Import {
Import() {}
Import(int64_t i, ceph_seq_t s, ceph_seq_t m) : cap_id(i), issue_seq(s), mseq(m) {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &p);
void dump(ceph::Formatter *f) const;
int64_t cap_id = 0;
ceph_seq_t issue_seq = 0;
ceph_seq_t mseq = 0;
};
struct revoke_info {
revoke_info() {}
revoke_info(__u32 b, ceph_seq_t s, ceph_seq_t li) : before(b), seq(s), last_issue(li) {}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<revoke_info*>& ls);
__u32 before = 0;
ceph_seq_t seq = 0;
ceph_seq_t last_issue = 0;
};
const static unsigned STATE_NOTABLE = (1<<0);
const static unsigned STATE_NEW = (1<<1);
const static unsigned STATE_IMPORTING = (1<<2);
const static unsigned STATE_NEEDSNAPFLUSH = (1<<3);
const static unsigned STATE_CLIENTWRITEABLE = (1<<4);
const static unsigned STATE_NOINLINE = (1<<5);
const static unsigned STATE_NOPOOLNS = (1<<6);
const static unsigned STATE_NOQUOTA = (1<<7);
const static unsigned MASK_STATE_EXPORTED =
(STATE_CLIENTWRITEABLE | STATE_NOINLINE | STATE_NOPOOLNS | STATE_NOQUOTA);
Capability(CInode *i=nullptr, Session *s=nullptr, uint64_t id=0);
Capability(const Capability& other) = delete;
const Capability& operator=(const Capability& other) = delete;
int pending() const {
return _pending;
}
int issued() const {
return _issued;
}
int revoking() const {
return _issued & ~_pending;
}
ceph_seq_t issue(unsigned c, bool reval=false) {
if (reval)
revalidate();
if (_pending & ~c) {
// revoking (and maybe adding) bits. note caps prior to this revocation
_revokes.emplace_back(_pending, last_sent, last_issue);
_pending = c;
_issued |= c;
if (!is_notable())
mark_notable();
} else if (~_pending & c) {
// adding bits only. remove obsolete revocations?
_pending |= c;
_issued |= c;
// drop old _revokes with no bits we don't have
while (!_revokes.empty() &&
(_revokes.back().before & ~_pending) == 0)
_revokes.pop_back();
} else {
// no change.
ceph_assert(_pending == c);
}
//last_issue =
inc_last_seq();
return last_sent;
}
ceph_seq_t issue_norevoke(unsigned c, bool reval=false) {
if (reval)
revalidate();
_pending |= c;
_issued |= c;
clear_new();
inc_last_seq();
return last_sent;
}
int confirm_receipt(ceph_seq_t seq, unsigned caps);
// we may get a release racing with revocations, which means our revokes will be ignored
// by the client. clean them out of our _revokes history so we don't wait on them.
void clean_revoke_from(ceph_seq_t li) {
bool changed = false;
while (!_revokes.empty() && _revokes.front().last_issue <= li) {
_revokes.pop_front();
changed = true;
}
if (changed) {
bool was_revoking = (_issued & ~_pending);
calc_issued();
if (was_revoking && _issued == _pending) {
item_revoking_caps.remove_myself();
item_client_revoking_caps.remove_myself();
maybe_clear_notable();
}
}
}
ceph_seq_t get_mseq() const { return mseq; }
void inc_mseq() { mseq++; }
utime_t get_last_issue_stamp() const { return last_issue_stamp; }
utime_t get_last_revoke_stamp() const { return last_revoke_stamp; }
void set_last_issue() { last_issue = last_sent; }
void set_last_issue_stamp(utime_t t) { last_issue_stamp = t; }
void set_last_revoke_stamp(utime_t t) { last_revoke_stamp = t; }
void reset_num_revoke_warnings() { num_revoke_warnings = 0; }
void inc_num_revoke_warnings() { ++num_revoke_warnings; }
unsigned get_num_revoke_warnings() const { return num_revoke_warnings; }
void set_cap_id(uint64_t i) { cap_id = i; }
uint64_t get_cap_id() const { return cap_id; }
//ceph_seq_t get_last_issue() { return last_issue; }
bool is_suppress() const { return suppress > 0; }
void inc_suppress() { suppress++; }
void dec_suppress() { suppress--; }
static bool is_wanted_notable(int wanted) {
return wanted & (CEPH_CAP_ANY_WR|CEPH_CAP_FILE_WR|CEPH_CAP_FILE_RD);
}
bool is_wanted_notable() const {
return is_wanted_notable(wanted());
}
bool is_notable() const { return state & STATE_NOTABLE; }
bool is_stale() const;
bool is_valid() const;
bool is_new() const { return state & STATE_NEW; }
void mark_new() { state |= STATE_NEW; }
void clear_new() { state &= ~STATE_NEW; }
bool is_importing() const { return state & STATE_IMPORTING; }
void mark_importing() { state |= STATE_IMPORTING; }
void clear_importing() { state &= ~STATE_IMPORTING; }
bool need_snapflush() const { return state & STATE_NEEDSNAPFLUSH; }
void mark_needsnapflush() { state |= STATE_NEEDSNAPFLUSH; }
void clear_needsnapflush() { state &= ~STATE_NEEDSNAPFLUSH; }
bool is_clientwriteable() const { return state & STATE_CLIENTWRITEABLE; }
void mark_clientwriteable() {
if (!is_clientwriteable()) {
state |= STATE_CLIENTWRITEABLE;
if (!is_notable())
mark_notable();
}
}
void clear_clientwriteable() {
if (is_clientwriteable()) {
state &= ~STATE_CLIENTWRITEABLE;
maybe_clear_notable();
}
}
bool is_noinline() const { return state & STATE_NOINLINE; }
bool is_nopoolns() const { return state & STATE_NOPOOLNS; }
bool is_noquota() const { return state & STATE_NOQUOTA; }
CInode *get_inode() const { return inode; }
Session *get_session() const { return session; }
client_t get_client() const;
// caps this client wants to hold
int wanted() const { return _wanted; }
void set_wanted(int w);
void inc_last_seq() { last_sent++; }
ceph_seq_t get_last_seq() const {
return last_sent;
}
ceph_seq_t get_last_issue() const { return last_issue; }
void reset_seq() {
last_sent = 0;
last_issue = 0;
}
// -- exports --
Export make_export() const {
return Export(cap_id, wanted(), issued(), pending(), client_follows, get_last_seq(), mseq+1, last_issue_stamp, state);
}
void merge(const Export& other, bool auth_cap) {
// issued + pending
int newpending = other.pending | pending();
if (other.issued & ~newpending)
issue(other.issued | newpending);
else
issue(newpending);
last_issue_stamp = other.last_issue_stamp;
client_follows = other.client_follows;
state |= other.state & MASK_STATE_EXPORTED;
if ((other.state & STATE_CLIENTWRITEABLE) && !is_notable())
mark_notable();
// wanted
set_wanted(wanted() | other.wanted);
if (auth_cap)
mseq = other.mseq;
}
void merge(int otherwanted, int otherissued) {
// issued + pending
int newpending = pending();
if (otherissued & ~newpending)
issue(otherissued | newpending);
else
issue(newpending);
// wanted
set_wanted(wanted() | otherwanted);
}
int revoke() {
if (revoking())
return confirm_receipt(last_sent, pending());
return 0;
}
// serializers
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<Capability*>& ls);
snapid_t client_follows = 0;
version_t client_xattr_version = 0;
version_t client_inline_version = 0;
int64_t last_rbytes = 0;
int64_t last_rsize = 0;
xlist<Capability*>::item item_session_caps;
xlist<Capability*>::item item_snaprealm_caps;
xlist<Capability*>::item item_revoking_caps;
xlist<Capability*>::item item_client_revoking_caps;
elist<MDLockCache*> lock_caches;
int get_lock_cache_allowed() const { return lock_cache_allowed; }
void set_lock_cache_allowed(int c) { lock_cache_allowed |= c; }
void clear_lock_cache_allowed(int c) { lock_cache_allowed &= ~c; }
private:
void calc_issued() {
_issued = _pending;
for (const auto &r : _revokes) {
_issued |= r.before;
}
}
void revalidate();
void mark_notable();
void maybe_clear_notable();
CInode *inode;
Session *session;
uint64_t cap_id;
uint32_t cap_gen;
__u32 _wanted = 0; // what the client wants (ideally)
utime_t last_issue_stamp;
utime_t last_revoke_stamp;
unsigned num_revoke_warnings = 0;
// track in-flight caps --------------
// - add new caps to _pending
// - track revocations in _revokes list
__u32 _pending = 0, _issued = 0;
mempool::mds_co::list<revoke_info> _revokes;
ceph_seq_t last_sent = 0;
ceph_seq_t last_issue = 0;
ceph_seq_t mseq = 0;
int suppress = 0;
unsigned state = 0;
int lock_cache_allowed = 0;
};
WRITE_CLASS_ENCODER(Capability::Export)
WRITE_CLASS_ENCODER(Capability::Import)
WRITE_CLASS_ENCODER(Capability::revoke_info)
WRITE_CLASS_ENCODER(Capability)
#endif
| 11,877 | 28.919395 | 122 | h |
null | ceph-main/src/mds/DamageTable.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/debug.h"
#include "mds/CDir.h"
#include "DamageTable.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "mds." << rank << ".damage " << __func__ << " "
namespace {
/**
* Record damage to a particular dirfrag, implicitly affecting
* any dentries within it.
*/
class DirFragDamage : public DamageEntry
{
public:
inodeno_t ino;
frag_t frag;
DirFragDamage(inodeno_t ino_, frag_t frag_)
: ino(ino_), frag(frag_)
{}
damage_entry_type_t get_type() const override
{
return DAMAGE_ENTRY_DIRFRAG;
}
void dump(Formatter *f) const override
{
f->open_object_section("dir_frag_damage");
f->dump_string("damage_type", "dir_frag");
f->dump_int("id", id);
f->dump_int("ino", ino);
f->dump_stream("frag") << frag;
f->dump_string("path", path);
f->close_section();
}
};
/**
* Record damage to a particular dname within a particular dirfrag
*/
class DentryDamage : public DamageEntry
{
public:
inodeno_t ino;
frag_t frag;
std::string dname;
snapid_t snap_id;
DentryDamage(
inodeno_t ino_,
frag_t frag_,
std::string_view dname_,
snapid_t snap_id_)
: ino(ino_), frag(frag_), dname(dname_), snap_id(snap_id_)
{}
damage_entry_type_t get_type() const override
{
return DAMAGE_ENTRY_DENTRY;
}
void dump(Formatter *f) const override
{
f->open_object_section("dentry_damage");
f->dump_string("damage_type", "dentry");
f->dump_int("id", id);
f->dump_int("ino", ino);
f->dump_stream("frag") << frag;
f->dump_string("dname", dname);
f->dump_stream("snap_id") << snap_id;
f->dump_string("path", path);
f->close_section();
}
};
/**
* Record damage to our ability to look up an ino by number
*/
class BacktraceDamage : public DamageEntry
{
public:
inodeno_t ino;
BacktraceDamage(inodeno_t ino_)
: ino(ino_)
{}
damage_entry_type_t get_type() const override
{
return DAMAGE_ENTRY_BACKTRACE;
}
void dump(Formatter *f) const override
{
f->open_object_section("backtrace_damage");
f->dump_string("damage_type", "backtrace");
f->dump_int("id", id);
f->dump_int("ino", ino);
f->dump_string("path", path);
f->close_section();
}
};
}
DamageEntry::~DamageEntry()
{}
/**
 * Record that a particular dentry could not be loaded.
 *
 * Returns true when the damage is fatal for this rank: either the
 * table is full, or the dentry lives in this rank's own mdsdir/stray
 * directory. Otherwise records the damage (idempotently) and returns
 * false.
 */
bool DamageTable::notify_dentry(
    inodeno_t ino, frag_t frag,
    snapid_t snap_id, std::string_view dname, std::string_view path)
{
  if (oversized()) {
    return true;
  }
  // Special cases: damage to these dirfrags is considered fatal to
  // the MDS rank that owns them.
  if (
      (MDS_INO_IS_MDSDIR(ino) && MDS_INO_MDSDIR_OWNER(ino) == rank)
      ||
      (MDS_INO_IS_STRAY(ino) && MDS_INO_STRAY_OWNER(ino) == rank)
     ) {
    // bug fix: the message previously lacked the space before "is",
    // producing e.g. "ino 0x100is fatal"
    derr << "Damage to dentries in fragment " << frag << " of ino " << ino
         << " is fatal because it is a system directory for this rank" << dendl;
    return true;
  }
  // try_emplace keeps the first report for a given (dname, snap_id)
  auto& df_dentries = dentries[DirFragIdent(ino, frag)];
  if (auto [it, inserted] = df_dentries.try_emplace(DentryIdent(dname, snap_id)); inserted) {
    auto entry = std::make_shared<DentryDamage>(ino, frag, dname, snap_id);
    entry->path = path;
    it->second = entry;
    by_id[entry->id] = std::move(entry);
  }
  return false;
}
/**
 * Record that a dirfrag could not be loaded.
 *
 * Returns true when fatal for this rank (the frag belongs to this
 * rank's stray dir or is the root, or the table is full); otherwise
 * records the damage (first report wins) and returns false.
 * NOTE(review): unlike notify_dentry(), the system-directory check here
 * runs before the oversized() check — presumably intentional so a full
 * table cannot mask fatal root/stray damage.
 */
bool DamageTable::notify_dirfrag(inodeno_t ino, frag_t frag,
                                 std::string_view path)
{
  // Special cases: damage to these dirfrags is considered fatal to
  // the MDS rank that owns them.
  if ((MDS_INO_IS_STRAY(ino) && MDS_INO_STRAY_OWNER(ino) == rank)
      || (ino == CEPH_INO_ROOT)) {
    derr << "Damage to fragment " << frag << " of ino " << ino
         << " is fatal because it is a system directory for this rank" << dendl;
    return true;
  }
  if (oversized()) {
    return true;
  }
  if (auto [it, inserted] = dirfrags.try_emplace(DirFragIdent(ino, frag)); inserted) {
    DamageEntryRef entry = std::make_shared<DirFragDamage>(ino, frag);
    entry->path = path;
    it->second = entry;
    by_id[entry->id] = std::move(entry);
  }
  return false;
}
/**
 * Record that an inode could not be resolved by number (probably a
 * missing/corrupt backtrace). Returns true only when the table is full;
 * otherwise records the damage (first report wins) and returns false.
 */
bool DamageTable::notify_remote_damaged(inodeno_t ino, std::string_view path)
{
  if (oversized()) {
    return true;
  }
  if (auto [it, inserted] = remotes.try_emplace(ino); inserted) {
    auto entry = std::make_shared<BacktraceDamage>(ino);
    entry->path = path;
    it->second = entry;
    by_id[entry->id] = std::move(entry);
  }
  return false;
}
bool DamageTable::oversized() const
{
return by_id.size() > (size_t)(g_conf()->mds_damage_table_max_entries);
}
/**
 * Whether a specific (dname, snap_id) in the given dirfrag has been
 * reported damaged.
 *
 * Uses a single find() instead of the previous count()+at() pair,
 * which searched the same map twice and built the key twice.
 */
bool DamageTable::is_dentry_damaged(
    const CDir *dir_frag,
    std::string_view dname,
    const snapid_t snap_id) const
{
  const auto it = dentries.find(
      DirFragIdent(dir_frag->inode->ino(), dir_frag->frag));
  if (it == dentries.end()) {
    return false;
  }
  return it->second.count(DentryIdent(dname, snap_id)) > 0;
}
bool DamageTable::is_dirfrag_damaged(
const CDir *dir_frag) const
{
return dirfrags.count(
DirFragIdent(dir_frag->inode->ino(), dir_frag->frag)) > 0;
}
// Whether resolving this inode by number has been reported damaged.
bool DamageTable::is_remote_damaged(
    const inodeno_t ino) const
{
  return remotes.find(ino) != remotes.end();
}
// Dump every damage entry (of all kinds) via the by_id index.
void DamageTable::dump(Formatter *f) const
{
  f->open_array_section("damage_table");
  for (const auto& [id, entry] : by_id) {
    entry->dump(f);
  }
  f->close_section();
}
/**
 * Remove one damage entry by id. Silently a no-op for unknown ids.
 * The entry is erased from both its type-specific index and by_id;
 * an unrecognized type aborts (indicates an internal bug).
 * NOTE(review): for DAMAGE_ENTRY_DENTRY this erases the whole dirfrag's
 * dentry map, dropping sibling dentry entries from `dentries` while
 * they remain in `by_id` — confirm this asymmetry is intended.
 */
void DamageTable::erase(damage_entry_id_t damage_id)
{
  auto by_id_entry = by_id.find(damage_id);
  if (by_id_entry == by_id.end()) {
    return;
  }
  DamageEntryRef entry = by_id_entry->second;
  ceph_assert(entry->id == damage_id);  // Sanity
  const auto type = entry->get_type();
  if (type == DAMAGE_ENTRY_DIRFRAG) {
    auto dirfrag_entry = std::static_pointer_cast<DirFragDamage>(entry);
    dirfrags.erase(DirFragIdent(dirfrag_entry->ino, dirfrag_entry->frag));
  } else if (type == DAMAGE_ENTRY_DENTRY) {
    auto dentry_entry = std::static_pointer_cast<DentryDamage>(entry);
    dentries.erase(DirFragIdent(dentry_entry->ino, dentry_entry->frag));
  } else if (type == DAMAGE_ENTRY_BACKTRACE) {
    auto backtrace_entry = std::static_pointer_cast<BacktraceDamage>(entry);
    remotes.erase(backtrace_entry->ino);
  } else {
    derr << "Invalid type " << type << dendl;
    ceph_abort();
  }
  by_id.erase(by_id_entry);
}
| 6,846 | 23.807971 | 93 | cc |
null | ceph-main/src/mds/DamageTable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef DAMAGE_TABLE_H_
#define DAMAGE_TABLE_H_
#include <string_view>
#include "mdstypes.h"
#include "include/random.h"
class CDir;
typedef uint64_t damage_entry_id_t;
typedef enum
{
DAMAGE_ENTRY_DIRFRAG,
DAMAGE_ENTRY_DENTRY,
DAMAGE_ENTRY_BACKTRACE
} damage_entry_type_t;
class DamageEntry
{
public:
DamageEntry()
{
id = ceph::util::generate_random_number<damage_entry_id_t>(0, 0xffffffff);
reported_at = ceph_clock_now();
}
virtual ~DamageEntry();
virtual damage_entry_type_t get_type() const = 0;
virtual void dump(Formatter *f) const = 0;
damage_entry_id_t id;
utime_t reported_at;
// path is optional, advisory. Used to give the admin an idea of what
// part of his tree the damage affects.
std::string path;
};
typedef std::shared_ptr<DamageEntry> DamageEntryRef;
// Key identifying one directory fragment: (inode number, fragment).
class DirFragIdent
{
public:
  DirFragIdent(inodeno_t ino_, frag_t frag_)
    : ino(ino_), frag(frag_)
  {}

  // Strict weak ordering: by inode number first, then by fragment.
  bool operator<(const DirFragIdent &rhs) const
  {
    if (ino != rhs.ino) {
      return ino < rhs.ino;
    }
    return frag < rhs.frag;
  }

  inodeno_t ino;
  frag_t frag;
};
// Key identifying one dentry within a dirfrag: (name, snapshot id).
class DentryIdent
{
public:
  DentryIdent(std::string_view dname_, snapid_t snap_id_)
    : dname(dname_), snap_id(snap_id_)
  {}

  // Strict weak ordering: by name first, then by snapshot id.
  bool operator<(const DentryIdent &rhs) const
  {
    if (dname != rhs.dname) {
      return dname < rhs.dname;
    }
    return snap_id < rhs.snap_id;
  }

  std::string dname;
  snapid_t snap_id;
};
/**
* Registry of in-RADOS metadata damage identified
* during forward scrub or during normal fetches.
*
* Used to indicate damage to the administrator, and
* to cache known-bad paths so that we don't hit them
* repeatedly.
*
* Callers notifying damage must check return code; if
* an fatal condition is indicated then they should mark the MDS
* rank damaged.
*
* An artificial limit on the number of damage entries
* is imposed to avoid this structure growing indefinitely. If
* a notification causes the limit to be exceeded, the fatal
* condition will be indicated in the return code and the MDS
* rank should be marked damaged.
*
* Protected by MDS::mds_lock
*/
class DamageTable
{
  public:
  explicit DamageTable(const mds_rank_t rank_)
    : rank(rank_)
  {
    ceph_assert(rank_ != MDS_RANK_NONE);
  }
  /**
   * Return true if no damage entries exist
   */
  bool empty() const
  {
    return by_id.empty();
  }
  /**
   * Indicate that a dirfrag cannot be loaded.
   *
   * @return true if fatal
   */
  bool notify_dirfrag(inodeno_t ino, frag_t frag, std::string_view path);
  /**
   * Indicate that a particular dentry cannot be loaded.
   *
   * @return true if fatal
   */
  bool notify_dentry(
    inodeno_t ino, frag_t frag,
    snapid_t snap_id, std::string_view dname, std::string_view path);
  /**
   * Indicate that a particular Inode could not be loaded by number
   */
  bool notify_remote_damaged(inodeno_t ino, std::string_view path);
  // Lookup helpers: callers use these to skip known-bad metadata instead
  // of repeatedly attempting (and failing) to load it.
  bool is_dentry_damaged(
      const CDir *dir_frag,
      std::string_view dname,
      const snapid_t snap_id) const;
  bool is_dirfrag_damaged(const CDir *dir_frag) const;
  bool is_remote_damaged(const inodeno_t ino) const;
  // Emit all damage entries to a Formatter (admin inspection)
  void dump(Formatter *f) const;
  // Forget a single damage entry, addressed by its unique ID
  void erase(damage_entry_id_t damage_id);
  protected:
  // I need to know my MDS rank so that I can check if
  // metadata items are part of my mydir.
  const mds_rank_t rank;
  // Presumably checks the artificial damage-entry limit mentioned in the
  // class comment — defined out of line; TODO confirm against DamageTable.cc
  bool oversized() const;
  // Map of all dirfrags reported damaged
  std::map<DirFragIdent, DamageEntryRef> dirfrags;
  // Store dentries in a map per dirfrag, so that we can
  // readily look up all the bad dentries in a particular
  // dirfrag
  std::map<DirFragIdent, std::map<DentryIdent, DamageEntryRef> > dentries;
  // Map of all inodes which could not be resolved remotely
  // (i.e. have probably/possibly missing backtraces)
  std::map<inodeno_t, DamageEntryRef> remotes;
  // All damage, by ID. This is a secondary index
  // to the dirfrag, dentry, remote maps. It exists
  // to enable external tools to unambiguously operate
  // on particular entries.
  std::map<damage_entry_id_t, DamageEntryRef> by_id;
};
#endif // DAMAGE_TABLE_H_
| 4,777 | 23.253807 | 80 | h |
null | ceph-main/src/mds/FSMap.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <ostream>
#include "FSMap.h"
#include "common/StackStringStream.h"
#ifdef WITH_SEASTAR
#include "crimson/common/config_proxy.h"
#else
#include "common/config_proxy.h"
#endif
#include "global/global_context.h"
#include "mon/health_check.h"
using std::list;
using std::pair;
using std::ostream;
using std::string;
using std::string_view;
using ceph::bufferlist;
using ceph::Formatter;
// Serialize to wire format (v1). Field order must stay in sync with
// ClusterInfo::decode().
void ClusterInfo::encode(ceph::buffer::list &bl) const {
  ENCODE_START(1, 1, bl);
  encode(client_name, bl);
  encode(cluster_name, bl);
  encode(fs_name, bl);
  ENCODE_FINISH(bl);
}
// Deserialize from wire format (v1). Field order mirrors encode().
void ClusterInfo::decode(ceph::buffer::list::const_iterator &iter) {
  DECODE_START(1, iter);
  decode(client_name, iter);
  decode(cluster_name, iter);
  decode(fs_name, iter);
  DECODE_FINISH(iter);
}
// Emit all fields to a Formatter (e.g. JSON for admin commands).
void ClusterInfo::dump(ceph::Formatter *f) const {
  f->dump_string("client_name", client_name);
  f->dump_string("cluster_name", cluster_name);
  f->dump_string("fs_name", fs_name);
}
// Human-readable one-line rendering of the peer cluster identity.
void ClusterInfo::print(std::ostream& out) const {
  out << "[client_name=" << client_name;
  out << ", cluster_name=" << cluster_name;
  out << ", fs_name=" << fs_name;
  out << "]" << std::endl;
}
// Serialize to wire format (v1). Field order must stay in sync with decode().
void Peer::encode(ceph::buffer::list &bl) const {
  ENCODE_START(1, 1, bl);
  encode(uuid, bl);
  encode(remote, bl);
  ENCODE_FINISH(bl);
}
// Deserialize from wire format (v1). Field order mirrors encode().
void Peer::decode(ceph::buffer::list::const_iterator &iter) {
  DECODE_START(1, iter);
  decode(uuid, iter);
  decode(remote, iter);
  DECODE_FINISH(iter);
}
// Emit the peer as an object section keyed by its UUID.
void Peer::dump(ceph::Formatter *f) const {
  f->open_object_section(uuid);
  f->dump_object("remote", remote);
  f->close_section();
}
// One-line human-readable rendering of a mirror peer.
void Peer::print(std::ostream& out) const {
  out << "[uuid=" << uuid;
  out << ", remote=" << remote;
  out << "]" << std::endl;
}
// Serialize mirroring state (v1): enabled flag plus the peer set.
// Field order must stay in sync with decode().
void MirrorInfo::encode(ceph::buffer::list &bl) const {
  ENCODE_START(1, 1, bl);
  encode(mirrored, bl);
  encode(peers, bl);
  ENCODE_FINISH(bl);
}
// Deserialize mirroring state (v1). Field order mirrors encode().
void MirrorInfo::decode(ceph::buffer::list::const_iterator &iter) {
  DECODE_START(1, iter);
  decode(mirrored, iter);
  decode(peers, iter);
  DECODE_FINISH(iter);
}
// Emit every configured peer under a single "peers" section.
void MirrorInfo::dump(ceph::Formatter *f) const {
  f->open_object_section("peers");
  for (auto it = peers.begin(); it != peers.end(); ++it) {
    it->dump(f);
  }
  f->close_section(); // peers
}
// One-line human-readable rendering of the mirror peer list.
void MirrorInfo::print(std::ostream& out) const {
  out << "[peers=" << peers;
  out << "]" << std::endl;
}
// Dump one filesystem: its MDSMap, its ID, and (when enabled) its
// mirroring configuration.
void Filesystem::dump(Formatter *f) const
{
  f->open_object_section("mdsmap");
  mds_map.dump(f);
  f->close_section();
  f->dump_int("id", fscid);
  if (mirror_info.is_mirrored()) {
    f->open_object_section("mirror_info");
    mirror_info.dump(f);
    f->close_section(); // mirror_info
  }
}
// Full structured dump of the FSMap: epoch, default fs, compat set,
// feature flags, standby daemons (with the epoch each was seen), and
// every filesystem.
void FSMap::dump(Formatter *f) const
{
  f->dump_int("epoch", epoch);
  // Use 'default' naming to match 'set-default' CLI
  f->dump_int("default_fscid", legacy_client_fscid);
  f->open_object_section("compat");
  default_compat.dump(f);
  f->close_section();
  f->open_object_section("feature_flags");
  f->dump_bool("enable_multiple", enable_multiple);
  f->dump_bool("ever_enabled_multiple", ever_enabled_multiple);
  f->close_section();
  f->open_array_section("standbys");
  for (const auto& [gid, info] : standby_daemons) {
    f->open_object_section("info");
    info.dump(f);
    // standby_epochs is kept in lockstep with standby_daemons (see sanity())
    f->dump_int("epoch", standby_epochs.at(gid));
    f->close_section();
  }
  f->close_section();
  f->open_array_section("filesystems");
  for (const auto &fs : filesystems) {
    f->open_object_section("filesystem");
    fs.second->dump(f);
    f->close_section();
  }
  f->close_section();
}
// Deep-copy assignment: Filesystem objects are held by shared_ptr, so a
// member-wise copy would alias them; clone each one instead.
//
// Bug fix: the previous version failed to copy `ever_enabled_multiple`
// (which is encoded, decoded and dumped as part of the map's state) and
// `struct_version` (set by decode()), silently losing them on assignment.
FSMap &FSMap::operator=(const FSMap &rhs)
{
  epoch = rhs.epoch;
  next_filesystem_id = rhs.next_filesystem_id;
  legacy_client_fscid = rhs.legacy_client_fscid;
  default_compat = rhs.default_compat;
  enable_multiple = rhs.enable_multiple;
  ever_enabled_multiple = rhs.ever_enabled_multiple;
  mds_roles = rhs.mds_roles;
  standby_daemons = rhs.standby_daemons;
  standby_epochs = rhs.standby_epochs;
  struct_version = rhs.struct_version;

  filesystems.clear();
  for (const auto &i : rhs.filesystems) {
    const auto &fs = i.second;
    filesystems[fs->fscid] = std::make_shared<Filesystem>(*fs);
  }

  return *this;
}
// Build a sample FSMap (one filesystem per generated MDSMap instance)
// for the encode/decode unit-test machinery.
void FSMap::generate_test_instances(std::list<FSMap*>& ls)
{
  auto* fsmap = new FSMap();
  std::list<MDSMap*> mdsmaps;
  MDSMap::generate_test_instances(mdsmaps);
  int next_fscid = 20;
  while (!mdsmaps.empty()) {
    MDSMap* mdsmap = mdsmaps.front();
    mdsmaps.pop_front();
    auto fs = Filesystem::create();
    fs->fscid = next_fscid++;
    fs->mds_map = *mdsmap;
    delete mdsmap;
    fsmap->filesystems[fs->fscid] = fs;
  }
  ls.push_back(fsmap);
}
// Multi-line human-readable dump of the whole map, as shown by
// `ceph fs dump`: header fields, each filesystem, then the standbys.
void FSMap::print(ostream& out) const
{
  out << "e" << epoch << std::endl;
  out << "enable_multiple, ever_enabled_multiple: " << enable_multiple << ","
      << ever_enabled_multiple << std::endl;
  out << "default compat: " << default_compat << std::endl;
  out << "legacy client fscid: " << legacy_client_fscid << std::endl;
  out << " " << std::endl;
  if (filesystems.empty()) {
    out << "No filesystems configured" << std::endl;
  }
  for (const auto& [fscid, fs] : filesystems) {
    fs->print(out);
    out << " " << std::endl << " " << std::endl;  // blank separator lines
  }
  if (!standby_daemons.empty()) {
    out << "Standby daemons:" << std::endl << " " << std::endl;
  }
  for (const auto& [gid, info] : standby_daemons) {
    out << info << std::endl;
  }
}
// One-line daemon summary for the "services:" section of `ceph status`,
// aggregated across every filesystem.
void FSMap::print_daemon_summary(ostream& out) const
{
  int up = 0;
  int in = 0;
  int failed = 0;
  int standby_replay = 0;
  for (const auto& p : filesystems) {
    const auto& mds_map = p.second->mds_map;
    up += mds_map.get_num_up_mds();
    in += mds_map.get_num_in_mds();
    failed += mds_map.get_num_failed_mds();
    standby_replay += mds_map.get_num_standby_replay_mds();
  }
  const int standby = standby_daemons.size();
  out << up << "/" << in << " daemons up";
  if (failed) {
    out << " (" << failed << " failed)";
  }
  if (standby) {
    out << ", " << standby << " standby";
  }
  if (standby_replay) {
    out << ", " << standby_replay << " hot standby";
  }
}
// "volumes:" line for the "data:" section of `ceph status`. Each fs is
// counted in exactly one of failed/recovering/stopped/healthy; damaged
// is tracked independently and reported separately.
void FSMap::print_fs_summary(ostream& out) const
{
  if (filesystems.empty()) {
    return;
  }
  int failed = 0, recovering = 0, stopped = 0, healthy = 0;
  int damaged = 0;
  for (const auto& p : filesystems) {
    const auto& mds_map = p.second->mds_map;
    if (mds_map.is_any_damaged()) {
      ++damaged;
    }
    if (mds_map.is_any_failed()) {
      ++failed;
    } else if (mds_map.is_degraded()) {
      ++recovering;
    } else if (mds_map.get_max_mds() == 0) {
      ++stopped;
    } else {
      ++healthy;
    }
  }
  out << " volumes: "
      << healthy << "/" << filesystems.size() << " healthy";
  if (recovering) {
    out << ", " << recovering << " recovering";
  }
  if (failed) {
    out << ", " << failed << " failed";
  }
  if (stopped) {
    out << ", " << stopped << " stopped";
  }
  if (damaged) {
    out << "; " << damaged << " damaged";
  }
  out << "\n";
}
// Compact summary used by `ceph -s` and friends. With a Formatter the
// output is structured; otherwise a one-line textual summary is written
// to *out. Exactly one of f / out is expected to be used by the caller.
void FSMap::print_summary(Formatter *f, ostream *out) const
{
  // Part 1: per-filesystem up/in/max counts (or the compact fs list).
  if (f) {
    f->dump_unsigned("epoch", get_epoch());
    for (const auto &p : filesystems) {
      auto& fs = p.second;
      f->dump_unsigned("id", fs->fscid);
      f->dump_unsigned("up", fs->mds_map.up.size());
      f->dump_unsigned("in", fs->mds_map.in.size());
      f->dump_unsigned("max", fs->mds_map.max_mds);
    }
  } else {
    auto count = filesystems.size();
    if (count <= 3) {
      bool first = true;
      for (const auto& p : filesystems) {
        const auto& fs = p.second;
        if (!first) {
          *out << " ";
        }
        if (fs->mds_map.is_degraded()) {
          *out << fs->mds_map.fs_name << ":" << fs->mds_map.up.size() << "/" << fs->mds_map.in.size();
        } else {
          *out << fs->mds_map.fs_name << ":" << fs->mds_map.in.size();
        }
        first = false;
      }
    } else {
      // Too many filesystems to list: print a count, plus up to three
      // degraded ones by name.
      *out << count << " fs";
      unsigned degraded = 0;
      CachedStackStringStream css;
      *css << " (degraded: ";
      for (const auto& p : filesystems) {
        const auto& fs = p.second;
        if (fs->mds_map.is_degraded()) {
          degraded++;
          if (degraded <= 3) {
            *css << fs->mds_map.fs_name << ":" << fs->mds_map.up.size() << "/" << fs->mds_map.in.size();
          }
        }
      }
      if (degraded > 0) {
        if (degraded <= 3) {
          *css << ")";
          *out << css->strv();
        } else {
          *out << " (degraded: " << degraded << " fs)";
        }
      }
    }
  }
  // Part 2: per-daemon state, bucketed by state (and by rank for text).
  if (f) {
    f->open_array_section("by_rank");
  }
  std::map<MDSMap::DaemonState,unsigned> by_state;
  std::map<mds_role_t, std::pair<MDSMap::DaemonState, std::string>> by_rank;
  by_state[MDSMap::DaemonState::STATE_STANDBY] = standby_daemons.size();
  for (const auto& [gid, fscid] : mds_roles) {
    if (fscid == FS_CLUSTER_ID_NONE)
      continue;
    const auto& info = filesystems.at(fscid)->mds_map.get_info_gid(gid);
    auto s = std::string(ceph_mds_state_name(info.state));
    if (info.laggy()) {
      s += "(laggy or crashed)";
    }
    if (f) {
      f->open_object_section("mds");
      f->dump_unsigned("filesystem_id", fscid);
      f->dump_unsigned("rank", info.rank);
      f->dump_string("name", info.name);
      f->dump_string("status", s);
      f->dump_unsigned("gid", gid);
      f->close_section();
    } else if (info.state != MDSMap::DaemonState::STATE_STANDBY_REPLAY) {
      by_rank[mds_role_t(fscid, info.rank)] = std::make_pair(info.state, info.name + "=" + s);
    }
    by_state[info.state]++;
  }
  if (f) {
    f->close_section();
  } else {
    // Only print the per-rank detail when it is short enough to be useful.
    if (0 < by_rank.size() && by_rank.size() < 5) {
      if (filesystems.size() > 1) {
        // Disambiguate filesystems
        std::map<std::string, std::string> pretty;
        for (const auto& [role,status] : by_rank) {
          const auto &fs_name = filesystems.at(role.fscid)->mds_map.fs_name;
          CachedStackStringStream css;
          *css << fs_name << ":" << role.rank;
          pretty.emplace(std::piecewise_construct, std::forward_as_tuple(css->strv()), std::forward_as_tuple(status.second));
          --by_state[status.first]; /* already printed! */
        }
        *out << " " << pretty;
      } else {
        // Omit FSCID in output when only one filesystem exists
        std::map<mds_rank_t, std::string> shortened;
        for (const auto& [role,status] : by_rank) {
          shortened[role.rank] = status.second;
          --by_state[status.first]; /* already printed! */
        }
        *out << " " << shortened;
      }
    }
    for (const auto& [state, count] : by_state) {
      if (count > 0) {
        auto s = std::string_view(ceph_mds_state_name(state));
        *out << " " << count << " " << s;
      }
    }
  }
  if (f) {
    const auto state = MDSMap::DaemonState::STATE_STANDBY;
    auto&& name = ceph_mds_state_name(state);
    auto count = standby_daemons.size();
    f->dump_unsigned(name, count);
  }
  // Part 3: totals of failed/damaged ranks across all filesystems.
  size_t failed = 0;
  size_t damaged = 0;
  for (const auto& p : filesystems) {
    auto& fs = p.second;
    failed += fs->mds_map.failed.size();
    damaged += fs->mds_map.damaged.size();
  }
  if (failed > 0) {
    if (f) {
      f->dump_unsigned("failed", failed);
    } else {
      *out << ", " << failed << " failed";
    }
  }
  if (damaged > 0) {
    if (f) {
      f->dump_unsigned("damaged", damaged);
    } else {
      *out << ", " << damaged << " damaged";
    }
  }
  //if (stopped.size())
  //out << ", " << stopped.size() << " stopped";
}
// Return the gid of a standby-replay daemon following the same rank as
// `who`, or MDS_GID_NONE when none exists.
mds_gid_t Filesystem::get_standby_replay(mds_gid_t who) const
{
  for (const auto& [gid, info] : mds_map.mds_info) {
    if (info.state != MDSMap::STATE_STANDBY_REPLAY) {
      continue;
    }
    if (info.rank == mds_map.mds_info.at(who).rank) {
      return info.global_id;
    }
  }
  return MDS_GID_NONE;
}
// Create and register a new filesystem with the given name and pools.
// If fscid is FS_CLUSTER_ID_NONE a fresh ID is allocated; otherwise the
// requested ID is used (recovery paths). With recover=true, rank 0 is
// pre-populated as failed and the fs is marked not-joinable so no MDS
// starts until the operator finishes recovery.
Filesystem::ref FSMap::create_filesystem(std::string_view name,
    int64_t metadata_pool, int64_t data_pool, uint64_t features,
    fs_cluster_id_t fscid, bool recover)
{
  auto fs = Filesystem::create();
  fs->mds_map.epoch = epoch;
  fs->mds_map.fs_name = name;
  fs->mds_map.data_pools.push_back(data_pool);
  fs->mds_map.metadata_pool = metadata_pool;
  fs->mds_map.cas_pool = -1;
  fs->mds_map.compat = default_compat;
  fs->mds_map.created = ceph_clock_now();
  fs->mds_map.modified = ceph_clock_now();
  fs->mds_map.enabled = true;
  if (fscid == FS_CLUSTER_ID_NONE) {
    fs->fscid = next_filesystem_id++;
  } else {
    fs->fscid = fscid;
    // Keep the allocator ahead of any explicitly-requested ID.
    next_filesystem_id = std::max(fscid, (fs_cluster_id_t)next_filesystem_id) + 1;
  }
  if (recover) {
    // Populate rank 0 as existing (so don't go into CREATING)
    // but failed (so that next available MDS is assigned the rank)
    fs->mds_map.in.insert(mds_rank_t(0));
    fs->mds_map.failed.insert(mds_rank_t(0));
    fs->mds_map.set_flag(CEPH_MDSMAP_NOT_JOINABLE);
  }
  // File system's ID can be FS_CLUSTER_ID_ANONYMOUS if we're recovering
  // a legacy file system by passing FS_CLUSTER_ID_ANONYMOUS as the desired
  // file system ID
  if (fscid != FS_CLUSTER_ID_ANONYMOUS) {
    // ANONYMOUS is only for upgrades from legacy mdsmaps, we should
    // have initialized next_filesystem_id such that it's never used here.
    ceph_assert(fs->fscid != FS_CLUSTER_ID_ANONYMOUS);
  }
  filesystems[fs->fscid] = fs;
  // Created first filesystem? Set it as the one
  // for legacy clients to use
  if (filesystems.size() == 1) {
    legacy_client_fscid = fs->fscid;
  }
  return fs;
}
// Look up a filesystem by name; nullptr if no such filesystem exists.
Filesystem::const_ref FSMap::get_filesystem(std::string_view name) const
{
  for (const auto& [fscid, fs] : filesystems) {
    if (fs->mds_map.fs_name == name) {
      return fs;
    }
  }
  return nullptr;
}
std::vector<Filesystem::const_ref> FSMap::get_filesystems(void) const
{
std::vector<Filesystem::const_ref> ret;
for (const auto& p : filesystems) {
ret.push_back(p.second);
}
return ret;
}
// Replace a filesystem's MDSMap with a pristine one (same fscid, pools
// and name) as part of `fs reset`: all rank state is wiped except rank 0,
// which is left failed so the next standby claims it and replays.
void FSMap::reset_filesystem(fs_cluster_id_t fscid)
{
  auto fs = get_filesystem(fscid);
  auto new_fs = Filesystem::create();
  // Populate rank 0 as existing (so don't go into CREATING)
  // but failed (so that next available MDS is assigned the rank)
  new_fs->mds_map.in.insert(mds_rank_t(0));
  new_fs->mds_map.failed.insert(mds_rank_t(0));
  // Carry forward what makes sense
  new_fs->fscid = fs->fscid;
  new_fs->mds_map.inline_data_enabled = fs->mds_map.inline_data_enabled;
  new_fs->mds_map.data_pools = fs->mds_map.data_pools;
  new_fs->mds_map.metadata_pool = fs->mds_map.metadata_pool;
  new_fs->mds_map.cas_pool = fs->mds_map.cas_pool;
  new_fs->mds_map.fs_name = fs->mds_map.fs_name;
  new_fs->mds_map.compat = default_compat;
  new_fs->mds_map.created = ceph_clock_now();
  new_fs->mds_map.modified = ceph_clock_now();
  new_fs->mds_map.standby_count_wanted = fs->mds_map.standby_count_wanted;
  new_fs->mds_map.enabled = true;
  // Remember mds ranks that have ever started. (They should load old inotable
  // instead of creating new one if they start again.)
  new_fs->mds_map.stopped.insert(fs->mds_map.in.begin(), fs->mds_map.in.end());
  new_fs->mds_map.stopped.insert(fs->mds_map.stopped.begin(), fs->mds_map.stopped.end());
  new_fs->mds_map.stopped.erase(mds_rank_t(0));
  // Persist the new FSMap
  filesystems[new_fs->fscid] = new_fs;
}
// Legacy health reporting: aggregate each filesystem's health entries,
// then add a cluster-wide warning when fewer standbys exist than wanted.
void FSMap::get_health(list<pair<health_status_t,string> >& summary,
		       list<pair<health_status_t,string> > *detail) const
{
  mds_rank_t standby_count_wanted = 0;
  const auto num_standby = (mds_rank_t)standby_daemons.size();
  for (const auto& [fscid, fs] : filesystems) {
    // TODO: move get_health up into here so that we can qualify
    // all the messages with what filesystem they're talking about
    fs->mds_map.get_health(summary, detail);
    standby_count_wanted = std::max(standby_count_wanted, fs->mds_map.get_standby_count_wanted(num_standby));
  }
  if (standby_count_wanted) {
    CachedStackStringStream css;
    *css << "insufficient standby daemons available: have " << standby_daemons.size() << "; want " << standby_count_wanted << " more";
    summary.push_back(make_pair(HEALTH_WARN, css->str()));
  }
}
// Run per-filesystem health checks; returns true if any MDSMap changed.
bool FSMap::check_health(void)
{
  bool changed = false;
  const auto num_standby = (mds_rank_t)standby_daemons.size();
  for (auto& [fscid, fs] : filesystems) {
    if (fs->mds_map.check_health(num_standby)) {
      changed = true;
    }
  }
  return changed;
}
// Modern health-check reporting: merges each filesystem's own checks and
// adds FS_WITH_FAILED_MDS (for failed ranks that no standby can replace)
// and MDS_INSUFFICIENT_STANDBY (cluster-wide standby shortfall).
void FSMap::get_health_checks(health_check_map_t *checks) const
{
  mds_rank_t standby_count_wanted = 0;
  for (const auto &i : filesystems) {
    const auto &fs = i.second;
    health_check_map_t fschecks;
    fs->mds_map.get_health_checks(&fschecks);
    // Some of the failed ranks might be transient (i.e. there are standbys
    // ready to replace them). We will report only on "stuck" failed, i.e.
    // ranks which are failed and have no standby replacement available.
    std::set<mds_rank_t> stuck_failed;
    for (const auto &rank : fs->mds_map.failed) {
      auto rep_info = find_replacement_for({fs->fscid, rank});
      if (!rep_info) {
        stuck_failed.insert(rank);
      }
    }
    // FS_WITH_FAILED_MDS
    if (!stuck_failed.empty()) {
      health_check_t& fscheck = checks->get_or_add(
        "FS_WITH_FAILED_MDS", HEALTH_WARN,
        "%num% filesystem%plurals% %hasorhave% a failed mds daemon", 1);
      CachedStackStringStream css;
      *css << "fs " << fs->mds_map.fs_name << " has " << stuck_failed.size()
           << " failed mds" << (stuck_failed.size() > 1 ? "s" : "");
      fscheck.detail.push_back(css->str()); }
    checks->merge(fschecks);
    standby_count_wanted = std::max(
        standby_count_wanted,
        fs->mds_map.get_standby_count_wanted((mds_rank_t)standby_daemons.size()));
  }
  // MDS_INSUFFICIENT_STANDBY
  if (standby_count_wanted) {
    CachedStackStringStream css1, css2;
    *css1 << "insufficient standby MDS daemons available";
    auto& d = checks->get_or_add("MDS_INSUFFICIENT_STANDBY", HEALTH_WARN, css1->str(), 1);
    *css2 << "have " << standby_daemons.size() << "; want " << standby_count_wanted
          << " more";
    d.detail.push_back(css2->str());
  }
}
// Serialize the FSMap. Field order must stay in sync with decode();
// filesystems are flattened into a vector for encoding.
void FSMap::encode(bufferlist& bl, uint64_t features) const
{
  ENCODE_START(STRUCT_VERSION, 6, bl);
  encode(epoch, bl);
  encode(next_filesystem_id, bl);
  encode(legacy_client_fscid, bl);
  encode(default_compat, bl);
  encode(enable_multiple, bl);
  {
    // Encode the filesystems as a vector of refs (map keys are redundant:
    // each Filesystem carries its own fscid).
    std::vector<Filesystem::ref> v;
    v.reserve(filesystems.size());
    for (auto& p : filesystems) v.emplace_back(p.second);
    encode(v, bl, features);
  }
  encode(mds_roles, bl);
  encode(standby_daemons, bl, features);
  encode(standby_epochs, bl);
  encode(ever_enabled_multiple, bl);
  ENCODE_FINISH(bl);
}
// Deserialize the FSMap; rejects encodings older than v7. Field order
// mirrors encode(). The decoded struct version is remembered so callers
// can tell how old the persisted map was.
void FSMap::decode(bufferlist::const_iterator& p)
{
  struct_version = 0;
  DECODE_START(STRUCT_VERSION, p);
  DECODE_OLDEST(7);
  struct_version = struct_v;
  decode(epoch, p);
  decode(next_filesystem_id, p);
  decode(legacy_client_fscid, p);
  decode(default_compat, p);
  decode(enable_multiple, p);
  {
    // Rebuild the fscid-keyed map from the encoded vector; duplicate
    // fscids would indicate a corrupt encoding.
    std::vector<Filesystem::ref> v;
    decode(v, p);
    filesystems.clear();
    for (auto& ref : v) {
      auto em = filesystems.emplace(std::piecewise_construct, std::forward_as_tuple(ref->fscid), std::forward_as_tuple(std::move(ref)));
      ceph_assert(em.second);
    }
  }
  decode(mds_roles, p);
  decode(standby_daemons, p);
  decode(standby_epochs, p);
  if (struct_v >= 7) {
    decode(ever_enabled_multiple, p);
  }
  DECODE_FINISH(p);
}
// Have each filesystem drop references to pools that no longer exist,
// as reported by the supplied predicate.
void FSMap::sanitize(const std::function<bool(int64_t pool)>& pool_exists)
{
  for (auto& [fscid, fs] : filesystems) {
    fs->mds_map.sanitize(pool_exists);
  }
}
// Serialize one filesystem (v2): fscid, the MDSMap nested in its own
// length-prefixed bufferlist, then the mirroring info.
void Filesystem::encode(bufferlist& bl, uint64_t features) const
{
  ENCODE_START(2, 1, bl);
  encode(fscid, bl);
  bufferlist mdsmap_bl;
  mds_map.encode(mdsmap_bl, features);
  encode(mdsmap_bl, bl);
  encode(mirror_info, bl);
  ENCODE_FINISH(bl);
}
// Deserialize one filesystem. mirror_info only exists from v2 onwards;
// older encodings leave it default-constructed.
void Filesystem::decode(bufferlist::const_iterator& p)
{
  DECODE_START(2, p);
  decode(fscid, p);
  bufferlist mdsmap_bl;
  decode(mdsmap_bl, p);
  auto mdsmap_bl_iter = mdsmap_bl.cbegin();
  mds_map.decode(mdsmap_bl_iter);
  if (struct_v >= 2) {
    decode(mirror_info, p);
  }
  DECODE_FINISH(p);
}
// Resolve a filesystem from a string that is either its numeric ID or
// its name. On success, stores it in *result and returns 0; otherwise
// returns -CEPHFS_ENOENT.
int FSMap::parse_filesystem(
    std::string_view ns_str,
    Filesystem::const_ref* result
    ) const
{
  std::string err;
  std::string str(ns_str);
  fs_cluster_id_t fscid = strict_strtol(str.c_str(), 10, &err);
  if (err.empty() && filesystems.count(fscid) != 0) {
    // Parsed cleanly as a known numeric ID.
    *result = get_filesystem(fscid);
    return 0;
  }
  // Otherwise fall back to a lookup by name.
  for (const auto& [id, fs] : filesystems) {
    if (fs->mds_map.fs_name == str) {
      *result = std::const_pointer_cast<const Filesystem>(fs);
      return 0;
    }
  }
  return -CEPHFS_ENOENT;
}
// Multi-line human-readable dump: header, MDSMap, and (if mirrored)
// the mirroring configuration.
void Filesystem::print(std::ostream &out) const
{
  out << "Filesystem '" << mds_map.fs_name;
  out << "' (" << fscid << ")" << std::endl;
  mds_map.print(out);
  if (mirror_info.is_mirrored()) {
    mirror_info.print(out);
  }
}
// True if at least one filesystem is currently degraded.
bool FSMap::is_any_degraded() const
{
  for (const auto& [fscid, fs] : filesystems) {
    if (fs->mds_map.is_degraded()) {
      return true;
    }
  }
  return false;
}
std::map<mds_gid_t, MDSMap::mds_info_t> FSMap::get_mds_info() const
{
std::map<mds_gid_t, mds_info_t> result;
for (const auto &i : standby_daemons) {
result[i.first] = i.second;
}
for (const auto &i : filesystems) {
const auto &fs_info = i.second->mds_map.get_mds_info();
for (const auto &j : fs_info) {
result[j.first] = j.second;
}
}
return result;
}
// Pick a standby able to serve filesystem `fs`. Preference order:
// (1) a standby whose join_fscid affinity matches fs (stop searching),
// (2) a vanilla standby with no affinity, (3) as a last resort, a
// standby pinned to some other fs. Laggy, frozen and compat-incompatible
// standbys are skipped. Returns nullptr when nothing usable exists.
const MDSMap::mds_info_t* FSMap::get_available_standby(const Filesystem& fs) const
{
  const bool upgradeable = fs.is_upgradeable();
  const mds_info_t* who = nullptr;
  for (const auto& [gid, info] : standby_daemons) {
    ceph_assert(info.rank == MDS_RANK_NONE);
    ceph_assert(info.state == MDSMap::STATE_STANDBY);
    if (info.laggy() || info.is_frozen()) {
      continue;
    } else if (!info.compat.writeable(fs.mds_map.compat)) {
      /* standby is not compatible with this fs */
      continue;
    } else if (!upgradeable && !fs.mds_map.compat.writeable(info.compat)) {
      /* promotion would change fs.mds_map.compat and we're not upgradeable */
      continue;
    }
    if (info.join_fscid == fs.fscid) {
      who = &info;
      break;
    } else if (info.join_fscid == FS_CLUSTER_ID_NONE) {
      who = &info; /* vanilla standby */
    } else if (who == nullptr) {
      who = &info; /* standby for another fs, last resort */
    }
  }
  return who;
}
// Resolve a daemon name to its gid, or MDS_GID_NONE if unknown.
mds_gid_t FSMap::find_mds_gid_by_name(std::string_view s) const
{
  for (const auto& [gid, info] : get_mds_info()) {
    if (info.name == s) {
      return gid;
    }
  }
  return MDS_GID_NONE;
}
// Locate a daemon (standby or assigned to a filesystem) by its name.
// Returns a pointer into the FSMap's own storage, or nullptr if no
// daemon with that name exists.
//
// Fix: dropped a dead local (`std::map<mds_gid_t, mds_info_t> result;`)
// that was declared but never used.
const MDSMap::mds_info_t* FSMap::find_by_name(std::string_view name) const
{
  // Standbys first: they are not listed in any filesystem's MDSMap.
  for (const auto &i : standby_daemons) {
    if (i.second.name == name) {
      return &(i.second);
    }
  }
  for (const auto &i : filesystems) {
    const auto &fs_info = i.second->mds_map.get_mds_info();
    for (const auto &j : fs_info) {
      if (j.second.name == name) {
        return &(j.second);
      }
    }
  }
  return nullptr;
}
// Choose a daemon to take over `role`: an unfrozen standby-replay that is
// already following that rank wins outright; a frozen one blocks any
// replacement (returns nullptr); otherwise fall back to the general
// standby pool via get_available_standby().
const MDSMap::mds_info_t* FSMap::find_replacement_for(mds_role_t role) const
{
  auto&& fs = get_filesystem(role.fscid);
  // First see if we have a STANDBY_REPLAY
  for (const auto& [gid, info] : fs->mds_map.mds_info) {
    if (info.rank == role.rank && info.state == MDSMap::STATE_STANDBY_REPLAY) {
      if (info.is_frozen()) {
        /* the standby-replay is frozen, do nothing! */
        return nullptr;
      } else {
        ceph_assert(info.compat.writeable(fs->mds_map.compat));
        return &info;
      }
    }
  }
  return get_available_standby(*fs);
}
// Assert the FSMap's internal invariants: mds_roles, standby_daemons,
// standby_epochs and each filesystem's MDSMap must agree with each other.
// `pending` relaxes checks that only hold for committed maps.
void FSMap::sanity(bool pending) const
{
  /* Only do some sanity checks on **new** FSMaps. Older versions may not be
   * compliant.
   */
  if (legacy_client_fscid != FS_CLUSTER_ID_NONE) {
    ceph_assert(filesystems.count(legacy_client_fscid) == 1);
  }
  for (const auto& [fscid, fs] : filesystems) {
    ceph_assert(fscid == fs->fscid);
    for (const auto& [gid, info] : fs->mds_map.mds_info) {
      // Daemons inside a filesystem always hold a rank and must not also
      // appear in the standby tables.
      ceph_assert(info.rank != MDS_RANK_NONE);
      ceph_assert(mds_roles.at(gid) == fscid);
      ceph_assert(standby_daemons.count(gid) == 0);
      ceph_assert(standby_epochs.count(gid) == 0);
      if (info.state != MDSMap::STATE_STANDBY_REPLAY) {
        ceph_assert(fs->mds_map.up.at(info.rank) == gid);
        ceph_assert(fs->mds_map.failed.count(info.rank) == 0);
        ceph_assert(fs->mds_map.damaged.count(info.rank) == 0);
      } else {
        ceph_assert(!pending || fs->mds_map.allows_standby_replay());
      }
      ceph_assert(info.compat.writeable(fs->mds_map.compat));
    }
    for (const auto &j : fs->mds_map.up) {
      mds_rank_t rank = j.first;
      ceph_assert(fs->mds_map.in.count(rank) == 1);
      mds_gid_t gid = j.second;
      ceph_assert(fs->mds_map.mds_info.count(gid) == 1);
    }
  }
  for (const auto &i : standby_daemons) {
    // Standbys must be rankless, self-consistent, and tracked in both
    // standby_epochs and mds_roles (with no filesystem assigned).
    ceph_assert(i.second.state == MDSMap::STATE_STANDBY);
    ceph_assert(i.second.rank == MDS_RANK_NONE);
    ceph_assert(i.second.global_id == i.first);
    ceph_assert(standby_epochs.count(i.first) == 1);
    ceph_assert(mds_roles.count(i.first) == 1);
    ceph_assert(mds_roles.at(i.first) == FS_CLUSTER_ID_NONE);
  }
  for (const auto &i : standby_epochs) {
    ceph_assert(standby_daemons.count(i.first) == 1);
  }
  for (const auto &i : mds_roles) {
    if (i.second == FS_CLUSTER_ID_NONE) {
      ceph_assert(standby_daemons.count(i.first) == 1);
    } else {
      ceph_assert(filesystems.count(i.second) == 1);
      ceph_assert(filesystems.at(i.second)->mds_map.mds_info.count(i.first) == 1);
    }
  }
}
// Assign a daemon to a rank of `filesystem`. The daemon may be a plain
// standby (moved out of the standby tables) or an existing standby-replay
// follower of that same rank (promoted in place). The new state depends
// on the rank's history: STARTING for a previously stopped rank,
// CREATING for a brand-new rank, REPLAY when replacing a failed one.
void FSMap::promote(
    mds_gid_t standby_gid,
    Filesystem& filesystem,
    mds_rank_t assigned_rank)
{
  ceph_assert(gid_exists(standby_gid));
  bool is_standby_replay = mds_roles.at(standby_gid) != FS_CLUSTER_ID_NONE;
  if (!is_standby_replay) {
    ceph_assert(standby_daemons.count(standby_gid));
    ceph_assert(standby_daemons.at(standby_gid).state == MDSMap::STATE_STANDBY);
  }
  MDSMap &mds_map = filesystem.mds_map;
  // Insert daemon state to Filesystem
  if (!is_standby_replay) {
    mds_map.mds_info[standby_gid] = standby_daemons.at(standby_gid);
  } else {
    ceph_assert(mds_map.mds_info.count(standby_gid));
    ceph_assert(mds_map.mds_info.at(standby_gid).state == MDSMap::STATE_STANDBY_REPLAY);
    ceph_assert(mds_map.mds_info.at(standby_gid).rank == assigned_rank);
  }
  auto& info = mds_map.mds_info.at(standby_gid);
  // Widen the fs compat set if the daemon requires it (only legal while
  // the fs is upgradeable).
  if (!filesystem.mds_map.compat.writeable(info.compat)) {
    ceph_assert(filesystem.is_upgradeable());
    filesystem.mds_map.compat.merge(info.compat);
  }
  if (mds_map.stopped.erase(assigned_rank)) {
    // The cluster is being expanded with a stopped rank
    info.state = MDSMap::STATE_STARTING;
  } else if (!mds_map.is_in(assigned_rank)) {
    // The cluster is being expanded with a new rank
    info.state = MDSMap::STATE_CREATING;
  } else {
    // An existing rank is being assigned to a replacement
    info.state = MDSMap::STATE_REPLAY;
    mds_map.failed.erase(assigned_rank);
  }
  info.rank = assigned_rank;
  info.inc = epoch;
  mds_roles.at(standby_gid) = filesystem.fscid;
  // Update the rank state in Filesystem
  mds_map.in.insert(assigned_rank);
  mds_map.up[assigned_rank] = standby_gid;
  // Remove from the list of standbys
  if (!is_standby_replay) {
    standby_daemons.erase(standby_gid);
    standby_epochs.erase(standby_gid);
  }
  // Indicate that Filesystem has been modified
  mds_map.epoch = epoch;
}
// Turn a plain standby into a standby-replay follower of rank
// `leader_rank` in filesystem `leader_ns`: copy its info into that
// filesystem's MDSMap and remove it from the standby tables.
void FSMap::assign_standby_replay(
    const mds_gid_t standby_gid,
    const fs_cluster_id_t leader_ns,
    const mds_rank_t leader_rank)
{
  ceph_assert(mds_roles.at(standby_gid) == FS_CLUSTER_ID_NONE);
  ceph_assert(gid_exists(standby_gid));
  ceph_assert(!gid_has_rank(standby_gid));
  ceph_assert(standby_daemons.count(standby_gid));
  // Insert to the filesystem
  auto fs = filesystems.at(leader_ns);
  fs->mds_map.mds_info[standby_gid] = standby_daemons.at(standby_gid);
  fs->mds_map.mds_info[standby_gid].rank = leader_rank;
  fs->mds_map.mds_info[standby_gid].state = MDSMap::STATE_STANDBY_REPLAY;
  mds_roles[standby_gid] = leader_ns;
  // Remove from the list of standbys
  standby_daemons.erase(standby_gid);
  standby_epochs.erase(standby_gid);
  // Indicate that Filesystem has been modified
  fs->mds_map.epoch = epoch;
}
// Remove a daemon from the map entirely. Standbys are simply dropped.
// For an in-filesystem daemon, its rank either vanishes (if it never got
// past CREATING) or is moved to the failed set so a standby can claim it;
// blocklist_epoch records the OSD epoch of the associated blocklisting.
void FSMap::erase(mds_gid_t who, epoch_t blocklist_epoch)
{
  if (mds_roles.at(who) == FS_CLUSTER_ID_NONE) {
    standby_daemons.erase(who);
    standby_epochs.erase(who);
  } else {
    auto &fs = filesystems.at(mds_roles.at(who));
    const auto &info = fs->mds_map.mds_info.at(who);
    if (info.state != MDSMap::STATE_STANDBY_REPLAY) {
      if (info.state == MDSMap::STATE_CREATING) {
        // If this gid didn't make it past CREATING, then forget
        // the rank ever existed so that next time it's handed out
        // to a gid it'll go back into CREATING.
        fs->mds_map.in.erase(info.rank);
      } else {
        // Put this rank into the failed list so that the next available
        // STANDBY will pick it up.
        fs->mds_map.failed.insert(info.rank);
      }
      ceph_assert(fs->mds_map.up.at(info.rank) == info.global_id);
      fs->mds_map.up.erase(info.rank);
    }
    fs->mds_map.mds_info.erase(who);
    fs->mds_map.last_failure_osd_epoch = blocklist_epoch;
    fs->mds_map.epoch = epoch;
  }
  mds_roles.erase(who);
}
// Mark the rank held by `who` as damaged: erase the daemon (which puts
// the rank in `failed`), then move the rank from failed to damaged so no
// standby will pick it up until it is explicitly repaired.
void FSMap::damaged(mds_gid_t who, epoch_t blocklist_epoch)
{
  ceph_assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
  auto fs = filesystems.at(mds_roles.at(who));
  mds_rank_t rank = fs->mds_map.mds_info.at(who).rank;
  erase(who, blocklist_epoch);
  fs->mds_map.failed.erase(rank);
  fs->mds_map.damaged.insert(rank);
  ceph_assert(fs->mds_map.epoch == epoch);
}
/**
 * Clear the damaged flag for rank `rank` in filesystem `fscid`, moving
 * the rank back to the failed set so a standby can be assigned to it.
 *
 * @return true if the rank was previously marked damaged
 */
bool FSMap::undamaged(const fs_cluster_id_t fscid, const mds_rank_t rank)
{
  auto fs = filesystems.at(fscid);
  if (fs->mds_map.damaged.erase(rank) == 0) {
    return false;
  }
  fs->mds_map.failed.insert(rank);
  fs->mds_map.epoch = epoch;
  return true;
}
// Register a new standby daemon from a boot beacon, recording the current
// epoch for it. Old daemons' empty compat sets are backfilled with the
// v16.2.4 defaults.
void FSMap::insert(const MDSMap::mds_info_t &new_info)
{
  static const CompatSet empty;
  ceph_assert(new_info.state == MDSMap::STATE_STANDBY);
  ceph_assert(new_info.rank == MDS_RANK_NONE);
  mds_roles[new_info.global_id] = FS_CLUSTER_ID_NONE;
  auto& info = standby_daemons[new_info.global_id];
  info = new_info;
  if (empty.compare(info.compat) == 0) {
    // bootstrap old compat: boot beacon contains empty compat on old (v16.2.4
    // or older) MDS.
    info.compat = MDSMap::get_compat_set_v16_2_4();
  }
  /* TODO remove after R is released
   * Insert INLINE; see comment in MDSMap::decode.
   */
  info.compat.incompat.insert(MDS_FEATURE_INCOMPAT_INLINE);
  standby_epochs[new_info.global_id] = epoch;
}
// Cleanly deactivate the rank held by `who`: the rank moves to `stopped`
// (so a restart reloads its old inotable instead of creating a new one),
// the daemon is removed, and any standby-replay daemons that were
// following that rank are erased. Returns the erased standby gids.
std::vector<mds_gid_t> FSMap::stop(mds_gid_t who)
{
  ceph_assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
  auto fs = filesystems.at(mds_roles.at(who));
  const auto &info = fs->mds_map.mds_info.at(who);
  fs->mds_map.up.erase(info.rank);
  fs->mds_map.in.erase(info.rank);
  fs->mds_map.stopped.insert(info.rank);
  // Also drop any standby replays that were following this rank
  std::vector<mds_gid_t> standbys;
  for (const auto &i : fs->mds_map.mds_info) {
    const auto &other_gid = i.first;
    const auto &other_info = i.second;
    if (other_info.rank == info.rank
        && other_info.state == MDSMap::STATE_STANDBY_REPLAY) {
      standbys.push_back(other_gid);
    }
  }
  // Erase outside the loop above: erase() mutates mds_info.
  for (const auto &other_gid : standbys) {
    erase(other_gid, 0);
  }
  fs->mds_map.mds_info.erase(who);
  mds_roles.erase(who);
  fs->mds_map.epoch = epoch;
  return standbys;
}
/**
 * Given one of the following forms:
 *   <fs name>:<rank>
 *   <fs id>:<rank>
 *   <rank>
 *
 * Parse into a mds_role_t. The rank-only form is only valid
 * if legacy_client_ns is set.
 *
 * This overload additionally rejects roles whose filesystem name is not
 * in `filter`; an empty filter admits every filesystem.
 */
int FSMap::parse_role(
    std::string_view role_str,
    mds_role_t *role,
    std::ostream &ss,
    const std::vector<string> &filter) const
{
  int r = parse_role(role_str, role, ss);
  if (r < 0) return r;

  string_view fs_name = get_filesystem(role->fscid)->mds_map.get_fs_name();

  if (!filter.empty() &&
      std::find(filter.begin(), filter.end(), fs_name) == filter.end()) {
    // Fix: the old `if (r >= 0)` guard here was dead code — r is always
    // >= 0 at this point because negative values returned early above.
    ss << "Invalid file system";
    return -CEPHFS_ENOENT;
  }

  return r;
}
// Parse "<fs>:<rank>" or bare "<rank>" (the latter only when a legacy
// default filesystem is set) into *role. Returns 0 on success, or a
// negative CEPHFS error with a human-readable reason appended to ss.
int FSMap::parse_role(
    std::string_view role_str,
    mds_role_t *role,
    std::ostream &ss) const
{
  size_t colon_pos = role_str.find(":");
  size_t rank_pos;
  Filesystem::const_ref fs;
  if (colon_pos == std::string::npos) {
    // Bare-rank form: implicitly targets the legacy default filesystem.
    if (legacy_client_fscid == FS_CLUSTER_ID_NONE) {
      ss << "No filesystem selected";
      return -CEPHFS_ENOENT;
    }
    fs = get_filesystem(legacy_client_fscid);
    rank_pos = 0;
  } else {
    if (parse_filesystem(role_str.substr(0, colon_pos), &fs) < 0) {
      ss << "Invalid filesystem";
      return -CEPHFS_ENOENT;
    }
    rank_pos = colon_pos+1;
  }
  mds_rank_t rank;
  std::string err;
  std::string rank_str(role_str.substr(rank_pos));
  long rank_i = strict_strtol(rank_str.c_str(), 10, &err);
  if (rank_i < 0 || !err.empty()) {
    ss << "Invalid rank '" << rank_str << "'";
    return -CEPHFS_EINVAL;
  } else {
    rank = rank_i;
  }
  // The rank must actually exist (be "in") in the chosen filesystem.
  if (fs->mds_map.in.count(rank) == 0) {
    ss << "Rank '" << rank << "' not found";
    return -CEPHFS_ENOENT;
  }
  *role = {fs->fscid, rank};
  return 0;
}
// True if any filesystem uses the given pool as a data or metadata pool.
bool FSMap::pool_in_use(int64_t poolid) const
{
  for (const auto& [fscid, fs] : filesystems) {
    const auto& mds_map = fs->mds_map;
    if (mds_map.is_data_pool(poolid) || mds_map.metadata_pool == poolid) {
      return true;
    }
  }
  return false;
}
// Remove a filesystem from the map, then clear any daemon affinity
// (join_fscid) that pointed at it — both on standbys and on daemons of
// the remaining filesystems — so those daemons behave as generic
// standbys again.
//
// Cleanup: the two affinity-clearing loops previously duplicated the
// same body; it is now factored into a shared lambda.
void FSMap::erase_filesystem(fs_cluster_id_t fscid)
{
  filesystems.erase(fscid);
  // Drop a single daemon's affinity for the erased filesystem.
  auto clear_affinity = [this, fscid](mds_gid_t gid, const auto& info) {
    if (info.join_fscid == fscid) {
      modify_daemon(gid, [](auto& info) {
        info.join_fscid = FS_CLUSTER_ID_NONE;
      });
    }
  };
  for (const auto& [gid, info] : standby_daemons) {
    clear_affinity(gid, info);
  }
  for (const auto& p : filesystems) {
    for (const auto& [gid, info] : p.second->mds_map.get_mds_info()) {
      clear_affinity(gid, info);
    }
  }
}
| 34,756 | 27.583059 | 136 | cc |
null | ceph-main/src/mds/FSMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_FSMAP_H
#define CEPH_FSMAP_H
#include <map>
#include <memory>
#include <set>
#include <string>
#include <string_view>
#include <errno.h>
#include "include/types.h"
#include "common/Clock.h"
#include "mds/MDSMap.h"
#include "include/CompatSet.h"
#include "include/ceph_features.h"
#include "include/common_fwd.h"
#include "common/Formatter.h"
#include "mds/mdstypes.h"
#if __cplusplus <= 201703L
// Pre-C++20 polyfill for std::erase_if on std::map: remove every element
// matching `pred` and return the number of elements removed.
template<class Key, class T, class Compare, class Alloc, class Pred>
typename std::map<Key, T, Compare, Alloc>::size_type
erase_if(std::map<Key, T, Compare, Alloc>& c, Pred pred) {
  const auto before = c.size();
  for (auto it = c.begin(); it != c.end(); ) {
    if (pred(*it)) {
      // map::erase returns the iterator following the removed element.
      it = c.erase(it);
    } else {
      ++it;
    }
  }
  return before - c.size();
}
#endif
class health_check_map_t;
// Identity of a remote peer cluster used for mirroring: the client to
// authenticate as, the remote cluster's name and the remote fs name.
struct ClusterInfo {
  ClusterInfo() = default;
  ClusterInfo(std::string_view client_name, std::string_view cluster_name,
              std::string_view fs_name)
    : client_name(client_name),
      cluster_name(cluster_name),
      fs_name(fs_name) {
  }

  std::string client_name;   // e.g. "client.mirror"
  std::string cluster_name;  // remote ceph cluster name
  std::string fs_name;       // remote filesystem name

  // Full-field equality: all three components must match.
  bool operator==(const ClusterInfo &cluster_info) const {
    return client_name == cluster_info.client_name &&
           cluster_name == cluster_info.cluster_name &&
           fs_name == cluster_info.fs_name;
  }

  void dump(ceph::Formatter *f) const;
  void print(std::ostream& out) const;

  void encode(ceph::buffer::list &bl) const;
  void decode(ceph::buffer::list::const_iterator &iter);
};

inline std::ostream& operator<<(std::ostream& out, const ClusterInfo &cluster_info) {
  out << "{client_name=" << cluster_info.client_name << ", cluster_name="
      << cluster_info.cluster_name << ", fs_name=" << cluster_info.fs_name << "}";
  return out;
}
// A mirroring peer. Identified solely by its UUID; `remote` carries the
// remote cluster details for display/matching purposes.
struct Peer {
  Peer() = default;
  Peer(std::string_view uuid)
    : uuid(uuid) {
  }
  Peer(std::string_view uuid,
       const ClusterInfo &remote)
    : uuid(uuid),
      remote(remote) {
  }

  std::string uuid;
  ClusterInfo remote;

  // Comparison/ordering uses only the UUID -- `remote` is deliberately
  // ignored, so a std::set<Peer> is keyed by UUID.
  bool operator==(const Peer &rhs) const {
    return uuid == rhs.uuid;
  }

  bool operator<(const Peer &rhs) const {
    return uuid < rhs.uuid;
  }

  void dump(ceph::Formatter *f) const;
  void print(std::ostream& out) const;

  void encode(ceph::buffer::list &bl) const;
  void decode(ceph::buffer::list::const_iterator &iter);
};
typedef std::set<Peer> Peers;

inline std::ostream& operator<<(std::ostream& out, const Peer &peer) {
  out << "{uuid=" << peer.uuid << ", remote_cluster=" << peer.remote << "}";
  return out;
}
// Per-filesystem mirroring state: whether mirroring is enabled plus the
// set of configured peers.
struct MirrorInfo {
  MirrorInfo() = default;

  bool is_mirrored() const {
    return mirrored;
  }
  void enable_mirroring() {
    mirrored = true;
  }
  // Disabling mirroring also forgets all configured peers.
  void disable_mirroring() {
    peers.clear();
    mirrored = false;
  }

  // uuid variant check
  bool has_peer(std::string_view uuid) const {
    return peers.find(Peer(uuid)) != peers.end();
  }
  // client_name/cluster_name/fs_name variant check
  bool has_peer(std::string_view client_name,
                std::string_view cluster_name,
                std::string_view fs_name) const {
    ClusterInfo cluster_info(client_name, cluster_name, fs_name);
    for (auto &peer : peers) {
      if (peer.remote == cluster_info) {
        return true;
      }
    }
    return false;
  }
  bool has_peers() const {
    return !peers.empty();
  }

  void peer_add(std::string_view uuid,
                std::string_view client_name,
                std::string_view cluster_name,
                std::string_view fs_name) {
    peers.emplace(Peer(uuid, ClusterInfo(client_name, cluster_name, fs_name)));
  }
  // Relies on Peer being implicitly constructible from a uuid for
  // set::erase's key lookup.
  void peer_remove(std::string_view uuid) {
    peers.erase(uuid);
  }

  bool mirrored = false;
  Peers peers;

  void dump(ceph::Formatter *f) const;
  void print(std::ostream& out) const;

  void encode(ceph::buffer::list &bl) const;
  void decode(ceph::buffer::list::const_iterator &iter);
};

inline std::ostream& operator<<(std::ostream& out, const MirrorInfo &mirror_info) {
  out << "{peers=" << mirror_info.peers << "}";
  return out;
}

WRITE_CLASS_ENCODER(ClusterInfo)
WRITE_CLASS_ENCODER(Peer)
WRITE_CLASS_ENCODER(MirrorInfo)
/**
* The MDSMap and any additional fields describing a particular
* filesystem (a unique fs_cluster_id_t).
*/
class Filesystem
{
public:
  using ref = std::shared_ptr<Filesystem>;
  using const_ref = std::shared_ptr<Filesystem const>;

  // Factory: always heap-allocate via shared_ptr, matching the `ref`
  // handle types used throughout FSMap.
  template<typename... Args>
  static ref create(Args&&... args)
  {
    return std::make_shared<Filesystem>(std::forward<Args>(args)...);
  }

  void encode(ceph::buffer::list& bl, uint64_t features) const;
  void decode(ceph::buffer::list::const_iterator& p);

  void dump(ceph::Formatter *f) const;
  void print(std::ostream& out) const;

  // Upgrades are only safe with at most one active MDS; when
  // standby-replay is allowed, no MDS may be active at all.
  bool is_upgradeable() const {
    return (mds_map.allows_standby_replay() && mds_map.get_num_in_mds() == 0)
       || (!mds_map.allows_standby_replay() && mds_map.get_num_in_mds() <= 1);
  }

  /**
   * Return true if a daemon is already assigned as
   * STANDBY_REPLAY for the gid `who`
   */
  bool has_standby_replay(mds_gid_t who) const
  {
    return get_standby_replay(who) != MDS_GID_NONE;
  }
  mds_gid_t get_standby_replay(mds_gid_t who) const;
  // True if `who` is a member of this filesystem in STANDBY_REPLAY state.
  bool is_standby_replay(mds_gid_t who) const
  {
    auto p = mds_map.mds_info.find(who);
    if (p != mds_map.mds_info.end() &&
        p->second.state == MDSMap::STATE_STANDBY_REPLAY) {
      return true;
    }
    return false;
  }

  fs_cluster_id_t fscid = FS_CLUSTER_ID_NONE;
  MDSMap mds_map;
  MirrorInfo mirror_info;
};
WRITE_CLASS_ENCODER_FEATURES(Filesystem)
class FSMap {
public:
  friend class MDSMonitor;
  friend class PaxosFSMap;
  using mds_info_t = MDSMap::mds_info_t;

  // On-wire structure version; maps encoded before STRUCT_VERSION_TRIM_TO
  // are reported as "old" by is_struct_old().
  static const version_t STRUCT_VERSION = 7;
  static const version_t STRUCT_VERSION_TRIM_TO = 7;

  FSMap() : default_compat(MDSMap::get_compat_set_default()) {}

  // Copy constructor: deep-copies each Filesystem so the new map owns
  // distinct objects instead of sharing mutable state with `rhs`.
  FSMap(const FSMap &rhs)
    :
      epoch(rhs.epoch),
      next_filesystem_id(rhs.next_filesystem_id),
      legacy_client_fscid(rhs.legacy_client_fscid),
      default_compat(rhs.default_compat),
      enable_multiple(rhs.enable_multiple),
      ever_enabled_multiple(rhs.ever_enabled_multiple),
      mds_roles(rhs.mds_roles),
      standby_daemons(rhs.standby_daemons),
      standby_epochs(rhs.standby_epochs),
      struct_version(rhs.struct_version)
  {
    filesystems.clear();
    for (const auto &i : rhs.filesystems) {
      const auto &fs = i.second;
      filesystems[fs->fscid] = std::make_shared<Filesystem>(*fs);
    }
  }

  FSMap &operator=(const FSMap &rhs);

  const CompatSet &get_default_compat() const {return default_compat;}

  // Restrict the map to the filesystems named in `allowed` (dropping
  // their daemon role entries too); an empty list means keep everything.
  void filter(const std::vector<std::string>& allowed)
  {
    if (allowed.empty()) {
      return;
    }

    erase_if(filesystems, [&](const auto& f) {
      return std::find(allowed.begin(), allowed.end(), f.second->mds_map.get_fs_name()) == allowed.end();
    });

    erase_if(mds_roles, [&](const auto& r) {
      return std::find(allowed.begin(), allowed.end(), fs_name_from_gid(r.first)) == allowed.end();
    });
  }

  // Enable/disable support for multiple filesystems; once enabled,
  // ever_enabled_multiple is latched true permanently.
  void set_enable_multiple(const bool v)
  {
    enable_multiple = v;
    if (true == v) {
      ever_enabled_multiple = true;
    }
  }

  bool get_enable_multiple() const
  {
    return enable_multiple;
  }

  // Set the filesystem used by legacy clients that do not name one;
  // must refer to an existing filesystem (or FS_CLUSTER_ID_NONE).
  void set_legacy_client_fscid(fs_cluster_id_t fscid)
  {
    ceph_assert(fscid == FS_CLUSTER_ID_NONE || filesystems.count(fscid));
    legacy_client_fscid = fscid;
  }

  fs_cluster_id_t get_legacy_client_fscid() const
  {
    return legacy_client_fscid;
  }

  size_t get_num_standby() const {
    return standby_daemons.size();
  }

  bool is_any_degraded() const;

  /**
   * Get state of all daemons (for all filesystems, including all standbys)
   */
  std::map<mds_gid_t, mds_info_t> get_mds_info() const;

  const mds_info_t* get_available_standby(const Filesystem& fs) const;

  /**
   * Resolve daemon name to GID
   */
  mds_gid_t find_mds_gid_by_name(std::string_view s) const;

  /**
   * Resolve daemon name to status
   */
  const mds_info_t* find_by_name(std::string_view name) const;

  /**
   * Does a daemon exist with this GID?
   */
  bool gid_exists(mds_gid_t gid,
		  const std::vector<std::string>& in = {}) const
  {
    try {
      // fs_name_from_gid throws out_of_range for unknown gids.
      std::string_view m = fs_name_from_gid(gid);
      return in.empty() || std::find(in.begin(), in.end(), m) != in.end();
    } catch (const std::out_of_range&) {
      return false;
    }
  }

  /**
   * Does a daemon with this GID exist, *and* have an MDS rank assigned?
   */
  bool gid_has_rank(mds_gid_t gid) const
  {
    return gid_exists(gid) && mds_roles.at(gid) != FS_CLUSTER_ID_NONE;
  }

  /**
   * Which filesystem owns this GID?
   */
  fs_cluster_id_t fscid_from_gid(mds_gid_t gid) const {
    if (!gid_exists(gid)) {
      return FS_CLUSTER_ID_NONE;
    }
    return mds_roles.at(gid);
  }

  /**
   * Insert a new MDS daemon, as a standby
   */
  void insert(const MDSMap::mds_info_t &new_info);

  /**
   * Assign an MDS cluster standby replay rank to a standby daemon
   */
  void assign_standby_replay(
      const mds_gid_t standby_gid,
      const fs_cluster_id_t leader_ns,
      const mds_rank_t leader_rank);

  /**
   * Assign an MDS cluster rank to a standby daemon
   */
  void promote(
      mds_gid_t standby_gid,
      Filesystem& filesystem,
      mds_rank_t assigned_rank);

  /**
   * A daemon reports that it is STATE_STOPPED: remove it,
   * and the rank it held.
   *
   * @returns a list of any additional GIDs that were removed from the map
   * as a side effect (like standby replays)
   */
  std::vector<mds_gid_t> stop(mds_gid_t who);

  /**
   * The rank held by 'who', if any, is to be relinquished, and
   * the state for the daemon GID is to be forgotten.
   */
  void erase(mds_gid_t who, epoch_t blocklist_epoch);

  /**
   * Update to indicate that the rank held by 'who' is damaged
   */
  void damaged(mds_gid_t who, epoch_t blocklist_epoch);

  /**
   * Update to indicate that the rank `rank` is to be removed
   * from the damaged list of the filesystem `fscid`
   */
  bool undamaged(const fs_cluster_id_t fscid, const mds_rank_t rank);

  /**
   * Initialize a Filesystem and assign a fscid.  Update legacy_client_fscid
   * to point to the new filesystem if it's the only one.
   *
   * Caller must already have validated all arguments vs. the existing
   * FSMap and OSDMap contents.
   */
  Filesystem::ref create_filesystem(
      std::string_view name, int64_t metadata_pool,
      int64_t data_pool, uint64_t features,
      fs_cluster_id_t fscid, bool recover);

  /**
   * Remove the filesystem (it must exist).  Caller should already
   * have failed out any MDSs that were assigned to the filesystem.
   */
  void erase_filesystem(fs_cluster_id_t fscid);

  /**
   * Reset all the state information (not configuration information)
   * in a particular filesystem.  Caller must have verified that
   * the filesystem already exists.
   */
  void reset_filesystem(fs_cluster_id_t fscid);

  /**
   * Mutator helper for Filesystem objects: expose a non-const
   * Filesystem pointer to `fn` and update epochs appropriately.
   */
  template<typename T>
  void modify_filesystem(fs_cluster_id_t fscid, T&& fn)
  {
    auto& fs = filesystems.at(fscid);
    fn(fs);
    fs->mds_map.epoch = epoch;
  }

  /**
   * Apply a mutation to the mds_info_t structure for a particular
   * daemon (identified by GID), and make appropriate updates to epochs.
   */
  template<typename T>
  void modify_daemon(mds_gid_t who, T&& fn)
  {
    // Standby daemons live in standby_daemons; assigned daemons live
    // inside their filesystem's MDSMap.  Bump the matching epoch record.
    const auto& fscid = mds_roles.at(who);
    if (fscid == FS_CLUSTER_ID_NONE) {
      auto& info = standby_daemons.at(who);
      fn(info);
      ceph_assert(info.state == MDSMap::STATE_STANDBY);
      standby_epochs[who] = epoch;
    } else {
      auto& fs = filesystems.at(fscid);
      auto& info = fs->mds_map.mds_info.at(who);
      fn(info);
      fs->mds_map.epoch = epoch;
    }
  }

  /**
   * Given that gid exists in a filesystem or as a standby, return
   * a reference to its info.
   */
  const mds_info_t& get_info_gid(mds_gid_t gid) const
  {
    auto fscid = mds_roles.at(gid);
    if (fscid == FS_CLUSTER_ID_NONE) {
      return standby_daemons.at(gid);
    } else {
      return filesystems.at(fscid)->mds_map.mds_info.at(gid);
    }
  }

  // Name of the filesystem `gid` belongs to; empty view for standbys.
  // Throws std::out_of_range if the gid is unknown.
  std::string_view fs_name_from_gid(mds_gid_t gid) const
  {
    auto fscid = mds_roles.at(gid);
    if (fscid == FS_CLUSTER_ID_NONE or !filesystem_exists(fscid)) {
      return std::string_view();
    } else {
      return get_filesystem(fscid)->mds_map.get_fs_name();
    }
  }

  bool is_standby_replay(mds_gid_t who) const
  {
    return filesystems.at(mds_roles.at(who))->is_standby_replay(who);
  }

  mds_gid_t get_standby_replay(mds_gid_t who) const
  {
    return filesystems.at(mds_roles.at(who))->get_standby_replay(who);
  }

  // Filesystem used by clients that do not name one; null if unset.
  Filesystem::const_ref get_legacy_filesystem()
  {
    if (legacy_client_fscid == FS_CLUSTER_ID_NONE) {
      return nullptr;
    } else {
      return filesystems.at(legacy_client_fscid);
    }
  }

  /**
   * A daemon has informed us of its offload targets
   */
  void update_export_targets(mds_gid_t who, const std::set<mds_rank_t> &targets)
  {
    auto fscid = mds_roles.at(who);
    modify_filesystem(fscid, [who, &targets](auto&& fs) {
      fs->mds_map.mds_info.at(who).export_targets = targets;
    });
  }

  epoch_t get_epoch() const { return epoch; }
  void inc_epoch() { epoch++; }

  version_t get_struct_version() const { return struct_version; }
  bool is_struct_old() const {
    return struct_version < STRUCT_VERSION_TRIM_TO;
  }

  size_t filesystem_count() const {return filesystems.size();}
  bool filesystem_exists(fs_cluster_id_t fscid) const {return filesystems.count(fscid) > 0;}
  Filesystem::const_ref get_filesystem(fs_cluster_id_t fscid) const {return std::const_pointer_cast<const Filesystem>(filesystems.at(fscid));}
  Filesystem::ref get_filesystem(fs_cluster_id_t fscid) {return filesystems.at(fscid);}
  Filesystem::ref get_filesystem(mds_gid_t gid) {
    return filesystems.at(mds_roles.at(gid));
  }
  // NOTE: returns an arbitrary (first) filesystem; callers should only
  // use this when at least one filesystem exists.
  Filesystem::const_ref get_filesystem(void) const {return std::const_pointer_cast<const Filesystem>(filesystems.begin()->second);}
  Filesystem::const_ref get_filesystem(std::string_view name) const;
  Filesystem::const_ref get_filesystem(mds_gid_t gid) const {
    return filesystems.at(mds_roles.at(gid));
  }

  std::vector<Filesystem::const_ref> get_filesystems(void) const;

  int parse_filesystem(
      std::string_view ns_str,
      Filesystem::const_ref *result
      ) const;

  int parse_role(
      std::string_view role_str,
      mds_role_t *role,
      std::ostream &ss,
      const std::vector<std::string> &filter) const;

  int parse_role(
      std::string_view role_str,
      mds_role_t *role,
      std::ostream &ss) const;

  /**
   * Return true if this pool is in use by any of the filesystems
   */
  bool pool_in_use(int64_t poolid) const;

  const mds_info_t* find_replacement_for(mds_role_t role) const;

  void get_health(std::list<std::pair<health_status_t,std::string> >& summary,
		  std::list<std::pair<health_status_t,std::string> > *detail) const;

  void get_health_checks(health_check_map_t *checks) const;

  bool check_health(void);

  /**
   * Assert that the FSMap, Filesystem, MDSMap, mds_info_t relations are
   * all self-consistent.
   */
  void sanity(bool pending=false) const;

  void encode(ceph::buffer::list& bl, uint64_t features) const;
  void decode(ceph::buffer::list::const_iterator& p);
  void decode(ceph::buffer::list& bl) {
    auto p = bl.cbegin();
    decode(p);
  }
  void sanitize(const std::function<bool(int64_t pool)>& pool_exists);

  void print(std::ostream& out) const;
  void print_summary(ceph::Formatter *f, std::ostream *out) const;
  void print_daemon_summary(std::ostream& out) const;
  void print_fs_summary(std::ostream& out) const;

  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<FSMap*>& ls);

protected:
  epoch_t epoch = 0;
  uint64_t next_filesystem_id = FS_CLUSTER_ID_ANONYMOUS + 1;
  fs_cluster_id_t legacy_client_fscid = FS_CLUSTER_ID_NONE;
  CompatSet default_compat;
  bool enable_multiple = true;
  bool ever_enabled_multiple = true; // < the cluster had multiple FS enabled once

  std::map<fs_cluster_id_t, Filesystem::ref> filesystems;

  // Remember which Filesystem an MDS daemon's info is stored in
  // (or in standby_daemons for FS_CLUSTER_ID_NONE)
  std::map<mds_gid_t, fs_cluster_id_t> mds_roles;

  // For MDS daemons not yet assigned to a Filesystem
  std::map<mds_gid_t, mds_info_t> standby_daemons;
  std::map<mds_gid_t, epoch_t> standby_epochs;

private:
  epoch_t struct_version = 0;
};
WRITE_CLASS_ENCODER_FEATURES(FSMap)
// Stream a one-line summary of the map (no Formatter output).
inline std::ostream& operator<<(std::ostream& out, const FSMap& m) {
  m.print_summary(nullptr, &out);  // nullptr rather than NULL, matching the rest of the file
  return out;
}
#endif
| 17,276 | 26.467409 | 142 | h |
null | ceph-main/src/mds/FSMapUser.cc | #include "FSMapUser.h"
// Encode FSMapUser (v1): epoch, legacy fscid, then the filesystems as a
// flat vector of fs_info_t.
void FSMapUser::encode(ceph::buffer::list& bl, uint64_t features) const
{
  ENCODE_START(1, 1, bl);
  encode(epoch, bl);
  encode(legacy_client_fscid, bl);
  std::vector<fs_info_t> fs_list;
  // Reserve up front: one allocation instead of growth-reallocations.
  fs_list.reserve(filesystems.size());
  for (const auto& p : filesystems)
    fs_list.push_back(p.second);
  encode(fs_list, bl, features);
  ENCODE_FINISH(bl);
}
// Decode FSMapUser (v1): the inverse of encode(); rebuilds the
// filesystems map keyed by each entry's cid.
void FSMapUser::decode(ceph::buffer::list::const_iterator& p)
{
  DECODE_START(1, p);
  decode(epoch, p);
  decode(legacy_client_fscid, p);
  std::vector<fs_info_t> fs_list;
  decode(fs_list, p);
  filesystems.clear();
  for (const auto& info : fs_list)
    filesystems[info.cid] = info;
  DECODE_FINISH(p);
}
// Encode fs_info_t (v1): cluster id followed by filesystem name.
void FSMapUser::fs_info_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
  ENCODE_START(1, 1, bl);
  encode(cid, bl);
  encode(name, bl);
  ENCODE_FINISH(bl);
}
// Decode fs_info_t (v1): inverse of encode(), same field order.
void FSMapUser::fs_info_t::decode(ceph::buffer::list::const_iterator& p)
{
  DECODE_START(1, p);
  decode(cid, p);
  decode(name, p);
  DECODE_FINISH(p);
}
// Produce a populated instance for encoding round-trip tests.
void FSMapUser::generate_test_instances(std::list<FSMapUser*>& ls)
{
  auto m = new FSMapUser();
  m->epoch = 2;
  m->legacy_client_fscid = 1;
  // Two filesystems, grouped per-entry for readability; the resulting
  // map contents are identical regardless of assignment order.
  m->filesystems[1].cid = 1;
  m->filesystems[1].name = "cephfs1";
  m->filesystems[2].cid = 2;
  m->filesystems[2].name = "cephfs2";
  ls.push_back(m);
}
// Multi-line human-readable dump: epoch, legacy fscid, then one line
// per filesystem.
void FSMapUser::print(std::ostream& out) const
{
  out << "e" << epoch << std::endl;
  out << "legacy_client_fscid: " << legacy_client_fscid << std::endl;
  for (const auto& [cid, info] : filesystems)
    out << " id " << info.cid << " name " << info.name << std::endl;
}
// One-line summary: structured output via `f` when supplied, otherwise
// "e<epoch>: name(id) ..." to `*out`.
// Note: exactly one of `f` / `out` is consulted; `out` must be non-null
// when `f` is null.
void FSMapUser::print_summary(ceph::Formatter *f, std::ostream *out)
{
  // Dropped unused locals `by_rank`/`by_state` that were never populated
  // or read.
  if (f) {
    f->dump_unsigned("epoch", get_epoch());
    for (const auto& p : filesystems) {
      f->dump_unsigned("id", p.second.cid);
      f->dump_string("name", p.second.name);
    }
  } else {
    *out << "e" << get_epoch() << ":";
    for (const auto& p : filesystems)
      *out << " " << p.second.name << "(" << p.second.cid << ")";
  }
}
| 2,096 | 24.573171 | 82 | cc |
null | ceph-main/src/mds/FSMapUser.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef CEPH_FSMAPCOMPACT_H
#define CEPH_FSMAPCOMPACT_H
#include <map>
#include <string>
#include <string_view>
#include "mds/mdstypes.h"
// Compact, client-facing view of the FSMap: just enough to map
// filesystem names to cluster ids plus the legacy default fscid.
class FSMapUser {
public:
  // Minimal per-filesystem record: name and cluster id only.
  struct fs_info_t {
    fs_info_t() {}
    void encode(ceph::buffer::list& bl, uint64_t features) const;
    void decode(ceph::buffer::list::const_iterator &bl);
    std::string name;
    fs_cluster_id_t cid = FS_CLUSTER_ID_NONE;
  };

  FSMapUser() {}

  epoch_t get_epoch() const { return epoch; }

  // Resolve a filesystem name to its cluster id; FS_CLUSTER_ID_NONE if
  // no filesystem by that name exists.
  fs_cluster_id_t get_fs_cid(std::string_view name) const {
    for (auto &p : filesystems) {
      if (p.second.name == name)
	return p.first;
    }
    return FS_CLUSTER_ID_NONE;
  }

  void encode(ceph::buffer::list& bl, uint64_t features) const;
  void decode(ceph::buffer::list::const_iterator& bl);

  void print(std::ostream& out) const;
  void print_summary(ceph::Formatter *f, std::ostream *out);

  static void generate_test_instances(std::list<FSMapUser*>& ls);

  std::map<fs_cluster_id_t, fs_info_t> filesystems;
  fs_cluster_id_t legacy_client_fscid = FS_CLUSTER_ID_NONE;
  epoch_t epoch = 0;
};
WRITE_CLASS_ENCODER_FEATURES(FSMapUser::fs_info_t)
WRITE_CLASS_ENCODER_FEATURES(FSMapUser)
// Stream a one-line summary of the map (no Formatter output).
inline std::ostream& operator<<(std::ostream& out, FSMapUser& m) {
  m.print_summary(nullptr, &out);  // nullptr rather than NULL, per modern C++ convention
  return out;
}
#endif
| 1,715 | 25.4 | 70 | h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.