repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null |
ceph-main/src/tools/monmaptool.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <string>
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "global/global_init.h"
#include "include/str_list.h"
#include "mon/MonMap.h"
using std::cerr;
using std::cout;
using std::list;
using std::map;
using std::ostream;
using std::set;
using std::string;
using std::string_view;
using std::vector;
// Print monmaptool's command-line synopsis to stdout.
void usage()
{
  // One contiguous literal (adjacent-string concatenation) emits exactly
  // the same bytes as the old chained inserters; std::endl supplies the
  // final newline and flush.
  cout << "usage: monmaptool [--print] [--create [--clobber] [--fsid uuid]]\n"
          " [--enable-all-features]\n"
          " [--generate] [--set-initial-members]\n"
          " [--add name 1.2.3.4:567] [--rm name]\n"
          " [--addv name [v2:1.2.4.5:567,v1:1.2.3.4:568]]\n"
          " [--feature-list [plain|parseable]]\n"
          " [--feature-set <value> [--optional|--persistent]]\n"
          " [--feature-unset <value> [--optional|--persistent]]\n"
          " [--set-min-mon-release <release-major-number>]\n"
          " <mapfilename>"
       << std::endl;
}
// Point the user at the built-in help text, then terminate with a
// failure status.  Used as the common error path for bad arguments.
void helpful_exit()
{
  // '\n' + std::flush is byte- and flush-identical to std::endl.
  cerr << "monmaptool -h for usage" << '\n' << std::flush;
  exit(1);
}
// Describes one feature-related request from the command line
// (--feature-set / --feature-unset / --feature-list): the operation, the
// feature table it targets (persistent vs optional, or the list output
// format), and the feature value itself.
struct feature_op_t {
  enum type_t {
    PERSISTENT,
    OPTIONAL,
    PLAIN,      // --feature-list human-readable output
    PARSEABLE,  // --feature-list colon-delimited output
    NONE
  };

  enum op_t {
    OP_SET,
    OP_UNSET,
    OP_LIST
  };

  op_t op;
  type_t type;
  mon_feature_t feature;

  feature_op_t() : op(OP_LIST), type(NONE) { }
  // default to 'persistent' feature if not specified
  feature_op_t(op_t o) : op(o), type(PERSISTENT) { }
  feature_op_t(op_t o, type_t t) : op(o), type(t) { }
  // Bug fix: 'feature' must be initialized from the supplied feature
  // value 'f'; it was previously constructed from the type enum 't',
  // which silently converted the enum to a (wrong) feature bit value.
  feature_op_t(op_t o, type_t t, mon_feature_t &f) :
    op(o), type(t), feature(f) { }

  void set_optional() {
    type = OPTIONAL;
  }
  void set_persistent() {
    type = PERSISTENT;
  }

  // Resolve 's' either as a known monitor feature name or, failing that,
  // as a raw base-10 numeric feature value.  Returns false (reporting to
  // *errout when provided) if neither interpretation works.
  bool parse_value(string &s, ostream *errout = NULL) {
    feature = ceph::features::mon::get_feature_by_name(s);
    if (feature != ceph::features::mon::FEATURE_NONE) {
      return true;
    }

    // try parsing as numerical value
    uint64_t feature_val;
    string interr;
    feature_val = strict_strtoll(s.c_str(), 10, &interr);
    if (!interr.empty()) {
      if (errout) {
        *errout << "unknown features name '" << s
                << "' or unable to parse value: " << interr << std::endl;
      }
      return false;
    }
    feature = mon_feature_t(feature_val);
    return true;
  }
};
// Dump the monmap's feature sets (persistent / optional / required) plus
// the features this build of the tool knows about, either in a
// human-readable layout (PLAIN) or a colon-delimited one (PARSEABLE).
void features_list(feature_op_t &f, MonMap &m)
{
  // Emit one "<label><feature-set>\n" record; mon_feature_t is a small
  // value type, so it is passed by copy.
  auto emit = [](const char *label, mon_feature_t feat) {
    cout << label;
    feat.print_with_value(cout);
    cout << std::endl;
  };

  if (f.type == feature_op_t::type_t::PLAIN) {
    cout << "MONMAP FEATURES:" << std::endl;
    emit(" persistent: ", m.persistent_features);
    emit(" optional: ", m.optional_features);
    emit(" required: ", m.get_required_features());
    cout << std::endl;
    cout << "AVAILABLE FEATURES:" << std::endl;
    emit(" supported: ", ceph::features::mon::get_supported());
    emit(" persistent: ", ceph::features::mon::get_persistent());
  } else if (f.type == feature_op_t::type_t::PARSEABLE) {
    emit("monmap:persistent:", m.persistent_features);
    emit("monmap:optional:", m.optional_features);
    emit("monmap:required:", m.get_required_features());
    emit("available:supported:", ceph::features::mon::get_supported());
    emit("available:persistent:", ceph::features::mon::get_persistent());
  }
}
// Apply every queued feature operation to monmap 'm'.  List operations
// only print; set/unset operations mutate the persistent or optional
// feature table.  Returns true iff the monmap was modified.
bool handle_features(list<feature_op_t>& lst, MonMap &m)
{
  if (lst.empty())
    return false;

  bool changed = false;
  for (auto &f : lst) {
    switch (f.op) {
    case feature_op_t::op_t::OP_LIST:
      features_list(f, m);
      break;
    case feature_op_t::op_t::OP_SET:
    case feature_op_t::op_t::OP_UNSET:
      {
        changed = true;
        // --optional targets the optional table; everything else
        // (including the default) targets the persistent table.
        mon_feature_t &target =
          (f.type == feature_op_t::type_t::OPTIONAL ?
           m.optional_features : m.persistent_features);
        if (f.op == feature_op_t::op_t::OP_SET) {
          target.set_feature(f.feature);
        } else {
          target.unset_feature(f.feature);
        }
      }
      break;
    default:
      // Unreachable with the current op_t values; kept defensively.
      cerr << "unknown feature operation type '" << f.op << "'" << std::endl;
      break;
    }
  }
  return changed;
}
// Entry point: parse monmaptool's command line, load (or create) the
// monmap named by the trailing filename argument, apply the requested
// mutations in a fixed order (create, features, generate, release,
// filter, fsid, add, addv, rm, feature ops), then write the map back if
// anything was modified.
int main(int argc, const char **argv)
{
  auto args = argv_to_vec(argc, argv);
  if (args.empty()) {
    cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    usage();
    exit(0);
  }

  const char *me = argv[0];

  // Accumulated command-line state.
  std::string fn;
  bool print = false;
  bool create = false;
  bool enable_all_features = false;
  bool clobber = false;
  bool modified = false;
  bool show_features = false;
  bool generate = false;
  bool filter = false;
  ceph_release_t min_mon_release = ceph_release_t::unknown;
  map<string,entity_addr_t> add;      // --add: name -> single address
  map<string,entity_addrvec_t> addv;  // --addv: name -> address vector
  list<string> rm;                    // --rm: monitor names to remove
  list<feature_op_t> features;        // queued feature set/unset/list ops

  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);

  std::string val;
  for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
    if (ceph_argparse_double_dash(args, i)) {
      break;
    } else if (ceph_argparse_flag(args, i, "-p", "--print", (char*)NULL)) {
      print = true;
    } else if (ceph_argparse_flag(args, i, "--create", (char*)NULL)) {
      create = true;
    } else if (ceph_argparse_flag(args, i, "--enable-all-features", (char*)NULL)) {
      enable_all_features = true;
    } else if (ceph_argparse_flag(args, i, "--clobber", (char*)NULL)) {
      clobber = true;
    } else if (ceph_argparse_flag(args, i, "--generate", (char*)NULL)) {
      generate = true;
    } else if (ceph_argparse_flag(args, i, "--set-initial-members", (char*)NULL)) {
      filter = true;
    } else if (ceph_argparse_witharg(args, i, &val, "--set-min-mon-release",
                                     (char*)NULL)) {
      min_mon_release = ceph_release_from_name(val);
    } else if (ceph_argparse_flag(args, i, "--add", (char*)NULL)) {
      // --add consumes two extra tokens manually: the mon name and its
      // address.
      string name = *i;
      i = args.erase(i);
      if (i == args.end())
        helpful_exit();
      entity_addr_t addr;
      if (!addr.parse(string_view{*i})) {
        // Either we couldn't parse the address or we didn't consume the entire token
        cerr << me << ": invalid ip:port '" << *i << "'" << std::endl;
        return -1;
      }
      add[name] = addr;
      modified = true;
      i = args.erase(i);
    } else if (ceph_argparse_flag(args, i, "--addv", (char*)NULL)) {
      // --addv: like --add, but takes a full address vector.
      string name = *i;
      i = args.erase(i);
      if (i == args.end())
        helpful_exit();
      entity_addrvec_t addrs;
      if (!addrs.parse(*i)) {
        cerr << me << ": invalid ip:port '" << *i << "'" << std::endl;
        return -1;
      }
      addv[name] = addrs;
      modified = true;
      i = args.erase(i);
    } else if (ceph_argparse_witharg(args, i, &val, "--rm", (char*)NULL)) {
      rm.push_back(val);
      modified = true;
    } else if (ceph_argparse_flag(args, i, "--feature-list", (char*)NULL)) {
      // Optional format token; anything else means "plain" and the
      // token is left for later parsing.
      string format = *i;
      if (format == "plain" || format == "parseable") {
        i = args.erase(i);
      } else {
        format = "plain";
      }

      feature_op_t f(feature_op_t::op_t::OP_LIST,
                     feature_op_t::type_t::PLAIN);
      if (format == "parseable") {
        f.type = feature_op_t::type_t::PARSEABLE;
      } else if (format != "plain") {
        // Unreachable given the normalization above; kept defensively.
        cerr << "invalid format type for list: '" << val << "'" << std::endl;
        helpful_exit();
      }

      features.push_back(f);
      show_features = true;
    } else if (ceph_argparse_witharg(args, i, &val,
                                     "--feature-set", (char*)NULL)) {
      // parse value
      feature_op_t f(feature_op_t::op_t::OP_SET);
      if (!f.parse_value(val, &cerr)) {
        helpful_exit();
      }

      features.push_back(f);
    } else if (ceph_argparse_witharg(args, i, &val,
                                     "--feature-unset", (char*)NULL)) {
      // parse value
      feature_op_t f(feature_op_t::op_t::OP_UNSET);
      if (!f.parse_value(val, &cerr)) {
        helpful_exit();
      }
      features.push_back(f);
    } else if (ceph_argparse_flag(args, i, "--optional", (char*)NULL)) {
      // --optional/--persistent modify the most recent feature op.
      if (features.empty()) {
        helpful_exit();
      }
      features.back().set_optional();
    } else if (ceph_argparse_flag(args, i, "--persistent", (char*)NULL)) {
      if (features.empty()) {
        helpful_exit();
      }
      features.back().set_persistent();
    } else {
      ++i;
    }
  }

  // Exactly one positional argument (the monmap filename) must remain.
  if (args.empty()) {
    cerr << me << ": must specify monmap filename" << std::endl;
    helpful_exit();
  }
  else if (args.size() > 1) {
    cerr << me << ": too many arguments" << std::endl;
    helpful_exit();
  }
  fn = args[0];

  MonMap monmap;

  cout << me << ": monmap file " << fn << std::endl;

  // Load the existing map unless --create --clobber makes it irrelevant.
  int r = 0;
  if (!(create && clobber)) {
    try {
      r = monmap.read(fn.c_str());
    } catch (...) {
      cerr << me << ": unable to read monmap file" << std::endl;
      return -1;
    }
  }

  if (!create && r < 0) {
    cerr << me << ": couldn't open " << fn << ": " << cpp_strerror(r) << std::endl;
    return -1;
  }
  else if (create && !clobber && r == 0) {
    cerr << me << ": " << fn << " exists, --clobber to overwrite" << std::endl;
    return -1;
  }

  if (create) {
    monmap.epoch = 0;
    monmap.created = ceph_clock_now();
    monmap.last_changed = monmap.created;
    srand(getpid() + time(0));  // seed for generate_fsid below
    if (g_conf().get_val<uuid_d>("fsid").is_zero()) {
      monmap.generate_fsid();
      cout << me << ": generated fsid " << monmap.fsid << std::endl;
    }
    monmap.strategy = static_cast<MonMap::election_strategy>(
      g_conf().get_val<uint64_t>("mon_election_default_strategy"));
    // New maps default to the pacific release unless overridden.
    if (min_mon_release == ceph_release_t::unknown) {
      min_mon_release = ceph_release_t::pacific;
    }
    // TODO: why do we not use build_initial in our normal path here!?!?!
    modified = true;
  }

  if (enable_all_features) {
    // populate persistent features, too
    monmap.persistent_features = ceph::features::mon::get_persistent();
    modified = true;
  }

  if (generate) {
    int r = monmap.build_initial(g_ceph_context, true, cerr);
    if (r < 0)
      return r;
  }

  if (min_mon_release != ceph_release_t::unknown) {
    monmap.min_mon_release = min_mon_release;
    cout << "setting min_mon_release = " << min_mon_release << std::endl;
    modified = true;
  }

  if (filter) {
    // apply initial members
    list<string> initial_members;
    get_str_list(g_conf()->mon_initial_members, initial_members);
    if (!initial_members.empty()) {
      cout << "initial_members " << initial_members << ", filtering seed monmap" << std::endl;
      set<entity_addrvec_t> removed;
      monmap.set_initial_members(g_ceph_context, initial_members,
                                 string(), entity_addrvec_t(),
                                 &removed);
      cout << "removed " << removed << std::endl;
    }
    modified = true;
  }

  if (!g_conf().get_val<uuid_d>("fsid").is_zero()) {
    monmap.fsid = g_conf().get_val<uuid_d>("fsid");
    cout << me << ": set fsid to " << monmap.fsid << std::endl;
    modified = true;
  }

  // Expand each --add single address into an address vector, choosing
  // msgr2/legacy entries based on the given port and map features.
  for (auto& p : add) {
    entity_addr_t addr = p.second;
    entity_addrvec_t addrs;
    if (monmap.contains(p.first)) {
      cerr << me << ": map already contains mon." << p.first << std::endl;
      helpful_exit();
    }
    if (addr.get_port() == 0) {
      // No port given: advertise both msgr2 + legacy (nautilus maps)
      // or just legacy, on the standard mon ports.
      if (monmap.persistent_features.contains_all(
            ceph::features::mon::FEATURE_NAUTILUS)) {
        addr.set_type(entity_addr_t::TYPE_MSGR2);
        addr.set_port(CEPH_MON_PORT_IANA);
        addrs.v.push_back(addr);
        addr.set_type(entity_addr_t::TYPE_LEGACY);
        addr.set_port(CEPH_MON_PORT_LEGACY);
        addrs.v.push_back(addr);
      } else {
        addr.set_type(entity_addr_t::TYPE_LEGACY);
        addr.set_port(CEPH_MON_PORT_LEGACY);
        addrs.v.push_back(addr);
      }
    } else if (addr.get_port() == CEPH_MON_PORT_LEGACY) {
      addr.set_type(entity_addr_t::TYPE_LEGACY);
      addrs.v.push_back(addr);
    } else {
      if (monmap.persistent_features.contains_all(
            ceph::features::mon::FEATURE_NAUTILUS)) {
        addr.set_type(entity_addr_t::TYPE_MSGR2);
      }
      addrs.v.push_back(addr);
    }
    if (monmap.contains(addrs)) {
      cerr << me << ": map already contains " << addrs << std::endl;
      helpful_exit();
    }
    monmap.add(p.first, addrs);
  }
  // --addv entries are taken verbatim.
  for (auto& p : addv) {
    if (monmap.contains(p.first)) {
      cerr << me << ": map already contains mon." << p.first << std::endl;
      helpful_exit();
    }
    if (monmap.contains(p.second)) {
      cerr << me << ": map already contains " << p.second << std::endl;
      helpful_exit();
    }
    monmap.add(p.first, p.second);
  }
  for (auto& p : rm) {
    cout << me << ": removing " << p << std::endl;
    if (!monmap.contains(p)) {
      cerr << me << ": map does not contain " << p << std::endl;
      helpful_exit();
    }
    monmap.remove(p);
  }

  if (handle_features(features, monmap)) {
    modified = true;
  }

  if (!print && !modified && !show_features) {
    cerr << "no action specified" << std::endl;
    helpful_exit();
  }

  if (print)
    monmap.print(cout);

  if (modified) {
    // write it out
    cout << me << ": writing epoch " << monmap.epoch
         << " to " << fn
         << " (" << monmap.size() << " monitors)"
         << std::endl;
    int r = monmap.write(fn.c_str());
    if (r < 0) {
      cerr << "monmaptool: error writing to '" << fn << "': " << cpp_strerror(r) << std::endl;
      return 1;
    }
  }

  return 0;
}
| 14,245 | 28.073469 | 94 |
cc
|
null |
ceph-main/src/tools/neorados.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#define BOOST_COROUTINES_NO_DEPRECATION_WARNING
#include <algorithm>
#include <cassert>
#include <iostream>
#include <string>
#include <string_view>
#include <tuple>
#include <vector>
#include <boost/asio.hpp>
#include <boost/io/ios_state.hpp>
#include <boost/program_options.hpp>
#include <boost/system/system_error.hpp>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <spawn/spawn.hpp>
#include "include/buffer.h" // :(
#include "include/neorados/RADOS.hpp"
using namespace std::literals;
namespace ba = boost::asio;
namespace bs = boost::system;
namespace R = neorados;
namespace s = spawn;
// Render a (major, minor, patch) version triple as "vX.Y.Z".
std::string verstr(const std::tuple<uint32_t, uint32_t, uint32_t>& v)
{
  const auto major = std::get<0>(v);
  const auto minor = std::get<1>(v);
  const auto patch = std::get<2>(v);
  return fmt::format("v{}.{}.{}", major, minor, patch);
}
// Print every element of 'v' to stream 'm', one per line.
template<typename V>
void printseq(const V& v, std::ostream& m)
{
  for (const auto& e : v) {
    fmt::print(m, "{}\n", e);
  }
}
// Print f(e) for every element of 'v' to stream 'm', one per line.
template<typename V, typename F>
void printseq(const V& v, std::ostream& m, F&& f)
{
  for (const auto& e : v) {
    fmt::print(m, "{}\n", f(e));
  }
}
// Resolve a pool name to its numeric pool id, throwing a system_error
// (annotated with the pool name) on failure.
std::int64_t lookup_pool(R::RADOS& r, const std::string& pname,
                         s::yield_context y)
{
  bs::error_code ec;
  const auto pool_id = r.lookup_pool(pname, y[ec]);
  if (ec) {
    throw bs::system_error(
      ec, fmt::format("when looking up '{}'", pname));
  }
  return pool_id;
}
// "lspools": print the name of every pool in the cluster, one per line.
void lspools(R::RADOS& r, const std::vector<std::string>&,
             s::yield_context y)
{
  // list_pools yields (id, name) pairs; show only the names.
  const auto pools = r.list_pools(y);
  printseq(pools, std::cout,
           [](const auto& entry) -> const std::string& {
             return entry.second;
           });
}
// "ls POOL": list every object in the pool (all namespaces), printing
// one entry per line.
void ls(R::RADOS& r, const std::vector<std::string>& p, s::yield_context y)
{
  const auto& pname = p[0];
  const auto pool = lookup_pool(r, pname, y);

  std::vector<R::Entry> ls;
  R::Cursor next = R::Cursor::begin();
  bs::error_code ec;
  do {
    // Page through the pool 1000 entries at a time until the returned
    // cursor reaches end().
    std::tie(ls, next) = r.enumerate_objects(pool, next, R::Cursor::end(),
                                             1000, {}, y[ec], R::all_nspaces);
    if (ec)
      throw bs::system_error(ec, fmt::format("when listing {}", pname));
    printseq(ls, std::cout);
    ls.clear();  // reuse the vector (keeps capacity) for the next page
  } while (next != R::Cursor::end());
}
// "mkpool POOL": create the named pool with default settings.
void mkpool(R::RADOS& r, const std::vector<std::string>& p,
            s::yield_context y)
{
  const auto& pool_name = p[0];
  bs::error_code ec;
  r.create_pool(pool_name, std::nullopt, y[ec]);
  if (ec) {
    throw bs::system_error(ec, fmt::format("when creating pool '{}'", pool_name));
  }
}
// "rmpool POOL": delete the named pool.
void rmpool(R::RADOS& r, const std::vector<std::string>& p,
            s::yield_context y)
{
  const auto& pool_name = p[0];
  bs::error_code ec;
  r.delete_pool(pool_name, y[ec]);
  if (ec) {
    throw bs::system_error(ec, fmt::format("when removing pool '{}'", pool_name));
  }
}
// "create POOL OBJECT": exclusively create the object (fails if it
// already exists).
void create(R::RADOS& r, const std::vector<std::string>& p,
            s::yield_context y)
{
  const auto& pool_name = p[0];
  const R::Object obj = p[1];
  const auto pool_id = lookup_pool(r, pool_name, y);

  // create(true) requests exclusive creation.
  R::WriteOp op;
  op.create(true);
  bs::error_code ec;
  r.execute(obj, pool_id, std::move(op), y[ec]);
  if (ec) {
    throw bs::system_error(ec,
                           fmt::format(
                             "when creating object '{}' in pool '{}'",
                             obj, pool_name));
  }
}
inline constexpr std::size_t io_size = 4 << 20;
// "write POOL OBJECT": stream standard input into the object, writing
// sequential io_size chunks.
void write(R::RADOS& r, const std::vector<std::string>& p, s::yield_context y)
{
  const auto& pname = p[0];
  const R::Object obj(p[1]);
  const auto pool = lookup_pool(r, pname, y);
  bs::error_code ec;

  std::unique_ptr<char[]> buf = std::make_unique<char[]>(io_size);
  std::size_t off = 0;
  // Restore cin's exception mask on scope exit; throw only on badbit so
  // a normal EOF just ends the loop.
  boost::io::ios_exception_saver ies(std::cin);
  std::cin.exceptions(std::istream::badbit);
  std::cin.clear();

  while (!std::cin.eof()) {
    auto curoff = off;
    std::cin.read(buf.get(), io_size);
    auto len = std::cin.gcount();  // may be a short read at EOF
    off += len;
    if (len == 0)
      break; // Nothin' to do.

    // create_static wraps the existing buffer rather than copying it —
    // safe here because buf outlives the synchronous execute() call.
    ceph::buffer::list bl;
    bl.append(buffer::create_static(len, buf.get()));
    R::WriteOp op;
    op.write(curoff, std::move(bl));
    r.execute(obj, pool, std::move(op), y[ec]);
    if (ec)
      throw bs::system_error(ec, fmt::format(
                               "when writing object '{}' in pool '{}'",
                               obj, pname));
  }
}
// "read POOL OBJECT": stream the whole object to standard output in
// io_size chunks.
void read(R::RADOS& r, const std::vector<std::string>& p, s::yield_context y)
{
  const auto& pname = p[0];
  const R::Object obj(p[1]);
  const auto pool = lookup_pool(r, pname, y);
  bs::error_code ec;

  // Stat first so we know how many bytes to fetch.
  std::uint64_t len;
  {
    R::ReadOp op;
    op.stat(&len, nullptr);
    r.execute(obj, pool, std::move(op),
              nullptr, y[ec]);
    if (ec)
      throw bs::system_error(
        ec,
        fmt::format("when getting length of object '{}' in pool '{}'",
                    obj, pname));
  }

  std::size_t off = 0;
  ceph::buffer::list bl;
  // Bug fix: each chunk is the *smaller* of the remaining bytes and
  // io_size (std::min, not std::max).  The previous std::max form both
  // over-requested and could never evaluate to 0 once off reached len,
  // so the loop had no way to terminate.
  while (auto toread = std::min(len - off, io_size)) {
    R::ReadOp op;
    op.read(off, toread, &bl);
    r.execute(obj, pool, std::move(op), nullptr, y[ec]);
    if (ec)
      throw bs::system_error(
        ec,
        // Bug fix: report the pool *name* (pname) as the message says,
        // not the numeric pool id.
        fmt::format("when reading from object '{}' in pool '{}'",
                    obj, pname));
    off += bl.length();
    bl.write_stream(std::cout);
    bl.clear();
  }
}
// "rm POOL OBJECT": remove the object from the pool.
void rm(R::RADOS& r, const std::vector<std::string>& p, s::yield_context y)
{
  const auto& pool_name = p[0];
  const R::Object obj = p[1];
  const auto pool_id = lookup_pool(r, pool_name, y);

  R::WriteOp op;
  op.remove();
  bs::error_code ec;
  r.execute(obj, pool_id, std::move(op), y[ec]);
  if (ec) {
    throw bs::system_error(ec, fmt::format(
                             "when removing object '{}' in pool '{}'",
                             obj, pool_name));
  }
}
// Tool version reported by --version (distinct from the RADOS library
// version, which is queried at runtime).
static constexpr auto version = std::make_tuple(0ul, 0ul, 1ul);

// Signature shared by every subcommand handler: RADOS handle,
// positional parameters, coroutine yield context.
using cmdfunc = void (*)(R::RADOS& r, const std::vector<std::string>& p,
                         s::yield_context);

// One entry of the subcommand dispatch table.
struct cmdesc {
  std::string_view name;   // subcommand name as typed on the command line
  std::size_t arity;       // exact number of positional parameters required
  cmdfunc f;               // handler to invoke
  std::string_view usage;  // parameter synopsis for --help output
  std::string_view desc;   // one-line description for --help output
};
// Subcommand dispatch table, consulted by main() for lookup, arity
// checking, and --help output.
const std::array commands = {
  // Pools operations ;)
  cmdesc{ "lspools"sv,
          0, &lspools,
          ""sv,
          "List all pools"sv },
  // Pool operations
  cmdesc{ "ls"sv,
          1, &ls,
          "POOL"sv,
          "list all objects in POOL"sv },
  cmdesc{ "mkpool"sv,
          1, &mkpool,
          "POOL"sv,
          "create POOL"sv },
  cmdesc{ "rmpool"sv,
          1, &rmpool,
          "POOL"sv,
          "remove POOL"sv },
  // Object operations
  cmdesc{ "create"sv,
          2, &create,
          "POOL OBJECT"sv,
          "exclusively create OBJECT in POOL"sv },
  cmdesc{ "write"sv,
          2, &write,
          "POOL OBJECT"sv,
          "write to OBJECT in POOL from standard input"sv },
  cmdesc{ "read"sv,
          2, &read,
          "POOL OBJECT"sv,
          "read contents of OBJECT in POOL to standard out"sv },
  cmdesc{ "rm"sv,
          2, &rm,
          "POOL OBJECT"sv,
          "remove OBJECT in POOL"sv }
};
// fmt >= 9 no longer formats ostream-insertable types implicitly, so
// opt options_description back in via ostream_formatter — presumably
// needed for the fmt::print(desc) call in main(); confirm against the
// fmt release notes if bumping the fmt version.
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<boost::program_options::options_description> : fmt::ostream_formatter {};
#endif // FMT_VERSION
// Entry point: parse global options with Boost.Program_options, match
// the first positional token against the subcommand table, then run the
// handler inside a stackful coroutine on a fresh io_context.
int main(int argc, char* argv[])
{
  const std::string_view prog(argv[0]);
  std::string command;
  namespace po = boost::program_options;
  try {
    std::vector<std::string> parameters;

    po::options_description desc(fmt::format("{} options", prog));
    desc.add_options()
      ("help", "show help")
      ("version", "show version")
      ("command", po::value<std::string>(&command), "the operation to perform")
      ("parameters", po::value<std::vector<std::string>>(&parameters),
       "parameters to the command");

    // First positional token is the command; all remaining tokens are
    // its parameters.
    po::positional_options_description p;
    p.add("command", 1);
    p.add("parameters", -1);

    po::variables_map vm;
    po::store(po::command_line_parser(argc, argv).
              options(desc).positional(p).run(), vm);
    po::notify(vm);

    if (vm.count("help")) {
      fmt::print("{}", desc);
      fmt::print("Commands:\n");
      for (const auto& cmd : commands) {
        // Short name+usage pairs get an extra tab so the descriptions
        // line up in the output.
        fmt::print(" {} {}{}{}\n",
                   cmd.name, cmd.usage,
                   cmd.name.length() + cmd.usage.length() < 13 ?
                   "\t\t"sv : "\t"sv,
                   cmd.desc);
      }
      return 0;
    }

    if (vm.count("version")) {
      fmt::print(
        "{}: RADOS command exerciser, {},\n"
        "RADOS library version {}\n"
        "Copyright (C) 2019 Red Hat <[email protected]>\n"
        "This is free software; you can redistribute it and/or\n"
        "modify it under the terms of the GNU Lesser General Public\n"
        "License version 2.1, as published by the Free Software\n"
        "Foundation. See file COPYING.\n", prog,
        verstr(version), verstr(R::RADOS::version()));
      return 0;
    }

    if (vm.find("command") == vm.end()) {
      fmt::print(std::cerr, "{}: a command is required\n", prog);
      return 1;
    }

    ba::io_context c;

    if (auto ci = std::find_if(commands.begin(), commands.end(),
                               [&command](const cmdesc& c) {
                                 return c.name == command;
                               }); ci != commands.end()) {
      // Each subcommand takes an exact number of positional parameters.
      if (parameters.size() < ci->arity) {
        fmt::print(std::cerr, "{}: {}: too few arguments\n\t{} {}\n",
                   prog, command, ci->name, ci->usage);
        return 1;
      }
      if (parameters.size() > ci->arity) {
        fmt::print(std::cerr, "{}: {}: too many arguments\n\t{} {}\n",
                   prog, command, ci->name, ci->usage);
        return 1;
      }
      // Build the RADOS handle and run the handler as a coroutine; the
      // actual work happens when c.run() drives the io_context below.
      s::spawn(c, [&](s::yield_context y) {
        auto r = R::RADOS::Builder{}.build(c, y);
        ci->f(r, parameters, y);
      });
    } else {
      fmt::print(std::cerr, "{}: {}: unknown command\n", prog, command);
      return 1;
    }
    c.run();
  } catch (const std::exception& e) {
    // Handler failures surface here as system_error from the coroutine.
    fmt::print(std::cerr, "{}: {}: {}\n", prog, command, e.what());
    return 1;
  }

  return 0;
}
| 9,714 | 23.910256 | 107 |
cc
|
null |
ceph-main/src/tools/osdmaptool.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <string>
#include <sys/stat.h>
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "common/safe_io.h"
#include "include/random.h"
#include "mon/health_check.h"
#include <time.h>
#include <algorithm>
#include "global/global_init.h"
#include "osd/OSDMap.h"
using namespace std;
// Print osdmaptool's full command-line reference to stdout, then
// terminate with exit status 1 (this function doubles as the error path
// for bad arguments, hence the non-zero status).
void usage()
{
  cout << " usage: [--print] <mapfilename>" << std::endl;
  cout << " --create-from-conf creates an osd map with default configurations" << std::endl;
  cout << " --createsimple <numosd> [--clobber] [--pg-bits <bitsperosd>] [--pgp-bits <bits>] creates a relatively generic OSD map with <numosd> devices" << std::endl;
  cout << " --pgp-bits <bits> pgp_num map attribute will be shifted by <bits>" << std::endl;
  cout << " --pg-bits <bits> pg_num map attribute will be shifted by <bits>" << std::endl;
  cout << " --clobber allows osdmaptool to overwrite <mapfilename> if it already exists" << std::endl;
  cout << " --export-crush <file> write osdmap's crush map to <file>" << std::endl;
  cout << " --import-crush <file> replace osdmap's crush map with <file>" << std::endl;
  cout << " --health dump health checks" << std::endl;
  cout << " --test-map-pgs [--pool <poolid>] [--pg_num <pg_num>] [--range-first <first> --range-last <last>] map all pgs" << std::endl;
  cout << " --test-map-pgs-dump [--pool <poolid>] [--range-first <first> --range-last <last>] map all pgs" << std::endl;
  cout << " --test-map-pgs-dump-all [--pool <poolid>] [--range-first <first> --range-last <last>] map all pgs to osds" << std::endl;
  cout << " --mark-up-in mark osds up and in (but do not persist)" << std::endl;
  cout << " --mark-out <osdid> mark an osd as out (but do not persist)" << std::endl;
  cout << " --mark-up <osdid> mark an osd as up (but do not persist)" << std::endl;
  cout << " --mark-in <osdid> mark an osd as in (but do not persist)" << std::endl;
  cout << " --with-default-pool include default pool when creating map" << std::endl;
  cout << " --clear-temp clear pg_temp and primary_temp" << std::endl;
  cout << " --clean-temps clean pg_temps" << std::endl;
  cout << " --test-random do random placements" << std::endl;
  cout << " --test-map-pg <pgid> map a pgid to osds" << std::endl;
  cout << " --test-map-object <objectname> [--pool <poolid>] map an object to osds"
       << std::endl;
  cout << " --upmap-cleanup <file> clean up pg_upmap[_items] entries, writing" << std::endl;
  cout << " commands to <file> [default: - for stdout]" << std::endl;
  cout << " --upmap <file> calculate pg upmap entries to balance pg layout" << std::endl;
  cout << " writing commands to <file> [default: - for stdout]" << std::endl;
  cout << " --upmap-max <max-count> set max upmap entries to calculate [default: 10]" << std::endl;
  cout << " --upmap-deviation <max-deviation>" << std::endl;
  cout << " max deviation from target [default: 5]" << std::endl;
  cout << " --upmap-pool <poolname> restrict upmap balancing to 1 or more pools" << std::endl;
  cout << " --upmap-active Act like an active balancer, keep applying changes until balanced" << std::endl;
  cout << " --dump <format> displays the map in plain text when <format> is 'plain', 'json' if specified format is not supported" << std::endl;
  cout << " --tree displays a tree of the map" << std::endl;
  cout << " --test-crush [--range-first <first> --range-last <last>] map pgs to acting osds" << std::endl;
  cout << " --adjust-crush-weight <osdid:weight>[,<osdid:weight>,<...>] change <osdid> CRUSH <weight> (but do not persist)" << std::endl;
  cout << " --save write modified osdmap with upmap or crush-adjust changes" << std::endl;
  cout << " --read <file> calculate pg upmap entries to balance pg primaries" << std::endl;
  cout << " --read-pool <poolname> specify which pool the read balancer should adjust" << std::endl;
  cout << " --vstart prefix upmap and read output with './bin/'" << std::endl;
  exit(1);
}
// Render the pending upmap changes as a script of "<cmd> osd ..." CLI
// commands and write it to file descriptor 'fd'.  When 'vstart' is set,
// each command is prefixed with "./bin/" so the script runs against a
// vstart cluster.  Exits the process on a write error.
void print_inc_upmaps(const OSDMap::Incremental& pending_inc, int fd, bool vstart, std::string cmd="ceph")
{
  ostringstream ss;
  const std::string prefix = "./bin/";

  // Start one command line: optional vstart prefix, then the base command.
  auto begin_cmd = [&]() -> ostringstream& {
    if (vstart)
      ss << prefix;
    ss << cmd;
    return ss;
  };

  for (auto& i : pending_inc.old_pg_upmap) {
    begin_cmd() << " osd rm-pg-upmap " << i << std::endl;
  }
  for (auto& i : pending_inc.new_pg_upmap) {
    begin_cmd() << " osd pg-upmap " << i.first;
    for (auto osd : i.second) {
      ss << " " << osd;
    }
    ss << std::endl;
  }
  for (auto& i : pending_inc.old_pg_upmap_items) {
    begin_cmd() << " osd rm-pg-upmap-items " << i << std::endl;
  }
  for (auto& i : pending_inc.new_pg_upmap_items) {
    begin_cmd() << " osd pg-upmap-items " << i.first;
    for (auto p : i.second) {
      ss << " " << p.first << " " << p.second;
    }
    ss << std::endl;
  }
  for (auto& i : pending_inc.new_pg_upmap_primary) {
    begin_cmd() << " osd pg-upmap-primary " << i.first << " " << i.second << std::endl;
  }

  // Flush the accumulated script to the caller-supplied descriptor.
  string s = ss.str();
  int r = safe_write(fd, s.c_str(), s.size());
  if (r < 0) {
    cerr << "error writing output: " << cpp_strerror(r) << std::endl;
    exit(1);
  }
}
int main(int argc, const char **argv)
{
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
const char *me = argv[0];
std::string fn;
bool print = false;
boost::scoped_ptr<Formatter> print_formatter;
bool tree = false;
boost::scoped_ptr<Formatter> tree_formatter;
bool createsimple = false;
bool createpool = false;
bool create_from_conf = false;
int num_osd = 0;
int pg_bits = 6;
int pgp_bits = 6;
bool clobber = false;
bool modified = false;
std::string export_crush, import_crush, test_map_pg, test_map_object, adjust_crush_weight;
bool test_crush = false;
int range_first = -1;
int range_last = -1;
int pool = -1;
bool mark_up_in = false;
int marked_out = -1;
int marked_up = -1;
int marked_in = -1;
bool clear_temp = false;
bool clean_temps = false;
bool test_map_pgs = false;
bool test_map_pgs_dump = false;
bool test_random = false;
bool upmap_cleanup = false;
bool upmap = false;
bool health = false;
std::string upmap_file = "-";
int upmap_max = 10;
int upmap_deviation = 5;
bool upmap_active = false;
std::set<std::string> upmap_pools;
std::random_device::result_type upmap_seed;
std::random_device::result_type *upmap_p_seed = nullptr;
bool read = false;
std::string read_pool;
int64_t pg_num = -1;
bool test_map_pgs_dump_all = false;
bool save = false;
bool vstart = false;
std::string val;
std::ostringstream err;
for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
if (ceph_argparse_double_dash(args, i)) {
break;
} else if (ceph_argparse_flag(args, i, "-p", "--print", (char*)NULL)) {
print = true;
} else if (ceph_argparse_witharg(args, i, &val, err, "--dump", (char*)NULL)) {
print = true;
if (!val.empty() && val != "plain") {
print_formatter.reset(Formatter::create(val, "", "json"));
}
} else if (ceph_argparse_witharg(args, i, &val, err, "--tree", (char*)NULL)) {
tree = true;
if (!val.empty() && val != "plain") {
tree_formatter.reset(Formatter::create(val, "", "json"));
}
} else if (ceph_argparse_witharg(args, i, &pg_bits, err, "--osd-pg-bits", (char*)NULL)) {
} else if (ceph_argparse_witharg(args, i, &pgp_bits, err, "--osd-pgp-bits", (char*)NULL)) {
} else if (ceph_argparse_witharg(args, i, &upmap_file, "--upmap-cleanup", (char*)NULL)) {
upmap_cleanup = true;
} else if (ceph_argparse_witharg(args, i, &upmap_file, "--upmap", (char*)NULL)) {
upmap_cleanup = true;
upmap = true;
} else if (ceph_argparse_witharg(args, i, &upmap_file, "--read", (char*)NULL)) {
read = true;
} else if (ceph_argparse_witharg(args, i, &upmap_max, err, "--upmap-max", (char*)NULL)) {
} else if (ceph_argparse_witharg(args, i, &upmap_deviation, err, "--upmap-deviation", (char*)NULL)) {
} else if (ceph_argparse_witharg(args, i, (int *)&upmap_seed, err, "--upmap-seed", (char*)NULL)) {
upmap_p_seed = &upmap_seed;
} else if (ceph_argparse_witharg(args, i, &val, "--upmap-pool", (char*)NULL)) {
upmap_pools.insert(val);
} else if (ceph_argparse_witharg(args, i, &val, "--read-pool", (char*)NULL)) {
read_pool = val;
} else if (ceph_argparse_witharg(args, i, &num_osd, err, "--createsimple", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
exit(EXIT_FAILURE);
}
createsimple = true;
} else if (ceph_argparse_flag(args, i, "--upmap-active", (char*)NULL)) {
upmap_active = true;
} else if (ceph_argparse_flag(args, i, "--health", (char*)NULL)) {
health = true;
} else if (ceph_argparse_flag(args, i, "--with-default-pool", (char*)NULL)) {
createpool = true;
} else if (ceph_argparse_flag(args, i, "--create-from-conf", (char*)NULL)) {
create_from_conf = true;
} else if (ceph_argparse_flag(args, i, "--mark-up-in", (char*)NULL)) {
mark_up_in = true;
} else if (ceph_argparse_witharg(args, i, &val, "--mark-out", (char*)NULL)) {
marked_out = std::stoi(val);
} else if (ceph_argparse_witharg(args, i, &val, "--mark-up", (char*)NULL)) {
marked_up = std::stod(val);
} else if (ceph_argparse_witharg(args, i, &val, "--mark-in", (char*)NULL)) {
marked_in = std::stod(val);
} else if (ceph_argparse_flag(args, i, "--clear-temp", (char*)NULL)) {
clear_temp = true;
} else if (ceph_argparse_flag(args, i, "--clean-temps", (char*)NULL)) {
clean_temps = true;
} else if (ceph_argparse_flag(args, i, "--test-map-pgs", (char*)NULL)) {
test_map_pgs = true;
} else if (ceph_argparse_flag(args, i, "--test-map-pgs-dump", (char*)NULL)) {
test_map_pgs_dump = true;
} else if (ceph_argparse_flag(args, i, "--test-map-pgs-dump-all", (char*)NULL)) {
test_map_pgs_dump_all = true;
} else if (ceph_argparse_flag(args, i, "--test-random", (char*)NULL)) {
test_random = true;
} else if (ceph_argparse_flag(args, i, "--clobber", (char*)NULL)) {
clobber = true;
} else if (ceph_argparse_witharg(args, i, &pg_bits, err, "--pg_bits", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
exit(EXIT_FAILURE);
}
} else if (ceph_argparse_witharg(args, i, &pgp_bits, err, "--pgp_bits", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
exit(EXIT_FAILURE);
}
} else if (ceph_argparse_witharg(args, i, &val, "--export_crush", (char*)NULL)) {
export_crush = val;
} else if (ceph_argparse_witharg(args, i, &val, "--import_crush", (char*)NULL)) {
import_crush = val;
} else if (ceph_argparse_witharg(args, i, &val, "--test_map_pg", (char*)NULL)) {
test_map_pg = val;
} else if (ceph_argparse_witharg(args, i, &val, "--test_map_object", (char*)NULL)) {
test_map_object = val;
} else if (ceph_argparse_flag(args, i, "--test_crush", (char*)NULL)) {
test_crush = true;
} else if (ceph_argparse_witharg(args, i, &val, err, "--pg_num", (char*)NULL)) {
string interr;
pg_num = strict_strtoll(val.c_str(), 10, &interr);
if (interr.length() > 0) {
cerr << "error parsing integer value " << interr << std::endl;
exit(EXIT_FAILURE);
}
} else if (ceph_argparse_witharg(args, i, &range_first, err, "--range_first", (char*)NULL)) {
} else if (ceph_argparse_witharg(args, i, &range_last, err, "--range_last", (char*)NULL)) {
} else if (ceph_argparse_witharg(args, i, &pool, err, "--pool", (char*)NULL)) {
if (!err.str().empty()) {
cerr << err.str() << std::endl;
exit(EXIT_FAILURE);
}
} else if (ceph_argparse_witharg(args, i, &val, err, "--adjust-crush-weight", (char*)NULL)) {
adjust_crush_weight = val;
} else if (ceph_argparse_flag(args, i, "--save", (char*)NULL)) {
save = true;
} else if (ceph_argparse_flag(args, i, "--vstart", (char*)NULL)) {
vstart = true;
} else {
++i;
}
}
if (args.empty()) {
cerr << me << ": must specify osdmap filename" << std::endl;
usage();
}
else if (args.size() > 1) {
cerr << me << ": too many arguments" << std::endl;
usage();
}
if (upmap_deviation < 1) {
cerr << me << ": upmap-deviation must be >= 1" << std::endl;
usage();
}
fn = args[0];
if (range_first >= 0 && range_last >= 0) {
set<OSDMap*> maps;
OSDMap *prev = NULL;
for (int i=range_first; i <= range_last; i++) {
ostringstream f;
f << fn << "/" << i;
bufferlist bl;
string error, s = f.str();
int r = bl.read_file(s.c_str(), &error);
if (r < 0) {
cerr << "unable to read " << s << ": " << cpp_strerror(r) << std::endl;
exit(1);
}
cout << s << " got " << bl.length() << " bytes" << std::endl;
OSDMap *o = new OSDMap;
o->decode(bl);
maps.insert(o);
if (prev)
OSDMap::dedup(prev, o);
prev = o;
}
exit(0);
}
OSDMap osdmap;
bufferlist bl;
cerr << me << ": osdmap file '" << fn << "'" << std::endl;
int r = 0;
struct stat st;
if (!createsimple && !create_from_conf && !clobber) {
std::string error;
r = bl.read_file(fn.c_str(), &error);
if (r == 0) {
try {
osdmap.decode(bl);
}
catch (const buffer::error &e) {
cerr << me << ": error decoding osdmap '" << fn << "'" << std::endl;
return -1;
}
}
else {
cerr << me << ": couldn't open " << fn << ": " << error << std::endl;
return -1;
}
}
else if ((createsimple || create_from_conf) && !clobber && ::stat(fn.c_str(), &st) == 0) {
cerr << me << ": " << fn << " exists, --clobber to overwrite" << std::endl;
return -1;
}
if (createsimple || create_from_conf) {
if (createsimple) {
if (num_osd < 1) {
cerr << me << ": osd count must be > 0" << std::endl;
exit(1);
}
} else {
num_osd = -1;
}
uuid_d fsid;
if (createpool) {
osdmap.build_simple_with_pool(
g_ceph_context, 0, fsid, num_osd, pg_bits, pgp_bits);
} else {
osdmap.build_simple(g_ceph_context, 0, fsid, num_osd);
}
modified = true;
}
if (mark_up_in) {
cout << "marking all OSDs up and in" << std::endl;
int n = osdmap.get_max_osd();
for (int i=0; i<n; i++) {
osdmap.set_state(i, osdmap.get_state(i) | CEPH_OSD_UP);
osdmap.set_weight(i, CEPH_OSD_IN);
if (osdmap.crush->get_item_weight(i) == 0 ) {
osdmap.crush->adjust_item_weightf(g_ceph_context, i, 1.0);
}
}
}
if (marked_out >=0 && marked_out < osdmap.get_max_osd()) {
cout << "marking OSD@" << marked_out << " as out" << std::endl;
int id = marked_out;
osdmap.set_state(id, osdmap.get_state(id) | CEPH_OSD_UP);
osdmap.set_weight(id, CEPH_OSD_OUT);
}
if (marked_up >=0 && marked_up < osdmap.get_max_osd()) {
cout << "marking OSD@" << marked_up << " as up" << std::endl;
int id = marked_up;
osdmap.set_state(id, osdmap.get_state(id) | CEPH_OSD_UP);
}
if (marked_in >=0 && marked_in < osdmap.get_max_osd()) {
cout << "marking OSD@" << marked_up << " as up" << std::endl;
int id = marked_up;
osdmap.set_weight(id, CEPH_OSD_IN);
}
for_each_substr(adjust_crush_weight, ",", [&](auto osd_to_adjust) {
std::string_view osd_to_weight_delimiter{":"};
size_t pos = osd_to_adjust.find(osd_to_weight_delimiter);
if (pos == osd_to_adjust.npos) {
cerr << me << ": use ':' as separator of osd id and its weight"
<< std::endl;
usage();
}
int osd_id = std::stoi(string(osd_to_adjust.substr(0, pos)));
float new_weight = std::stof(string(osd_to_adjust.substr(pos + 1)));
osdmap.crush->adjust_item_weightf(g_ceph_context, osd_id, new_weight);
std::cout << "Adjusted osd." << osd_id << " CRUSH weight to " << new_weight
<< std::endl;
if (save) {
OSDMap::Incremental inc;
inc.fsid = osdmap.get_fsid();
inc.epoch = osdmap.get_epoch() + 1;
osdmap.apply_incremental(inc);
modified = true;
}
});
if (clear_temp) {
cout << "clearing pg/primary temp" << std::endl;
osdmap.clear_temp();
}
if (clean_temps) {
cout << "cleaning pg temps" << std::endl;
OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
OSDMap tmpmap;
tmpmap.deepish_copy_from(osdmap);
tmpmap.apply_incremental(pending_inc);
OSDMap::clean_temps(g_ceph_context, osdmap, tmpmap, &pending_inc);
}
int upmap_fd = STDOUT_FILENO;
if (upmap || upmap_cleanup || read) {
if (upmap_file != "-") {
upmap_fd = ::open(upmap_file.c_str(), O_CREAT|O_WRONLY|O_TRUNC, 0644);
if (upmap_fd < 0) {
cerr << "error opening " << upmap_file << ": " << cpp_strerror(errno)
<< std::endl;
exit(1);
}
cout << "writing upmap command output to: " << upmap_file << std::endl;
}
}
if (upmap_cleanup) {
cout << "checking for upmap cleanups" << std::endl;
OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
pending_inc.fsid = osdmap.get_fsid();
int r = osdmap.clean_pg_upmaps(g_ceph_context, &pending_inc);
if (r > 0) {
print_inc_upmaps(pending_inc, upmap_fd, vstart);
r = osdmap.apply_incremental(pending_inc);
ceph_assert(r == 0);
}
}
if (read) {
int64_t pid = osdmap.lookup_pg_pool_name(read_pool);
if (pid < 0) {
cerr << " pool " << read_pool << " does not exist" << std::endl;
exit(1);
}
const pg_pool_t* pool = osdmap.get_pg_pool(pid);
if (! pool->is_replicated()) {
cerr << read_pool << " is an erasure coded pool; "
<< "please try again with a replicated pool." << std::endl;
exit(1);
}
OSDMap tmp_osd_map;
tmp_osd_map.deepish_copy_from(osdmap);
// Gather BEFORE info
map<uint64_t,set<pg_t>> pgs_by_osd;
map<uint64_t,set<pg_t>> prim_pgs_by_osd;
map<uint64_t,set<pg_t>> acting_prims_by_osd;
pgs_by_osd = tmp_osd_map.get_pgs_by_osd(g_ceph_context, pid, &prim_pgs_by_osd, &acting_prims_by_osd);
OSDMap::read_balance_info_t rb_info;
tmp_osd_map.calc_read_balance_score(g_ceph_context, pid, &rb_info);
float read_balance_score_before = rb_info.adjusted_score;
ceph_assert(read_balance_score_before >= 0);
// Calculate read balancer
OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
int num_changes = osdmap.balance_primaries(g_ceph_context, pid, &pending_inc, tmp_osd_map);
if (num_changes < 0) {
cerr << "Error balancing primaries. Rerun with at least --debug-osd=10 for more details." << std::endl;
exit(1);
}
// Gather AFTER info
map<uint64_t,set<pg_t>> pgs_by_osd_2;
map<uint64_t,set<pg_t>> prim_pgs_by_osd_2;
map<uint64_t,set<pg_t>> acting_prims_by_osd_2;
pgs_by_osd_2 = tmp_osd_map.get_pgs_by_osd(g_ceph_context, pid, &prim_pgs_by_osd_2, &acting_prims_by_osd_2);
tmp_osd_map.calc_read_balance_score(g_ceph_context, pid, &rb_info);
float read_balance_score_after = rb_info.adjusted_score;
ceph_assert(read_balance_score_after >= 0);
if (num_changes > 0) {
cout << " \n";
cout << "---------- BEFORE ------------ \n";
for (auto & [osd, pgs] : prim_pgs_by_osd) {
cout << " osd." << osd << " | primary affinity: " << tmp_osd_map.get_primary_affinityf(osd) << " | number of prims: " << pgs.size() << "\n";
}
cout << " \n";
cout << "read_balance_score of '" << read_pool << "': " << read_balance_score_before << "\n\n\n";
cout << "---------- AFTER ------------ \n";
for (auto & [osd, pgs] : prim_pgs_by_osd_2) {
cout << " osd." << osd << " | primary affinity: " << tmp_osd_map.get_primary_affinityf(osd) << " | number of prims: " << pgs.size() << "\n";
}
cout << " \n";
cout << "read_balance_score of '" << read_pool << "': " << read_balance_score_after << "\n\n\n";
cout << "num changes: " << num_changes << "\n";
print_inc_upmaps(pending_inc, upmap_fd, vstart);
} else {
cout << " Unable to find further optimization, or distribution is already perfect\n";
}
}
if (upmap) {
cout << "upmap, max-count " << upmap_max
<< ", max deviation " << upmap_deviation
<< std::endl;
vector<int64_t> pools;
set<int64_t> upmap_pool_nums;
for (auto& s : upmap_pools) {
int64_t p = osdmap.lookup_pg_pool_name(s);
if (p < 0) {
cerr << " pool " << s << " does not exist" << std::endl;
exit(1);
}
pools.push_back(p);
upmap_pool_nums.insert(p);
}
if (!pools.empty()) {
cout << " limiting to pools " << upmap_pools << " (" << pools << ")"
<< std::endl;
} else {
mempool::osdmap::map<int64_t,pg_pool_t> opools = osdmap.get_pools();
for (auto& i : opools) {
pools.push_back(i.first);
}
}
if (pools.empty()) {
cout << "No pools available" << std::endl;
goto skip_upmap;
}
int rounds = 0;
struct timespec round_start;
[[maybe_unused]] int r = clock_gettime(CLOCK_MONOTONIC, &round_start);
assert(r == 0);
do {
random_device_t rd;
std::shuffle(pools.begin(), pools.end(), std::mt19937{rd()});
cout << "pools ";
for (auto& i: pools)
cout << osdmap.get_pool_name(i) << " ";
cout << std::endl;
OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
pending_inc.fsid = osdmap.get_fsid();
int total_did = 0;
int left = upmap_max;
struct timespec begin, end;
r = clock_gettime(CLOCK_MONOTONIC, &begin);
assert(r == 0);
for (auto& i: pools) {
set<int64_t> one_pool;
one_pool.insert(i);
//TODO: Josh: Add a function on the seed for multiple iterations.
int did = osdmap.calc_pg_upmaps(
g_ceph_context, upmap_deviation,
left, one_pool,
&pending_inc, upmap_p_seed);
total_did += did;
left -= did;
if (left <= 0)
break;
if (upmap_p_seed != nullptr) {
*upmap_p_seed += 13;
}
}
r = clock_gettime(CLOCK_MONOTONIC, &end);
assert(r == 0);
cout << "prepared " << total_did << "/" << upmap_max << " changes" << std::endl;
float elapsed_time = (end.tv_sec - begin.tv_sec) + 1.0e-9*(end.tv_nsec - begin.tv_nsec);
if (upmap_active)
cout << "Time elapsed " << elapsed_time << " secs" << std::endl;
if (total_did > 0) {
print_inc_upmaps(pending_inc, upmap_fd, vstart);
if (save || upmap_active) {
int r = osdmap.apply_incremental(pending_inc);
ceph_assert(r == 0);
if (save)
modified = true;
}
} else {
cout << "Unable to find further optimization, "
<< "or distribution is already perfect"
<< std::endl;
if (upmap_active) {
map<int,set<pg_t>> pgs_by_osd;
for (auto& i : osdmap.get_pools()) {
if (!upmap_pool_nums.empty() && !upmap_pool_nums.count(i.first))
continue;
for (unsigned ps = 0; ps < i.second.get_pg_num(); ++ps) {
pg_t pg(ps, i.first);
vector<int> up;
osdmap.pg_to_up_acting_osds(pg, &up, nullptr, nullptr, nullptr);
//ldout(cct, 20) << __func__ << " " << pg << " up " << up << dendl;
for (auto osd : up) {
if (osd != CRUSH_ITEM_NONE)
pgs_by_osd[osd].insert(pg);
}
}
}
for (auto& i : pgs_by_osd)
cout << "osd." << i.first << " pgs " << i.second.size() << std::endl;
float elapsed_time = (end.tv_sec - round_start.tv_sec) + 1.0e-9*(end.tv_nsec - round_start.tv_nsec);
cout << "Total time elapsed " << elapsed_time << " secs, " << rounds << " rounds" << std::endl;
}
break;
}
++rounds;
} while(upmap_active);
}
skip_upmap:
if (upmap_file != "-") {
::close(upmap_fd);
}
if (!import_crush.empty()) {
bufferlist cbl;
std::string error;
r = cbl.read_file(import_crush.c_str(), &error);
if (r) {
cerr << me << ": error reading crush map from " << import_crush
<< ": " << error << std::endl;
exit(1);
}
// validate
CrushWrapper cw;
auto p = cbl.cbegin();
cw.decode(p);
if (cw.get_max_devices() > osdmap.get_max_osd()) {
cerr << me << ": crushmap max_devices " << cw.get_max_devices()
<< " > osdmap max_osd " << osdmap.get_max_osd() << std::endl;
exit(1);
}
// apply
OSDMap::Incremental inc;
inc.fsid = osdmap.get_fsid();
inc.epoch = osdmap.get_epoch()+1;
inc.crush = cbl;
osdmap.apply_incremental(inc);
cout << me << ": imported " << cbl.length() << " byte crush map from " << import_crush << std::endl;
modified = true;
}
if (!export_crush.empty()) {
bufferlist cbl;
osdmap.crush->encode(cbl, CEPH_FEATURES_SUPPORTED_DEFAULT);
r = cbl.write_file(export_crush.c_str());
if (r < 0) {
cerr << me << ": error writing crush map to " << import_crush << std::endl;
exit(1);
}
cout << me << ": exported crush map to " << export_crush << std::endl;
}
if (!test_map_object.empty()) {
object_t oid(test_map_object);
if (pool == -1) {
cout << me << ": assuming pool 1 (use --pool to override)" << std::endl;
pool = 1;
}
if (!osdmap.have_pg_pool(pool)) {
cerr << "There is no pool " << pool << std::endl;
exit(1);
}
object_locator_t loc(pool);
pg_t raw_pgid = osdmap.object_locator_to_pg(oid, loc);
pg_t pgid = osdmap.raw_pg_to_pg(raw_pgid);
vector<int> acting;
osdmap.pg_to_acting_osds(pgid, acting);
cout << " object '" << oid
<< "' -> " << pgid
<< " -> " << acting
<< std::endl;
}
if (!test_map_pg.empty()) {
pg_t pgid;
if (!pgid.parse(test_map_pg.c_str())) {
cerr << me << ": failed to parse pg '" << test_map_pg << std::endl;
usage();
}
cout << " parsed '" << test_map_pg << "' -> " << pgid << std::endl;
vector<int> raw, up, acting;
int raw_primary, up_primary, acting_primary;
osdmap.pg_to_raw_osds(pgid, &raw, &raw_primary);
osdmap.pg_to_up_acting_osds(pgid, &up, &up_primary,
&acting, &acting_primary);
cout << pgid << " raw (" << raw << ", p" << raw_primary
<< ") up (" << up << ", p" << up_primary
<< ") acting (" << acting << ", p" << acting_primary << ")"
<< std::endl;
}
if (test_map_pgs || test_map_pgs_dump || test_map_pgs_dump_all) {
if (pool != -1 && !osdmap.have_pg_pool(pool)) {
cerr << "There is no pool " << pool << std::endl;
exit(1);
}
int n = osdmap.get_max_osd();
vector<int> count(n, 0);
vector<int> first_count(n, 0);
vector<int> primary_count(n, 0);
vector<int> size(30, 0);
int max_size = 0;
if (test_random)
srand(getpid());
auto& pools = osdmap.get_pools();
for (auto p = pools.begin(); p != pools.end(); ++p) {
if (pool != -1 && p->first != pool)
continue;
if (pg_num > 0)
p->second.set_pg_num(pg_num);
cout << "pool " << p->first
<< " pg_num " << p->second.get_pg_num() << std::endl;
for (unsigned i = 0; i < p->second.get_pg_num(); ++i) {
pg_t pgid = pg_t(i, p->first);
vector<int> osds, raw, up, acting;
int primary, calced_primary, up_primary, acting_primary;
if (test_random) {
osds.resize(p->second.size);
for (unsigned i=0; i<osds.size(); ++i) {
osds[i] = rand() % osdmap.get_max_osd();
}
primary = osds[0];
} else if (test_map_pgs_dump_all) {
osdmap.pg_to_raw_osds(pgid, &raw, &calced_primary);
osdmap.pg_to_up_acting_osds(pgid, &up, &up_primary,
&acting, &acting_primary);
osds = acting;
primary = acting_primary;
} else {
osdmap.pg_to_acting_osds(pgid, &osds, &primary);
}
size[osds.size()]++;
if ((unsigned)max_size < osds.size())
max_size = osds.size();
if (test_map_pgs_dump) {
cout << pgid << "\t" << osds << "\t" << primary << std::endl;
} else if (test_map_pgs_dump_all) {
cout << pgid << " raw (" << raw << ", p" << calced_primary
<< ") up (" << up << ", p" << up_primary
<< ") acting (" << acting << ", p" << acting_primary << ")"
<< std::endl;
}
for (unsigned i=0; i<osds.size(); i++) {
//cout << " rep " << i << " on " << osds[i] << std::endl;
if (osds[i] != CRUSH_ITEM_NONE)
count[osds[i]]++;
}
if (osds.size() && osds[0] != CRUSH_ITEM_NONE)
first_count[osds[0]]++;
if (primary >= 0)
primary_count[primary]++;
}
}
uint64_t total = 0;
int in = 0;
int min_osd = -1;
int max_osd = -1;
cout << "#osd\tcount\tfirst\tprimary\tc wt\twt\n";
for (int i=0; i<n; i++) {
if (!osdmap.is_in(i))
continue;
if (osdmap.crush->get_item_weight(i) <= 0)
continue;
in++;
cout << "osd." << i
<< "\t" << count[i]
<< "\t" << first_count[i]
<< "\t" << primary_count[i]
<< "\t" << osdmap.crush->get_item_weightf(i)
<< "\t" << osdmap.get_weightf(i)
<< std::endl;
total += count[i];
if (count[i] &&
(min_osd < 0 ||
count[i] < count[min_osd]))
min_osd = i;
if (count[i] &&
(max_osd < 0 ||
count[i] > count[max_osd]))
max_osd = i;
}
uint64_t avg = in ? (total / in) : 0;
double dev = 0;
for (int i=0; i<n; i++) {
if (!osdmap.is_in(i))
continue;
if (osdmap.crush->get_item_weight(i) <= 0)
continue;
dev += (avg - count[i]) * (avg - count[i]);
}
dev /= in;
dev = sqrt(dev);
//double edev = sqrt(pgavg) * (double)avg / pgavg;
double edev = sqrt((double)total / (double)in * (1.0 - (1.0 / (double)in)));
cout << " in " << in << std::endl;
cout << " avg " << avg
<< " stddev " << dev
<< " (" << (dev/avg) << "x)"
<< " (expected " << edev << " " << (edev/avg) << "x))"
<< std::endl;
if (min_osd >= 0)
cout << " min osd." << min_osd << " " << count[min_osd] << std::endl;
if (max_osd >= 0)
cout << " max osd." << max_osd << " " << count[max_osd] << std::endl;
for (int i=0; i<=max_size; i++) {
if (size[i])
cout << "size " << i << "\t" << size[i] << std::endl;
}
}
if (test_crush) {
int pass = 0;
while (1) {
cout << "pass " << ++pass << std::endl;
ceph::unordered_map<pg_t,vector<int> > m;
for (map<int64_t,pg_pool_t>::const_iterator p = osdmap.get_pools().begin();
p != osdmap.get_pools().end();
++p) {
const pg_pool_t *pool = osdmap.get_pg_pool(p->first);
for (ps_t ps = 0; ps < pool->get_pg_num(); ps++) {
pg_t pgid(ps, p->first);
for (int i=0; i<100; i++) {
cout << pgid << " attempt " << i << std::endl;
vector<int> r;
osdmap.pg_to_acting_osds(pgid, r);
//cout << pgid << " " << r << std::endl;
if (m.count(pgid)) {
if (m[pgid] != r) {
cout << pgid << " had " << m[pgid] << " now " << r << std::endl;
ceph_abort();
}
} else
m[pgid] = r;
}
}
}
}
}
if (!print && !health && !tree && !modified &&
export_crush.empty() && import_crush.empty() &&
test_map_pg.empty() && test_map_object.empty() &&
!test_map_pgs && !test_map_pgs_dump && !test_map_pgs_dump_all &&
adjust_crush_weight.empty() && !upmap && !upmap_cleanup && !read) {
cerr << me << ": no action specified?" << std::endl;
usage();
}
if (modified)
osdmap.inc_epoch();
if (health) {
health_check_map_t checks;
osdmap.check_health(cct.get(), &checks);
JSONFormatter jf(true);
jf.dump_object("checks", checks);
jf.flush(cout);
}
if (print) {
if (print_formatter) {
print_formatter->open_object_section("osdmap");
osdmap.dump(print_formatter.get());
print_formatter->close_section();
print_formatter->flush(cout);
} else {
osdmap.print(cct.get(), cout);
}
}
if (tree) {
if (tree_formatter) {
tree_formatter->open_object_section("tree");
osdmap.print_tree(tree_formatter.get(), NULL);
tree_formatter->close_section();
tree_formatter->flush(cout);
cout << std::endl;
} else {
osdmap.print_tree(NULL, &cout);
}
}
if (modified) {
bl.clear();
osdmap.encode(bl, CEPH_FEATURES_SUPPORTED_DEFAULT | CEPH_FEATURE_RESERVED);
// write it out
cout << me << ": writing epoch " << osdmap.get_epoch()
<< " to " << fn
<< std::endl;
int r = bl.write_file(fn.c_str());
if (r) {
cerr << "osdmaptool: error writing to '" << fn << "': "
<< cpp_strerror(r) << std::endl;
return 1;
}
}
return 0;
}
| 33,958 | 34.746316 | 168 |
cc
|
null |
ceph-main/src/tools/psim.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "osd/OSDMap.h"
#include "include/buffer.h"
using namespace std;
int main(int argc, char **argv)
{
/*
* you need to create a suitable osdmap first. e.g., for 40 osds,
* $ ./osdmaptool --createsimple 40 --clobber .ceph_osdmap
*/
bufferlist bl;
std::string error;
if (bl.read_file(".ceph_osdmap", &error)) {
cout << argv[0] << ": error reading .ceph_osdmap: " << error << std::endl;
return 1;
}
OSDMap osdmap;
try {
osdmap.decode(bl);
} catch (ceph::buffer::end_of_buffer &eob) {
cout << "Exception (end_of_buffer) in decode(), exit." << std::endl;
exit(1);
}
//osdmap.set_primary_affinity(0, 0x8000);
//osdmap.set_primary_affinity(3, 0);
int n = osdmap.get_max_osd();
int count[n];
int first_count[n];
int primary_count[n];
int size[4];
memset(count, 0, sizeof(count));
memset(first_count, 0, sizeof(first_count));
memset(primary_count, 0, sizeof(primary_count));
memset(size, 0, sizeof(size));
for (int i=0; i<n; i++) {
osdmap.set_state(i, osdmap.get_state(i) | CEPH_OSD_UP);
//if (i<12)
osdmap.set_weight(i, CEPH_OSD_IN);
}
//pg_pool_t *p = (pg_pool_t *)osdmap.get_pg_pool(0);
//p->type = pg_pool_t::TYPE_ERASURE;
for (int n = 0; n < 10; n++) { // namespaces
char nspace[20];
snprintf(nspace, sizeof(nspace), "n%d", n);
for (int f = 0; f < 5000; f++) { // files
for (int b = 0; b < 4; b++) { // blocks
char foo[20];
snprintf(foo, sizeof(foo), "%d.%d", f, b);
object_t oid(foo);
ceph_object_layout l = osdmap.make_object_layout(oid, 0, nspace);
vector<int> osds;
pg_t pgid = pg_t(l.ol_pgid);
//pgid.u.ps = f * 4 + b;
int primary;
osdmap.pg_to_acting_osds(pgid, &osds, &primary);
size[osds.size()]++;
#if 0
if (0) {
hash<object_t> H;
int x = H(oid);
x = ceph_stable_mod(x, 1023, 1023);
int s = crush_hash32(x) % 15;
//cout << "ceph_psim: x = " << x << " s = " << s << std::endl;
//osds[0] = s;
}
#endif
//osds[0] = crush_hash32(f) % n;
//cout << "oid " << oid << " pgid " << pgid << " on " << osds << std::endl;
for (unsigned i=0; i<osds.size(); i++) {
//cout << " rep " << i << " on " << osds[i] << std::endl;
count[osds[i]]++;
}
if (osds.size())
first_count[osds[0]]++;
if (primary >= 0)
primary_count[primary]++;
}
}
}
uint64_t avg = 0;
for (int i=0; i<n; i++) {
cout << "osd." << i << "\t" << count[i]
<< "\t" << first_count[i]
<< "\t" << primary_count[i]
<< std::endl;
avg += count[i];
}
avg /= n;
double dev = 0;
for (int i=0; i<n; i++)
dev += (avg - count[i]) * (avg - count[i]);
dev /= n;
dev = sqrt(dev);
double pgavg = (double)osdmap.get_pg_pool(0)->get_pg_num() / (double)n;
double edev = sqrt(pgavg) * (double)avg / pgavg;
cout << " avg " << avg
<< " stddev " << dev
<< " (expected " << edev << ")"
<< " (indep object placement would be " << sqrt(avg) << ")" << std::endl;
for (int i=0; i<4; i++) {
cout << "size" << i << "\t" << size[i] << std::endl;
}
return 0;
}
| 3,210 | 25.758333 | 81 |
cc
|
null |
ceph-main/src/tools/radosacl.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <stdlib.h>
#include <time.h>

#include <vector>

#include "include/types.h"
#include "include/rados/librados.hpp"
using namespace std;
using namespace librados;
// Render len bytes of buf as lowercase hex into str.
// str must have room for 2*len + 1 chars (output is NUL-terminated).
void buf_to_hex(const unsigned char *buf, int len, char *str)
{
  str[0] = '\0';
  for (int i = 0; i < len; i++) {
    // snprintf instead of sprintf: bound each write to "XX" + NUL (3 bytes)
    snprintf(&str[i*2], 3, "%02x", (int)buf[i]);
  }
}
#define ID_SIZE 8
#define ACL_RD 0x1
#define ACL_WR 0x2
// Fixed-width (ID_SIZE byte) ACL identifier. The extra byte leaves room for
// a NUL terminator so id can be formatted/printed as a C string.
struct ACLID {
  char id[ID_SIZE + 1];

  // Serialize exactly ID_SIZE bytes; the trailing NUL is not encoded.
  void encode(bufferlist& bl) const {
    bl.append((const char *)id, ID_SIZE);
  }
  // NOTE(review): only the first ID_SIZE bytes are filled here; id[ID_SIZE]
  // is not written by decode — presumably set by whoever constructed the
  // object. TODO confirm callers always NUL-terminate before printing.
  void decode(bufferlist::const_iterator& iter) {
    iter.copy(ID_SIZE, (char *)id);
  }
};
WRITE_CLASS_ENCODER(ACLID)
typedef __u32 ACLFlags;
// Order ACLIDs by the raw bytes of their id field. Only the first ID_SIZE
// bytes of the struct participate (the trailing NUL byte is ignored), so
// this is well-defined for map keys.
inline bool operator<(const ACLID& l, const ACLID& r)
{
  return (memcmp(&l, &r, ID_SIZE) < 0);
}
// An (identity, permissions) pair; flags is a bitmask of ACL_RD / ACL_WR.
struct ACLPair {
  ACLID id;
  ACLFlags flags;
};
// Per-object ACL table: maps an ACLID to its permission flags, with
// bufferlist (de)serialization so it can round-trip through the "acl"
// object class exec calls.
class ObjectACLs {
  map<ACLID, ACLFlags> acls_map;

public:
  void encode(bufferlist& bl) const {
    using ceph::encode;
    encode(acls_map, bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    using ceph::decode;
    decode(acls_map, bl);
  }
  // Returns 0 and fills *flags; -EINVAL if flags is null, -ENOENT if id
  // has no entry.
  int read_acl(ACLID& id, ACLFlags *flags);
  // Insert or overwrite the flags for id.
  void set_acl(ACLID& id, ACLFlags flags);
};
WRITE_CLASS_ENCODER(ObjectACLs)
// Fetch the flags stored for this id.
// Returns -EINVAL for a null output pointer, -ENOENT when the id is
// unknown, 0 on success (with *flags filled in).
int ObjectACLs::read_acl(ACLID& id, ACLFlags *flags)
{
  if (flags == nullptr)
    return -EINVAL;
  auto entry = acls_map.find(id);
  if (entry == acls_map.end())
    return -ENOENT;
  *flags = entry->second;
  return 0;
}
// Record flags for this id, inserting a new entry or overwriting an
// existing one.
void ObjectACLs::set_acl(ACLID& id, ACLFlags flags)
{
  auto inserted = acls_map.emplace(id, flags);
  if (!inserted.second)
    inserted.first->second = flags;
}
// Placeholder for a user/group entity. Currently unused scaffolding: both
// members are private and never accessed anywhere in this file.
class ACLEntity
{
  string name;
  map<ACLID, ACLEntity> groups;
};
typedef map<ACLID, ACLEntity> tACLIDEntityMap;
static map<ACLID, ACLEntity> users;
static map<ACLID, ACLEntity> groups;
// Stub: intended to look up aclid in the global `users` map and fill
// *entity, but not implemented — currently a no-op.
void get_user(ACLID& aclid, ACLEntity *entity)
{
  //users.find(aclid);
}
// Exercise the "acl" object class: read the ACL blob from object "bar" in
// pool "data", add an ACL_RD entry for a fixed id, write it back, and dump
// the results. Returns 0; exits nonzero on connection/setup failure.
int main(int argc, const char **argv)
{
  Rados rados;
  if (rados.init(NULL) < 0) {
    cerr << "couldn't initialize rados!" << std::endl;
    exit(1);
  }
  if (rados.conf_read_file(NULL)) {
    cerr << "couldn't read Ceph configuration file!" << std::endl;
    exit(1);
  }
  if (rados.connect() < 0) {
    cerr << "couldn't connect to cluster!" << std::endl;
    exit(1);
  }

  time_t tm;
  bufferlist bl, bl2;
  char buf[128];

  // payload is just the current timestamp string
  time(&tm);
  snprintf(buf, 128, "%s", ctime(&tm));
  bl.append(buf, strlen(buf));

  const char *oid = "bar";

  IoCtx io_ctx;
  int r = rados.ioctx_create("data", io_ctx);
  cout << "open io_ctx result = " << r << " pool = " << io_ctx.get_pool_name() << std::endl;
  if (r < 0) {
    // Bug fix: previously we pressed on with an unusable IoCtx after a
    // failed ioctx_create; bail out instead.
    cerr << "couldn't open pool 'data'!" << std::endl;
    exit(1);
  }

  ACLID id;
  snprintf(id.id, sizeof(id.id), "%.8x", 0x1234);
  cout << "id=" << id.id << std::endl;

  // fetch any existing ACL blob for the object
  r = io_ctx.exec(oid, "acl", "get", bl, bl2);
  cout << "exec(acl get) returned " << r
       << " len=" << bl2.length() << std::endl;
  ObjectACLs oa;
  if (r >= 0) {
    auto iter = bl2.cbegin();
    oa.decode(iter);
  }

  // grant read and store the updated table back
  oa.set_acl(id, ACL_RD);
  bl.clear();
  oa.encode(bl);
  r = io_ctx.exec(oid, "acl", "set", bl, bl2);
  cout << "exec(acl set) returned " << r
       << " len=" << bl2.length() << std::endl;

  const unsigned char *md5 = (const unsigned char *)bl2.c_str();
  // std::vector instead of a C99 VLA (VLAs are not standard C++)
  std::vector<char> md5_str(bl2.length()*2 + 1);
  buf_to_hex(md5, bl2.length(), md5_str.data());
  cout << "md5 result=" << md5_str.data() << std::endl;

  int size = io_ctx.read(oid, bl2, 128, 0);
  cout << "read result=" << bl2.c_str() << std::endl;
  cout << "size=" << size << std::endl;

  return 0;
}
| 3,802 | 19.228723 | 92 |
cc
|
null |
ceph-main/src/tools/rebuild_mondb.cc
|
#include "auth/cephx/CephxKeyServer.h"
#include "common/errno.h"
#include "mon/AuthMonitor.h"
#include "mon/MonitorDBStore.h"
#include "os/ObjectStore.h"
#include "osd/OSD.h"
using namespace std;
static int update_auth(const string& keyring_path,
const OSDSuperblock& sb,
MonitorDBStore& ms);
static int update_monitor(const OSDSuperblock& sb, MonitorDBStore& ms);
static int update_osdmap(ObjectStore& fs,
OSDSuperblock& sb,
MonitorDBStore& ms);
// Rebuild a monitor store at store_path from this OSD's local state:
// auth entries first, then the osdmap series, then the cluster uuid.
// Returns 0 on success, negative errno from the first failing stage.
int update_mon_db(ObjectStore& fs, OSDSuperblock& sb,
                  const string& keyring,
                  const string& store_path)
{
  MonitorDBStore ms(store_path);
  int r = ms.create_and_open(cerr);
  if (r < 0) {
    cerr << "unable to open mon store: " << store_path << std::endl;
    return r;
  }
  // run the stages in order, stopping at the first failure
  r = update_auth(keyring, sb, ms);
  if (r >= 0) {
    r = update_osdmap(fs, sb, ms);
  }
  if (r >= 0) {
    r = update_monitor(sb, ms);
  }
  // always close the store, whether or not a stage failed
  ms.close();
  return r;
}
// Wrap the KeyServerData increment in an AuthMonitor::Incremental and
// commit it to the "auth" prefix as the next paxos version, maintaining
// the first/last_committed markers the AuthMonitor expects.
static void add_auth(KeyServerData::Incremental& auth_inc,
                     MonitorDBStore& ms)
{
  AuthMonitor::Incremental inc;
  inc.inc_type = AuthMonitor::AUTH_DATA;
  encode(auth_inc, inc.auth_data);
  inc.auth_type = CEPH_AUTH_CEPHX;

  bufferlist bl;
  __u8 v = 1;  // version byte preceding the encoded incremental
  encode(v, bl);
  inc.encode(bl, CEPH_FEATURES_ALL);

  const string prefix("auth");
  // append as the next committed version after whatever is stored
  auto last_committed = ms.get(prefix, "last_committed") + 1;
  auto t = make_shared<MonitorDBStore::Transaction>();
  t->put(prefix, last_committed, bl);
  t->put(prefix, "last_committed", last_committed);
  auto first_committed = ms.get(prefix, "first_committed");
  if (!first_committed) {
    // first entry ever written under this prefix
    t->put(prefix, "first_committed", last_committed);
  }
  ms.apply_transaction(t);
}
// Build an AUTH_INC_ADD increment for this OSD: entity name
// "osd.<whoami>", the key read from keyring_path, and either the caps
// found in the keyring or default OSD caps. Returns 0 on success — also
// when the keyring is missing or empty, in which case the add is skipped —
// and a negative errno on read/decode failure.
static int get_auth_inc(const string& keyring_path,
                        const OSDSuperblock& sb,
                        KeyServerData::Incremental* auth_inc)
{
  auth_inc->op = KeyServerData::AUTH_INC_ADD;

  // get the name
  EntityName entity;
  // assuming the entity name of OSD is "osd.<osd_id>"
  entity.set(CEPH_ENTITY_TYPE_OSD, std::to_string(sb.whoami));
  auth_inc->name = entity;

  // read keyring from disk
  KeyRing keyring;
  {
    bufferlist bl;
    string error;
    int r = bl.read_file(keyring_path.c_str(), &error);
    if (r < 0) {
      if (r == -ENOENT) {
        // a missing keyring is tolerated: nothing to add
        cout << "ignoring keyring (" << keyring_path << ")"
             << ": " << error << std::endl;
        return 0;
      } else {
        cerr << "unable to read keyring (" << keyring_path << ")"
             << ": " << error << std::endl;
        return r;
      }
    } else if (bl.length() == 0) {
      cout << "ignoring empty keyring: " << keyring_path << std::endl;
      return 0;
    }
    auto bp = bl.cbegin();
    try {
      decode(keyring, bp);
    } catch (const buffer::error& e) {
      cerr << "error decoding keyring: " << keyring_path << std::endl;
      return -EINVAL;
    }
  }

  // get the key
  EntityAuth new_inc;
  if (!keyring.get_auth(auth_inc->name, new_inc)) {
    cerr << "key for " << auth_inc->name << " not found in keyring: "
         << keyring_path << std::endl;
    return -EINVAL;
  }
  auth_inc->auth.key = new_inc.key;

  // get the caps
  map<string,bufferlist> caps;
  if (new_inc.caps.empty()) {
    // fallback to default caps for an OSD
    //   osd 'allow *' mon 'allow rwx'
    // as suggested by document.
    encode(string("allow *"), caps["osd"]);
    encode(string("allow rwx"), caps["mon"]);
  } else {
    caps = new_inc.caps;
  }
  auth_inc->auth.caps = caps;
  return 0;
}
// rebuild
// - auth/${epoch}
// - auth/first_committed
// - auth/last_committed
// Rebuild the "auth" paxos entries for this OSD: construct the increment
// from the keyring, then commit it to the mon store.
// (logic borrowed from AuthMonitor::prepare_command(), "auth add" path)
static int update_auth(const string& keyring_path,
                       const OSDSuperblock& sb,
                       MonitorDBStore& ms)
{
  KeyServerData::Incremental auth_inc;
  int r = get_auth_inc(keyring_path, sb, &auth_inc);
  if (r != 0) {
    return r;
  }
  add_auth(auth_inc, ms);
  return 0;
}
// stolen from Monitor::check_fsid()
// Compare fsid against the cluster_uuid already recorded in the mon store.
// Returns -ENOENT when no uuid is stored yet, -EINVAL when the stored value
// cannot be parsed, -EEXIST on mismatch, and 0 when they agree.
static int check_fsid(const uuid_d& fsid, MonitorDBStore& ms)
{
  bufferlist bl;
  int r = ms.get("monitor", "cluster_uuid", bl);
  if (r == -ENOENT)
    return r;
  // stored value is newline-terminated; keep only the first line
  string uuid(bl.c_str(), bl.length());
  auto end = uuid.find_first_of('\n');
  if (end != uuid.npos) {
    uuid.resize(end);
  }
  uuid_d existing;
  if (!existing.parse(uuid.c_str())) {
    cerr << "error: unable to parse uuid" << std::endl;
    return -EINVAL;
  }
  if (fsid != existing) {
    cerr << "error: cluster_uuid " << existing << " != " << fsid << std::endl;
    return -EEXIST;
  }
  return 0;
}
// rebuild
//  - monitor/cluster_uuid
// Record the cluster fsid from the OSD superblock in the mon store, unless
// a matching uuid is already present. Returns 0 on success (or when already
// recorded), -EINVAL for an unparseable stored uuid, -EEXIST on conflict.
int update_monitor(const OSDSuperblock& sb, MonitorDBStore& ms)
{
  switch (check_fsid(sb.cluster_fsid, ms)) {
  case -ENOENT:
    break;             // not recorded yet: write it below
  case -EINVAL:
    return -EINVAL;
  case -EEXIST:
    return -EEXIST;
  case 0:
    return 0;          // already recorded and matching
  default:
    ceph_abort();
  }
  // store as "<uuid>\n", matching the format check_fsid() strips
  string uuid = stringify(sb.cluster_fsid) + "\n";
  bufferlist bl;
  bl.append(uuid);
  auto t = make_shared<MonitorDBStore::Transaction>();
  t->put("monitor", "cluster_uuid", bl);
  ms.apply_transaction(t);
  return 0;
}
// rebuild
//  - osdmap/${epoch}
//  - osdmap/full_${epoch}
//  - osdmap/full_latest
//  - osdmap/first_committed
//  - osdmap/last_committed
// Copy the OSD's locally stored osdmaps (incremental and full) from its
// meta collection into the mon store, trimming epochs older than
// sb.oldest_map and cross-checking CRCs between inc and full maps where
// available. Returns 0 on success, -EINVAL on a missing full map or CRC
// mismatch.
int update_osdmap(ObjectStore& fs, OSDSuperblock& sb, MonitorDBStore& ms)
{
  const string prefix("osdmap");
  const string first_committed_name("first_committed");
  const string last_committed_name("last_committed");
  epoch_t first_committed = ms.get(prefix, first_committed_name);
  epoch_t last_committed = ms.get(prefix, last_committed_name);
  auto t = make_shared<MonitorDBStore::Transaction>();

  // trim stale maps
  unsigned ntrimmed = 0;
  // osdmap starts at 1. if we have a "0" first_committed, then there is nothing
  // to trim. and "1 osdmaps trimmed" in the output message is misleading. so
  // let's make it an exception.
  for (auto e = first_committed; first_committed && e < sb.oldest_map; e++) {
    t->erase(prefix, e);
    t->erase(prefix, ms.combine_strings("full", e));
    ntrimmed++;
  }
  // make sure we have a non-zero first_committed. OSDMonitor relies on this.
  // because PaxosService::put_last_committed() set it to last_committed, if it
  // is zero. which breaks OSDMonitor::update_from_paxos(), in which we believe
  // that latest_full should always be greater than last_committed.
  if (first_committed == 0 && sb.oldest_map < sb.newest_map) {
    first_committed = 1;
  } else if (ntrimmed) {
    first_committed += ntrimmed;
  }
  if (first_committed) {
    t->put(prefix, first_committed_name, first_committed);
    ms.apply_transaction(t);
    t = make_shared<MonitorDBStore::Transaction>();
  }

  unsigned nadded = 0;

  auto ch = fs.open_collection(coll_t::meta());
  OSDMap osdmap;
  // walk the epochs this OSD still has, picking up after whatever the mon
  // store already committed
  for (auto e = std::max(last_committed+1, sb.oldest_map);
       e <= sb.newest_map; e++) {
    bool have_crc = false;
    uint32_t crc = -1;
    uint64_t features = 0;
    // add inc maps
    // (immediately-invoked lambda so errors map onto the switch below)
    auto add_inc_result = [&] {
      const auto oid = OSD::get_inc_osdmap_pobject_name(e);
      bufferlist bl;
      int nread = fs.read(ch, oid, 0, 0, bl);
      if (nread <= 0) {
        cout << "missing " << oid << std::endl;
        return -ENOENT;
      }
      t->put(prefix, e, bl);

      OSDMap::Incremental inc;
      auto p = bl.cbegin();
      inc.decode(p);
      features = inc.encode_features | CEPH_FEATURE_RESERVED;
      if (osdmap.get_epoch() && e > 1) {
        if (osdmap.apply_incremental(inc)) {
          cerr << "bad fsid: "
               << osdmap.get_fsid() << " != " << inc.fsid << std::endl;
          return -EINVAL;
        }
        have_crc = inc.have_crc;
        if (inc.have_crc) {
          crc = inc.full_crc;
          bufferlist fbl;
          osdmap.encode(fbl, features);
          if (osdmap.get_crc() != inc.full_crc) {
            cerr << "mismatched inc crc: "
                 << osdmap.get_crc() << " != " << inc.full_crc << std::endl;
            return -EINVAL;
          }
          // inc.decode() verifies `inc_crc`, so it's been taken care of.
        }
      }
      return 0;
    }();
    switch (add_inc_result) {
    case -ENOENT:
      // no worries, we always have full map
      break;
    case -EINVAL:
      return -EINVAL;
    case 0:
      break;
    default:
      assert(0);
    }
    // add full maps
    {
      const auto oid = OSD::get_osdmap_pobject_name(e);
      bufferlist bl;
      int nread = fs.read(ch, oid, 0, 0, bl);
      if (nread <= 0) {
        // full maps are mandatory, unlike incrementals above
        cerr << "missing " << oid << std::endl;
        return -EINVAL;
      }
      t->put(prefix, ms.combine_strings("full", e), bl);

      auto p = bl.cbegin();
      osdmap.decode(p);
      if (osdmap.have_crc()) {
        if (have_crc && osdmap.get_crc() != crc) {
          cerr << "mismatched full/inc crc: "
               << osdmap.get_crc() << " != " << crc << std::endl;
          return -EINVAL;
        }
        // re-encode and confirm the crc survives a round trip
        uint32_t saved_crc = osdmap.get_crc();
        bufferlist fbl;
        osdmap.encode(fbl, features);
        if (osdmap.get_crc() != saved_crc) {
          cerr << "mismatched full crc: "
               << saved_crc << " != " << osdmap.get_crc() << std::endl;
          return -EINVAL;
        }
      }
    }
    nadded++;

    // last_committed
    t->put(prefix, last_committed_name, e);
    // full last
    t->put(prefix, ms.combine_strings("full", "latest"), e);
    // this number comes from the default value of osd_target_transaction_size,
    // so we won't OOM or stuff too many maps in a single transaction if OSD is
    // keeping a large series of osdmap
    static constexpr unsigned TRANSACTION_SIZE = 30;
    if (t->size() >= TRANSACTION_SIZE) {
      ms.apply_transaction(t);
      t = make_shared<MonitorDBStore::Transaction>();
    }
  }
  if (!t->empty()) {
    ms.apply_transaction(t);
  }
  t.reset();

  string osd_name("osd.");
  osd_name += std::to_string(sb.whoami);
  cout << std::left << setw(8)
       << osd_name << ": "
       << ntrimmed << " osdmaps trimmed, "
       << nadded << " osdmaps added." << std::endl;
  return 0;
}
| 10,243 | 27.937853 | 80 |
cc
|
null |
ceph-main/src/tools/rebuild_mondb.h
|
#pragma once
#include <string>
class ObjectStore;
class OSDSuperblock;
// Rebuild a monitor database from data held locally by an OSD.
// NOTE(review): implementation not visible here -- presumably reads osdmaps
// from `fs` (guided by `sb`) and writes them into the mon store at
// `store_path`, using the keyring at `keyring_path`; confirm against
// rebuild_mondb.cc. Returns 0 on success, negative error code on failure
// (verify against the implementation).
int update_mon_db(ObjectStore& fs, OSDSuperblock& sb,
const std::string& keyring_path,
const std::string& store_path);
| 228 | 21.9 | 53 |
h
|
null |
ceph-main/src/tools/scratchtoolpp.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/types.h"
#include "include/rados/librados.hpp"
using namespace librados;
#include <iostream>
#include <errno.h>
#include <stdlib.h>
#include <time.h>
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
using namespace std;
// Render `len` bytes of `buf` as a lowercase hex string into `str`.
// `str` must have room for 2*len + 1 characters (including the trailing NUL).
void buf_to_hex(const unsigned char *buf, int len, char *str)
{
  str[0] = '\0';
  for (int i = 0; i < len; i++) {
    // Bound each write to the 3 bytes we own ("%02x" plus NUL); the original
    // used unbounded sprintf, which clang/gcc flag as deprecated-unsafe.
    snprintf(&str[i*2], 3, "%02x", (int)buf[i]);
  }
}
// Minimal librados WatchCtx: logs every notification delivered on the
// watched object so the interactive test run can observe watch/notify.
class C_Watch : public WatchCtx {
public:
  C_Watch() {}
  // Called by librados when a notify arrives; just prints opcode/version.
  void notify(uint8_t opcode, uint64_t ver, bufferlist& bl) override {
    cout << "C_Watch::notify() opcode=" << (int)opcode << " ver=" << ver << std::endl;
  }
};
// Pause the interactive test run: prompt, then swallow stdin up to and
// including the next newline (or stop early on EOF).
void testradospp_milestone(void)
{
  cout << "*** press enter to continue ***" << std::endl;
  for (int ch = getchar(); ch != EOF && ch != '\n'; ch = getchar()) {
    // discard everything before the newline
  }
}
// Interactive smoke test for the librados C++ API: exercises configuration,
// pool/ioctx lifecycle, object I/O, watch/notify, class exec, compound
// operations, cmpxattr and object listing against a live cluster.
// Pauses at milestones so a human can inspect cluster state in between.
int main(int argc, const char **argv)
{
  Rados rados;
  if (rados.init(NULL) < 0) {
    cerr << "couldn't initialize rados!" << std::endl;
    exit(1);
  }
  if (rados.conf_read_file(NULL)) {
    cerr << "couldn't read configuration file." << std::endl;
    exit(1);
  }
  rados.conf_parse_argv(argc, argv);
  // Negative test: setting an unknown option must fail (conf_set returns
  // nonzero on error, so `!` means "succeeded unexpectedly").
  if (!rados.conf_set("config option that doesn't exist",
                     "some random value")) {
    printf("error: succeeded in setting nonexistent config option\n");
    exit(1);
  }
  if (rados.conf_set("log to stderr", "true")) {
    printf("error: error setting log_to_stderr\n");
    exit(1);
  }
  std::string tmp;
  if (rados.conf_get("log to stderr", tmp)) {
    printf("error: failed to read log_to_stderr from config\n");
    exit(1);
  }
  if (tmp != "true") {
    printf("error: new setting for log_to_stderr failed to take effect.\n");
    exit(1);
  }
  if (rados.connect()) {
    printf("error connecting\n");
    exit(1);
  }
  cout << "rados_initialize completed" << std::endl;
  testradospp_milestone();

  // Seed test payloads: full timestamp string in bl, first 16 bytes in blf.
  time_t tm;
  bufferlist bl, bl2, blf;
  char buf[128];
  time(&tm);
  snprintf(buf, 128, "%s", ctime(&tm));
  bl.append(buf, strlen(buf));
  blf.append(buf, 16);

  const char *oid = "bar";
  int r = rados.pool_create("foo");
  cout << "pool_create result = " << r << std::endl;
  IoCtx io_ctx;
  r = rados.ioctx_create("foo", io_ctx);
  cout << "ioctx_create result = " << r << std::endl;
  r = io_ctx.write(oid, bl, bl.length(), 0);
  uint64_t objver = io_ctx.get_last_version();
  ceph_assert(objver > 0);
  cout << "io_ctx.write returned " << r << " last_ver=" << objver << std::endl;

  uint64_t stat_size;
  time_t stat_mtime;
  r = io_ctx.stat(oid, &stat_size, &stat_mtime);
  cout << "io_ctx.stat returned " << r << " size = " << stat_size << " mtime = " << stat_mtime << std::endl;
  r = io_ctx.stat(oid, NULL, NULL);
  cout << "io_ctx.stat(does_not_exist) = " << r << std::endl;

  // Watch/notify round trips, including a notify after unwatch.
  uint64_t handle;
  C_Watch wc;
  r = io_ctx.watch(oid, objver, &handle, &wc);
  cout << "io_ctx.watch returned " << r << std::endl;
  testradospp_milestone();
  io_ctx.set_notify_timeout(7);
  bufferlist notify_bl;
  r = io_ctx.notify(oid, objver, notify_bl);
  cout << "io_ctx.notify returned " << r << std::endl;
  testradospp_milestone();
  r = io_ctx.notify(oid, objver, notify_bl);
  cout << "io_ctx.notify returned " << r << std::endl;
  testradospp_milestone();
  r = io_ctx.unwatch(oid, handle);
  cout << "io_ctx.unwatch returned " << r << std::endl;
  testradospp_milestone();
  r = io_ctx.notify(oid, objver, notify_bl);
  cout << "io_ctx.notify returned " << r << std::endl;
  testradospp_milestone();

  // Assorted write/append/truncate/read calls with results printed for
  // manual inspection.
  io_ctx.set_assert_version(objver);
  r = io_ctx.write(oid, bl, bl.length() - 1, 0);
  cout << "io_ctx.write returned " << r << std::endl;
  r = io_ctx.write(oid, bl, bl.length() - 2, 0);
  cout << "io_ctx.write returned " << r << std::endl;
  r = io_ctx.write(oid, bl, bl.length() - 3, 0);
  cout << "rados.write returned " << r << std::endl;
  r = io_ctx.append(oid, bl, bl.length());
  cout << "rados.write returned " << r << std::endl;
  r = io_ctx.write_full(oid, blf);
  cout << "rados.write_full returned " << r << std::endl;
  r = io_ctx.read(oid, bl, bl.length(), 0);
  cout << "rados.read returned " << r << std::endl;
  r = io_ctx.trunc(oid, 8);
  cout << "rados.trunc returned " << r << std::endl;
  r = io_ctx.read(oid, bl, bl.length(), 0);
  cout << "rados.read returned " << r << std::endl;

  // Object-class exec: md5 of the object's data, rendered via buf_to_hex.
  r = io_ctx.exec(oid, "crypto", "md5", bl, bl2);
  cout << "exec returned " << r << " buf size=" << bl2.length() << std::endl;
  const unsigned char *md5 = (const unsigned char *)bl2.c_str();
  char md5_str[bl2.length()*2 + 1];
  buf_to_hex(md5, bl2.length(), md5_str);
  cout << "md5 result=" << md5_str << std::endl;

  // test assert_version
  r = io_ctx.read(oid, bl, 0, 1);
  ceph_assert(r >= 0);
  uint64_t v = io_ctx.get_last_version();
  cout << oid << " version is " << v << std::endl;
  ceph_assert(v > 0);
  // Matching version: read succeeds; stale version: -ERANGE; future
  // version: -EOVERFLOW.
  io_ctx.set_assert_version(v);
  r = io_ctx.read(oid, bl, 0, 1);
  ceph_assert(r >= 0);
  io_ctx.set_assert_version(v - 1);
  r = io_ctx.read(oid, bl, 0, 1);
  ceph_assert(r == -ERANGE);
  io_ctx.set_assert_version(v + 1);
  r = io_ctx.read(oid, bl, 0, 1);
  ceph_assert(r == -EOVERFLOW);

  r = io_ctx.exec(oid, "crypto", "sha1", bl, bl2);
  cout << "exec returned " << r << std::endl;
  const unsigned char *sha1 = (const unsigned char *)bl2.c_str();
  char sha1_str[bl2.length()*2 + 1];
  buf_to_hex(sha1, bl2.length(), sha1_str);
  cout << "sha1 result=" << sha1_str << std::endl;

  r = io_ctx.exec(oid, "acl", "set", bl, bl2);
  cout << "exec (set) returned " << r << std::endl;
  r = io_ctx.exec(oid, "acl", "get", bl, bl2);
  cout << "exec (get) returned " << r << std::endl;
  if (bl2.length() > 0) {
    cout << "attr=" << bl2.c_str() << std::endl;
  }

  int size = io_ctx.read(oid, bl2, 128, 0);
  if (size <= 0) {
    cout << "failed to read oid " << oid << "." << std::endl;
    exit(1);
  }
  if (size > 4096) {
    cout << "read too many bytes from oid " << oid << "." << std::endl;
    exit(1);
  }
  char rbuf[size + 1];
  memcpy(rbuf, bl2.c_str(), size);
  rbuf[size] = '\0';
  cout << "read result='" << rbuf << "'" << std::endl;
  cout << "size=" << size << std::endl;

  // rbd class calls against a second object; snap_list's payload is a
  // sequence of NUL-terminated strings when r > 0.
  const char *oid2 = "jjj10.rbd";
  r = io_ctx.exec(oid2, "rbd", "snap_list", bl, bl2);
  cout << "snap_list result=" << r << std::endl;
  r = io_ctx.exec(oid2, "rbd", "snap_add", bl, bl2);
  cout << "snap_add result=" << r << std::endl;
  if (r > 0) {
    char *s = bl2.c_str();
    for (int i=0; i<r; i++, s += strlen(s) + 1)
      cout << s << std::endl;
  }

  cout << "compound operation..." << std::endl;
  ObjectWriteOperation o;
  o.write(0, bl);
  o.setxattr("foo", bl2);
  r = io_ctx.operate(oid, &o);
  cout << "operate result=" << r << std::endl;

  // cmpxattr: equal value passes, mismatched value yields -ECANCELED.
  cout << "cmpxattr" << std::endl;
  bufferlist val;
  val.append("foo");
  r = io_ctx.setxattr(oid, "foo", val);
  ceph_assert(r >= 0);
  {
    ObjectReadOperation o;
    o.cmpxattr("foo", CEPH_OSD_CMPXATTR_OP_EQ, val);
    r = io_ctx.operate(oid, &o, &bl2);
    cout << " got " << r << " wanted >= 0" << std::endl;
    ceph_assert(r >= 0);
  }
  val.append("...");
  {
    ObjectReadOperation o;
    o.cmpxattr("foo", CEPH_OSD_CMPXATTR_OP_EQ, val);
    r = io_ctx.operate(oid, &o, &bl2);
    cout << " got " << r << " wanted " << -ECANCELED << " (-ECANCELED)" << std::endl;
    ceph_assert(r == -ECANCELED);
  }

  // Enumerate objects and xattrs, then tear everything down.
  io_ctx.locator_set_key(string());
  cout << "iterating over objects..." << std::endl;
  int num_objs = 0;
  for (NObjectIterator iter = io_ctx.nobjects_begin();
       iter != io_ctx.nobjects_end(); ++iter) {
    num_objs++;
    cout << "'" << *iter << "'" << std::endl;
  }
  cout << "iterated over " << num_objs << " objects." << std::endl;
  map<string, bufferlist> attrset;
  io_ctx.getxattrs(oid, attrset);
  map<string, bufferlist>::iterator it;
  for (it = attrset.begin(); it != attrset.end(); ++it) {
    cout << "xattr: " << it->first << std::endl;
  }
  r = io_ctx.remove(oid);
  cout << "remove result=" << r << std::endl;
  r = rados.pool_delete("foo");
  cout << "pool_delete result=" << r << std::endl;
  rados.shutdown();
  return 0;
}
#pragma GCC diagnostic pop
#pragma GCC diagnostic warning "-Wpragmas"
| 8,612 | 28.097973 | 108 |
cc
|
null |
ceph-main/src/tools/setup-virtualenv.sh
|
#!/usr/bin/env bash
#
# Copyright (C) 2016 <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
# Quote $0 so the script works from a path containing whitespace.
SCRIPTNAME="$(basename "$0")"
if [ "$(uname)" = FreeBSD ]; then
    GETOPT="/usr/local/bin/getopt"
else
    GETOPT=getopt
fi

# Print usage and exit non-zero (also serves as the error path).
function usage {
    echo
    echo "$SCRIPTNAME - automate setup of Python virtual environment"
    echo "    (for use in building Ceph)"
    echo
    echo "Usage:"
    echo "    $SCRIPTNAME [--python=PYTHON_BINARY] TARGET_DIRECTORY"
    echo
    echo "    TARGET_DIRECTORY will be created if it doesn't exist,"
    echo "    and completely destroyed and re-created if it does!"
    echo
    exit 1
}

TEMP=$($GETOPT --options "h" --long "help,python:" --name "$SCRIPTNAME" -- "$@")
test $? != 0 && usage

eval set -- "$TEMP"

PYTHON=python3
while true ; do
    case "$1" in
        -h|--help) usage ;; # does not return
        --python) PYTHON="$2" ; shift ; shift ;;
        --) shift ; break ;;
        *) echo "Internal error" ; exit 1 ;;
    esac
done

if ! $PYTHON -VV; then
    echo "$SCRIPTNAME: unable to locate a valid PYTHON_BINARY"
    usage
fi

DIR=$1
if [ -z "$DIR" ] ; then
    echo "$SCRIPTNAME: need a directory path, but none was provided"
    usage
fi
# Destroy and recreate the target venv from scratch. $DIR is quoted
# everywhere below so paths with spaces don't word-split.
rm -fr "$DIR"
mkdir -p "$DIR"
$PYTHON -m venv "$DIR"
. "$DIR/bin/activate"

# Detect whether this (possibly ancient) pip knows the flag.
if pip --help | grep -q disable-pip-version-check; then
    DISABLE_PIP_VERSION_CHECK=--disable-pip-version-check
else
    DISABLE_PIP_VERSION_CHECK=
fi

# older versions of pip will not install wrap_console scripts
# when using wheel packages
pip $DISABLE_PIP_VERSION_CHECK --log "$DIR/log.txt" install --upgrade 'pip >= 6.1'

# Re-detect after the upgrade: the freshly installed pip may support the
# flag even when the bootstrap pip did not (this repetition is deliberate).
if pip --help | grep -q disable-pip-version-check; then
    DISABLE_PIP_VERSION_CHECK=--disable-pip-version-check
else
    DISABLE_PIP_VERSION_CHECK=
fi

# Prefer a local wheelhouse (offline install) when one exists.
if test -d wheelhouse ; then
    NO_INDEX=--no-index
    FIND_LINKS_OPT=--find-links=file://$(pwd)/wheelhouse
fi

pip $DISABLE_PIP_VERSION_CHECK --log "$DIR/log.txt" install $NO_INDEX $FIND_LINKS_OPT 'tox >=2.9.1'
require_files=$(ls *requirements*.txt 2>/dev/null) || true
constraint_files=$(ls *constraints*.txt 2>/dev/null) || true
require=$(echo -n "$require_files" | sed -e 's/^/-r /')
constraint=$(echo -n "$constraint_files" | sed -e 's/^/-c /')
md5=wheelhouse/md5
if test "$require"; then
    # Fall back to the network if the wheelhouse is missing or stale.
    if ! test -f $md5 || ! md5sum -c wheelhouse/md5 > /dev/null; then
        NO_INDEX=''
        FIND_LINKS_OPT=''
    fi
    pip --exists-action i $DISABLE_PIP_VERSION_CHECK --log "$DIR/log.txt" install \
        $NO_INDEX $FIND_LINKS_OPT $require $constraint
fi
| 2,994 | 27.798077 | 97 |
sh
|
null |
ceph-main/src/tools/ceph-dencoder/ceph_dencoder.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <filesystem>
#include <iomanip>
#include "ceph_ver.h"
#include "include/types.h"
#include "common/Formatter.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "denc_plugin.h"
#include "denc_registry.h"
#define MB(m) ((m) * 1024 * 1024)
namespace fs = std::filesystem;
using namespace std;
// Print the ceph-dencoder command reference to `out`. The text below is the
// user-visible CLI contract; keep wording and alignment stable.
void usage(ostream &out)
{
  out << "usage: ceph-dencoder [commands ...]" << std::endl;
  out << "\n";
  out << "  version             print version string (to stdout)\n";
  out << "\n";
  out << "  import <encfile>    read encoded data from encfile\n";
  out << "  export <outfile>    write encoded data to outfile\n";
  out << "\n";
  out << "  set_features <num>  set feature bits used for encoding\n";
  out << "  get_features        print feature bits (int) to stdout\n";
  out << "\n";
  out << "  list_types          list supported types\n";
  out << "  type <classname>    select in-memory type\n";
  out << "  skip <num>          skip <num> leading bytes before decoding\n";
  out << "  decode              decode into in-memory object\n";
  out << "  encode              encode in-memory object\n";
  out << "  dump_json           dump in-memory object as json (to stdout)\n";
  out << "  hexdump             print encoded data in hex\n";
  out << "  get_struct_v        print version of the encoded object\n";
  out << "  get_struct_compat   print the oldest version of decoder that can decode the encoded object\n";
  out << "\n";
  out << "  copy                copy object (via operator=)\n";
  out << "  copy_ctor           copy object (via copy ctor)\n";
  out << "\n";
  out << "  count_tests         print number of generated test objects (to stdout)\n";
  out << "  select_test <n>     select generated test object as in-memory object\n";
  out << "  is_deterministic    exit w/ success if type encodes deterministically\n";
}
// Locate and load every "denc-mod-*" dencoder plugin. The search directory
// is, in order of preference: $CEPH_LIB, the build tree's lib/ (detected via
// CMakeCache.txt in the cwd), or the installed module directory.
vector<DencoderPlugin> load_plugins()
{
  fs::path dir{CEPH_DENC_MOD_DIR};
  if (const char* ceph_lib = getenv("CEPH_LIB")) {
    dir = ceph_lib;
  } else if (fs::is_regular_file("CMakeCache.txt")) {
    dir = std::filesystem::canonical("lib");
  }
  if (!fs::is_directory(dir)) {
    std::cerr << "unable to load dencoders from "
              << std::quoted(dir.native()) << ". "
              << "it is not a directory." << std::endl;
    return {};
  }
  static constexpr string_view DENC_MOD_PREFIX = "denc-mod-";
  vector<DencoderPlugin> loaded;
  for (const auto& entry : fs::directory_iterator(dir)) {
    // Only consider files whose stem starts with the plugin prefix.
    const string stem = entry.path().stem().string();
    if (stem.compare(0, DENC_MOD_PREFIX.size(), DENC_MOD_PREFIX) != 0) {
      continue;
    }
    DencoderPlugin plugin(entry);
    if (plugin.good()) {
      loaded.push_back(std::move(plugin));
    }
  }
  return loaded;
}
// ceph-dencoder entry point: loads all dencoder plugins, then interprets the
// command-line arguments as a small imperative language (see usage()) that
// selects a type, encodes/decodes buffers, and dumps/inspects the result.
// Commands are processed strictly left to right and share state (`den`,
// `encbl`, `features`, `skip`) across the loop.
int main(int argc, const char **argv)
{
  vector<DencoderPlugin> plugins = load_plugins();
  DencoderRegistry registry;
  // Each plugin hands back (name, dencoder) pairs to register globally.
  for (auto& plugin : plugins) {
    for (auto& [name, denc] : plugin.register_dencoders()) {
      registry.register_dencoder(name, denc);
    }
  }

  auto args = argv_to_vec(argc, argv);
  env_to_vec(args);

  Dencoder *den = NULL;           // currently selected type, set by "type"
  uint64_t features = CEPH_FEATURES_SUPPORTED_DEFAULT;
  bufferlist encbl;               // the encoded byte buffer commands act on
  uint64_t skip = 0;              // leading bytes to ignore when decoding

  if (args.empty()) {
    cerr << "-h for help" << std::endl;
    return 1;
  }
  for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ++i) {
    string err;

    auto& dencoders = registry.get();
    if (*i == string("help") || *i == string("-h") || *i == string("--help")) {
      usage(cout);
      return 0;
    } else if (*i == string("version")) {
      cout << CEPH_GIT_NICE_VER << std::endl;
    } else if (*i == string("list_types")) {
      for (auto& dencoder : dencoders)
	cout << dencoder.first << std::endl;
      return 0;
    } else if (*i == string("type")) {
      ++i;
      if (i == args.end()) {
	cerr << "expecting type" << std::endl;
	return 1;
      }
      string cname = *i;
      if (!dencoders.count(cname)) {
	cerr << "class '" << cname << "' unknown" << std::endl;
	return 1;
      }
      den = dencoders[cname];
      den->generate();
    } else if (*i == string("skip")) {
      ++i;
      if (i == args.end()) {
	cerr << "expecting byte count" << std::endl;
	return 1;
      }
      skip = atoi(*i);
    } else if (*i == string("get_features")) {
      cout << CEPH_FEATURES_SUPPORTED_DEFAULT << std::endl;
      return 0;
    } else if (*i == string("set_features")) {
      ++i;
      if (i == args.end()) {
	cerr << "expecting features" << std::endl;
	return 1;
      }
      features = atoll(*i);
    } else if (*i == string("encode")) {
      if (!den) {
	cerr << "must first select type with 'type <name>'" << std::endl;
	return 1;
      }
      den->encode(encbl, features | CEPH_FEATURE_RESERVED); // hack for OSDMap
    } else if (*i == string("decode")) {
      if (!den) {
	cerr << "must first select type with 'type <name>'" << std::endl;
	return 1;
      }
      err = den->decode(encbl, skip);
    } else if (*i == string("copy_ctor")) {
      if (!den) {
	cerr << "must first select type with 'type <name>'" << std::endl;
	return 1;
      }
      den->copy_ctor();
    } else if (*i == string("copy")) {
      if (!den) {
	cerr << "must first select type with 'type <name>'" << std::endl;
	return 1;
      }
      den->copy();
    } else if (*i == string("dump_json")) {
      if (!den) {
	cerr << "must first select type with 'type <name>'" << std::endl;
	return 1;
      }
      JSONFormatter jf(true);
      jf.open_object_section("object");
      den->dump(&jf);
      jf.close_section();
      jf.flush(cout);
      cout << std::endl;
    } else if (*i == string("hexdump")) {
      encbl.hexdump(cout);
    } else if (*i == string("get_struct_v")) {
      std::cout << den->get_struct_v(encbl, 0) << std::endl;
    } else if (*i == string("get_struct_compat")) {
      // struct_compat is encoded immediately after the 1-byte struct_v.
      std::cout << den->get_struct_v(encbl, sizeof(uint8_t)) << std::endl;
    } else if (*i == string("import")) {
      ++i;
      if (i == args.end()) {
	cerr << "expecting filename" << std::endl;
	return 1;
      }
      int r;
      if (*i == string("-")) {
        *i = "stdin";
	// Read up to 1mb if stdin specified
	r = encbl.read_fd(STDIN_FILENO, MB(1));
      } else {
	r = encbl.read_file(*i, &err);
      }
      if (r < 0) {
        cerr << "error reading " << *i << ": " << err << std::endl;
        return 1;
      }

    } else if (*i == string("export")) {
      ++i;
      if (i == args.end()) {
	cerr << "expecting filename" << std::endl;
	return 1;
      }
      int fd = ::open(*i, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0644);
      if (fd < 0) {
	cerr << "error opening " << *i << " for write: " << cpp_strerror(errno) << std::endl;
	return 1;
      }
      int r = encbl.write_fd(fd);
      if (r < 0) {
	cerr << "error writing " << *i << ": " << cpp_strerror(errno) << std::endl;
	return 1;
      }
      ::close(fd);

    } else if (*i == string("count_tests")) {
      if (!den) {
	cerr << "must first select type with 'type <name>'" << std::endl;
	return 1;
      }
      cout << den->num_generated() << std::endl;
    } else if (*i == string("select_test")) {
      if (!den) {
	cerr << "must first select type with 'type <name>'" << std::endl;
	return 1;
      }
      ++i;
      if (i == args.end()) {
	cerr << "expecting instance number" << std::endl;
	return 1;
      }
      int n = atoi(*i);
      err = den->select_generated(n);
    } else if (*i == string("is_deterministic")) {
      if (!den) {
	cerr << "must first select type with 'type <name>'" << std::endl;
	return 1;
      }
      if (den->is_deterministic())
	return 0;
      else
	return 1;
    } else {
      cerr << "unknown option '" << *i << "'" << std::endl;
      return 1;
    }
    // Any command may report a decode/selection error via `err`.
    if (err.length()) {
      cerr << "error: " << err << std::endl;
      return 1;
    }
  }
  return 0;
}
| 8,299 | 28.963899 | 106 |
cc
|
null |
ceph-main/src/tools/ceph-dencoder/ceph_time.h
|
#ifndef TEST_CEPH_TIME_H
#define TEST_CEPH_TIME_H
#include <list>
#include "include/encoding.h"
#include "common/ceph_time.h"
#include "common/Formatter.h"
// wrapper for ceph::real_time that implements the dencoder interface
// Dencoder adapter: exposes a Clock::time_point through the standard
// encode/decode/dump/generate_test_instances interface so ceph-dencoder can
// round-trip the on-wire time encoding.
template <typename Clock>
class time_point_wrapper {
  using time_point = typename Clock::time_point;
  time_point t;
public:
  time_point_wrapper() = default;
  explicit time_point_wrapper(const time_point& t) : t(t) {}
  void encode(bufferlist& bl) const {
    using ceph::encode;
    encode(t, bl);
  }
  void decode(bufferlist::const_iterator &p) {
    using ceph::decode;
    decode(t, p);
  }
  // Dumps the time as a human-readable ctime() string.
  void dump(Formatter* f) {
    auto epoch_time = Clock::to_time_t(t);
    f->dump_string("time", std::ctime(&epoch_time));
  }
  static void generate_test_instances(std::list<time_point_wrapper*>& ls) {
    constexpr time_t t{455500800}; // Ghostbusters release date
    ls.push_back(new time_point_wrapper(Clock::from_time_t(t)));
  }
};
using real_time_wrapper = time_point_wrapper<ceph::real_clock>;
WRITE_CLASS_ENCODER(real_time_wrapper)
using coarse_real_time_wrapper = time_point_wrapper<ceph::coarse_real_clock>;
WRITE_CLASS_ENCODER(coarse_real_time_wrapper)
// wrapper for ceph::timespan that implements the dencoder interface
class timespan_wrapper {
ceph::timespan d;
public:
timespan_wrapper() = default;
explicit timespan_wrapper(const ceph::timespan& d) : d(d) {}
void encode(bufferlist& bl) const {
using ceph::encode;
encode(d, bl);
}
void decode(bufferlist::const_iterator &p) {
using ceph::decode;
decode(d, p);
}
void dump(Formatter* f) {
f->dump_int("timespan", d.count());
}
static void generate_test_instances(std::list<timespan_wrapper*>& ls) {
constexpr std::chrono::seconds d{7377}; // marathon world record (2:02:57)
ls.push_back(new timespan_wrapper(d));
}
};
WRITE_CLASS_ENCODER(timespan_wrapper)
#endif
| 1,918 | 26.811594 | 78 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/common_types.cc
|
#include "acconfig.h"
#include <cstdint>
using namespace std;
#include "include/ceph_features.h"
#define TYPE(t)
#define TYPE_STRAYDATA(t)
#define TYPE_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL(t)
#define TYPE_FEATUREFUL_STRAYDATA(t)
#define TYPE_FEATUREFUL_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL_NOCOPY(t)
#define TYPE_NOCOPY(t)
#define MESSAGE(t)
#include "common_types.h"
#undef TYPE
#undef TYPE_STRAYDATA
#undef TYPE_NONDETERMINISTIC
#undef TYPE_NOCOPY
#undef TYPE_FEATUREFUL
#undef TYPE_FEATUREFUL_STRAYDATA
#undef TYPE_FEATUREFUL_NONDETERMINISTIC
#undef TYPE_FEATUREFUL_NOCOPY
#undef MESSAGE
#include "denc_plugin.h"
// Plugin entry point called by ceph-dencoder after dlopen(). Including
// common_types.h *inside the function body* expands every TYPE()/MESSAGE()
// macro (redefined in denc_plugin.h) into a plugin->emplace<...>() call,
// registering one dencoder per listed type.
DENC_API void register_dencoders(DencoderPlugin* plugin)
{
#include "common_types.h"
}
// Counterpart to register_dencoders(): releases the Dencoder instances this
// plugin created, before the host dlclose()s the module.
DENC_API void unregister_dencoders(DencoderPlugin* plugin)
{
  plugin->unregister_dencoders();
}
| 818 | 21.135135 | 58 |
cc
|
null |
ceph-main/src/tools/ceph-dencoder/common_types.h
|
#include "ceph_time.h"
TYPE(real_time_wrapper)
TYPE(coarse_real_time_wrapper)
TYPE(timespan_wrapper)
#include "include/utime.h"
TYPE(utime_t)
#include "include/uuid.h"
TYPE(uuid_d)
#include "sstring.h"
TYPE(sstring_wrapper)
#include "str.h"
TYPE(string_wrapper)
#include "include/CompatSet.h"
TYPE(CompatSet)
#include "include/filepath.h"
TYPE(filepath)
#include "include/fs_types.h"
TYPE_FEATUREFUL(file_layout_t)
#include "include/util.h"
TYPE(ceph_data_stats)
#include "common/bit_vector.hpp"
TYPE(BitVector<2>)
#include "common/bloom_filter.hpp"
TYPE(bloom_filter)
TYPE(compressible_bloom_filter)
#include "common/DecayCounter.h"
TYPE(DecayCounter)
#include "common/histogram.h"
TYPE(pow2_hist_t)
#include "common/hobject.h"
TYPE(hobject_t)
TYPE(ghobject_t)
#include "common/LogEntry.h"
TYPE_FEATUREFUL(LogEntry)
TYPE_FEATUREFUL(LogSummary)
#include "common/SloppyCRCMap.h"
TYPE(SloppyCRCMap)
#include "common/snap_types.h"
TYPE(SnapContext)
TYPE(SnapRealmInfo)
#include "msg/msg_types.h"
TYPE(entity_name_t)
TYPE_FEATUREFUL(entity_addr_t)
TYPE_FEATUREFUL(entity_addrvec_t)
TYPE_FEATUREFUL(entity_inst_t)
#include "crush/CrushWrapper.h"
TYPE_FEATUREFUL_NOCOPY(CrushWrapper)
#include "cls/cas/cls_cas_ops.h"
TYPE(cls_cas_chunk_create_or_get_ref_op)
TYPE(cls_cas_chunk_get_ref_op)
TYPE(cls_cas_chunk_put_ref_op)
#include "cls/cas/cls_cas_internal.h"
TYPE(chunk_refs_t)
#include "cls/lock/cls_lock_types.h"
TYPE(rados::cls::lock::locker_id_t)
TYPE_FEATUREFUL(rados::cls::lock::locker_info_t)
TYPE_FEATUREFUL(rados::cls::lock::lock_info_t)
#include "cls/lock/cls_lock_ops.h"
TYPE(cls_lock_lock_op)
TYPE(cls_lock_unlock_op)
TYPE(cls_lock_break_op)
TYPE(cls_lock_get_info_op)
TYPE_FEATUREFUL(cls_lock_get_info_reply)
TYPE(cls_lock_list_locks_reply)
TYPE(cls_lock_assert_op)
TYPE(cls_lock_set_cookie_op)
#include "cls/refcount/cls_refcount_ops.h"
TYPE(cls_refcount_get_op)
TYPE(cls_refcount_put_op)
TYPE(cls_refcount_set_op)
TYPE(cls_refcount_read_op)
TYPE(cls_refcount_read_ret)
TYPE(obj_refcount)
#include "cls/timeindex/cls_timeindex_types.h"
TYPE(cls_timeindex_entry)
#include "journal/Entry.h"
TYPE(journal::Entry)
// --- messages ---
#include "messages/MAuth.h"
MESSAGE(MAuth)
#include "messages/MAuthReply.h"
MESSAGE(MAuthReply)
#include "messages/MCacheExpire.h"
MESSAGE(MCacheExpire)
#include "messages/MClientCapRelease.h"
MESSAGE(MClientCapRelease)
#include "messages/MClientCaps.h"
MESSAGE(MClientCaps)
#include "messages/MClientLease.h"
MESSAGE(MClientLease)
#include "messages/MClientReconnect.h"
MESSAGE(MClientReconnect)
#include "messages/MClientReply.h"
MESSAGE(MClientReply)
#include "messages/MClientRequest.h"
MESSAGE(MClientRequest)
#include "messages/MClientRequestForward.h"
MESSAGE(MClientRequestForward)
#include "messages/MClientQuota.h"
MESSAGE(MClientQuota)
#include "messages/MClientSession.h"
MESSAGE(MClientSession)
#include "messages/MClientSnap.h"
MESSAGE(MClientSnap)
#include "messages/MCommand.h"
MESSAGE(MCommand)
#include "messages/MCommandReply.h"
MESSAGE(MCommandReply)
#include "messages/MConfig.h"
MESSAGE(MConfig)
#include "messages/MDentryLink.h"
MESSAGE(MDentryLink)
#include "messages/MDentryUnlink.h"
MESSAGE(MDentryUnlink)
#include "messages/MDirUpdate.h"
MESSAGE(MDirUpdate)
#include "messages/MDiscover.h"
MESSAGE(MDiscover)
#include "messages/MDiscoverReply.h"
MESSAGE(MDiscoverReply)
#include "messages/MExportCaps.h"
MESSAGE(MExportCaps)
#include "messages/MExportCapsAck.h"
MESSAGE(MExportCapsAck)
#include "messages/MExportDir.h"
MESSAGE(MExportDir)
#include "messages/MExportDirAck.h"
MESSAGE(MExportDirAck)
#include "messages/MExportDirCancel.h"
MESSAGE(MExportDirCancel)
#include "messages/MExportDirDiscover.h"
MESSAGE(MExportDirDiscover)
#include "messages/MExportDirDiscoverAck.h"
MESSAGE(MExportDirDiscoverAck)
#include "messages/MExportDirFinish.h"
MESSAGE(MExportDirFinish)
#include "messages/MExportDirNotify.h"
MESSAGE(MExportDirNotify)
#include "messages/MExportDirNotifyAck.h"
MESSAGE(MExportDirNotifyAck)
#include "messages/MExportDirPrep.h"
MESSAGE(MExportDirPrep)
#include "messages/MExportDirPrepAck.h"
MESSAGE(MExportDirPrepAck)
#include "messages/MForward.h"
MESSAGE(MForward)
#include "messages/MFSMap.h"
MESSAGE(MFSMap)
#include "messages/MFSMapUser.h"
MESSAGE(MFSMapUser)
#include "messages/MGatherCaps.h"
MESSAGE(MGatherCaps)
#include "messages/MGenericMessage.h"
MESSAGE(MGenericMessage)
#include "messages/MGetConfig.h"
MESSAGE(MGetConfig)
#include "messages/MGetPoolStats.h"
MESSAGE(MGetPoolStats)
#include "messages/MGetPoolStatsReply.h"
MESSAGE(MGetPoolStatsReply)
#include "messages/MHeartbeat.h"
MESSAGE(MHeartbeat)
#include "messages/MInodeFileCaps.h"
MESSAGE(MInodeFileCaps)
#include "messages/MLock.h"
MESSAGE(MLock)
#include "messages/MLog.h"
MESSAGE(MLog)
#include "messages/MLogAck.h"
MESSAGE(MLogAck)
#include "messages/MMDSOpenIno.h"
MESSAGE(MMDSOpenIno)
#include "messages/MMDSOpenInoReply.h"
MESSAGE(MMDSOpenInoReply)
#include "messages/MMDSBeacon.h"
MESSAGE(MMDSBeacon)
#include "messages/MMDSCacheRejoin.h"
MESSAGE(MMDSCacheRejoin)
#include "messages/MMDSFindIno.h"
MESSAGE(MMDSFindIno)
#include "messages/MMDSFindInoReply.h"
MESSAGE(MMDSFindInoReply)
#include "messages/MMDSFragmentNotify.h"
MESSAGE(MMDSFragmentNotify)
#include "messages/MMDSLoadTargets.h"
MESSAGE(MMDSLoadTargets)
#include "messages/MMDSMap.h"
MESSAGE(MMDSMap)
#include "messages/MMgrReport.h"
MESSAGE(MMgrReport)
#include "messages/MMDSResolve.h"
MESSAGE(MMDSResolve)
#include "messages/MMDSResolveAck.h"
MESSAGE(MMDSResolveAck)
#include "messages/MMDSPeerRequest.h"
MESSAGE(MMDSPeerRequest)
#include "messages/MMDSSnapUpdate.h"
MESSAGE(MMDSSnapUpdate)
#include "messages/MMDSTableRequest.h"
MESSAGE(MMDSTableRequest)
#include "messages/MMgrClose.h"
MESSAGE(MMgrClose)
#include "messages/MMgrConfigure.h"
MESSAGE(MMgrConfigure)
#include "messages/MMgrDigest.h"
MESSAGE(MMgrDigest)
#include "messages/MMgrMap.h"
MESSAGE(MMgrMap)
#include "messages/MMgrOpen.h"
MESSAGE(MMgrOpen)
#include "messages/MMonCommand.h"
MESSAGE(MMonCommand)
#include "messages/MMonCommandAck.h"
MESSAGE(MMonCommandAck)
#include "messages/MMonElection.h"
MESSAGE(MMonElection)
#include "messages/MMonGetMap.h"
MESSAGE(MMonGetMap)
#include "messages/MMonGetVersion.h"
MESSAGE(MMonGetVersion)
#include "messages/MMonGetVersionReply.h"
MESSAGE(MMonGetVersionReply)
#include "messages/MMonGlobalID.h"
MESSAGE(MMonGlobalID)
#include "messages/MMonJoin.h"
MESSAGE(MMonJoin)
#include "messages/MMonMap.h"
MESSAGE(MMonMap)
#include "messages/MMonPaxos.h"
MESSAGE(MMonPaxos)
#include "messages/MMonProbe.h"
MESSAGE(MMonProbe)
#include "messages/MMonScrub.h"
MESSAGE(MMonScrub)
#include "messages/MMonSync.h"
MESSAGE(MMonSync)
#include "messages/MMonSubscribe.h"
MESSAGE(MMonSubscribe)
#include "messages/MMonSubscribeAck.h"
MESSAGE(MMonSubscribeAck)
#include "messages/MOSDAlive.h"
MESSAGE(MOSDAlive)
#include "messages/MOSDBoot.h"
MESSAGE(MOSDBoot)
#include "messages/MOSDFailure.h"
MESSAGE(MOSDFailure)
#include "messages/MOSDMap.h"
MESSAGE(MOSDMap)
#include "messages/MOSDOp.h"
MESSAGE(MOSDOp)
#include "messages/MOSDOpReply.h"
MESSAGE(MOSDOpReply)
#include "messages/MOSDPGBackfill.h"
MESSAGE(MOSDPGBackfill)
#include "messages/MOSDPGCreate2.h"
MESSAGE(MOSDPGCreate2)
#include "messages/MOSDPGInfo.h"
MESSAGE(MOSDPGInfo)
#include "messages/MOSDPGLog.h"
MESSAGE(MOSDPGLog)
#include "messages/MOSDPGNotify.h"
MESSAGE(MOSDPGNotify)
#include "messages/MOSDPGQuery.h"
MESSAGE(MOSDPGQuery)
#include "messages/MOSDPGRemove.h"
MESSAGE(MOSDPGRemove)
#include "messages/MOSDPGRecoveryDelete.h"
MESSAGE(MOSDPGRecoveryDelete)
#include "messages/MOSDPGRecoveryDeleteReply.h"
MESSAGE(MOSDPGRecoveryDeleteReply)
#include "messages/MOSDPGScan.h"
MESSAGE(MOSDPGScan)
#include "messages/MOSDPGTemp.h"
MESSAGE(MOSDPGTemp)
#include "messages/MOSDPGTrim.h"
MESSAGE(MOSDPGTrim)
#include "messages/MOSDPing.h"
MESSAGE(MOSDPing)
#include "messages/MOSDRepScrub.h"
MESSAGE(MOSDRepScrub)
#include "messages/MOSDScrub2.h"
MESSAGE(MOSDScrub2)
#include "messages/MOSDForceRecovery.h"
MESSAGE(MOSDForceRecovery)
#include "messages/MPGStats.h"
MESSAGE(MPGStats)
#include "messages/MPGStatsAck.h"
MESSAGE(MPGStatsAck)
#include "messages/MPing.h"
MESSAGE(MPing)
#include "messages/MPoolOp.h"
MESSAGE(MPoolOp)
#include "messages/MPoolOpReply.h"
MESSAGE(MPoolOpReply)
#include "messages/MRemoveSnaps.h"
MESSAGE(MRemoveSnaps)
#include "messages/MRoute.h"
MESSAGE(MRoute)
#include "messages/MServiceMap.h"
MESSAGE(MServiceMap)
#include "messages/MStatfs.h"
MESSAGE(MStatfs)
#include "messages/MStatfsReply.h"
MESSAGE(MStatfsReply)
#include "messages/MTimeCheck.h"
MESSAGE(MTimeCheck)
#include "messages/MTimeCheck2.h"
MESSAGE(MTimeCheck2)
#include "messages/MWatchNotify.h"
MESSAGE(MWatchNotify)
#include "messages/MMgrUpdate.h"
MESSAGE(MMgrUpdate)
| 8,889 | 18.538462 | 48 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/denc_plugin.h
|
#include <dlfcn.h>
#include <filesystem>
#include <vector>
#include "denc_registry.h"
namespace fs = std::filesystem;
// RAII wrapper around a dlopen()ed dencoder module. Owns the dlopen handle
// and the Dencoder instances the module registers into it. Move-only by
// construction (declaring the move ctor suppresses copies).
class DencoderPlugin {
  using dencoders_t = std::vector<std::pair<std::string, Dencoder*>>;
public:
  // Loads the shared object; on failure good() will return false.
  DencoderPlugin(const fs::path& path) {
    mod = dlopen(path.c_str(), RTLD_NOW);
    if (mod == nullptr) {
      std::cerr << "failed to dlopen(" << path << "): " << dlerror() << std::endl;
    }
  }
  // Transfers ownership of the handle and registered dencoders.
  DencoderPlugin(DencoderPlugin&& other)
    : mod{other.mod},
      dencoders{std::move(other.dencoders)}
  {
    other.mod = nullptr;
    other.dencoders.clear();
  }
  ~DencoderPlugin() {
#if !defined(__FreeBSD__)
    // NOTE(review): dlclose is deliberately skipped on FreeBSD -- presumably
    // to work around unload issues there; confirm before changing.
    if (mod) {
      dlclose(mod);
    }
#endif
  }
  // Resolves and invokes the module's "register_dencoders" entry point,
  // which fills `dencoders` via emplace() below. Returns the (possibly
  // empty) registration list.
  const dencoders_t& register_dencoders() {
    static constexpr std::string_view REGISTER_DENCODERS_FUNCTION = "register_dencoders\0";
    assert(mod);
    using register_dencoders_t = void (*)(DencoderPlugin*);
    const auto do_register =
      reinterpret_cast<register_dencoders_t>(dlsym(mod, REGISTER_DENCODERS_FUNCTION.data()));
    if (do_register == nullptr) {
      std::cerr << "failed to dlsym(" << REGISTER_DENCODERS_FUNCTION << "): "
                << dlerror() << std::endl;
      return dencoders;
    }
    do_register(this);
    return dencoders;
  }

  // True when the underlying dlopen() succeeded.
  bool good() const {
    return mod != nullptr;
  }

  // Frees all Dencoder instances created via emplace().
  void unregister_dencoders() {
    while (!dencoders.empty()) {
      delete dencoders.back().second;
      dencoders.pop_back();
    }
  }
  // Called (via the TYPE/MESSAGE macros) by the module to register one
  // named dencoder; the plugin takes ownership of the new instance.
  template<typename DencoderT, typename...Args>
  void emplace(const char* name, Args&&...args) {
    dencoders.emplace_back(name, new DencoderT(std::forward<Args>(args)...));
  }

private:
  void *mod = nullptr;     // dlopen handle, nullptr on load failure
  dencoders_t dencoders;   // owned (name, dencoder) registrations
};
// X-macro hooks consumed by the *_types.h listings.  Each macro turns
// one listed type 't' into a Dencoder registration on 'plugin'; the
// two bools forwarded are (stray_okay, nondeterministic).
#define TYPE(t) plugin->emplace<DencoderImplNoFeature<t>>(#t, false, false);
#define TYPE_STRAYDATA(t) plugin->emplace<DencoderImplNoFeature<t>>(#t, true, false);
#define TYPE_NONDETERMINISTIC(t) plugin->emplace<DencoderImplNoFeature<t>>(#t, false, true);
#define TYPE_FEATUREFUL(t) plugin->emplace<DencoderImplFeatureful<t>>(#t, false, false);
#define TYPE_FEATUREFUL_STRAYDATA(t) plugin->emplace<DencoderImplFeatureful<t>>(#t, true, false);
#define TYPE_FEATUREFUL_NONDETERMINISTIC(t) plugin->emplace<DencoderImplFeatureful<t>>(#t, false, true);
#define TYPE_FEATUREFUL_NOCOPY(t) plugin->emplace<DencoderImplFeaturefulNoCopy<t>>(#t, false, false);
#define TYPE_NOCOPY(t) plugin->emplace<DencoderImplNoFeatureNoCopy<t>>(#t, false, false);
#define MESSAGE(t) plugin->emplace<MessageDencoderImpl<t>>(#t);
// Plugin entry points must have C linkage (unmangled names for dlsym)
// and default visibility even in hidden-visibility builds.
#define DENC_API extern "C" [[gnu::visibility("default")]]
| 2,566 | 31.493671 | 104 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/denc_registry.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <string>
#include <string_view>
#include "include/buffer_fwd.h"
#include "msg/Message.h"
namespace ceph {
class Formatter;
}
// Abstract interface for one encodable/decodable type as exercised by
// the ceph-dencoder tool.
struct Dencoder {
  virtual ~Dencoder() {}
  // Decode one object from bl starting at byte offset 'seek'; returns
  // an error message on failure, or the empty string on success.
  virtual std::string decode(bufferlist bl, uint64_t seek) = 0;
  // Encode the current object into 'out' using the given feature bits.
  virtual void encode(bufferlist& out, uint64_t features) = 0;
  virtual void dump(ceph::Formatter *f) = 0;
  // Copy via assignment / copy-construction; the defaults just report
  // that the operation is unsupported for this type.
  virtual void copy() {
    std::cerr << "copy operator= not supported" << std::endl;
  }
  virtual void copy_ctor() {
    std::cerr << "copy ctor not supported" << std::endl;
  }
  // Populate the internal list of generated test instances.
  virtual void generate() = 0;
  virtual int num_generated() = 0;
  // Make the n-th generated instance current; returns an error string
  // when n is out of range.
  virtual std::string select_generated(unsigned n) = 0;
  // False when the encoding is not byte-stable across runs.
  virtual bool is_deterministic() = 0;
  // Peek at the leading struct_v byte of an encoded blob.
  unsigned get_struct_v(bufferlist bl, uint64_t seek) const {
    auto p = bl.cbegin(seek);
    uint8_t struct_v = 0;
    ceph::decode(struct_v, p);
    return struct_v;
  }
  //virtual void print(ostream& out) = 0;
};
// Shared plumbing for non-message dencoders: owns the current object
// and the list of instances produced by generate().
template<class T>
class DencoderBase : public Dencoder {
protected:
  T* m_object;           // object currently decoded into / encoded from
  std::list<T*> m_list;  // instances from T::generate_test_instances()
  bool stray_okay;       // tolerate trailing bytes after decode
  bool nondeterministic; // encoding not byte-stable across runs
public:
  DencoderBase(bool stray_okay, bool nondeterministic)
    : m_object(new T),
      stray_okay(stray_okay),
      nondeterministic(nondeterministic) {}
  ~DencoderBase() override {
    // NOTE(review): only m_object is freed here; entries remaining in
    // m_list are leaked — presumably acceptable for a short-lived CLI
    // tool, but verify before reusing this class elsewhere.
    delete m_object;
  }
  // Decode one T from bl at offset 'seek'; empty string on success,
  // else an error message (including a stray-data check when
  // stray_okay is false).
  std::string decode(bufferlist bl, uint64_t seek) override {
    auto p = bl.cbegin();
    p.seek(seek);
    try {
      using ceph::decode;
      decode(*m_object, p);
    }
    catch (buffer::error& e) {
      return e.what();
    }
    if (!stray_okay && !p.end()) {
      std::ostringstream ss;
      ss << "stray data at end of buffer, offset " << p.get_off();
      return ss.str();
    }
    return {};
  }
  // Encoding depends on whether T takes feature bits; see subclasses.
  void encode(bufferlist& out, uint64_t features) override = 0;
  void dump(ceph::Formatter *f) override {
    m_object->dump(f);
  }
  void generate() override {
    T::generate_test_instances(m_list);
  }
  int num_generated() override {
    return m_list.size();
  }
  // Make generated instance i current; ids are 1-based, and 0 wraps
  // around to the last generated instance.
  std::string select_generated(unsigned i) override {
    // allow 0- or 1-based (by wrapping)
    if (i == 0)
      i = m_list.size();
    if ((i == 0) || (i > m_list.size()))
      return "invalid id for generated object";
    // NOTE(review): the previous m_object is not deleted here and the
    // selected pointer remains in m_list as well, so the destructor
    // frees a list element while the original object leaks — confirm
    // this ownership hand-off is intentional.
    m_object = *(std::next(m_list.begin(), i-1));
    return {};
  }
  bool is_deterministic() override {
    return !nondeterministic;
  }
};
// Dencoder for types whose encode() takes no feature bits and that do
// not support copy operations; the 'features' argument is ignored.
template<class T>
class DencoderImplNoFeatureNoCopy : public DencoderBase<T> {
public:
  DencoderImplNoFeatureNoCopy(bool allow_stray, bool nondet)
    : DencoderBase<T>(allow_stray, nondet) {}
  void encode(bufferlist& out, uint64_t features) override {
    out.clear();  // never append to stale buffer contents
    using ceph::encode;  // pick the featureless overload via ADL
    encode(*(this->m_object), out);
  }
};
// Featureless dencoder that additionally supports copy()/copy_ctor().
template<class T>
class DencoderImplNoFeature : public DencoderImplNoFeatureNoCopy<T> {
public:
  DencoderImplNoFeature(bool allow_stray, bool nondet)
    : DencoderImplNoFeatureNoCopy<T>(allow_stray, nondet) {}
  // Rebuild the held object through T's assignment operator.
  void copy() override {
    T *dup = new T;
    *dup = *this->m_object;
    delete this->m_object;
    this->m_object = dup;
  }
  // Rebuild the held object through T's copy constructor.
  void copy_ctor() override {
    T *dup = new T(*this->m_object);
    delete this->m_object;
    this->m_object = dup;
  }
};
// Dencoder for types whose encode() consumes the feature bits; copy
// operations are not supported.
template<class T>
class DencoderImplFeaturefulNoCopy : public DencoderBase<T> {
public:
  DencoderImplFeaturefulNoCopy(bool allow_stray, bool nondet)
    : DencoderBase<T>(allow_stray, nondet) {}
  void encode(bufferlist& out, uint64_t features) override {
    out.clear();  // never append to stale buffer contents
    using ceph::encode;  // pick the featureful overload via ADL
    encode(*this->m_object, out, features);
  }
};
// Featureful dencoder that additionally supports copy()/copy_ctor().
template<class T>
class DencoderImplFeatureful : public DencoderImplFeaturefulNoCopy<T> {
public:
  DencoderImplFeatureful(bool allow_stray, bool nondet)
    : DencoderImplFeaturefulNoCopy<T>(allow_stray, nondet) {}
  // Rebuild the held object through T's assignment operator.
  void copy() override {
    T *dup = new T;
    *dup = *this->m_object;
    delete this->m_object;
    this->m_object = dup;
  }
  // Rebuild the held object through T's copy constructor.
  void copy_ctor() override {
    T *dup = new T(*this->m_object);
    delete this->m_object;
    this->m_object = dup;
  }
};
// Dencoder for Message subclasses: round-trips through the generic
// message framing (decode_message/encode_message) rather than the
// type's own encode/decode methods.
template<class T>
class MessageDencoderImpl : public Dencoder {
  ref_t<T> m_object;           // current instance, intrusively ref-counted
  std::list<ref_t<T>> m_list;  // generated instances (never filled; see generate())
public:
  MessageDencoderImpl() : m_object{make_message<T>()} {}
  ~MessageDencoderImpl() override {}
  // Decode a whole framed message from bl at offset 'seek'; returns an
  // error message on buffer::error, empty string on success.
  std::string decode(bufferlist bl, uint64_t seek) override {
    auto p = bl.cbegin();
    p.seek(seek);
    try {
      // 'false': adopt the reference decode_message already holds
      // instead of taking an extra one.
      ref_t<Message> n(decode_message(g_ceph_context, 0, p), false);
      if (!n)
	throw std::runtime_error("failed to decode");
      if (n->get_type() != m_object->get_type()) {
	std::stringstream ss;
	ss << "decoded type " << n->get_type() << " instead of expected " << m_object->get_type();
	throw std::runtime_error(ss.str());
      }
      // NOTE(review): the runtime_errors thrown above are not caught by
      // the buffer::error handler below and so propagate to the caller
      // — confirm that is the intended failure mode.
      m_object = ref_cast<T>(n);
    }
    catch (buffer::error& e) {
      return e.what();
    }
    if (!p.end()) {
      std::ostringstream ss;
      ss << "stray data at end of buffer, offset " << p.get_off();
      return ss.str();
    }
    return {};
  }
  void encode(bufferlist& out, uint64_t features) override {
    out.clear();
    encode_message(m_object.get(), features, out);
  }
  void dump(ceph::Formatter *f) override {
    m_object->dump(f);
  }
  // Intentionally a no-op: message types have no test-instance
  // generator wired up here.
  void generate() override {
    //T::generate_test_instances(m_list);
  }
  int num_generated() override {
    return m_list.size();
  }
  // Make generated instance i current; ids are 1-based, and 0 wraps
  // around to the last instance (always out of range while m_list is
  // empty, so this currently returns the error string).
  std::string select_generated(unsigned i) override {
    // allow 0- or 1-based (by wrapping)
    if (i == 0)
      i = m_list.size();
    if ((i == 0) || (i > m_list.size()))
      return "invalid id for generated object";
    m_object = *(std::next(m_list.begin(), i-1));
    return {};
  }
  bool is_deterministic() override {
    return true;
  }
  //void print(ostream& out) {
  //out << m_object << std::endl;
  //}
};
// Process-wide table mapping a type name to its Dencoder instance.
class DencoderRegistry
{
  using dencoders_t = std::map<std::string_view, Dencoder*>;
public:
  // Expose the whole name -> dencoder table.
  dencoders_t& get() {
    return dencoders;
  }
  // Record a dencoder under its type name.  The first registration of
  // a given name wins; later duplicates are ignored.
  void register_dencoder(std::string_view name, Dencoder* denc) {
    dencoders.insert(std::make_pair(name, denc));
  }
private:
  dencoders_t dencoders;
};
| 6,138 | 24.367769 | 91 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/mds_types.cc
|
#include "acconfig.h"
#include <cstdint>
using namespace std;
#include "include/ceph_features.h"
#define TYPE(t)
#define TYPE_STRAYDATA(t)
#define TYPE_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL(t)
#define TYPE_FEATUREFUL_STRAYDATA(t)
#define TYPE_FEATUREFUL_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL_NOCOPY(t)
#define TYPE_NOCOPY(t)
#define MESSAGE(t)
#include "mds_types.h"
#undef TYPE
#undef TYPE_STRAYDATA
#undef TYPE_NONDETERMINISTIC
#undef TYPE_NOCOPY
#undef TYPE_FEATUREFUL
#undef TYPE_FEATUREFUL_STRAYDATA
#undef TYPE_FEATUREFUL_NONDETERMINISTIC
#undef TYPE_FEATUREFUL_NOCOPY
#undef MESSAGE
#include "denc_plugin.h"
// Expand mds_types.h a second time, now with denc_plugin.h's hooks
// live, so each TYPE()/MESSAGE() entry becomes a plugin->emplace<...>()
// registration call inside this function body.
DENC_API void register_dencoders(DencoderPlugin* plugin)
{
#include "mds_types.h"
}
// Destroy every Dencoder this plugin registered.
DENC_API void unregister_dencoders(DencoderPlugin* plugin)
{
  plugin->unregister_dencoders();
}
| 812 | 20.972973 | 58 |
cc
|
null |
ceph-main/src/tools/ceph-dencoder/mds_types.h
|
#ifdef WITH_CEPHFS
#include "mds/JournalPointer.h"
TYPE(JournalPointer)
#include "osdc/Journaler.h"
TYPE(Journaler::Header)
#include "mds/snap.h"
TYPE(SnapInfo)
TYPE(snaplink_t)
TYPE(sr_t)
#include "mds/mdstypes.h"
#include "include/cephfs/types.h"
TYPE(frag_info_t)
TYPE(nest_info_t)
TYPE(quota_info_t)
TYPE(client_writeable_range_t)
TYPE_FEATUREFUL(inode_t<std::allocator>)
TYPE_FEATUREFUL(old_inode_t<std::allocator>)
TYPE(fnode_t)
TYPE(old_rstat_t)
TYPE_FEATUREFUL(session_info_t)
TYPE(string_snap_t)
TYPE(MDSCacheObjectInfo)
TYPE(mds_table_pending_t)
TYPE(cap_reconnect_t)
TYPE(inode_load_vec_t)
TYPE(dirfrag_load_vec_t)
TYPE(mds_load_t)
TYPE(MDSCacheObjectInfo)
TYPE(inode_backtrace_t)
TYPE(inode_backpointer_t)
#include "mds/CInode.h"
TYPE_FEATUREFUL(InodeStore)
TYPE_FEATUREFUL(InodeStoreBare)
#include "mds/MDSMap.h"
TYPE_FEATUREFUL(MDSMap)
TYPE_FEATUREFUL(MDSMap::mds_info_t)
#include "mds/FSMap.h"
//TYPE_FEATUREFUL(Filesystem)
TYPE_FEATUREFUL(FSMap)
#include "mds/Capability.h"
TYPE_NOCOPY(Capability)
#include "mds/inode_backtrace.h"
TYPE(inode_backpointer_t)
TYPE(inode_backtrace_t)
#include "mds/InoTable.h"
TYPE(InoTable)
#include "mds/SnapServer.h"
TYPE_STRAYDATA(SnapServer)
#include "mds/events/ECommitted.h"
TYPE_FEATUREFUL_NOCOPY(ECommitted)
#include "mds/events/EExport.h"
TYPE_FEATUREFUL_NOCOPY(EExport)
#include "mds/events/EFragment.h"
TYPE_FEATUREFUL_NOCOPY(EFragment)
#include "mds/events/EImportFinish.h"
TYPE_FEATUREFUL_NOCOPY(EImportFinish)
#include "mds/events/EImportStart.h"
TYPE_FEATUREFUL_NOCOPY(EImportStart)
#include "mds/events/EMetaBlob.h"
TYPE_FEATUREFUL_NOCOPY(EMetaBlob::fullbit)
TYPE(EMetaBlob::remotebit)
TYPE(EMetaBlob::nullbit)
TYPE_FEATUREFUL_NOCOPY(EMetaBlob::dirlump)
TYPE_FEATUREFUL_NOCOPY(EMetaBlob)
#include "mds/events/EOpen.h"
TYPE_FEATUREFUL_NOCOPY(EOpen)
#include "mds/events/EResetJournal.h"
TYPE_FEATUREFUL_NOCOPY(EResetJournal)
#include "mds/events/ESession.h"
TYPE_FEATUREFUL_NOCOPY(ESession)
#include "mds/events/ESessions.h"
TYPE_FEATUREFUL_NOCOPY(ESessions)
#include "mds/events/EPeerUpdate.h"
TYPE(link_rollback)
TYPE(rmdir_rollback)
TYPE(rename_rollback::drec)
TYPE(rename_rollback)
TYPE_FEATUREFUL_NOCOPY(EPeerUpdate)
#include "mds/events/ESubtreeMap.h"
TYPE_FEATUREFUL_NOCOPY(ESubtreeMap)
#include "mds/events/ETableClient.h"
TYPE_FEATUREFUL_NOCOPY(ETableClient)
#include "mds/events/ETableServer.h"
TYPE_FEATUREFUL_NOCOPY(ETableServer)
#include "mds/events/EUpdate.h"
TYPE_FEATUREFUL_NOCOPY(EUpdate)
#endif // WITH_CEPHFS
| 2,515 | 21.265487 | 44 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/osd_types.cc
|
#include "acconfig.h"
#include <cstdint>
using namespace std;
#include "include/ceph_features.h"
#define TYPE(t)
#define TYPE_STRAYDATA(t)
#define TYPE_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL(t)
#define TYPE_FEATUREFUL_STRAYDATA(t)
#define TYPE_FEATUREFUL_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL_NOCOPY(t)
#define TYPE_NOCOPY(t)
#define MESSAGE(t)
#include "osd_types.h"
#undef TYPE
#undef TYPE_STRAYDATA
#undef TYPE_NONDETERMINISTIC
#undef TYPE_NOCOPY
#undef TYPE_FEATUREFUL
#undef TYPE_FEATUREFUL_STRAYDATA
#undef TYPE_FEATUREFUL_NONDETERMINISTIC
#undef TYPE_FEATUREFUL_NOCOPY
#undef MESSAGE
#include "denc_plugin.h"
// cannot initialize dencoders when initializing static variables, as some of
// the types are allocated using mempool, and the mempools are initialized as
// static variables.
// Each TYPE()/MESSAGE() entry in osd_types.h expands to a
// plugin->emplace<...>() registration call inside this function body.
DENC_API void register_dencoders(DencoderPlugin* plugin)
{
#include "osd_types.h"
}
// Destroy every Dencoder this plugin registered.
DENC_API void unregister_dencoders(DencoderPlugin* plugin)
{
  plugin->unregister_dencoders();
}
| 989 | 23.75 | 77 |
cc
|
null |
ceph-main/src/tools/ceph-dencoder/osd_types.h
|
#include "osd/OSDMap.h"
TYPE(osd_info_t)
TYPE_FEATUREFUL(osd_xinfo_t)
TYPE_FEATUREFUL_NOCOPY(OSDMap)
TYPE_FEATUREFUL_STRAYDATA(OSDMap::Incremental)
#include "osd/osd_types.h"
TYPE(osd_reqid_t)
TYPE(object_locator_t)
TYPE(request_redirect_t)
TYPE(pg_t)
TYPE(coll_t)
TYPE_FEATUREFUL(objectstore_perf_stat_t)
TYPE_FEATUREFUL(osd_stat_t)
TYPE(OSDSuperblock)
TYPE_FEATUREFUL(pool_snap_info_t)
TYPE_FEATUREFUL(pg_pool_t)
TYPE(object_stat_sum_t)
TYPE(object_stat_collection_t)
TYPE(pg_stat_t)
TYPE_FEATUREFUL(pool_stat_t)
TYPE(pg_hit_set_info_t)
TYPE(pg_hit_set_history_t)
TYPE(pg_history_t)
TYPE(pg_info_t)
TYPE(PastIntervals)
TYPE_FEATUREFUL(pg_query_t)
TYPE(ObjectModDesc)
TYPE(pg_log_entry_t)
TYPE(pg_log_dup_t)
TYPE(pg_log_t)
TYPE_FEATUREFUL(pg_missing_item)
TYPE_FEATUREFUL(pg_missing_t)
TYPE(pg_nls_response_t)
TYPE(pg_ls_response_t)
TYPE(object_copy_cursor_t)
TYPE_FEATUREFUL(object_copy_data_t)
TYPE(pg_create_t)
TYPE(OSDSuperblock)
TYPE(SnapSet)
TYPE_FEATUREFUL(watch_info_t)
TYPE_FEATUREFUL(watch_item_t)
TYPE(object_manifest_t)
TYPE_FEATUREFUL(object_info_t)
TYPE(SnapSet)
TYPE_FEATUREFUL(ObjectRecoveryInfo)
TYPE(ObjectRecoveryProgress)
TYPE(PushReplyOp)
TYPE_FEATUREFUL(PullOp)
TYPE_FEATUREFUL(PushOp)
TYPE(ScrubMap::object)
TYPE(ScrubMap)
TYPE_FEATUREFUL(obj_list_watch_response_t)
TYPE(clone_info)
TYPE(obj_list_snap_response_t)
TYPE(pool_pg_num_history_t)
#include "osd/ECUtil.h"
// TYPE(stripe_info_t) non-standard encoding/decoding functions
TYPE(ECUtil::HashInfo)
#include "osd/ECMsgTypes.h"
TYPE_NOCOPY(ECSubWrite)
TYPE(ECSubWriteReply)
TYPE_FEATUREFUL(ECSubRead)
TYPE(ECSubReadReply)
#include "osd/HitSet.h"
TYPE_NONDETERMINISTIC(ExplicitHashHitSet)
TYPE_NONDETERMINISTIC(ExplicitObjectHitSet)
TYPE(BloomHitSet)
TYPE_NONDETERMINISTIC(HitSet) // because some subclasses are
TYPE(HitSet::Params)
#include "os/ObjectStore.h"
TYPE(ObjectStore::Transaction)
#include "os/SequencerPosition.h"
TYPE(SequencerPosition)
#ifdef WITH_BLUESTORE
#include "os/bluestore/bluestore_types.h"
TYPE(bluestore_bdev_label_t)
TYPE(bluestore_cnode_t)
TYPE(bluestore_compression_header_t)
TYPE(bluestore_extent_ref_map_t)
TYPE(bluestore_pextent_t)
TYPE(bluestore_blob_use_tracker_t)
// TODO: bluestore_blob_t repurposes the "feature" param of encode() for its
// struct_v. at a higher level, BlueStore::ExtentMap encodes the extends using
// a different interface than the normal ones. see
// BlueStore::ExtentMap::encode_some(). maybe we can test it using another
// approach.
// TYPE_FEATUREFUL(bluestore_blob_t)
// TYPE(bluestore_shared_blob_t) there is no encode here
TYPE(bluestore_onode_t)
TYPE(bluestore_deferred_op_t)
TYPE(bluestore_deferred_transaction_t)
// TYPE(bluestore_compression_header_t) there is no encode here
#include "os/bluestore/bluefs_types.h"
TYPE(bluefs_extent_t)
TYPE(bluefs_fnode_t)
TYPE(bluefs_super_t)
TYPE(bluefs_transaction_t)
#endif
#include "mon/AuthMonitor.h"
TYPE_FEATUREFUL(AuthMonitor::Incremental)
#include "mon/PGMap.h"
TYPE_FEATUREFUL_NONDETERMINISTIC(PGMapDigest)
TYPE_FEATUREFUL_NONDETERMINISTIC(PGMap)
#include "mon/MonitorDBStore.h"
TYPE(MonitorDBStore::Transaction)
TYPE(MonitorDBStore::Op)
#include "mon/MonMap.h"
TYPE_FEATUREFUL(MonMap)
#include "mon/MonCap.h"
TYPE(MonCap)
#include "mon/MgrMap.h"
TYPE_FEATUREFUL(MgrMap)
#include "mon/mon_types.h"
TYPE(MonitorDBStoreStats)
TYPE(ScrubResult)
#include "mon/CreatingPGs.h"
TYPE_FEATUREFUL(creating_pgs_t)
#include "mgr/ServiceMap.h"
TYPE_FEATUREFUL(ServiceMap)
TYPE_FEATUREFUL(ServiceMap::Service)
TYPE_FEATUREFUL(ServiceMap::Daemon)
#include "mon/ConnectionTracker.h"
TYPE(ConnectionReport);
TYPE(ConnectionTracker);
#include "os/DBObjectMap.h"
TYPE(DBObjectMap::_Header)
TYPE(DBObjectMap::State)
#include "os/kstore/kstore_types.h"
TYPE(kstore_cnode_t)
TYPE(kstore_onode_t)
| 3,786 | 24.07947 | 78 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/rbd_types.cc
|
#include "acconfig.h"
#include <cstdint>
using namespace std;
#include "include/ceph_features.h"
#define TYPE(t)
#define TYPE_STRAYDATA(t)
#define TYPE_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL(t)
#define TYPE_FEATUREFUL_STRAYDATA(t)
#define TYPE_FEATUREFUL_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL_NOCOPY(t)
#define TYPE_NOCOPY(t)
#define MESSAGE(t)
#include "rbd_types.h"
#undef TYPE
#undef TYPE_STRAYDATA
#undef TYPE_NONDETERMINISTIC
#undef TYPE_NOCOPY
#undef TYPE_FEATUREFUL
#undef TYPE_FEATUREFUL_STRAYDATA
#undef TYPE_FEATUREFUL_NONDETERMINISTIC
#undef TYPE_FEATUREFUL_NOCOPY
#undef MESSAGE
#include "denc_plugin.h"
// Expand rbd_types.h with denc_plugin.h's hooks live, so each
// TYPE()/MESSAGE() entry becomes a plugin->emplace<...>() registration
// call inside this function body.
DENC_API void register_dencoders(DencoderPlugin* plugin)
{
#include "rbd_types.h"
}
// Destroy every Dencoder this plugin registered.
DENC_API void unregister_dencoders(DencoderPlugin* plugin)
{
  plugin->unregister_dencoders();
}
| 812 | 20.972973 | 58 |
cc
|
null |
ceph-main/src/tools/ceph-dencoder/rbd_types.h
|
#ifdef WITH_RBD
#include "librbd/journal/Types.h"
TYPE(librbd::journal::EventEntry)
TYPE(librbd::journal::ClientData)
TYPE(librbd::journal::TagData)
#include "librbd/mirroring_watcher/Types.h"
TYPE(librbd::mirroring_watcher::NotifyMessage)
#include "librbd/trash_watcher/Types.h"
TYPE(librbd::trash_watcher::NotifyMessage)
#include "librbd/WatchNotifyTypes.h"
TYPE_NOCOPY(librbd::watch_notify::NotifyMessage)
TYPE(librbd::watch_notify::ResponseMessage)
#include "rbd_replay/ActionTypes.h"
TYPE(rbd_replay::action::Dependency)
TYPE(rbd_replay::action::ActionEntry)
#include "tools/rbd_mirror/image_map/Types.h"
TYPE(rbd::mirror::image_map::PolicyData)
#endif
#if defined(WITH_RBD) && defined(WITH_RBD_SSD_CACHE)
#include "librbd/cache/pwl/Types.h"
#include "librbd/cache/pwl/ssd/Types.h"
TYPE(librbd::cache::pwl::WriteLogCacheEntry)
TYPE(librbd::cache::pwl::WriteLogPoolRoot)
TYPE(librbd::cache::pwl::ssd::SuperBlock)
#endif
#ifdef WITH_RBD
#include "cls/rbd/cls_rbd.h"
TYPE_FEATUREFUL(cls_rbd_parent)
TYPE_FEATUREFUL(cls_rbd_snap)
#include "cls/rbd/cls_rbd_types.h"
TYPE(cls::rbd::ParentImageSpec)
TYPE(cls::rbd::ChildImageSpec)
TYPE(cls::rbd::MigrationSpec)
TYPE(cls::rbd::MirrorPeer)
TYPE(cls::rbd::MirrorImage)
TYPE(cls::rbd::MirrorImageMap)
TYPE(cls::rbd::MirrorImageStatus)
TYPE(cls::rbd::MirrorImageSiteStatus)
TYPE_FEATUREFUL(cls::rbd::MirrorImageSiteStatusOnDisk)
TYPE(cls::rbd::GroupImageSpec)
TYPE(cls::rbd::GroupImageStatus)
TYPE(cls::rbd::GroupSnapshot)
TYPE(cls::rbd::GroupSpec)
TYPE(cls::rbd::ImageSnapshotSpec)
TYPE(cls::rbd::SnapshotInfo)
TYPE(cls::rbd::SnapshotNamespace)
#endif
| 1,601 | 29.226415 | 54 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/rgw_types.cc
|
#include "acconfig.h"
#include <cstdint>
using namespace std;
#include "include/ceph_features.h"
#define TYPE(t)
#define TYPE_STRAYDATA(t)
#define TYPE_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL(t)
#define TYPE_FEATUREFUL_STRAYDATA(t)
#define TYPE_FEATUREFUL_NONDETERMINISTIC(t)
#define TYPE_FEATUREFUL_NOCOPY(t)
#define TYPE_NOCOPY(t)
#define MESSAGE(t)
#include "rgw_types.h"
#undef TYPE
#undef TYPE_STRAYDATA
#undef TYPE_NONDETERMINISTIC
#undef TYPE_NOCOPY
#undef TYPE_FEATUREFUL
#undef TYPE_FEATUREFUL_STRAYDATA
#undef TYPE_FEATUREFUL_NONDETERMINISTIC
#undef TYPE_FEATUREFUL_NOCOPY
#undef MESSAGE
#include "denc_plugin.h"
// Expand rgw_types.h with denc_plugin.h's hooks live, so each
// TYPE()/MESSAGE() entry becomes a plugin->emplace<...>() registration
// call inside this function body.
DENC_API void register_dencoders(DencoderPlugin* plugin)
{
#include "rgw_types.h"
}
// Destroy every Dencoder this plugin registered.
DENC_API void unregister_dencoders(DencoderPlugin* plugin)
{
  plugin->unregister_dencoders();
}
| 812 | 20.972973 | 58 |
cc
|
null |
ceph-main/src/tools/ceph-dencoder/rgw_types.h
|
#ifdef WITH_RADOSGW
#include "rgw_rados.h"
TYPE(RGWOLHInfo)
TYPE(RGWObjManifestPart)
TYPE(RGWObjManifest)
TYPE(objexp_hint_entry)
#include "rgw_zone.h"
TYPE(RGWZoneParams)
TYPE(RGWZone)
TYPE(RGWZoneGroup)
TYPE(RGWRealm)
TYPE(RGWPeriod)
TYPE(RGWPeriodLatestEpochInfo)
#include "rgw_acl.h"
TYPE(ACLPermission)
TYPE(ACLGranteeType)
TYPE(ACLGrant)
TYPE(RGWAccessControlList)
TYPE(ACLOwner)
TYPE(RGWAccessControlPolicy)
#include "rgw_cache.h"
TYPE(ObjectMetaInfo)
TYPE(ObjectCacheInfo)
TYPE(RGWCacheNotifyInfo)
#include "rgw_lc.h"
TYPE(RGWLifecycleConfiguration)
#include "cls/log/cls_log_types.h"
TYPE(cls_log_entry)
#include "cls/rgw/cls_rgw_types.h"
TYPE(rgw_bucket_pending_info)
TYPE(rgw_bucket_dir_entry_meta)
TYPE(rgw_bucket_entry_ver)
TYPE(rgw_bucket_dir_entry)
TYPE(rgw_bucket_category_stats)
TYPE(rgw_bucket_dir_header)
TYPE(rgw_bucket_dir)
TYPE(rgw_bucket_entry_ver)
TYPE(cls_rgw_obj_key)
TYPE(rgw_bucket_olh_log_entry)
TYPE(rgw_usage_log_entry)
TYPE(rgw_cls_bi_entry)
TYPE(rgw_bucket_olh_entry)
TYPE(rgw_usage_data)
TYPE(rgw_usage_log_info)
TYPE(rgw_user_bucket)
TYPE(cls_rgw_lc_entry)
#include "cls/rgw/cls_rgw_ops.h"
TYPE(cls_rgw_lc_get_entry_ret)
TYPE(rgw_cls_obj_prepare_op)
TYPE(rgw_cls_obj_complete_op)
TYPE(rgw_cls_list_op)
TYPE(rgw_cls_list_ret)
TYPE(cls_rgw_gc_defer_entry_op)
TYPE(cls_rgw_gc_list_op)
TYPE(cls_rgw_gc_list_ret)
TYPE(cls_rgw_gc_obj_info)
TYPE(cls_rgw_gc_remove_op)
TYPE(cls_rgw_gc_set_entry_op)
TYPE(cls_rgw_obj)
TYPE(cls_rgw_obj_chain)
TYPE(rgw_cls_tag_timeout_op)
TYPE(cls_rgw_bi_log_list_op)
TYPE(cls_rgw_bi_log_trim_op)
TYPE(cls_rgw_bi_log_list_ret)
TYPE(rgw_cls_link_olh_op)
TYPE(rgw_cls_unlink_instance_op)
TYPE(rgw_cls_read_olh_log_op)
TYPE(rgw_cls_read_olh_log_ret)
TYPE(rgw_cls_trim_olh_log_op)
TYPE(rgw_cls_bucket_clear_olh_op)
TYPE(rgw_cls_check_index_ret)
TYPE(cls_rgw_reshard_add_op)
TYPE(cls_rgw_reshard_list_op)
TYPE(cls_rgw_reshard_list_ret)
TYPE(cls_rgw_reshard_get_op)
TYPE(cls_rgw_reshard_get_ret)
TYPE(cls_rgw_reshard_remove_op)
TYPE(cls_rgw_set_bucket_resharding_op)
TYPE(cls_rgw_clear_bucket_resharding_op)
TYPE(cls_rgw_lc_obj_head)
#include "cls/rgw/cls_rgw_client.h"
TYPE(rgw_bi_log_entry)
TYPE(cls_rgw_reshard_entry)
TYPE(cls_rgw_bucket_instance_entry)
#include "cls/user/cls_user_types.h"
TYPE(cls_user_bucket)
TYPE(cls_user_bucket_entry)
TYPE(cls_user_stats)
TYPE(cls_user_header)
#include "cls/user/cls_user_ops.h"
TYPE(cls_user_set_buckets_op)
TYPE(cls_user_remove_bucket_op)
TYPE(cls_user_list_buckets_op)
TYPE(cls_user_list_buckets_ret)
TYPE(cls_user_get_header_op)
TYPE(cls_user_get_header_ret)
TYPE(cls_user_complete_stats_sync_op)
#include "cls/journal/cls_journal_types.h"
TYPE(cls::journal::ObjectPosition)
TYPE(cls::journal::ObjectSetPosition)
TYPE(cls::journal::Client)
TYPE(cls::journal::Tag)
#include "rgw_common.h"
TYPE(RGWAccessKey)
TYPE(RGWSubUser)
TYPE(RGWUserInfo)
TYPE(rgw_bucket)
TYPE(RGWBucketInfo)
TYPE(RGWBucketEnt)
TYPE(rgw_obj)
#include "rgw_log.h"
TYPE(rgw_log_entry)
#include "rgw_datalog.h"
TYPE(rgw_data_change)
#include "rgw_mdlog.h"
TYPE(RGWMetadataLogData)
#include "rgw_meta_sync_status.h"
TYPE(rgw_meta_sync_info)
TYPE(rgw_meta_sync_marker)
TYPE(rgw_meta_sync_status)
#include "rgw_multi.h"
TYPE(RGWUploadPartInfo)
#include "rgw_data_sync.h"
TYPE(rgw_data_sync_info)
TYPE(rgw_data_sync_marker)
TYPE(rgw_data_sync_status)
#include "rgw_bucket_encryption.h"
TYPE(RGWBucketEncryptionConfig)
#endif
| 3,408 | 21.576159 | 42 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/sstring.h
|
#ifndef TEST_SSTRING_H
#define TEST_SSTRING_H
#include "common/sstring.hh"
// wrapper for sstring that implements the dencoder interface
class sstring_wrapper {
  // two instantiations with different char/size types and internal
  // capacities so distinct basic_sstring configurations get exercised
  using sstring16 = basic_sstring<char, uint32_t, 16>;
  sstring16 s1;
  using sstring24 = basic_sstring<unsigned char, uint16_t, 24>;
  sstring24 s2;
public:
  sstring_wrapper() = default;
  sstring_wrapper(sstring16&& s1, sstring24&& s2)
    : s1(std::move(s1)), s2(std::move(s2))
  {}
  // DENC-style encode/decode: a v1 envelope around both strings.
  DENC(sstring_wrapper, w, p) {
    DENC_START(1, 1, p);
    denc(w.s1, p);
    denc(w.s2, p);
    DENC_FINISH(p);
  }
  void dump(Formatter* f) {
    f->dump_string("s1", s1.c_str());
    // s2 stores unsigned chars; reinterpret for the char* dump API
    f->dump_string("s2", reinterpret_cast<const char*>(s2.c_str()));
  }
  static void generate_test_instances(std::list<sstring_wrapper*>& ls) {
    ls.push_back(new sstring_wrapper());
    // initialize sstrings that fit in internal storage
    constexpr auto cstr6 = "abcdef";
    ls.push_back(new sstring_wrapper(sstring16{cstr6}, sstring24{cstr6}));
    // initialize sstrings that overflow into external storage
    constexpr auto cstr26 = "abcdefghijklmnopqrstuvwxyz";
    ls.push_back(new sstring_wrapper(sstring16{cstr26}, sstring24{cstr26}));
  }
};
WRITE_CLASS_DENC(sstring_wrapper)
#endif
| 1,240 | 29.268293 | 76 |
h
|
null |
ceph-main/src/tools/ceph-dencoder/str.h
|
#ifndef TEST_STRING_H
#define TEST_STRING_H
#include "common/Formatter.h"
// wrapper for std::string that implements the dencoder interface
class string_wrapper {
std::string s;
public:
string_wrapper() = default;
string_wrapper(string s1)
: s(s1)
{}
void encode(ceph::buffer::list& bl) const {
using ceph::encode;
encode(s, bl);
}
void decode(ceph::buffer::list::const_iterator &bl) {
using ceph::decode;
decode(s, bl);
}
void dump(Formatter* f) {
f->dump_string("s", s);
}
static void generate_test_instances(std::list<string_wrapper*>& ls) {
ls.push_back(new string_wrapper());
// initialize strings that fit in internal storage
std::string s1 = "abcdef";
ls.push_back(new string_wrapper(s1));
}
};
WRITE_CLASS_ENCODER(string_wrapper)
#endif
| 821 | 20.076923 | 71 |
h
|
null |
ceph-main/src/tools/cephfs/DataScan.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "common/errno.h"
#include "common/ceph_argparse.h"
#include <fstream>
#include "include/util.h"
#include "include/ceph_fs.h"
#include "mds/CDentry.h"
#include "mds/CInode.h"
#include "mds/CDentry.h"
#include "mds/InoTable.h"
#include "mds/SnapServer.h"
#include "cls/cephfs/cls_cephfs_client.h"
#include "PgFiles.h"
#include "DataScan.h"
#include "include/compat.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "datascan." << __func__ << ": "
using namespace std;
// Print command-line usage for cephfs-data-scan, followed by the
// generic client option help.
void DataScan::usage()
{
  std::cout << "Usage: \n"
<< " cephfs-data-scan init [--force-init]\n"
<< " cephfs-data-scan scan_extents [--force-pool] [--worker_n N --worker_m M] [<data pool name> [<extra data pool name> ...]]\n"
<< " cephfs-data-scan scan_inodes [--force-pool] [--force-corrupt] [--worker_n N --worker_m M] [<data pool name>]\n"
<< " cephfs-data-scan pg_files <path> <pg id> [<pg id>...]\n"
<< " cephfs-data-scan scan_links\n"
<< "\n"
<< " --force-corrupt: overrite apparently corrupt structures\n"
<< " --force-init: write root inodes even if they exist\n"
<< " --force-pool: use data pool even if it is not in FSMap\n"
<< " --worker_m: Maximum number of workers\n"
<< " --worker_n: Worker number, range 0-(worker_m-1)\n"
<< "\n"
<< " cephfs-data-scan scan_frags [--force-corrupt]\n"
<< " cephfs-data-scan cleanup [<data pool name>]\n"
<< std::endl;
  generic_client_usage();
}
// Consume one "--key value" argument pair.  Returns true when the
// keyword was recognized and handled (the caller then skips the value
// token); on a recognized-but-invalid value, sets *r to a negative
// errno and returns false.
bool DataScan::parse_kwarg(
    const std::vector<const char*> &args,
    std::vector<const char *>::const_iterator &i,
    int *r)
{
  // Every keyword argument requires a following value token.
  if (i + 1 == args.end()) {
    return false;
  }

  const std::string arg(*i);
  const std::string val(*(i + 1));

  if (arg == "--output-dir") {
    if (driver != NULL) {
      derr << "Unexpected --output-dir: output already selected!" << dendl;
      *r = -EINVAL;
      return false;
    }
    dout(4) << "Using local file output to '" << val << "'" << dendl;
    driver = new LocalFileDriver(val, data_io);
    return true;
  }

  if (arg == "--worker_n") {
    std::string err;
    n = strict_strtoll(val.c_str(), 10, &err);
    if (!err.empty()) {
      std::cerr << "Invalid worker number '" << val << "'" << std::endl;
      *r = -EINVAL;
      return false;
    }
    return true;
  }

  if (arg == "--worker_m") {
    std::string err;
    m = strict_strtoll(val.c_str(), 10, &err);
    if (!err.empty()) {
      std::cerr << "Invalid worker count '" << val << "'" << std::endl;
      *r = -EINVAL;
      return false;
    }
    return true;
  }

  if (arg == "--filter-tag") {
    filter_tag = val;
    dout(10) << "Applying tag filter: '" << filter_tag << "'" << dendl;
    return true;
  }

  if (arg == "--filesystem") {
    // Resolve the named filesystem to its cluster id.
    std::shared_ptr<const Filesystem> fs;
    *r = fsmap->parse_filesystem(val, &fs);
    if (*r != 0) {
      std::cerr << "Invalid filesystem '" << val << "'" << std::endl;
      return false;
    }
    fscid = fs->fscid;
    return true;
  }

  if (arg == "--alternate-pool") {
    metadata_pool_name = val;
    return true;
  }

  // Not a keyword argument we know about.
  return false;
}
// Consume one standalone flag argument.  Returns true when the flag
// was recognized (and the corresponding member switched on).
bool DataScan::parse_arg(
  const std::vector<const char*> &args,
  std::vector<const char *>::const_iterator &i)
{
  const std::string arg(*i);

  // Map each known flag onto the member it enables.
  bool *flag = nullptr;
  if (arg == "--force-pool") {
    flag = &force_pool;
  } else if (arg == "--force-corrupt") {
    flag = &force_corrupt;
  } else if (arg == "--force-init") {
    flag = &force_init;
  }

  if (flag == nullptr) {
    return false;
  }
  *flag = true;
  return true;
}
int DataScan::main(const std::vector<const char*> &args)
{
// Parse args
// ==========
if (args.size() < 1) {
cerr << "missing position argument" << std::endl;
return -EINVAL;
}
// Common RADOS init: open metadata pool
// =====================================
librados::Rados rados;
int r = rados.init_with_context(g_ceph_context);
if (r < 0) {
derr << "RADOS unavailable" << dendl;
return r;
}
std::string const &command = args[0];
std::string data_pool_name;
std::set<std::string> extra_data_pool_names;
std::string pg_files_path;
std::set<pg_t> pg_files_pgs;
// Consume any known --key val or --flag arguments
for (std::vector<const char *>::const_iterator i = args.begin() + 1;
i != args.end(); ++i) {
if (parse_kwarg(args, i, &r)) {
// Skip the kwarg value field
++i;
continue;
} else if (r) {
return r;
}
if (parse_arg(args, i)) {
continue;
}
// Trailing positional arguments
if (command == "scan_extents") {
if (data_pool_name.empty()) {
data_pool_name = *i;
} else if (*i != data_pool_name) {
extra_data_pool_names.insert(*i);
}
continue;
}
// Trailing positional argument
if (i + 1 == args.end() &&
(command == "scan_inodes"
|| command == "cleanup")) {
data_pool_name = *i;
continue;
}
if (command == "pg_files") {
if (i == args.begin() + 1) {
pg_files_path = *i;
continue;
} else {
pg_t pg;
bool parsed = pg.parse(*i);
if (!parsed) {
std::cerr << "Invalid PG '" << *i << "'" << std::endl;
return -EINVAL;
} else {
pg_files_pgs.insert(pg);
continue;
}
}
}
// Fall through: unhandled
std::cerr << "Unknown argument '" << *i << "'" << std::endl;
return -EINVAL;
}
// If caller didn't specify a namespace, try to pick
// one if only one exists
if (fscid == FS_CLUSTER_ID_NONE) {
if (fsmap->filesystem_count() == 1) {
fscid = fsmap->get_filesystem()->fscid;
} else {
std::cerr << "Specify a filesystem with --filesystem" << std::endl;
return -EINVAL;
}
}
auto fs = fsmap->get_filesystem(fscid);
ceph_assert(fs != nullptr);
// Default to output to metadata pool
if (driver == NULL) {
driver = new MetadataDriver();
driver->set_force_corrupt(force_corrupt);
driver->set_force_init(force_init);
dout(4) << "Using metadata pool output" << dendl;
}
dout(4) << "connecting to RADOS..." << dendl;
r = rados.connect();
if (r < 0) {
std::cerr << "couldn't connect to cluster: " << cpp_strerror(r)
<< std::endl;
return r;
}
r = driver->init(rados, metadata_pool_name, fsmap, fscid);
if (r < 0) {
return r;
}
if (command == "pg_files") {
auto pge = PgFiles(objecter, pg_files_pgs);
pge.init();
return pge.scan_path(pg_files_path);
}
bool autodetect_data_pools = false;
// Initialize data_io for those commands that need it
if (command == "scan_inodes" ||
command == "scan_extents" ||
command == "cleanup") {
data_pool_id = fs->mds_map.get_first_data_pool();
std::string pool_name;
r = rados.pool_reverse_lookup(data_pool_id, &pool_name);
if (r < 0) {
std::cerr << "Failed to resolve data pool: " << cpp_strerror(r)
<< std::endl;
return r;
}
if (data_pool_name.empty()) {
autodetect_data_pools = true;
data_pool_name = pool_name;
} else if (data_pool_name != pool_name) {
std::cerr << "Warning: pool '" << data_pool_name << "' is not the "
"main CephFS data pool!" << std::endl;
if (!force_pool) {
std::cerr << "Use --force-pool to continue" << std::endl;
return -EINVAL;
}
data_pool_id = rados.pool_lookup(data_pool_name.c_str());
if (data_pool_id < 0) {
std::cerr << "Data pool '" << data_pool_name << "' not found!"
<< std::endl;
return -ENOENT;
}
}
dout(4) << "data pool '" << data_pool_name << "' has ID " << data_pool_id
<< dendl;
dout(4) << "opening data pool '" << data_pool_name << "'" << dendl;
r = rados.ioctx_create(data_pool_name.c_str(), data_io);
if (r != 0) {
return r;
}
}
// Initialize extra data_ios for those commands that need it
if (command == "scan_extents") {
if (autodetect_data_pools) {
ceph_assert(extra_data_pool_names.empty());
for (auto &pool_id : fs->mds_map.get_data_pools()) {
if (pool_id == data_pool_id) {
continue;
}
std::string pool_name;
r = rados.pool_reverse_lookup(pool_id, &pool_name);
if (r < 0) {
std::cerr << "Failed to resolve data pool: " << cpp_strerror(r)
<< std::endl;
return r;
}
extra_data_pool_names.insert(pool_name);
}
}
for (auto &data_pool_name: extra_data_pool_names) {
int64_t pool_id = rados.pool_lookup(data_pool_name.c_str());
if (data_pool_id < 0) {
std::cerr << "Data pool '" << data_pool_name << "' not found!" << std::endl;
return -ENOENT;
} else {
dout(4) << "data pool '" << data_pool_name << "' has ID " << pool_id
<< dendl;
}
if (!fs->mds_map.is_data_pool(pool_id)) {
std::cerr << "Warning: pool '" << data_pool_name << "' is not a "
"CephFS data pool!" << std::endl;
if (!force_pool) {
std::cerr << "Use --force-pool to continue" << std::endl;
return -EINVAL;
}
}
dout(4) << "opening data pool '" << data_pool_name << "'" << dendl;
extra_data_ios.push_back({});
r = rados.ioctx_create(data_pool_name.c_str(), extra_data_ios.back());
if (r != 0) {
return r;
}
}
}
// Initialize metadata_io from MDSMap for scan_frags
if (command == "scan_frags" || command == "scan_links") {
const auto fs = fsmap->get_filesystem(fscid);
if (fs == nullptr) {
std::cerr << "Filesystem id " << fscid << " does not exist" << std::endl;
return -ENOENT;
}
int64_t const metadata_pool_id = fs->mds_map.get_metadata_pool();
dout(4) << "resolving metadata pool " << metadata_pool_id << dendl;
int r = rados.pool_reverse_lookup(metadata_pool_id, &metadata_pool_name);
if (r < 0) {
std::cerr << "Pool " << metadata_pool_id
<< " identified in MDS map not found in RADOS!" << std::endl;
return r;
}
r = rados.ioctx_create(metadata_pool_name.c_str(), metadata_io);
if (r != 0) {
return r;
}
data_pools = fs->mds_map.get_data_pools();
}
// Finally, dispatch command
if (command == "scan_inodes") {
return scan_inodes();
} else if (command == "scan_extents") {
return scan_extents();
} else if (command == "scan_frags") {
return scan_frags();
} else if (command == "scan_links") {
return scan_links();
} else if (command == "cleanup") {
return cleanup();
} else if (command == "init") {
return driver->init_roots(fs->mds_map.get_first_data_pool());
} else {
std::cerr << "Unknown command '" << command << "'" << std::endl;
return -EINVAL;
}
}
/**
 * Create an unlinked inode object ("<ino>.inode") in the metadata pool.
 *
 * Used by 'init' to (re)create system inodes (root, mdsdir).  The inode
 * is written with deliberately-damaged stats flags so a later scrub will
 * recompute them.
 *
 * @param inono inode number to create
 * @param mode type/permission bits OR'd onto a 0500 base
 * @param data_pool_id pool id stored in the inode's file layout
 * @return 0 on success (including "already exists" without --force-init),
 *         else a negative errno from the existence check or the write
 */
int MetadataDriver::inject_unlinked_inode(
    inodeno_t inono, int mode, int64_t data_pool_id)
{
  const object_t oid = InodeStore::get_object_name(inono, frag_t(), ".inode");

  // Skip if exists
  bool already_exists = false;
  int r = root_exists(inono, &already_exists);
  if (r) {
    return r;
  }
  if (already_exists && !force_init) {
    std::cerr << "Inode 0x" << std::hex << inono << std::dec << " already"
      " exists, skipping create. Use --force-init to overwrite"
      " the existing object." << std::endl;
    return 0;
  }

  // Compose the in-memory inode.
  InodeStore inode_data;
  auto inode = inode_data.get_inode();
  inode->ino = inono;
  inode->version = 1;
  inode->xattr_version = 1;
  inode->mode = 0500 | mode;
  // Fake dirstat.nfiles to 1, so that the directory doesn't appear to be empty
  // (we won't actually give the *correct* dirstat here though)
  inode->dirstat.nfiles = 1;
  inode->ctime = inode->mtime = ceph_clock_now();
  inode->nlink = 1;
  inode->truncate_size = -1ull;
  inode->truncate_seq = 1;
  inode->uid = g_conf()->mds_root_ino_uid;
  inode->gid = g_conf()->mds_root_ino_gid;
  // Force layout to default: should we let users override this so that
  // they don't have to mount the filesystem to correct it?
  inode->layout = file_layout_t::get_default();
  inode->layout.pool_id = data_pool_id;
  inode->dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash;

  // Assume that we will get our stats wrong, and that we may
  // be ignoring dirfrags that exist
  inode_data.damage_flags |= (DAMAGE_STATS | DAMAGE_RSTATS | DAMAGE_FRAGTREE);

  // Root and mdsdir inodes carry a snaprealm; give them a minimal one.
  if (inono == CEPH_INO_ROOT || MDS_INO_IS_MDSDIR(inono)) {
    sr_t srnode;
    srnode.seq = 1;
    encode(srnode, inode_data.snap_blob);
  }

  // Serialize: on-disk magic string first, then the InodeStore payload.
  bufferlist inode_bl;
  encode(std::string(CEPH_FS_ONDISK_MAGIC), inode_bl);
  inode_data.encode(inode_bl, CEPH_FEATURES_SUPPORTED_DEFAULT);

  // Write the object (replacing any existing content).
  r = metadata_io.write_full(oid.name, inode_bl);
  if (r != 0) {
    derr << "Error writing '" << oid.name << "': " << cpp_strerror(r) << dendl;
    return r;
  }

  return r;
}
/**
 * Probe the metadata pool for the "<ino>.inode" object of @p ino.
 *
 * @param ino inode number whose root object is checked
 * @param result set to true if the object exists, false if absent
 * @return 0 on success (absence is not an error), else a negative errno
 *         from the stat operation
 */
int MetadataDriver::root_exists(inodeno_t ino, bool *result)
{
  const object_t oid = InodeStore::get_object_name(ino, frag_t(), ".inode");
  uint64_t size = 0;
  time_t mtime = 0;
  const int r = metadata_io.stat(oid.name, &size, &mtime);
  if (r < 0) {
    if (r == -ENOENT) {
      // Missing object simply means "does not exist".
      *result = false;
      return 0;
    }
    return r;
  }
  *result = true;
  return 0;
}
/**
 * Ensure the root ("/") and rank-0 mdsdir inodes exist, creating them
 * (plus the mdsdir's root dirfrag) when missing.
 *
 * @param data_pool_id pool referenced from the created inodes' layouts
 * @return 0 on success, else a negative errno from the first failing step
 */
int MetadataDriver::init_roots(int64_t data_pool_id)
{
  // Root directory is world-traversable; the mdsdir keeps the 0500 base.
  if (int r = inject_unlinked_inode(CEPH_INO_ROOT, S_IFDIR|0755, data_pool_id);
      r != 0) {
    return r;
  }
  if (int r = inject_unlinked_inode(MDS_INO_MDSDIR(0), S_IFDIR, data_pool_id);
      r != 0) {
    return r;
  }
  // Make sure the mdsdir has its root fragment; whether it already
  // existed is not interesting here.
  bool created = false;
  return find_or_create_dirfrag(MDS_INO_MDSDIR(0), frag_t(), &created);
}
int MetadataDriver::check_roots(bool *result)
{
int r;
r = root_exists(CEPH_INO_ROOT, result);
if (r != 0) {
return r;
}
if (!*result) {
return 0;
}
r = root_exists(MDS_INO_MDSDIR(0), result);
if (r != 0) {
return r;
}
if (!*result) {
return 0;
}
return 0;
}
/**
* Stages:
*
* SERIAL init
* 0. Create root inodes if don't exist
* PARALLEL scan_extents
* 1. Size and mtime recovery: scan ALL objects, and update 0th
* objects with max size and max mtime seen.
* PARALLEL scan_inodes
* 2. Inode recovery: scan ONLY 0th objects, and inject metadata
* into dirfrag OMAPs, creating blank dirfrags as needed. No stats
* or rstats at this stage. Inodes without backtraces go into
* lost+found
* TODO: SERIAL "recover stats"
* 3. Dirfrag statistics: depth first traverse into metadata tree,
* rebuilding dir sizes.
* TODO PARALLEL "clean up"
* 4. Cleanup; go over all 0th objects (and dirfrags if we tagged
* anything onto them) and remove any of the xattrs that we
* used for accumulating.
*/
/**
 * Split an object name of the form "<ino hex>.<index hex>" into its
 * inode number and object index.
 *
 * @param oid object name, e.g. "10000000000.00000000"
 * @param inode_no out: inode number parsed from the part before the dot
 * @param obj_id out: object index parsed from the part after the dot
 * @return 0 on success; -EINVAL if there is no dot, the dot is the last
 *         character, or either side is not valid hexadecimal
 */
int parse_oid(const std::string &oid, uint64_t *inode_no, uint64_t *obj_id)
{
  // Locate the separator once (previously re-searched four times); a
  // single-char search avoids constructing a needle string.
  const size_t dot = oid.find('.');
  if (dot == std::string::npos || dot == oid.size() - 1) {
    return -EINVAL;
  }

  std::string err;
  *inode_no = strict_strtoll(oid.substr(0, dot).c_str(), 16, &err);
  if (!err.empty()) {
    return -EINVAL;
  }

  *obj_id = strict_strtoll(oid.substr(dot + 1).c_str(), 16, &err);
  if (!err.empty()) {
    return -EINVAL;
  }

  return 0;
}
/**
 * Recovery phase 1: walk every object in the primary (and any extra)
 * data pools and fold each object's size/mtime into accumulated-metadata
 * xattrs on the corresponding 0th object, for later use by scan_inodes.
 *
 * @return 0 on success, else the first error from a pool scan
 */
int DataScan::scan_extents()
{
  // Scan the primary data pool first, then any extra data pools.
  std::vector<librados::IoCtx *> data_ios;
  data_ios.push_back(&data_io);
  for (auto &extra_data_io : extra_data_ios) {
    data_ios.push_back(&extra_data_io);
  }

  for (auto ioctx : data_ios) {
    int r = forall_objects(*ioctx, false, [this, ioctx](
        std::string const &oid,
        uint64_t obj_name_ino,
        uint64_t obj_name_offset) -> int
    {
      // Read size
      uint64_t size;
      time_t mtime;
      int r = ioctx->stat(oid, &size, &mtime);
      dout(10) << "handling object " << obj_name_ino
               << "." << obj_name_offset << dendl;
      if (r != 0) {
        dout(4) << "Cannot stat '" << oid << "': skipping" << dendl;
        return r;
      }
      // Record the origin pool only when it differs from the primary
      // data pool; -1 means "primary pool".
      int64_t obj_pool_id = data_io.get_id() != ioctx->get_id() ?
        ioctx->get_id() : -1;

      // I need to keep track of
      //  * The highest object ID seen
      //  * The size of the highest object ID seen
      //  * The largest object seen
      //  * The pool of the objects seen (if it is not the main data pool)
      //
      // Given those things, I can later infer the object chunking
      // size, the offset of the last object (chunk size * highest ID seen),
      // the actual size (offset of last object + size of highest ID seen),
      // and the layout pool id.
      //
      // This logic doesn't take account of striping.
      //
      // NOTE: accumulation deliberately targets data_io (the primary
      // pool): the result xattrs live on the 0th object there even for
      // objects found in extra pools, with obj_pool_id as the origin.
      r = ClsCephFSClient::accumulate_inode_metadata(
          data_io,
          obj_name_ino,
          obj_name_offset,
          size,
          obj_pool_id,
          mtime);
      if (r < 0) {
        derr << "Failed to accumulate metadata data from '"
             << oid << "': " << cpp_strerror(r) << dendl;
        return r;
      }
      return r;
    });
    if (r < 0) {
      return r;
    }
  }

  return 0;
}
/**
 * Probe whether the OSDs understand the cephfs object-list filter by
 * issuing a single filtered listing with a throwaway tag.
 *
 * @param ioctx pool to probe
 * @return nonzero (true) if the filtered listing was accepted,
 *         0 (false) if the OSD rejected the filter
 */
int DataScan::probe_filter(librados::IoCtx &ioctx)
{
  bufferlist filter_bl;
  ClsCephFSClient::build_tag_filter("test", &filter_bl);

  // (Removed two unused ObjectCursor locals that were never referenced.)
  std::vector<librados::ObjectItem> tmp_result;
  librados::ObjectCursor tmp_next;
  int r = ioctx.object_list(ioctx.object_list_begin(), ioctx.object_list_end(),
                            1, filter_bl, &tmp_result, &tmp_next);

  // A negative return means the OSD could not handle the filter payload.
  return r >= 0;
}
/**
 * Iterate over this worker's slice (worker n of m) of the objects in
 * @p ioctx, invoking @p handler(oid, ino, index) for every object whose
 * name parses as "<ino>.<index>".
 *
 * When @p untagged_only is set, objects already carrying our scrub tag
 * are skipped: server-side via a cephfs pgls filter when the OSDs
 * support it, otherwise client-side by reading each 0th object's
 * "scrub_tag" xattr (legacy path).
 *
 * @return 0 on success.  A listing error aborts the scan and is
 *         returned immediately; the first failing handler result is
 *         remembered and returned after the scan completes.
 */
int DataScan::forall_objects(
    librados::IoCtx &ioctx,
    bool untagged_only,
    std::function<int(std::string, uint64_t, uint64_t)> handler
    )
{
  librados::ObjectCursor range_i;
  librados::ObjectCursor range_end;
  ioctx.object_list_slice(
      ioctx.object_list_begin(),
      ioctx.object_list_end(),
      n,
      m,
      &range_i,
      &range_end);

  bufferlist filter_bl;
  bool legacy_filtering = false;
  if (untagged_only) {
    // probe to deal with older OSDs that don't support
    // the cephfs pgls filtering mode
    legacy_filtering = !probe_filter(ioctx);
    if (!legacy_filtering) {
      ClsCephFSClient::build_tag_filter(filter_tag, &filter_bl);
    }
  }

  // First per-object handler failure seen.  BUGFIX: previously an inner
  // `int r = ioctx.object_list(...)` shadowed this accumulator, so the
  // handler-error bookkeeping below wrote to the shadow and this
  // function always returned 0, silently dropping handler errors.
  int r = 0;
  while (range_i < range_end) {
    std::vector<librados::ObjectItem> result;
    int list_r = ioctx.object_list(range_i, range_end, 1,
                                   filter_bl, &result, &range_i);
    if (list_r < 0) {
      derr << "Unexpected error listing objects: " << cpp_strerror(list_r)
           << dendl;
      return list_r;
    }

    for (const auto &i : result) {
      const std::string &oid = i.oid;
      uint64_t obj_name_ino = 0;
      uint64_t obj_name_offset = 0;
      if (parse_oid(oid, &obj_name_ino, &obj_name_offset) != 0) {
        dout(4) << "Bad object name '" << oid << "', skipping" << dendl;
        continue;
      }

      if (untagged_only && legacy_filtering) {
        dout(20) << "Applying filter to " << oid << dendl;

        // We are only interested in 0th objects during this phase: we touched
        // the other objects during scan_extents
        if (obj_name_offset != 0) {
          dout(20) << "Non-zeroth object" << dendl;
          continue;
        }

        bufferlist scrub_tag_bl;
        int tag_r = ioctx.getxattr(oid, "scrub_tag", scrub_tag_bl);
        if (tag_r >= 0) {
          std::string read_tag;
          auto q = scrub_tag_bl.cbegin();
          try {
            decode(read_tag, q);
            if (read_tag == filter_tag) {
              dout(20) << "skipping " << oid << " because it has the filter_tag"
                       << dendl;
              continue;
            }
          } catch (const buffer::error &err) {
          }
          dout(20) << "read non-matching tag '" << read_tag << "'" << dendl;
        } else {
          dout(20) << "no tag read (" << tag_r << ")" << dendl;
        }

      } else if (untagged_only) {
        ceph_assert(obj_name_offset == 0);
        dout(20) << "OSD matched oid " << oid << dendl;
      }

      int this_oid_r = handler(oid, obj_name_ino, obj_name_offset);
      if (r == 0 && this_oid_r < 0) {
        // Remember the first per-object failure but keep scanning.
        r = this_oid_r;
      }
    }
  }

  return r;
}
/**
 * Recovery phase 2: for every 0th data object, read the accumulated
 * size/mtime metadata left by scan_extents plus the backtrace/layout
 * xattrs, reconstruct an inode (guessing the layout where necessary),
 * and inject it into the metadata pool — at its backtrace location when
 * a usable backtrace exists, otherwise under lost+found.
 *
 * @return 0 on success; errors per object are reported via the
 *         forall_objects error-accumulation contract
 */
int DataScan::scan_inodes()
{
  bool roots_present;
  int r = driver->check_roots(&roots_present);
  if (r != 0) {
    derr << "Unexpected error checking roots: '"
         << cpp_strerror(r) << "'" << dendl;
    return r;
  }

  if (!roots_present) {
    std::cerr << "Some or all system inodes are absent. Run 'init' from "
      "one node before running 'scan_inodes'" << std::endl;
    return -EIO;
  }

  // Only 0th objects (untagged_only=true) carry the accumulated metadata.
  return forall_objects(data_io, true, [this](
      std::string const &oid,
      uint64_t obj_name_ino,
      uint64_t obj_name_offset) -> int
  {
    int r = 0;

    dout(10) << "handling object "
             << std::hex << obj_name_ino << "." << obj_name_offset << std::dec
             << dendl;

    AccumulateResult accum_res;
    inode_backtrace_t backtrace;
    file_layout_t loaded_layout = file_layout_t::get_default();
    std::string symlink;
    r = ClsCephFSClient::fetch_inode_accumulate_result(
        data_io, oid, &backtrace, &loaded_layout, &symlink, &accum_res);

    if (r == -EINVAL) {
      // NOTE(review): message has an unbalanced quote — "'" before oid
      // is never closed; cosmetic only.
      dout(4) << "Accumulated metadata missing from '"
              << oid << ", did you run scan_extents?" << dendl;
      return r;
    } else if (r < 0) {
      dout(4) << "Unexpected error loading accumulated metadata from '"
              << oid << "': " << cpp_strerror(r) << dendl;
      // FIXME: this creates situation where if a client has a corrupt
      // backtrace/layout, we will fail to inject it.  We should (optionally)
      // proceed if the backtrace/layout is corrupt but we have valid
      // accumulated metadata.
      return r;
    }

    const time_t file_mtime = accum_res.max_mtime;
    uint64_t file_size = 0;
    bool have_backtrace = !(backtrace.ancestors.empty());

    // This is the layout we will use for injection, populated either
    // from loaded_layout or from best guesses
    file_layout_t guessed_layout;
    if (accum_res.obj_pool_id == -1) {
      // Object was seen only in the primary data pool.
      guessed_layout.pool_id = data_pool_id;
    } else {
      // Object originated in an extra data pool: re-read the "layout"
      // xattr from that pool, since fetch_inode_accumulate_result only
      // looked in the primary pool.
      guessed_layout.pool_id = accum_res.obj_pool_id;

      librados::IoCtx ioctx;
      r = librados::Rados(data_io).ioctx_create2(guessed_layout.pool_id, ioctx);
      if (r != 0) {
        derr << "Unexpected error opening file data pool id="
             << guessed_layout.pool_id << ": " << cpp_strerror(r) << dendl;
        return r;
      }

      bufferlist bl;
      int r = ioctx.getxattr(oid, "layout", bl);
      if (r < 0) {
        if (r != -ENODATA) {
          derr << "Unexpected error reading layout for " << oid << ": "
               << cpp_strerror(r) << dendl;
          return r;
        }
      } else {
        try {
          auto q = bl.cbegin();
          decode(loaded_layout, q);
        } catch (ceph::buffer::error &e) {
          derr << "Unexpected error decoding layout for " << oid << dendl;
          return -EINVAL;
        }
      }
    }

    // Calculate file_size, guess the layout
    if (accum_res.ceiling_obj_index > 0) {
      uint32_t chunk_size = file_layout_t::get_default().object_size;
      // When there are multiple objects, the largest object probably
      // indicates the chunk size.  But not necessarily, because files
      // can be sparse.  Only make this assumption if size seen
      // is a power of two, as chunk sizes typically are.
      if ((accum_res.max_obj_size & (accum_res.max_obj_size - 1)) == 0) {
        chunk_size = accum_res.max_obj_size;
      }

      if (loaded_layout.pool_id == -1) {
        // If no stashed layout was found, guess it
        guessed_layout.object_size = chunk_size;
        guessed_layout.stripe_unit = chunk_size;
        guessed_layout.stripe_count = 1;
      } else if (!loaded_layout.is_valid() ||
                 loaded_layout.object_size < accum_res.max_obj_size) {
        // If the max size seen exceeds what the stashed layout claims, then
        // disbelieve it.  Guess instead.  Same for invalid layouts on disk.
        dout(4) << "bogus xattr layout on 0x" << std::hex << obj_name_ino
                << std::dec << ", ignoring in favour of best guess" << dendl;
        guessed_layout.object_size = chunk_size;
        guessed_layout.stripe_unit = chunk_size;
        guessed_layout.stripe_count = 1;
      } else {
        // We have a stashed layout that we can't disprove, so apply it
        guessed_layout = loaded_layout;
        dout(20) << "loaded layout from xattr:"
                 << " pi: " << guessed_layout.pool_id
                 << " os: " << guessed_layout.object_size
                 << " sc: " << guessed_layout.stripe_count
                 << " su: " << guessed_layout.stripe_unit
                 << dendl;
        // User might have transplanted files from a pool with a different
        // ID, so if the pool from loaded_layout is not found in the list of
        // the data pools, we'll force the injected layout to point to the
        // pool we read from.
        if (!fsmap->get_filesystem(fscid)->mds_map.is_data_pool(
              guessed_layout.pool_id)) {
          dout(20) << "overwriting layout pool_id " << data_pool_id << dendl;
          guessed_layout.pool_id = data_pool_id;
        }
      }

      if (guessed_layout.stripe_count == 1) {
        // Unstriped file: simple chunking
        file_size = guessed_layout.object_size * accum_res.ceiling_obj_index
                    + accum_res.ceiling_obj_size;
      } else {
        // Striped file: need to examine the last stripe_count objects
        // in the file to determine the size.
        librados::IoCtx ioctx;
        if (guessed_layout.pool_id == data_io.get_id()) {
          ioctx.dup(data_io);
        } else {
          r = librados::Rados(data_io).ioctx_create2(guessed_layout.pool_id,
                                                     ioctx);
          if (r != 0) {
            derr << "Unexpected error opening file data pool id="
                 << guessed_layout.pool_id << ": " << cpp_strerror(r) << dendl;
            return r;
          }
        }

        // How many complete (i.e. not last stripe) objects?
        uint64_t complete_objs = 0;
        if (accum_res.ceiling_obj_index > guessed_layout.stripe_count - 1) {
          complete_objs = (accum_res.ceiling_obj_index / guessed_layout.stripe_count) * guessed_layout.stripe_count;
        } else {
          complete_objs = 0;
        }

        // How many potentially-short objects (i.e. last stripe set) objects?
        uint64_t partial_objs = accum_res.ceiling_obj_index + 1 - complete_objs;

        dout(10) << "calculating striped size from complete objs: "
                 << complete_objs << ", partial objs: " << partial_objs
                 << dendl;

        // Maximum amount of data that may be in the incomplete objects
        uint64_t incomplete_size = 0;

        // For each short object, calculate the max file size within it
        // and accumulate the maximum
        for (uint64_t i = complete_objs; i < complete_objs + partial_objs; ++i) {
          char buf[60];
          snprintf(buf, sizeof(buf), "%llx.%08llx",
                   (long long unsigned)obj_name_ino, (long long unsigned)i);

          uint64_t osize(0);
          time_t omtime(0);
          r = ioctx.stat(std::string(buf), &osize, &omtime);
          if (r == 0) {
            if (osize > 0) {
              // Upper bound within this object
              uint64_t upper_size = (osize - 1) / guessed_layout.stripe_unit
                * (guessed_layout.stripe_unit * guessed_layout.stripe_count)
                + (i % guessed_layout.stripe_count)
                * guessed_layout.stripe_unit + (osize - 1)
                % guessed_layout.stripe_unit + 1;
              incomplete_size = std::max(incomplete_size, upper_size);
            }
          } else if (r == -ENOENT) {
            // Absent object, treat as size 0 and ignore.
          } else {
            // Unexpected error, carry r to outer scope for handling.
            break;
          }
        }
        if (r != 0 && r != -ENOENT) {
          derr << "Unexpected error checking size of ino 0x" << std::hex
               << obj_name_ino << std::dec << ": " << cpp_strerror(r) << dendl;
          return r;
        }
        file_size = complete_objs * guessed_layout.object_size
                    + incomplete_size;
      }
    } else {
      // Single-object file: its size is simply the 0th object's size.
      file_size = accum_res.ceiling_obj_size;
      if (loaded_layout.pool_id < 0
          || loaded_layout.object_size < accum_res.max_obj_size) {
        // No layout loaded, or inconsistent layout, use default
        guessed_layout = file_layout_t::get_default();
        guessed_layout.pool_id = accum_res.obj_pool_id != -1 ?
          accum_res.obj_pool_id : data_pool_id;
      } else {
        guessed_layout = loaded_layout;
      }
    }

    // Sanity checking backtrace ino against object name
    if (have_backtrace && backtrace.ino != obj_name_ino) {
      dout(4) << "Backtrace ino 0x" << std::hex << backtrace.ino
              << " doesn't match object name ino 0x" << obj_name_ino
              << std::dec << dendl;
      have_backtrace = false;
    }

    InodeStore dentry;
    build_file_dentry(obj_name_ino, file_size, file_mtime, guessed_layout, &dentry, symlink);

    // Inject inode to the metadata pool
    if (have_backtrace) {
      inode_backpointer_t root_bp = *(backtrace.ancestors.rbegin());
      if (MDS_INO_IS_MDSDIR(root_bp.dirino)) {
        /* Special case for strays: even if we have a good backtrace,
         * don't put it in the stray dir, because while that would technically
         * give it linkage it would still be invisible to the user */
        r = driver->inject_lost_and_found(obj_name_ino, dentry);
        if (r < 0) {
          dout(4) << "Error injecting 0x" << std::hex << backtrace.ino
                  << std::dec << " into lost+found: " << cpp_strerror(r) << dendl;
          if (r == -EINVAL) {
            dout(4) << "Use --force-corrupt to overwrite structures that "
              "appear to be corrupt" << dendl;
          }
        }
      } else {
        /* Happy case: we will inject a named dentry for this inode */
        r = driver->inject_with_backtrace(backtrace, dentry);
        if (r < 0) {
          dout(4) << "Error injecting 0x" << std::hex << backtrace.ino
                  << std::dec << " with backtrace: " << cpp_strerror(r) << dendl;
          if (r == -EINVAL) {
            dout(4) << "Use --force-corrupt to overwrite structures that "
              "appear to be corrupt" << dendl;
          }
        }
      }
    } else {
      /* Backtrace-less case: we will inject a lost+found dentry */
      r = driver->inject_lost_and_found(
          obj_name_ino, dentry);
      if (r < 0) {
        dout(4) << "Error injecting 0x" << std::hex << obj_name_ino
                << std::dec << " into lost+found: " << cpp_strerror(r) << dendl;
        if (r == -EINVAL) {
          dout(4) << "Use --force-corrupt to overwrite structures that "
            "appear to be corrupt" << dendl;
        }
      }
    }

    return r;
  });
}
/**
 * Remove the accumulated-metadata xattrs that scan_extents left behind
 * on every 0th data object in this worker's slice.
 *
 * @return 0 on success, else the first per-object deletion error
 *         reported by forall_objects
 */
int DataScan::cleanup()
{
  // Only 0th objects carry accumulated metadata, hence untagged_only.
  auto wipe_one = [this](std::string const &oid,
                         uint64_t obj_name_ino,
                         uint64_t obj_name_offset) -> int {
    const int r = ClsCephFSClient::delete_inode_accumulate_result(data_io, oid);
    if (r < 0) {
      dout(4) << "Error deleting accumulated metadata from '"
              << oid << "': " << cpp_strerror(r) << dendl;
    }
    return r;
  };
  return forall_objects(data_io, true, wipe_one);
}
/**
 * Return true if @p ino is a plausible CephFS inode number: either in
 * the regular (>= 2^40) allocation range or one of the well-known
 * system inodes (stray dirs, mdsdirs, root, .ceph, lost+found).
 */
bool DataScan::valid_ino(inodeno_t ino) const
{
  if (ino >= inodeno_t((1ull << 40))) {
    return true;  // regular (client-allocated) inode range
  }
  if (MDS_INO_IS_STRAY(ino) || MDS_INO_IS_MDSDIR(ino)) {
    return true;  // per-rank system directories
  }
  return ino == CEPH_INO_ROOT
    || ino == CEPH_INO_CEPH
    || ino == CEPH_INO_LOST_AND_FOUND;
}
/**
 * Two-pass consistency pass over all dirfrag omaps in the metadata pool:
 *
 *  pass 1 (SCAN_INOS): record which inodes are used by primary dentries
 *    (detecting duplicates) and count remote links per inode.
 *  pass 2 (CHECK_LINK): verify nlink counts, collect snap information,
 *    find dangling remote dentries and injected (dnfirst==CEPH_NOSNAP)
 *    dentries.
 *
 * Afterwards: remove duplicate/dangling dentries, rewrite inodes with
 * corrected nlink, fix dnfirst on injected dentries, and bring the
 * inotable and snaptable up to date with what was observed.
 *
 * Fix in this revision: the "Invalid tag char" error message printed
 * the directory ino in decimal after a "0x" prefix (missing std::hex),
 * unlike its sibling messages.  Several shadowed locals were renamed.
 *
 * @return 0 on success, else a negative errno
 */
int DataScan::scan_links()
{
  MetadataDriver *metadata_driver = dynamic_cast<MetadataDriver*>(driver);
  if (!metadata_driver) {
    derr << "Unexpected --output-dir option for scan_links" << dendl;
    return -EINVAL;
  }

  interval_set<uint64_t> used_inos;
  map<inodeno_t, int> remote_links;
  map<snapid_t, SnapInfo> snaps;
  snapid_t last_snap = 1;
  snapid_t snaprealm_v2_since = 2;

  // Where a (possibly duplicated) primary dentry was found.
  struct link_info_t {
    inodeno_t dirino;
    frag_t frag;
    string name;
    version_t version;
    int nlink;
    bool is_dir;
    map<snapid_t, SnapInfo> snaps;
    link_info_t() : version(0), nlink(0), is_dir(false) {}
    link_info_t(inodeno_t di, frag_t df, const string& n, const CInode::inode_const_ptr& i) :
      dirino(di), frag(df), name(n),
      version(i->version), nlink(i->nlink), is_dir(S_IFDIR & i->mode) {}
    dirfrag_t dirfrag() const {
      return dirfrag_t(dirino, frag);
    }
  };
  map<inodeno_t, list<link_info_t> > dup_primaries;
  map<inodeno_t, link_info_t> bad_nlink_inos;
  map<inodeno_t, link_info_t> injected_inos;

  map<dirfrag_t, set<string> > to_remove;

  enum {
    SCAN_INOS = 1,
    CHECK_LINK,
  };

  for (int step = SCAN_INOS; step <= CHECK_LINK; step++) {
    const librados::NObjectIterator it_end = metadata_io.nobjects_end();
    for (auto it = metadata_io.nobjects_begin(); it != it_end; ++it) {
      const std::string oid = it->get_oid();

      dout(10) << "step " << step << ": handling object " << oid << dendl;

      uint64_t dir_ino = 0;
      uint64_t frag_id = 0;
      int r = parse_oid(oid, &dir_ino, &frag_id);
      if (r == -EINVAL) {
        dout(10) << "Not a dirfrag: '" << oid << "'" << dendl;
        continue;
      } else {
        // parse_oid can only do 0 or -EINVAL
        ceph_assert(r == 0);
      }

      if (!valid_ino(dir_ino)) {
        dout(10) << "Not a dirfrag (invalid ino): '" << oid << "'" << dendl;
        continue;
      }

      std::map<std::string, bufferlist> items;
      r = metadata_io.omap_get_vals(oid, "", (uint64_t)-1, &items);
      if (r < 0) {
        derr << "Error getting omap from '" << oid << "': " << cpp_strerror(r) << dendl;
        return r;
      }

      for (auto& p : items) {
        auto q = p.second.cbegin();
        string dname;
        snapid_t last;
        dentry_key_t::decode_helper(p.first, dname, last);

        if (last != CEPH_NOSNAP) {
          // Snapshotted dentry: only interesting for tracking last_snap.
          if (last > last_snap)
            last_snap = last;
          continue;
        }

        try {
          snapid_t dnfirst;
          decode(dnfirst, q);
          if (dnfirst == CEPH_NOSNAP) {
            // Sentinel left by earlier injection tools; fixed up below.
            dout(20) << "injected ino detected" << dendl;
          } else if (dnfirst <= CEPH_MAXSNAP) {
            if (dnfirst - 1 > last_snap)
              last_snap = dnfirst - 1;
          }
          char dentry_type;
          decode(dentry_type, q);
          mempool::mds_co::string alternate_name;
          if (dentry_type == 'I' || dentry_type == 'i') {
            // Primary dentry: the inode is embedded here.
            InodeStore inode;
            if (dentry_type == 'i') {
              DECODE_START(2, q);
              if (struct_v >= 2)
                decode(alternate_name, q);
              inode.decode(q);
              DECODE_FINISH(q);
            } else {
              inode.decode_bare(q);
            }
            inodeno_t ino = inode.inode->ino;

            if (step == SCAN_INOS) {
              if (used_inos.contains(ino, 1)) {
                // Second primary dentry for this ino: mark as duplicated.
                dup_primaries.emplace(std::piecewise_construct,
                                      std::forward_as_tuple(ino),
                                      std::forward_as_tuple());
              } else {
                used_inos.insert(ino);
              }
            } else if (step == CHECK_LINK) {
              sr_t srnode;
              if (inode.snap_blob.length()) {
                auto blob_p = inode.snap_blob.cbegin();
                decode(srnode, blob_p);
                // Drop snaps that don't belong to this inode/snapid.
                for (auto snap_it = srnode.snaps.begin();
                     snap_it != srnode.snaps.end(); ) {
                  if (snap_it->second.ino != ino ||
                      snap_it->second.snapid != snap_it->first) {
                    srnode.snaps.erase(snap_it++);
                  } else {
                    ++snap_it;
                  }
                }
                if (!srnode.past_parents.empty()) {
                  snapid_t last = srnode.past_parents.rbegin()->first;
                  if (last + 1 > snaprealm_v2_since)
                    snaprealm_v2_since = last + 1;
                }
              }
              if (inode.old_inodes && !inode.old_inodes->empty()) {
                auto _last_snap = inode.old_inodes->rbegin()->first;
                if (_last_snap > last_snap)
                  last_snap = _last_snap;
              }
              auto dup_it = dup_primaries.find(ino);
              if (dup_it != dup_primaries.end()) {
                // Duplicated primary: defer nlink checks until we've
                // chosen a winner below.
                dup_it->second.push_back(link_info_t(dir_ino, frag_id, dname, inode.inode));
                dup_it->second.back().snaps.swap(srnode.snaps);
              } else {
                int nlink = 0;
                auto rl_it = remote_links.find(ino);
                if (rl_it != remote_links.end())
                  nlink = rl_it->second;
                if (!MDS_INO_IS_STRAY(dir_ino))
                  nlink++;
                if (inode.inode->nlink != nlink) {
                  derr << "Bad nlink on " << ino << " expected " << nlink
                       << " has " << inode.inode->nlink << dendl;
                  bad_nlink_inos[ino] = link_info_t(dir_ino, frag_id, dname, inode.inode);
                  bad_nlink_inos[ino].nlink = nlink;
                }
                snaps.insert(make_move_iterator(begin(srnode.snaps)),
                             make_move_iterator(end(srnode.snaps)));
              }
              if (dnfirst == CEPH_NOSNAP) {
                injected_inos[ino] = link_info_t(dir_ino, frag_id, dname, inode.inode);
                dout(20) << "adding " << ino << " for future processing to fix dnfirst" << dendl;
              }
            }
          } else if (dentry_type == 'L' || dentry_type == 'l') {
            // Remote dentry: a hard link to an inode stored elsewhere.
            inodeno_t ino;
            unsigned char d_type;
            CDentry::decode_remote(dentry_type, ino, d_type, alternate_name, q);

            if (step == SCAN_INOS) {
              remote_links[ino]++;
            } else if (step == CHECK_LINK) {
              if (!used_inos.contains(ino, 1)) {
                // Dangling remote link: target inode never seen.
                derr << "Bad remote link dentry 0x" << std::hex << dir_ino
                     << std::dec << "/" << dname
                     << ", ino " << ino << " not found" << dendl;
                std::string key;
                dentry_key_t dn_key(CEPH_NOSNAP, dname.c_str());
                dn_key.encode(key);
                to_remove[dirfrag_t(dir_ino, frag_id)].insert(key);
              }
            }
          } else {
            // BUGFIX: print the ino in hex to match the "0x" prefix
            // (previously printed decimal; siblings above use std::hex).
            derr << "Invalid tag char '" << dentry_type << "' dentry 0x"
                 << std::hex << dir_ino
                 << std::dec << "/" << dname << dendl;
            return -EINVAL;
          }
        } catch (const buffer::error &err) {
          derr << "Error decoding dentry 0x" << std::hex << dir_ino
               << std::dec << "/" << dname << dendl;
          return -EINVAL;
        }
      }
    }
  }

  // Derive, per rank, the highest inode number in use so the inotable
  // can be advanced past it.
  map<unsigned, uint64_t> max_ino_map;
  {
    auto prev_max_ino = (uint64_t)1 << 40;
    for (auto p = used_inos.begin(); p != used_inos.end(); ++p) {
      auto cur_max = p.get_start() + p.get_len() - 1;
      if (cur_max < prev_max_ino)
        continue; // system inodes

      if ((prev_max_ino >> 40) != (cur_max >> 40)) {
        unsigned rank = (prev_max_ino >> 40) - 1;
        max_ino_map[rank] = prev_max_ino;
      } else if ((p.get_start() >> 40) != (cur_max >> 40)) {
        unsigned rank = (p.get_start() >> 40) - 1;
        max_ino_map[rank] = ((uint64_t)(rank + 2) << 40) - 1;
      }
      prev_max_ino = cur_max;
    }
    unsigned rank = (prev_max_ino >> 40) - 1;
    max_ino_map[rank] = prev_max_ino;
  }

  used_inos.clear();

  dout(10) << "processing " << dup_primaries.size() << " dup_primaries, "
           << remote_links.size() << " remote_links" << dendl;

  for (auto& p : dup_primaries) {
    dout(10) << "handling dup " << p.first << dendl;

    // Keep the newest primary dentry; prefer a non-stray one on a tie.
    link_info_t newest;
    for (auto& q : p.second) {
      if (q.version > newest.version) {
        newest = q;
      } else if (q.version == newest.version &&
                 !MDS_INO_IS_STRAY(q.dirino) &&
                 MDS_INO_IS_STRAY(newest.dirino)) {
        newest = q;
      }
    }

    for (auto& q : p.second) {
      // in the middle of dir fragmentation?
      if (newest.dirino == q.dirino && newest.name == q.name) {
        snaps.insert(make_move_iterator(begin(q.snaps)),
                     make_move_iterator(end(q.snaps)));
        continue;
      }

      std::string key;
      dentry_key_t dn_key(CEPH_NOSNAP, q.name.c_str());
      dn_key.encode(key);
      to_remove[q.dirfrag()].insert(key);
      derr << "Remove duplicated ino 0x" << p.first << " from "
           << q.dirfrag() << "/" << q.name << dendl;
    }

    // Re-run the nlink check for the surviving dentry.
    int nlink = 0;
    auto q = remote_links.find(p.first);
    if (q != remote_links.end())
      nlink = q->second;
    if (!MDS_INO_IS_STRAY(newest.dirino))
      nlink++;

    if (nlink != newest.nlink) {
      derr << "Bad nlink on " << p.first << " expected " << nlink
           << " has " << newest.nlink << dendl;
      bad_nlink_inos[p.first] = newest;
      bad_nlink_inos[p.first].nlink = nlink;
    }
  }
  dup_primaries.clear();
  remote_links.clear();

  {
    // Pool snap sequences may be ahead of anything seen in the dentries.
    objecter->with_osdmap([&](const OSDMap& o) {
      for (auto p : data_pools) {
        const pg_pool_t *pi = o.get_pg_pool(p);
        if (!pi)
          continue;
        if (pi->snap_seq > last_snap)
          last_snap = pi->snap_seq;
      }
    });

    if (!snaps.empty()) {
      if (snaps.rbegin()->first > last_snap)
        last_snap = snaps.rbegin()->first;
    }
  }

  dout(10) << "removing dup dentries from " << to_remove.size() << " objects"
           << dendl;

  for (auto& p : to_remove) {
    object_t frag_oid = InodeStore::get_object_name(p.first.ino, p.first.frag, "");

    dout(10) << "removing dup dentries from " << p.first << dendl;

    int r = metadata_io.omap_rm_keys(frag_oid.name, p.second);
    if (r != 0) {
      derr << "Error removing duplicated dentries from " << p.first << dendl;
      return r;
    }
  }
  to_remove.clear();

  dout(10) << "processing " << bad_nlink_inos.size() << " bad_nlink_inos"
           << dendl;

  for (auto &p : bad_nlink_inos) {
    dout(10) << "handling bad_nlink_ino " << p.first << dendl;

    InodeStore inode;
    snapid_t first;
    int r = read_dentry(p.second.dirino, p.second.frag, p.second.name, &inode, &first);
    if (r < 0) {
      derr << "Unexpected error reading dentry "
           << p.second.dirfrag() << "/" << p.second.name
           << ": " << cpp_strerror(r) << dendl;
      return r;
    }

    // Someone may have rewritten the dentry since we scanned; only fix
    // it if it is still the version we diagnosed.
    if (inode.inode->ino != p.first || inode.inode->version != p.second.version)
      continue;

    inode.get_inode()->nlink = p.second.nlink;
    r = metadata_driver->inject_linkage(p.second.dirino, p.second.name, p.second.frag, inode, first);
    if (r < 0)
      return r;
  }

  dout(10) << "processing " << injected_inos.size() << " injected_inos"
           << dendl;

  for (auto &p : injected_inos) {
    dout(10) << "handling injected_ino " << p.first << dendl;

    InodeStore inode;
    snapid_t first;
    dout(20) << " fixing linkage (dnfirst) of " << p.second.dirino << ":" << p.second.name << dendl;
    int r = read_dentry(p.second.dirino, p.second.frag, p.second.name, &inode, &first);
    if (r < 0) {
      derr << "Unexpected error reading dentry "
           << p.second.dirfrag() << "/" << p.second.name
           << ": " << cpp_strerror(r) << dendl;
      return r;
    }

    if (first != CEPH_NOSNAP) {
      // Already has a real dnfirst: nothing to fix (unexpected).
      dout(20) << " ????" << dendl;
      continue;
    }

    first = last_snap + 1;
    dout(20) << " first is now " << first << dendl;
    r = metadata_driver->inject_linkage(p.second.dirino, p.second.name, p.second.frag, inode, first);
    if (r < 0)
      return r;
  }

  dout(10) << "updating inotable" << dendl;

  for (auto& p : max_ino_map) {
    InoTable inotable(nullptr);
    inotable.set_rank(p.first);
    bool dirty = false;
    int r = metadata_driver->load_table(&inotable);
    if (r < 0) {
      inotable.reset_state();
      dirty = true;
    }
    if (inotable.force_consume_to(p.second))
      dirty = true;
    if (dirty) {
      r = metadata_driver->save_table(&inotable);
      if (r < 0)
        return r;
    }
  }

  dout(10) << "updating snaptable" << dendl;

  {
    SnapServer snaptable;
    snaptable.set_rank(0);
    bool dirty = false;
    int r = metadata_driver->load_table(&snaptable);
    if (r < 0) {
      snaptable.reset_state();
      dirty = true;
    }
    if (snaptable.force_update(last_snap, snaprealm_v2_since, snaps))
      dirty = true;
    if (dirty) {
      r = metadata_driver->save_table(&snaptable);
      if (r < 0)
        return r;
    }
  }
  return 0;
}
int DataScan::scan_frags()
{
bool roots_present;
int r = driver->check_roots(&roots_present);
if (r != 0) {
derr << "Unexpected error checking roots: '"
<< cpp_strerror(r) << "'" << dendl;
return r;
}
if (!roots_present) {
std::cerr << "Some or all system inodes are absent. Run 'init' from "
"one node before running 'scan_inodes'" << std::endl;
return -EIO;
}
return forall_objects(metadata_io, true, [this](
std::string const &oid,
uint64_t obj_name_ino,
uint64_t obj_name_offset) -> int
{
int r = 0;
r = parse_oid(oid, &obj_name_ino, &obj_name_offset);
if (r != 0) {
dout(4) << "Bad object name '" << oid << "', skipping" << dendl;
return r;
}
if (obj_name_ino < (1ULL << 40)) {
// FIXME: we're skipping stray dirs here: if they're
// orphaned then we should be resetting them some other
// way
dout(10) << "Skipping system ino " << obj_name_ino << dendl;
return 0;
}
AccumulateResult accum_res;
inode_backtrace_t backtrace;
// Default to inherit layout (i.e. no explicit layout on dir) which is
// expressed as a zeroed layout struct (see inode_t::has_layout)
file_layout_t loaded_layout;
int parent_r = 0;
bufferlist parent_bl;
int layout_r = 0;
bufferlist layout_bl;
bufferlist op_bl;
librados::ObjectReadOperation op;
op.getxattr("parent", &parent_bl, &parent_r);
op.getxattr("layout", &layout_bl, &layout_r);
r = metadata_io.operate(oid, &op, &op_bl);
if (r != 0 && r != -ENODATA) {
derr << "Unexpected error reading backtrace: " << cpp_strerror(parent_r) << dendl;
return r;
}
if (parent_r != -ENODATA) {
try {
auto q = parent_bl.cbegin();
backtrace.decode(q);
} catch (buffer::error &e) {
dout(4) << "Corrupt backtrace on '" << oid << "': " << e.what() << dendl;
if (!force_corrupt) {
return -EINVAL;
} else {
// Treat backtrace as absent: we'll inject into lost+found
backtrace = inode_backtrace_t();
}
}
}
if (layout_r != -ENODATA) {
try {
auto q = layout_bl.cbegin();
decode(loaded_layout, q);
} catch (buffer::error &e) {
dout(4) << "Corrupt layout on '" << oid << "': " << e.what() << dendl;
if (!force_corrupt) {
return -EINVAL;
}
}
}
bool have_backtrace = !(backtrace.ancestors.empty());
// Santity checking backtrace ino against object name
if (have_backtrace && backtrace.ino != obj_name_ino) {
dout(4) << "Backtrace ino 0x" << std::hex << backtrace.ino
<< " doesn't match object name ino 0x" << obj_name_ino
<< std::dec << dendl;
have_backtrace = false;
}
uint64_t fnode_version = 0;
fnode_t fnode;
r = read_fnode(obj_name_ino, frag_t(), &fnode, &fnode_version);
if (r == -EINVAL) {
derr << "Corrupt fnode on " << oid << dendl;
if (force_corrupt) {
fnode.fragstat.mtime = 0;
fnode.fragstat.nfiles = 1;
fnode.fragstat.nsubdirs = 0;
fnode.accounted_fragstat = fnode.fragstat;
} else {
return r;
}
}
InodeStore dentry;
build_dir_dentry(obj_name_ino, fnode.accounted_fragstat,
loaded_layout, &dentry);
// Inject inode to the metadata pool
if (have_backtrace) {
inode_backpointer_t root_bp = *(backtrace.ancestors.rbegin());
if (MDS_INO_IS_MDSDIR(root_bp.dirino)) {
/* Special case for strays: even if we have a good backtrace,
* don't put it in the stray dir, because while that would technically
* give it linkage it would still be invisible to the user */
r = driver->inject_lost_and_found(obj_name_ino, dentry);
if (r < 0) {
dout(4) << "Error injecting 0x" << std::hex << backtrace.ino
<< std::dec << " into lost+found: " << cpp_strerror(r) << dendl;
if (r == -EINVAL) {
dout(4) << "Use --force-corrupt to overwrite structures that "
"appear to be corrupt" << dendl;
}
}
} else {
/* Happy case: we will inject a named dentry for this inode */
r = driver->inject_with_backtrace(backtrace, dentry);
if (r < 0) {
dout(4) << "Error injecting 0x" << std::hex << backtrace.ino
<< std::dec << " with backtrace: " << cpp_strerror(r) << dendl;
if (r == -EINVAL) {
dout(4) << "Use --force-corrupt to overwrite structures that "
"appear to be corrupt" << dendl;
}
}
}
} else {
/* Backtrace-less case: we will inject a lost+found dentry */
r = driver->inject_lost_and_found(
obj_name_ino, dentry);
if (r < 0) {
dout(4) << "Error injecting 0x" << std::hex << obj_name_ino
<< std::dec << " into lost+found: " << cpp_strerror(r) << dendl;
if (r == -EINVAL) {
dout(4) << "Use --force-corrupt to overwrite structures that "
"appear to be corrupt" << dendl;
}
}
}
return r;
});
}
/**
 * Fetch and decode the fnode stored in the omap header of a dirfrag object.
 *
 * @param ino the directory inode
 * @param frag which fragment of the directory to read
 * @param fnode out: the decoded fnode
 * @param last_version out: the RADOS object version observed by the read
 * @return 0 on success, -EINVAL on undecodable header, else the read error.
 */
int MetadataTool::read_fnode(
    inodeno_t ino, frag_t frag, fnode_t *fnode,
    uint64_t *last_version)
{
  ceph_assert(fnode != NULL);

  const object_t frag_oid = InodeStore::get_object_name(ino, frag, "");
  bufferlist header_bl;
  const int r = metadata_io.omap_get_header(frag_oid.name, &header_bl);
  // Record the object version even when the read fails: callers use it to
  // assert_version when overwriting an unreadable dirfrag.
  *last_version = metadata_io.get_last_version();
  if (r < 0) {
    return r;
  }

  try {
    auto p = header_bl.cbegin();
    fnode->decode(p);
  } catch (const buffer::error &err) {
    // Header present but undecodable: report as corrupt
    return -EINVAL;
  }
  return 0;
}
/**
 * Read and decode a single head (CEPH_NOSNAP) dentry from a dirfrag's omap.
 *
 * @param parent_ino the directory inode
 * @param frag which fragment of the directory to read
 * @param dname the dentry name to look up
 * @param inode out: the decoded inode for the dentry
 * @param dnfirst optional out: the dentry's 'first' snapid
 * @return 0 on success, -ENOENT if the key is absent, -EINVAL on a
 *         non-inode dentry type or a decoding error, else the omap error.
 */
int MetadataTool::read_dentry(inodeno_t parent_ino, frag_t frag,
const std::string &dname, InodeStore *inode, snapid_t *dnfirst)
{
ceph_assert(inode != NULL);
// Compose the omap key for the head dentry of `dname`
std::string key;
dentry_key_t dn_key(CEPH_NOSNAP, dname.c_str());
dn_key.encode(key);
std::set<std::string> keys;
keys.insert(key);
std::map<std::string, bufferlist> vals;
object_t frag_oid = InodeStore::get_object_name(parent_ino, frag, "");
int r = metadata_io.omap_get_vals_by_keys(frag_oid.name, keys, &vals);
dout(20) << "oid=" << frag_oid.name
<< " dname=" << dname
<< " frag=" << frag
<< ", r=" << r << dendl;
if (r < 0) {
return r;
}
if (vals.find(key) == vals.end()) {
dout(20) << key << " not found in result" << dendl;
return -ENOENT;
}
// Dentry value layout: [snapid first][type char][payload].
// Type 'I' is a bare inode; 'i' is a versioned encoding that may carry an
// alternate_name before the inode (see the MDS dentry encoding).
try {
auto q = vals[key].cbegin();
snapid_t first;
decode(first, q);
char dentry_type;
decode(dentry_type, q);
if (dentry_type == 'I' || dentry_type == 'i') {
if (dentry_type == 'i') {
mempool::mds_co::string alternate_name;
// struct_v is declared by DECODE_START; alternate_name was added in v2
DECODE_START(2, q);
if (struct_v >= 2)
decode(alternate_name, q);
inode->decode(q);
DECODE_FINISH(q);
} else {
inode->decode_bare(q);
}
} else {
// e.g. a remote link ('L') — we cannot produce an InodeStore from it
dout(20) << "dentry type '" << dentry_type << "': cannot"
"read an inode out of that" << dendl;
return -EINVAL;
}
// Only report 'first' once the whole value decoded cleanly
if (dnfirst)
*dnfirst = first;
} catch (const buffer::error &err) {
dout(20) << "encoding error in dentry 0x" << std::hex << parent_ino
<< std::dec << "/" << dname << dendl;
return -EINVAL;
}
return 0;
}
/**
 * Read an MDSTable (e.g. the SnapServer table) from its RADOS object and
 * decode it into *table, recording the on-disk version for later replay.
 *
 * @return 0 on success, -EIO on undecodable contents, else the read error.
 */
int MetadataDriver::load_table(MDSTable *table)
{
  const object_t oid = table->get_object_name();

  bufferlist bl;
  const int read_r = metadata_io.read(oid.name, bl, 0, 0);  // len 0: whole object
  if (read_r < 0) {
    derr << "unable to read mds table '" << oid.name << "': "
      << cpp_strerror(read_r) << dendl;
    return read_r;
  }

  try {
    // On-disk layout: version followed by the table state
    version_t on_disk_version;
    auto p = bl.cbegin();
    decode(on_disk_version, p);
    table->decode_state(p);
    table->force_replay_version(on_disk_version);
  } catch (const buffer::error &err) {
    derr << "unable to decode mds table '" << oid.name << "': "
      << err.what() << dendl;
    return -EIO;
  }
  return 0;
}
/**
 * Serialize *table (version followed by state) and overwrite its RADOS
 * object with the result.
 *
 * @return 0 on success, else the write error.
 */
int MetadataDriver::save_table(MDSTable *table)
{
  const object_t oid = table->get_object_name();

  bufferlist bl;
  encode(table->get_version(), bl);
  table->encode_state(bl);

  const int write_r = metadata_io.write_full(oid.name, bl);
  if (write_r != 0) {
    derr << "error updating mds table " << oid.name
      << ": " << cpp_strerror(write_r) << dendl;
    return write_r;
  }
  return 0;
}
/**
 * Link an inode into /lost+found (creating lost+found itself if it does
 * not already exist), for inodes whose real linkage cannot be recovered.
 *
 * @param ino the inode number to link; its hex form becomes the dname
 * @param dentry the InodeStore to write as the dentry's inode
 * @return 0 on success, else a negative errno.
 */
int MetadataDriver::inject_lost_and_found(
    inodeno_t ino, const InodeStore &dentry)
{
  // Create lost+found if doesn't exist
  bool created = false;
  int r = find_or_create_dirfrag(CEPH_INO_ROOT, frag_t(), &created);
  if (r < 0) {
    return r;
  }

  InodeStore lf_ino;
  r = read_dentry(CEPH_INO_ROOT, frag_t(), "lost+found", &lf_ino);
  if (r == -ENOENT || r == -EINVAL) {
    // Only overwrite a *corrupt* lost+found dentry when --force-corrupt
    if (r == -EINVAL && !force_corrupt) {
      return r;
    }

    // To have a directory not specify a layout, give it zeros (see
    // inode_t::has_layout)
    file_layout_t inherit_layout;

    // Construct LF inode; mark it non-empty so it isn't ignored.
    // (This was previously `fragstat.nfiles = 1,` — a stray comma operator
    // fusing it with the next call; a plain statement was intended.)
    frag_info_t fragstat;
    fragstat.nfiles = 1;
    build_dir_dentry(CEPH_INO_LOST_AND_FOUND, fragstat, inherit_layout, &lf_ino);

    // Inject link to LF inode in the root dir
    r = inject_linkage(CEPH_INO_ROOT, "lost+found", frag_t(), lf_ino);
    if (r < 0) {
      return r;
    }
  } else {
    if (!(lf_ino.inode->mode & S_IFDIR)) {
      derr << "lost+found exists but is not a directory!" << dendl;
      // In this case we error out, and the user should do something about
      // this problem.
      return -EINVAL;
    }
  }

  r = find_or_create_dirfrag(CEPH_INO_LOST_AND_FOUND, frag_t(), &created);
  if (r < 0) {
    return r;
  }

  const std::string dname = lost_found_dname(ino);

  // Write dentry into lost+found dirfrag
  return inject_linkage(lf_ino.inode->ino, dname, frag_t(), dentry);
}
/**
 * Work out which dirfrag of directory `dirino` should hold the dentry
 * `target_dname`, by locating the directory's own primary dentry (via its
 * backtrace) and consulting the fragtree stored there.  Recurses up the
 * tree when the parent is itself fragmented.
 *
 * @param dirino the directory whose fragtree we need
 * @param target_dname the dentry name we want to place
 * @param result_ft out: the frag that should contain target_dname
 * @return 0 on success, -ENOENT if the fragtree cannot be determined
 *         (no/corrupt backtrace or dentry), else a negative errno.
 */
int MetadataDriver::get_frag_of(
  inodeno_t dirino,
  const std::string &target_dname,
  frag_t *result_ft)
{
  object_t root_frag_oid = InodeStore::get_object_name(dirino, frag_t(), "");

  dout(20) << "dirino=" << dirino << " target_dname=" << target_dname << dendl;

  // Find and load fragtree if existing dirfrag
  // ==========================================
  bool have_backtrace = false;
  bufferlist parent_bl;
  int r = metadata_io.getxattr(root_frag_oid.name, "parent", parent_bl);
  if (r == -ENODATA) {
    dout(10) << "No backtrace on '" << root_frag_oid << "'" << dendl;
  } else if (r < 0) {
    dout(4) << "Unexpected error on '" << root_frag_oid << "': "
      << cpp_strerror(r) << dendl;
    return r;
  }

  // Deserialize backtrace
  inode_backtrace_t backtrace;
  if (parent_bl.length()) {
    try {
      auto q = parent_bl.cbegin();
      backtrace.decode(q);
      have_backtrace = true;
    } catch (buffer::error &e) {
      dout(4) << "Corrupt backtrace on '" << root_frag_oid << "': "
              << e.what() << dendl;
    }
  }

  if (!(have_backtrace && backtrace.ancestors.size())) {
    // Can't work out fragtree without a backtrace
    dout(4) << "No backtrace on '" << root_frag_oid
            << "': cannot determine fragtree" << dendl;
    return -ENOENT;
  }

  // The parentage of dirino
  const inode_backpointer_t &bp = *(backtrace.ancestors.begin());

  // The inode of dirino's parent
  const inodeno_t parent_ino = bp.dirino;

  // The dname of dirino in its parent.
  const std::string &parent_dname = bp.dname;

  dout(20) << "got backtrace parent " << parent_ino << "/"
           << parent_dname << dendl;

  // The primary dentry for dirino
  InodeStore existing_dentry;

  // See if we can find ourselves in dirfrag zero of the parent: this
  // is a fast path that avoids needing to go further up the tree
  // if the parent isn't fragmented (worst case we would have to
  // go all the way to the root)
  r = read_dentry(parent_ino, frag_t(), parent_dname, &existing_dentry);
  if (r >= 0) {
    // Great, fast path: return the fragtree from here
    if (existing_dentry.inode->ino != dirino) {
      dout(4) << "Unexpected inode in dentry! 0x" << std::hex
              << existing_dentry.inode->ino
              << " vs expected 0x" << dirino << std::dec << dendl;
      return -ENOENT;
    }
    dout(20) << "fast path, fragtree is "
             << existing_dentry.dirfragtree << dendl;
    *result_ft = existing_dentry.pick_dirfrag(target_dname);
    dout(20) << "frag is " << *result_ft << dendl;
    return 0;
  } else if (r == -ENOENT) {
    // Dentry not present in 0th frag: the parent must itself be
    // fragmented, so resolve the parent's fragtree first, then retry.
    // (This branch previously tested `r != -ENOENT`, which sent -ENOENT
    // down the generic error path, skipped the recursion, and made the
    // -EINVAL branch below unreachable.)
    frag_t parent_frag;
    r = get_frag_of(parent_ino, parent_dname, &parent_frag);
    if (r == 0) {
      // We have the parent fragtree, so try again to load our dentry
      r = read_dentry(parent_ino, parent_frag, parent_dname, &existing_dentry);
      if (r >= 0) {
        // Got it!
        *result_ft = existing_dentry.pick_dirfrag(target_dname);
        dout(20) << "resolved via parent, frag is " << *result_ft << dendl;
        return 0;
      } else {
        if (r == -EINVAL || r == -ENOENT) {
          return -ENOENT;  // dentry missing or corrupt, so frag is missing
        } else {
          return r;
        }
      }
    } else {
      // Couldn't resolve parent fragtree, so can't find ours.
      return r;
    }
  } else if (r == -EINVAL) {
    // Unreadable dentry, can't know the fragtree.
    return -ENOENT;
  } else {
    // Unexpected error, raise it
    return r;
  }
}
/**
 * Inject a dentry for the inode described by `backtrace`/`dentry` into its
 * parent dirfrag, creating ancestor dirfrags and synthetic ancestor inodes
 * as needed while walking the backtrace upwards.  Falls back to lost+found
 * when an existing dentry of the same name points at a different inode.
 *
 * @return 0 on success (including the best-effort early-break case below),
 *         else a negative errno.
 */
int MetadataDriver::inject_with_backtrace(
const inode_backtrace_t &backtrace, const InodeStore &dentry)
{
// On dirfrags
// ===========
// In order to insert something into a directory, we first (ideally)
// need to know the fragtree for the directory. Sometimes we can't
// get that, in which case we just go ahead and insert it into
// fragment zero for a good chance of that being the right thing
// anyway (most moderate-sized dirs aren't fragmented!)
// On ancestry
// ===========
// My immediate ancestry should be correct, so if we can find that
// directory's dirfrag then go inject it there. This works well
// in the case that this inode's dentry was somehow lost and we
// are recreating it, because the rest of the hierarchy
// will probably still exist.
//
// It's more of a "better than nothing" approach when rebuilding
// a whole tree, as backtraces will in general not be up to date
// beyond the first parent, if anything in the trace was ever
// moved after the file was created.
// On inode numbers
// ================
// The backtrace tells us inodes for each of the parents. If we are
// creating those parent dirfrags, then there is a risk that somehow
// the inode indicated here was also used for data (not a dirfrag) at
// some stage. That would be a zany situation, and we don't check
// for it here, because to do so would require extra IOs for everything
// we inject, and anyway wouldn't guarantee that the inode number
// wasn't in use in some dentry elsewhere in the metadata tree that
// just happened not to have any data objects.
// On multiple workers touching the same traces
// ============================================
// When creating linkage for a directory, *only* create it if we are
// also creating the object. That way, we might not manage to get the
// *right* linkage for a directory, but at least we won't multiply link
// it. We assume that if a root dirfrag exists for a directory, then
// it is linked somewhere (i.e. that the metadata pool is not already
// inconsistent).
//
// Making sure *that* is true is someone else's job! Probably someone
// who is not going to run in parallel, so that they can self-consistently
// look at versions and move things around as they go.
// Note this isn't 100% safe: if we die immediately after creating dirfrag
// object, next run will fail to create linkage for the dirfrag object
// and leave it orphaned.
inodeno_t ino = backtrace.ino;
dout(10) << " inode: 0x" << std::hex << ino << std::dec << dendl;
// Walk the backtrace from the inode of interest towards the root,
// linking `ino` into each parent as we go.
for (std::vector<inode_backpointer_t>::const_iterator i = backtrace.ancestors.begin();
i != backtrace.ancestors.end(); ++i) {
const inode_backpointer_t &backptr = *i;
dout(10) << " backptr: 0x" << std::hex << backptr.dirino << std::dec
<< "/" << backptr.dname << dendl;
// Examine root dirfrag for parent
const inodeno_t parent_ino = backptr.dirino;
const std::string dname = backptr.dname;
// `fragment` stays default (frag zero) when get_frag_of can't resolve it
frag_t fragment;
int r = get_frag_of(parent_ino, dname, &fragment);
if (r == -ENOENT) {
// Don't know fragment, fall back to assuming root
dout(20) << "don't know fragment for 0x" << std::hex <<
parent_ino << std::dec << "/" << dname << ", will insert to root"
<< dendl;
}
// Find or create dirfrag
// ======================
bool created_dirfrag;
r = find_or_create_dirfrag(parent_ino, fragment, &created_dirfrag);
if (r < 0) {
return r;
}
// Check if dentry already exists
// ==============================
InodeStore existing_dentry;
r = read_dentry(parent_ino, fragment, dname, &existing_dentry);
bool write_dentry = false;
if (r == -ENOENT || r == -EINVAL) {
// A corrupt dentry (-EINVAL) is only overwritten with --force-corrupt
if (r == -EINVAL && !force_corrupt) {
return r;
}
// Missing or corrupt dentry
write_dentry = true;
} else if (r < 0) {
derr << "Unexpected error reading dentry 0x" << std::hex
<< parent_ino << std::dec << "/"
<< dname << ": " << cpp_strerror(r) << dendl;
// NOTE(review): this break exits the loop and the function then returns
// 0 despite the read failure — best-effort behavior; confirm intended.
break;
} else {
// Dentry already present, does it link to me?
if (existing_dentry.inode->ino == ino) {
dout(20) << "Dentry 0x" << std::hex
<< parent_ino << std::dec << "/"
<< dname << " already exists and points to me" << dendl;
} else {
derr << "Dentry 0x" << std::hex
<< parent_ino << std::dec << "/"
<< dname << " already exists but points to 0x"
<< std::hex << existing_dentry.inode->ino << std::dec << dendl;
// Fall back to lost+found!
return inject_lost_and_found(backtrace.ino, dentry);
}
}
// Inject linkage
// ==============
if (write_dentry) {
if (i == backtrace.ancestors.begin()) {
// This is the linkage for the file of interest
dout(10) << "Linking inode 0x" << std::hex << ino
<< " at 0x" << parent_ino << "/" << dname << std::dec
<< " with size=" << dentry.inode->size << " bytes" << dendl;
/* NOTE: dnfirst fixed in scan_links */
r = inject_linkage(parent_ino, dname, fragment, dentry);
} else {
// This is the linkage for an ancestor directory: synthesize a
// plausible directory inode since we only know its ino and name.
dout(10) << "Linking ancestor directory of inode 0x" << std::hex << ino
<< " at 0x" << std::hex << parent_ino
<< ":" << dname << dendl;
InodeStore ancestor_dentry;
auto inode = ancestor_dentry.get_inode();
inode->mode = 0755 | S_IFDIR;
// Set nfiles to something non-zero, to fool any other code
// that tries to ignore 'empty' directories. This won't be
// accurate, but it should avoid functional issues.
inode->dirstat.nfiles = 1;
inode->dir_layout.dl_dir_hash =
g_conf()->mds_default_dir_hash;
inode->nlink = 1;
inode->ino = ino;
inode->uid = g_conf()->mds_root_ino_uid;
inode->gid = g_conf()->mds_root_ino_gid;
inode->version = 1;
inode->backtrace_version = 1;
/* NOTE: dnfirst fixed in scan_links */
r = inject_linkage(parent_ino, dname, fragment, ancestor_dentry);
}
if (r < 0) {
return r;
}
}
if (!created_dirfrag) {
// If the parent dirfrag already existed, then stop traversing the
// backtrace: assume that the other ancestors already exist too. This
// is an assumption rather than a truth, but it's a convenient way
// to avoid the risk of creating multiply-linked directories while
// injecting data. If there are in fact missing ancestors, this
// should be fixed up using a separate tool scanning the metadata
// pool.
break;
} else {
// Proceed up the backtrace, creating parents
ino = parent_ino;
}
}
return 0;
}
/**
 * Ensure a dirfrag object exists with a valid fnode header, creating a
 * blank one if it is missing or (with --force-corrupt) unreadable.
 * Concurrent workers are handled: either an exclusive create or an
 * assert_version guards the write, and losing the race is not an error.
 *
 * @param ino the directory inode
 * @param fragment which fragment's object to ensure
 * @param created out: true only if *we* created the object
 * @return 0 on success (including losing the creation race),
 *         else a negative errno.
 */
int MetadataDriver::find_or_create_dirfrag(
inodeno_t ino,
frag_t fragment,
bool *created)
{
ceph_assert(created != NULL);
fnode_t existing_fnode;
*created = false;
uint64_t read_version = 0;
int r = read_fnode(ino, fragment, &existing_fnode, &read_version);
dout(10) << "read_version = " << read_version << dendl;
if (r == -ENOENT || r == -EINVAL) {
// Only overwrite a *corrupt* fnode when --force-corrupt was given
if (r == -EINVAL && !force_corrupt) {
return r;
}
// Missing or corrupt fnode, create afresh
bufferlist fnode_bl;
fnode_t blank_fnode;
blank_fnode.version = 1;
// mark it as non-empty
blank_fnode.fragstat.nfiles = 1;
blank_fnode.accounted_fragstat = blank_fnode.fragstat;
// flag the synthesized stats as damaged so a later scrub can repair them
blank_fnode.damage_flags |= (DAMAGE_STATS | DAMAGE_RSTATS);
blank_fnode.encode(fnode_bl);
librados::ObjectWriteOperation op;
// read_version != 0 implies the object existed but was unreadable
if (read_version) {
ceph_assert(r == -EINVAL);
// Case A: We must assert that the version isn't changed since we saw the object
// was unreadable, to avoid the possibility of two data-scan processes
// both creating the frag.
op.assert_version(read_version);
} else {
ceph_assert(r == -ENOENT);
// Case B: The object didn't exist in read_fnode, so while creating it we must
// use an exclusive create to correctly populate *creating with
// whether we created it ourselves or someone beat us to it.
op.create(true);
}
object_t frag_oid = InodeStore::get_object_name(ino, fragment, "");
op.omap_set_header(fnode_bl);
r = metadata_io.operate(frag_oid.name, &op);
// -EOVERFLOW: assert_version mismatch (case A); -EEXIST: exclusive
// create lost the race (case B). Both mean someone else created it.
if (r == -EOVERFLOW || r == -EEXIST) {
// Someone else wrote it (see case A above)
dout(10) << "Dirfrag creation race: 0x" << std::hex
<< ino << " " << fragment << std::dec << dendl;
*created = false;
return 0;
} else if (r < 0) {
// We were unable to create or write it, error out
derr << "Failed to create dirfrag 0x" << std::hex
<< ino << std::dec << ": " << cpp_strerror(r) << dendl;
return r;
} else {
// Success: the dirfrag object now exists with a value header
dout(10) << "Created dirfrag: 0x" << std::hex
<< ino << std::dec << dendl;
*created = true;
}
} else if (r < 0) {
derr << "Unexpected error reading dirfrag 0x" << std::hex
<< ino << std::dec << " : " << cpp_strerror(r) << dendl;
return r;
} else {
dout(20) << "Dirfrag already exists: 0x" << std::hex
<< ino << " " << fragment << std::dec << dendl;
}
return 0;
}
/**
 * Write a single primary dentry (dname -> inode) into the omap of the
 * given dirfrag object.
 *
 * @param dir_ino the parent directory inode
 * @param dname the dentry name
 * @param fragment which fragment of the directory to write into
 * @param inode the inode to store under the dentry
 * @param dnfirst the dentry's 'first' snapid
 * @return 0 on success, else the omap_set error.
 */
int MetadataDriver::inject_linkage(
  inodeno_t dir_ino, const std::string &dname,
  const frag_t fragment, const InodeStore &inode, const snapid_t dnfirst)
{
  const object_t frag_oid = InodeStore::get_object_name(dir_ino, fragment, "");

  // omap key for the head dentry of `dname`
  std::string key;
  dentry_key_t dn_key(CEPH_NOSNAP, dname.c_str());
  dn_key.encode(key);

  // Value layout: [dnfirst][type 'I'][bare inode]
  bufferlist dentry_bl;
  encode(dnfirst, dentry_bl);
  encode('I', dentry_bl);
  inode.encode_bare(dentry_bl, CEPH_FEATURES_SUPPORTED_DEFAULT);

  // Write out
  std::map<std::string, bufferlist> vals = {{key, dentry_bl}};
  const int r = metadata_io.omap_set(frag_oid.name, vals);
  if (r != 0) {
    derr << "Error writing dentry 0x" << std::hex
      << dir_ino << std::dec << "/"
      << dname << ": " << cpp_strerror(r) << dendl;
    return r;
  }
  dout(20) << "Injected dentry 0x" << std::hex
    << dir_ino << "/" << dname << " pointing to 0x"
    << inode.inode->ino << std::dec << dendl;
  return 0;
}
/**
 * Bind metadata_io to the filesystem's metadata pool.  When no pool name
 * was forced on the command line, resolve it from the FSMap via RADOS.
 *
 * @return 0 on success, else a negative errno from lookup or ioctx_create.
 */
int MetadataDriver::init(
  librados::Rados &rados, std::string &metadata_pool_name, const FSMap *fsmap,
  fs_cluster_id_t fscid)
{
  if (!metadata_pool_name.empty()) {
    // User supplied a pool name explicitly: use it as-is
    dout(4) << "forcing metadata pool '" << metadata_pool_name << "'" << dendl;
    return rados.ioctx_create(metadata_pool_name.c_str(), metadata_io);
  }

  // Resolve the pool name from the filesystem's MDS map
  auto fs = fsmap->get_filesystem(fscid);
  ceph_assert(fs != nullptr);
  int64_t const metadata_pool_id = fs->mds_map.get_metadata_pool();

  dout(4) << "resolving metadata pool " << metadata_pool_id << dendl;
  const int r = rados.pool_reverse_lookup(metadata_pool_id, &metadata_pool_name);
  if (r < 0) {
    derr << "Pool " << metadata_pool_id
         << " identified in MDS map not found in RADOS!" << dendl;
    return r;
  }
  dout(4) << "found metadata pool '" << metadata_pool_name << "'" << dendl;

  return rados.ioctx_create(metadata_pool_name.c_str(), metadata_io);
}
/**
 * No-op initialization: the local-file driver writes to a plain directory
 * and needs no metadata pool context, so all parameters are ignored.
 *
 * @return always 0.
 */
int LocalFileDriver::init(
librados::Rados &rados, std::string &metadata_pool_name, const FSMap *fsmap,
fs_cluster_id_t fscid)
{
return 0;
}
/**
 * Copy one file's contents out of the data pool into a local file at
 * `file_path`, reading object-by-object.
 *
 * @param file_path destination path on the local filesystem
 * @param size total file size in bytes
 * @param chunk_size bytes per data object (the layout's object_size)
 * @param ino the file's inode number (used to compose object names)
 * @return 0 on success, else the read error.
 */
int LocalFileDriver::inject_data(
const std::string &file_path,
uint64_t size,
uint32_t chunk_size,
inodeno_t ino)
{
// Scrape the file contents out of the data pool and into the
// local filesystem
std::fstream f;
f.open(file_path.c_str(), std::fstream::out | std::fstream::binary);
for (uint64_t offset = 0; offset < size; offset += chunk_size) {
bufferlist bl;
// Data objects are named "<ino hex>.<8-hex block index>"
char buf[32];
snprintf(buf, sizeof(buf),
"%llx.%08llx",
(unsigned long long)ino,
(unsigned long long)(offset / chunk_size));
std::string oid(buf);
int r = data_io.read(oid, bl, chunk_size, 0);
// -ENOENT is tolerated: a missing object is a hole in a sparse file.
// NOTE(review): r == 0 (zero-length read) also takes this branch and
// returns 0, silently truncating the rest of the file — confirm intended.
if (r <= 0 && r != -ENOENT) {
derr << "error reading data object '" << oid << "': "
<< cpp_strerror(r) << dendl;
f.close();
return r;
} else if (r >=0) {
// Seek past any hole and append this chunk's bytes
f.seekp(offset);
bl.write_stream(f);
}
}
f.close();
return 0;
}
/**
 * Materialize a file (and its ancestor directories) under `path` in the
 * local filesystem, walking the backtrace root-to-leaf.
 *
 * @param bt the file's backtrace recovered from the data pool
 * @param dentry the recovered inode (used for size and object_size)
 * @return 0 on success, else a negative errno from directory creation.
 */
int LocalFileDriver::inject_with_backtrace(
    const inode_backtrace_t &bt,
    const InodeStore &dentry)
{
  std::string path_builder = path;

  // Iterate through backtrace creating directory parents
  std::vector<inode_backpointer_t>::const_reverse_iterator i;
  for (i = bt.ancestors.rbegin();
      i != bt.ancestors.rend(); ++i) {

    const inode_backpointer_t &backptr = *i;
    path_builder += "/";
    path_builder += backptr.dname;

    // Last entry is the filename itself
    bool is_file = (i + 1 == bt.ancestors.rend());
    if (is_file) {
      // FIXME: inject_data won't cope with interesting (i.e. striped)
      // layouts (need a librados-compatible Filer to read these)
      inject_data(path_builder, dentry.inode->size,
                  dentry.inode->layout.object_size, bt.ino);
    } else {
      // mkdir(2) returns -1 and sets errno; it never returns a negative
      // errno, so the old `r != -EPERM` test could not tolerate anything.
      // Tolerate EEXIST so shared ancestors and re-runs succeed.
      int r = mkdir(path_builder.c_str(), 0755);
      if (r != 0 && errno != EEXIST) {
        r = -errno;
        derr << "error creating directory: '" << path_builder << "': "
          << cpp_strerror(r) << dendl;
        return r;
      }
    }
  }

  return 0;
}
/**
 * Write a backtrace-less file's data into <path>/lost+found/<ino hex>,
 * creating the lost+found directory on first use.
 *
 * @param ino the inode number (its hex form becomes the filename)
 * @param dentry the recovered inode (used for size and object_size)
 * @return 0 on success, else a negative errno.
 */
int LocalFileDriver::inject_lost_and_found(
    inodeno_t ino,
    const InodeStore &dentry)
{
  std::string lf_path = path + "/lost+found";
  // mkdir(2) returns -1 and sets errno; it never returns a negative errno,
  // so the old `r != -EPERM` test could not tolerate anything.  Tolerate
  // EEXIST so that injecting more than one inode succeeds.
  int r = mkdir(lf_path.c_str(), 0755);
  if (r != 0 && errno != EEXIST) {
    r = -errno;
    derr << "error creating directory: '" << lf_path << "': "
      << cpp_strerror(r) << dendl;
    return r;
  }

  std::string file_path = lf_path + "/" + lost_found_dname(ino);
  return inject_data(file_path, dentry.inode->size,
                     dentry.inode->layout.object_size, ino);
}
/**
 * Ensure the output directory exists (the local-file analogue of creating
 * the filesystem roots).  data_pool_id is unused by this driver.
 *
 * @return 0 on success, else a negative errno from mkdir.
 */
int LocalFileDriver::init_roots(int64_t data_pool_id)
{
  // Ensure that the path exists and is a directory
  bool exists;
  int r = check_roots(&exists);
  if (r != 0) {
    return r;
  }

  if (exists) {
    return 0;
  }

  // ::mkdir returns -1 and sets errno; translate to a negative errno so
  // callers receive a meaningful error code instead of -1.
  if (::mkdir(path.c_str(), 0755) != 0) {
    return -errno;
  }
  return 0;
}
/**
 * Report whether the output path exists and is an openable directory.
 *
 * @param result out: true iff the directory could be opened (and closed)
 * @return always 0 — absence is reported via *result, not an error code.
 */
int LocalFileDriver::check_roots(bool *result)
{
  // A usable "root" for this driver is simply an openable directory
  DIR *d = ::opendir(path.c_str());
  if (d == NULL) {
    *result = false;
    return 0;
  }

  // Weird, but maybe possible with e.g. stale FD on NFS mount?
  *result = (closedir(d) == 0);
  return 0;
}
/**
 * Populate *out as a synthetic inode for a recovered regular file (or a
 * symlink, when `symlink` is non-empty).
 *
 * @param ino inode number to assign
 * @param file_size recovered size (also used as max_size_ever)
 * @param file_mtime recovered mtime; applied to mtime/atime/ctime
 * @param layout file layout to record
 * @param out the InodeStore to fill
 * @param symlink symlink target; empty means a regular file
 */
void MetadataTool::build_file_dentry(
    inodeno_t ino, uint64_t file_size, time_t file_mtime,
    const file_layout_t &layout, InodeStore *out, std::string symlink)
{
  ceph_assert(out != NULL);

  auto pi = out->get_inode();

  // A non-empty symlink target makes this a symlink; otherwise a plain
  // read-only regular file.
  if (symlink.empty()) {
    pi->mode = 0500 | S_IFREG;
  } else {
    pi->mode = 0777 | S_IFLNK;
    out->symlink = symlink;
  }

  pi->ino = ino;
  pi->nlink = 1;
  pi->size = file_size;
  pi->max_size_ever = file_size;
  pi->layout = layout;

  // All timestamps stamped from the recovered mtime
  pi->mtime.tv.tv_sec = file_mtime;
  pi->atime.tv.tv_sec = file_mtime;
  pi->ctime.tv.tv_sec = file_mtime;

  pi->truncate_seq = 1;
  pi->truncate_size = -1ull;
  pi->inline_data.version = CEPH_INLINE_NONE;

  pi->version = 1;
  pi->backtrace_version = 1;
  pi->uid = g_conf()->mds_root_ino_uid;
  pi->gid = g_conf()->mds_root_ino_gid;
}
/**
 * Populate *out as a synthetic inode for a recovered directory.
 *
 * @param ino inode number to assign
 * @param fragstat recovered stats; its mtime stamps mtime/atime/ctime
 * @param layout layout to record (zeroed layout means "inherit")
 * @param out the InodeStore to fill
 */
void MetadataTool::build_dir_dentry(
    inodeno_t ino, const frag_info_t &fragstat,
    const file_layout_t &layout, InodeStore *out)
{
  ceph_assert(out != NULL);

  auto pi = out->get_inode();

  pi->ino = ino;
  pi->mode = 0755 | S_IFDIR;
  pi->nlink = 1;

  pi->dirstat = fragstat;
  // All timestamps stamped from the fragstat's mtime
  pi->mtime.tv.tv_sec = fragstat.mtime;
  pi->atime.tv.tv_sec = fragstat.mtime;
  pi->ctime.tv.tv_sec = fragstat.mtime;

  pi->layout = layout;
  pi->dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash;

  pi->truncate_seq = 1;
  pi->truncate_size = -1ull;
  pi->inline_data.version = CEPH_INLINE_NONE;

  pi->version = 1;
  pi->backtrace_version = 1;
  pi->uid = g_conf()->mds_root_ino_uid;
  pi->gid = g_conf()->mds_root_ino_gid;
}
| 73,267 | 29.464865 | 133 |
cc
|
null |
ceph-main/src/tools/cephfs/DataScan.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "MDSUtility.h"
#include "include/rados/librados.hpp"
class InodeStore;
class MDSTable;
/**
 * Abstract destination for recovered metadata: implementations write it
 * back into a CephFS metadata pool (MetadataDriver) or out to a local
 * filesystem tree (LocalFileDriver).
 */
class RecoveryDriver {
protected:
// If true, overwrite structures that generate decoding errors.
bool force_corrupt;
// If true, overwrite root objects during init_roots even if they
// exist
bool force_init;
public:
// Bind the driver to its destination (e.g. resolve and open pools).
virtual int init(
librados::Rados &rados,
std::string &metadata_pool_name,
const FSMap *fsmap,
fs_cluster_id_t fscid) = 0;
void set_force_corrupt(const bool val)
{
force_corrupt = val;
}
void set_force_init(const bool val)
{
force_init = val;
}
/**
* Inject an inode + dentry parents into the metadata pool,
* based on a backtrace recovered from the data pool
*/
virtual int inject_with_backtrace(
const inode_backtrace_t &bt,
const InodeStore &dentry) = 0;
/**
* Inject an inode + dentry into the lost+found directory,
* when all we know about a file is its inode.
*/
virtual int inject_lost_and_found(
inodeno_t ino,
const InodeStore &dentry) = 0;
/**
* Create any missing roots (i.e. mydir, strays, root inode)
*/
virtual int init_roots(
int64_t data_pool_id) = 0;
/**
* Pre-injection check that all the roots are present in
* the metadata pool. Used to avoid parallel workers interfering
* with one another, by cueing the user to go run 'init' on a
* single node before running a parallel scan.
*
* @param result: set to true if roots are present, else set to false
* @returns 0 on no unexpected errors, else error code. Missing objects
* are not considered an unexpected error: check *result for
* this case.
*/
virtual int check_roots(bool *result) = 0;
/**
* Helper to compose dnames for links to lost+found
* inodes.
*/
std::string lost_found_dname(inodeno_t ino)
{
char s[20];
snprintf(s, sizeof(s), "%llx", (unsigned long long)ino);
return std::string(s);
}
RecoveryDriver()
: force_corrupt(false),
force_init(false)
{}
virtual ~RecoveryDriver() {}
};
/**
 * RecoveryDriver that writes recovered files into a directory tree on the
 * local filesystem, instead of back into a metadata pool.
 */
class LocalFileDriver : public RecoveryDriver
{
protected:
// Root directory beneath which recovered files are written
const std::string path;
// IoCtx for the data pool that file contents are read from
librados::IoCtx &data_io;
// Copy one file's data objects out of the pool into `file_path`
int inject_data(
const std::string &file_path,
uint64_t size,
uint32_t chunk_size,
inodeno_t ino);
public:
LocalFileDriver(const std::string &path_, librados::IoCtx &data_io_)
: RecoveryDriver(), path(path_), data_io(data_io_)
{}
// Implement RecoveryDriver interface
int init(
librados::Rados &rados,
std::string &metadata_pool_name,
const FSMap *fsmap,
fs_cluster_id_t fscid) override;
int inject_with_backtrace(
const inode_backtrace_t &bt,
const InodeStore &dentry) override;
int inject_lost_and_found(
inodeno_t ino,
const InodeStore &dentry) override;
int init_roots(int64_t data_pool_id) override;
int check_roots(bool *result) override;
};
/**
 * A class that knows how to work with objects in a CephFS
 * metadata pool.
 */
class MetadataTool
{
protected:
// IoCtx bound to the filesystem's metadata pool
librados::IoCtx metadata_io;
/**
 * Construct a synthetic InodeStore for a normal file
 */
void build_file_dentry(
inodeno_t ino, uint64_t file_size, time_t file_mtime,
const file_layout_t &layout,
InodeStore *out,
std::string symlink);
/**
 * Construct a synthetic InodeStore for a directory
 */
void build_dir_dentry(
inodeno_t ino,
const frag_info_t &fragstat,
const file_layout_t &layout,
InodeStore *out);
/**
 * Try and read an fnode from a dirfrag
 */
int read_fnode(inodeno_t ino, frag_t frag,
fnode_t *fnode, uint64_t *read_version);
/**
 * Try and read a dentry from a dirfrag
 */
int read_dentry(inodeno_t parent_ino, frag_t frag,
const std::string &dname, InodeStore *inode, snapid_t *dnfirst=nullptr);
};
/**
 * A class that knows how to manipulate CephFS metadata pools
 */
class MetadataDriver : public RecoveryDriver, public MetadataTool
{
protected:
/**
 * Create a .inode object, i.e. root or mydir
 */
int inject_unlinked_inode(inodeno_t inono, int mode, int64_t data_pool_id);
/**
 * Check for existence of .inode objects, before
 * trying to go ahead and inject metadata.
 */
int root_exists(inodeno_t ino, bool *result);
// Ensure a dirfrag object exists (race-safe); *created reports whether
// this process created it.
int find_or_create_dirfrag(
inodeno_t ino,
frag_t fragment,
bool *created);
/**
 * Work out which fragment of a directory should contain a named
 * dentry, recursing up the trace as necessary to retrieve
 * fragtrees.
 */
int get_frag_of(
inodeno_t dirino,
const std::string &dname,
frag_t *result_ft);
public:
// Implement RecoveryDriver interface
int init(
librados::Rados &rados,
std::string &metadata_pool_name,
const FSMap *fsmap,
fs_cluster_id_t fscid) override;
// Write a single dentry (dname -> inode) into a dirfrag's omap
int inject_linkage(
inodeno_t dir_ino, const std::string &dname,
const frag_t fragment, const InodeStore &inode, snapid_t dnfirst=CEPH_NOSNAP);
int inject_with_backtrace(
const inode_backtrace_t &bt,
const InodeStore &dentry) override;
int inject_lost_and_found(
inodeno_t ino,
const InodeStore &dentry) override;
int init_roots(int64_t data_pool_id) override;
int check_roots(bool *result) override;
// Read/write an MDSTable (e.g. snaptable) from/to its RADOS object
int load_table(MDSTable *table);
int save_table(MDSTable *table);
};
/**
 * The cephfs-data-scan tool: scans data/metadata pools and injects
 * recovered inodes through a pluggable RecoveryDriver.
 */
class DataScan : public MDSUtility, public MetadataTool
{
protected:
// Destination for recovered metadata (owned; deleted in the destructor)
RecoveryDriver *driver;
// Filesystem being operated on
fs_cluster_id_t fscid;
std::string metadata_pool_name;
std::vector<int64_t> data_pools;
// IoCtx for data pool (where we scrape file backtraces from)
librados::IoCtx data_io;
// Remember the data pool ID for use in layouts
int64_t data_pool_id;
// IoCtxs for extra data pools
std::vector<librados::IoCtx> extra_data_ios;
// Worker sharding: this process handles objects where hash % m == n
uint32_t n;
uint32_t m;
/**
 * Scan data pool for backtraces, and inject inodes to metadata pool
 */
int scan_inodes();
/**
 * Scan data pool for file sizes and mtimes
 */
int scan_extents();
/**
 * Scan metadata pool for 0th dirfrags to link orphaned
 * directory inodes.
 */
int scan_frags();
/**
 * Cleanup xattrs from data pool
 */
int cleanup();
/**
 * Check if an inode number is in the permitted ranges
 */
bool valid_ino(inodeno_t ino) const;
int scan_links();
// Accept pools which are not in the FSMap
bool force_pool;
// Respond to decode errors by overwriting
bool force_corrupt;
// Overwrite root objects even if they exist
bool force_init;
// Only scan inodes without this scrub tag
std::string filter_tag;
/**
 * @param r set to error on valid key with invalid value
 * @return true if argument consumed, else false
 */
bool parse_kwarg(
const std::vector<const char*> &args,
std::vector<const char *>::const_iterator &i,
int *r);
/**
 * @return true if argument consumed, else false
 */
bool parse_arg(
const std::vector<const char*> &arg,
std::vector<const char *>::const_iterator &i);
// Check whether the pool supports the object-name filter used for sharding
int probe_filter(librados::IoCtx &ioctx);
/**
 * Apply a function to all objects in an ioctx's pool, optionally
 * restricted to only those objects with a 00000000 offset and
 * no tag matching DataScan::scrub_tag.
 */
int forall_objects(
librados::IoCtx &ioctx,
bool untagged_only,
std::function<int(std::string, uint64_t, uint64_t)> handler);
public:
static void usage();
int main(const std::vector<const char *> &args);
DataScan()
: driver(NULL), fscid(FS_CLUSTER_ID_NONE),
data_pool_id(-1), n(0), m(1),
force_pool(false), force_corrupt(false),
force_init(false)
{
}
~DataScan() override
{
delete driver;
}
};
| 8,566 | 23.831884 | 86 |
h
|
null |
ceph-main/src/tools/cephfs/Dumper.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2010 Greg Farnum <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef _BACKWARD_BACKWARD_WARNING_H
#define _BACKWARD_BACKWARD_WARNING_H // make gcc 4.3 shut up about hash_*
#endif
#include "include/compat.h"
#include "include/fs_types.h"
#include "common/entity_name.h"
#include "common/errno.h"
#include "common/safe_io.h"
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/JournalPointer.h"
#include "osdc/Journaler.h"
#include "mon/MonClient.h"
#include "Dumper.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#define HEADER_LEN 4096
using namespace std;
/**
 * Bind this dumper to an MDS rank and resolve the inode of the journal
 * it will operate on.
 *
 * @param role_ filesystem id + rank whose journal is targeted
 * @param type  "mdlog" (log inode discovered via the rank's
 *              JournalPointer object) or "purge_queue" (fixed
 *              per-rank inode); anything else aborts
 * @return 0 on success, negative error code on failure
 */
int Dumper::init(mds_role_t role_, const std::string &type)
{
  role = role_;
  const int init_r = MDSUtility::init();
  if (init_r < 0) {
    return init_r;
  }
  auto fs = fsmap->get_filesystem(role.fscid);
  ceph_assert(fs != nullptr);
  if (type == "purge_queue") {
    // Purge queue inodes are a fixed function of the rank.
    ino = MDS_INO_PURGE_QUEUE + role.rank;
  } else if (type == "mdlog") {
    // The active log inode is recorded in the rank's journal pointer.
    JournalPointer jp(role.rank, fs->mds_map.get_metadata_pool());
    const int load_r = jp.load(objecter);
    if (load_r != 0) {
      std::cerr << "Error loading journal: " << cpp_strerror(load_r) << std::endl;
      return load_r;
    }
    ino = jp.front;
  } else {
    ceph_abort(); // should not get here
  }
  return 0;
}
/**
 * Kick off Journaler::recover() under the lock and block the caller
 * until recovery completes.
 *
 * @return 0 on success, negative error code reported by recovery
 */
int Dumper::recover_journal(Journaler *journaler)
{
  C_SaferCond recover_done;
  lock.lock();
  journaler->recover(&recover_done);
  lock.unlock();
  int rc = recover_done.wait();
  if (rc < 0) {
    derr << "error on recovery: " << cpp_strerror(rc) << dendl;
    return rc;
  }
  dout(10) << "completed journal recovery" << dendl;
  return 0;
}
/**
 * Export the journal region [read_pos, write_pos) to a local file.
 *
 * The output file begins with a zero-padded HEADER_LEN (4k) text header
 * recording offsets, layout and fsid, followed by the journal bytes
 * written at their original offsets (producing a sparse file).
 *
 * @param dump_file path of the file to create/truncate
 * @return 0 on success; a nonzero error code otherwise.
 *   NOTE(review): some paths here return a *positive* errno (open/seek/
 *   close failures) while RADOS read failures return negative — callers
 *   apparently only test for nonzero; confirm before changing.
 */
int Dumper::dump(const char *dump_file)
{
  int r = 0;
  auto fs = fsmap->get_filesystem(role.fscid);
  ceph_assert(fs != nullptr);
  Journaler journaler("dumper", ino, fs->mds_map.get_metadata_pool(),
                      CEPH_FS_ONDISK_MAGIC, objecter, 0, 0,
                      &finisher);
  // Recover the journaler so read/write positions are trustworthy.
  r = recover_journal(&journaler);
  if (r) {
    return r;
  }
  uint64_t start = journaler.get_read_pos();
  uint64_t end = journaler.get_write_pos();
  uint64_t len = end-start;
  Filer filer(objecter, &finisher);
  cout << "journal is " << start << "~" << len << std::endl;
  int fd = ::open(dump_file, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0644);
  if (fd >= 0) {
    // include an informative header
    uuid_d fsid = monc->get_fsid();
    char fsid_str[40];
    fsid.print(fsid_str);
    char buf[HEADER_LEN];
    memset(buf, 0, sizeof(buf));
    // The trailing %c writes byte 4 (EOT) so undump()'s strstr() scans
    // on this buffer terminate predictably.  The literal uses line
    // continuations: the indented lines below are part of the string.
    snprintf(buf, HEADER_LEN, "Ceph mds%d journal dump\n start offset %llu (0x%llx)\n\
length %llu (0x%llx)\n write_pos %llu (0x%llx)\n format %llu\n\
trimmed_pos %llu (0x%llx)\n stripe_unit %lu (0x%lx)\n stripe_count %lu (0x%lx)\n\
object_size %lu (0x%lx)\n fsid %s\n%c",
            role.rank,
            (unsigned long long)start, (unsigned long long)start,
            (unsigned long long)len, (unsigned long long)len,
            (unsigned long long)journaler.last_committed.write_pos, (unsigned long long)journaler.last_committed.write_pos,
            (unsigned long long)journaler.last_committed.stream_format,
            (unsigned long long)journaler.last_committed.trimmed_pos, (unsigned long long)journaler.last_committed.trimmed_pos,
            (unsigned long)journaler.last_committed.layout.stripe_unit, (unsigned long)journaler.last_committed.layout.stripe_unit,
            (unsigned long)journaler.last_committed.layout.stripe_count, (unsigned long)journaler.last_committed.layout.stripe_count,
            (unsigned long)journaler.last_committed.layout.object_size, (unsigned long)journaler.last_committed.layout.object_size,
            fsid_str,
            4);
    // Always write the full 4k header block, including the zero padding.
    r = safe_write(fd, buf, sizeof(buf));
    if (r) {
      derr << "Error " << r << " (" << cpp_strerror(r) << ") writing journal file header" << dendl;
      ::close(fd);
      return r;
    }
    // write the data
    // Seek to `start` so journal bytes land at their on-disk offsets,
    // leaving a sparse hole between the header and the data.
    off64_t seeked = ::lseek64(fd, start, SEEK_SET);
    if (seeked == (off64_t)-1) {
      r = errno;
      derr << "Error " << r << " (" << cpp_strerror(r) << ") seeking to 0x" << std::hex << start << std::dec << dendl;
      ::close(fd);
      return r;
    }
    // Read and write 32MB chunks. Slower than it could be because we're not
    // streaming, but that's okay because this is just a debug/disaster tool.
    const uint32_t chunk_size = 32 * 1024 * 1024;
    for (uint64_t pos = start; pos < start + len; pos += chunk_size) {
      bufferlist bl;
      dout(10) << "Reading at pos=0x" << std::hex << pos << std::dec << dendl;
      // Last chunk may be short.
      const uint32_t read_size = std::min<uint64_t>(chunk_size, end - pos);
      C_SaferCond cond;
      lock.lock();
      filer.read(ino, &journaler.get_layout(), CEPH_NOSNAP,
                 pos, read_size, &bl, 0, &cond);
      lock.unlock();
      r = cond.wait();
      if (r < 0) {
        derr << "Error " << r << " (" << cpp_strerror(r) << ") reading "
                "journal at offset 0x" << std::hex << pos << std::dec << dendl;
        ::close(fd);
        return r;
      }
      dout(10) << "Got 0x" << std::hex << bl.length() << std::dec
               << " bytes" << dendl;
      r = bl.write_fd(fd);
      if (r) {
        derr << "Error " << r << " (" << cpp_strerror(r) << ") writing journal file" << dendl;
        ::close(fd);
        return r;
      }
    }
    r = ::close(fd);
    if (r) {
      r = errno;
      derr << "Error " << r << " (" << cpp_strerror(r) << ") closing journal file" << dendl;
      return r;
    }
    cout << "wrote " << len << " bytes at offset " << start << " to " << dump_file << "\n"
         << "NOTE: this is a _sparse_ file; you can\n"
         << "\t$ tar cSzf " << dump_file << ".tgz " << dump_file << "\n"
         << " to efficiently compress it while preserving sparseness." << std::endl;
    return 0;
  } else {
    int err = errno;
    derr << "unable to open " << dump_file << ": " << cpp_strerror(err) << dendl;
    return err;
  }
}
/**
 * Re-inject a journal previously exported by dump() back into the
 * metadata pool.
 *
 * Parses the 4k text header for offsets/layout (falling back to the
 * recovered on-cluster layout, then to defaults), rewrites the journal
 * header object, purges/zeroes trailing objects so no stale data
 * follows the new write_pos, then streams the data region from the file
 * into RADOS in 1MB writes.
 *
 * @param dump_file file produced by dump()
 * @param force     skip the fsid-matches-cluster safety check
 * @return 0 on success; -EINVAL for bad/mismatching headers, otherwise
 *   an errno-style code (positive for local open failure — see
 *   NOTE(review) in dump()).
 */
int Dumper::undump(const char *dump_file, bool force)
{
  cout << "undump " << dump_file << std::endl;
  auto fs = fsmap->get_filesystem(role.fscid);
  ceph_assert(fs != nullptr);
  int r = 0;
  // try get layout info from cluster
  Journaler journaler("umdumper", ino, fs->mds_map.get_metadata_pool(),
                      CEPH_FS_ONDISK_MAGIC, objecter, 0, 0,
                      &finisher);
  int recovered = recover_journal(&journaler);
  if (recovered != 0) {
    derr << "recover_journal failed, try to get header from dump file " << dendl;
  }
  int fd = ::open(dump_file, O_RDONLY|O_BINARY);
  if (fd < 0) {
    r = errno;
    derr << "couldn't open " << dump_file << ": " << cpp_strerror(r) << dendl;
    return r;
  }
  // The text header looks like:
  // Ceph mds0 journal dump
  // start offset 232401996 (0xdda2c4c)
  // length 1097504 (0x10bf20)
  char buf[HEADER_LEN];
  r = safe_read(fd, buf, sizeof(buf));
  if (r < 0) {
    VOID_TEMP_FAILURE_RETRY(::close(fd));
    return r;
  }
  long long unsigned start, len, write_pos, format, trimmed_pos;
  long unsigned stripe_unit, stripe_count, object_size;
  sscanf(strstr(buf, "start offset"), "start offset %llu", &start);
  sscanf(strstr(buf, "length"), "length %llu", &len);
  sscanf(strstr(buf, "write_pos"), "write_pos %llu", &write_pos);
  sscanf(strstr(buf, "format"), "format %llu", &format);
  if (!force) {
    // need to check if fsid matches the online cluster fsid
    if (strstr(buf, "fsid")) {
      uuid_d fsid;
      char fsid_str[40];
      sscanf(strstr(buf, "fsid"), "fsid %39s", fsid_str);
      r = fsid.parse(fsid_str);
      if (!r) {
        derr << "Invalid fsid" << dendl;
        ::close(fd);
        return -EINVAL;
      }
      if (fsid != monc->get_fsid()) {
        derr << "Imported journal fsid does not match online cluster fsid" << dendl;
        derr << "Use --force to skip fsid check" << dendl;
        ::close(fd);
        return -EINVAL;
      }
    } else {
      derr << "Invalid header, no fsid embeded" << dendl;
      ::close(fd);
      return -EINVAL;
    }
  }
  // Prefer the layout recovered from the cluster; fall back to the dump
  // header, then to compile-time defaults.
  if (recovered == 0) {
    stripe_unit = journaler.last_committed.layout.stripe_unit;
    stripe_count = journaler.last_committed.layout.stripe_count;
    object_size = journaler.last_committed.layout.object_size;
  } else {
    // try to get layout from dump file header, if failed set layout to default
    if (strstr(buf, "stripe_unit")) {
      sscanf(strstr(buf, "stripe_unit"), "stripe_unit %lu", &stripe_unit);
    } else {
      stripe_unit = file_layout_t::get_default().stripe_unit;
    }
    if (strstr(buf, "stripe_count")) {
      sscanf(strstr(buf, "stripe_count"), "stripe_count %lu", &stripe_count);
    } else {
      stripe_count = file_layout_t::get_default().stripe_count;
    }
    if (strstr(buf, "object_size")) {
      sscanf(strstr(buf, "object_size"), "object_size %lu", &object_size);
    } else {
      object_size = file_layout_t::get_default().object_size;
    }
  }
  if (strstr(buf, "trimmed_pos")) {
    sscanf(strstr(buf, "trimmed_pos"), "trimmed_pos %llu", &trimmed_pos);
  } else {
    // Old format dump, any untrimmed objects before expire_pos will
    // be discarded as trash.
    trimmed_pos = start - (start % object_size);
  }
  // Sanity: journal invariant is trimmed_pos <= expire_pos <= write_pos.
  if (trimmed_pos > start) {
    derr << std::hex << "Invalid header (trimmed 0x" << trimmed_pos
      << " > expire 0x" << start << std::dec << dendl;
    ::close(fd);
    return -EINVAL;
  }
  if (start > write_pos) {
    derr << std::hex << "Invalid header (expire 0x" << start
      << " > write 0x" << write_pos << std::dec << dendl;
    ::close(fd);
    return -EINVAL;
  }
  cout << "start " << start <<
    " len " << len <<
    " write_pos " << write_pos <<
    " format " << format <<
    " trimmed_pos " << trimmed_pos <<
    " stripe_unit " << stripe_unit <<
    " stripe_count " << stripe_count <<
    " object_size " << object_size << std::endl;
  // Build and persist the new on-disk journal header object.
  Journaler::Header h;
  h.trimmed_pos = trimmed_pos;
  h.expire_pos = start;
  h.write_pos = write_pos;
  h.stream_format = format;
  h.magic = CEPH_FS_ONDISK_MAGIC;
  h.layout.stripe_unit = stripe_unit;
  h.layout.stripe_count = stripe_count;
  h.layout.object_size = object_size;
  h.layout.pool_id = fs->mds_map.get_metadata_pool();
  bufferlist hbl;
  encode(h, hbl);
  object_t oid = file_object_t(ino, 0);
  object_locator_t oloc(fs->mds_map.get_metadata_pool());
  SnapContext snapc;
  cout << "writing header " << oid << std::endl;
  C_SaferCond header_cond;
  lock.lock();
  objecter->write_full(oid, oloc, snapc, hbl,
                       ceph::real_clock::now(), 0,
                       &header_cond);
  lock.unlock();
  r = header_cond.wait();
  if (r != 0) {
    derr << "Failed to write header: " << cpp_strerror(r) << dendl;
    ::close(fd);
    return r;
  }
  Filer filer(objecter, &finisher);
  /* Erase any objects at the end of the region to which we shall write
   * the new log data. This is to avoid leaving trailing junk after
   * the newly written data. Any junk more than one object ahead
   * will be taken care of during normal operation by Journaler's
   * prezeroing behaviour */
  {
    uint32_t const object_size = h.layout.object_size;
    ceph_assert(object_size > 0);
    uint64_t last_obj = h.write_pos / object_size;
    uint64_t purge_count = 2;
    /* When the length is zero, the last_obj should be zeroed
     * from the offset determined by the new write_pos instead of being purged.
     */
    if (!len) {
      purge_count = 1;
      ++last_obj;
    }
    C_SaferCond purge_cond;
    cout << "Purging " << purge_count << " objects from " << last_obj << std::endl;
    lock.lock();
    filer.purge_range(ino, &h.layout, snapc, last_obj, purge_count,
                      ceph::real_clock::now(), 0, &purge_cond);
    lock.unlock();
    purge_cond.wait();
  }
  /* When the length is zero, zero the last object
   * from the offset determined by the new write_pos.
   */
  if (!len) {
    uint64_t offset_in_obj = h.write_pos % h.layout.object_size;
    // Renamed from `len` to avoid shadowing the outer journal length.
    uint64_t zero_len = h.layout.object_size - offset_in_obj;
    C_SaferCond zero_cond;
    cout << "Zeroing " << zero_len << " bytes in the last object." << std::endl;
    lock.lock();
    filer.zero(ino, &h.layout, snapc, h.write_pos, zero_len, ceph::real_clock::now(), 0, &zero_cond);
    lock.unlock();
    zero_cond.wait();
  }
  // Stream from `fd` to `filer`, 1MB at a time.
  uint64_t pos = start;
  uint64_t left = len;
  while (left > 0) {
    // Read
    bufferlist j;
    lseek64(fd, pos, SEEK_SET);
    uint64_t l = std::min<uint64_t>(left, 1024*1024);
    j.read_fd(fd, l);
    // Write
    cout << " writing " << pos << "~" << l << std::endl;
    C_SaferCond write_cond;
    lock.lock();
    filer.write(ino, &h.layout, snapc, pos, l, j,
                ceph::real_clock::now(), 0, &write_cond);
    lock.unlock();
    r = write_cond.wait();
    if (r != 0) {
      // Bug fix: this message previously said "Failed to write header",
      // copy-pasted from the header path above; we are writing data here.
      derr << "Failed to write journal data: " << cpp_strerror(r) << dendl;
      ::close(fd);
      return r;
    }
    // Advance
    pos += l;
    left -= l;
  }
  VOID_TEMP_FAILURE_RETRY(::close(fd));
  cout << "done." << std::endl;
  return 0;
}
| 13,328 | 29.711982 | 133 |
cc
|
null |
ceph-main/src/tools/cephfs/Dumper.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2010 Greg Farnum <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef JOURNAL_DUMPER_H_
#define JOURNAL_DUMPER_H_
#include "MDSUtility.h"
class Journaler;
/**
* This class lets you dump out an mds journal for troubleshooting or whatever.
*
* It was built to work with cmds so some of the design choices are random.
* To use, create a Dumper, call init(), and then call dump() with the name
* of the file to dump to.
*/
class Dumper : public MDSUtility {
private:
  mds_role_t role;  // target filesystem id + MDS rank
  inodeno_t ino;    // inode of the journal being dumped/undumped; -1 until init()
public:
  Dumper() : ino(-1)
  {}
  // Resolve the journal inode for `type` ("mdlog" or "purge_queue").
  int init(mds_role_t role_, const std::string &type);
  // Run Journaler::recover() and block until it finishes.
  int recover_journal(Journaler *journaler);
  // Export the journal (plus a 4k text header) to a sparse local file.
  int dump(const char *dumpfile);
  // Import a previously dumped journal; `force` skips the fsid check.
  int undump(const char *dumpfile, bool force);
};
#endif /* JOURNAL_DUMPER_H_ */
| 1,125 | 23.478261 | 79 |
h
|
null |
ceph-main/src/tools/cephfs/EventOutput.cc
|
// -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* ceph - scalable distributed file system
*
* copyright (c) 2014 john spray <[email protected]>
*
* this is free software; you can redistribute it and/or
* modify it under the terms of the gnu lesser general public
* license version 2.1, as published by the free software
* foundation. see file copying.
*/
#include <iostream>
#include <fstream>
#include "common/errno.h"
#include "mds/mdstypes.h"
#include "mds/events/EUpdate.h"
#include "mds/LogEvent.h"
#include "JournalScanner.h"
#include "EventOutput.h"
/**
 * Write every scanned event as its own binary file inside directory
 * `path` (created if missing), named "0x<offset>_<type>.bin".
 *
 * @return 0 on success; negative errno on mkdir or file-write failure.
 */
int EventOutput::binary() const
{
  // Binary output, files
  int r = ::mkdir(path.c_str(), 0755);
  if (r != 0) {
    r = -errno;
    // An existing directory is fine; anything else is fatal.
    if (r != -EEXIST) {
      std::cerr << "Error creating output directory: " << cpp_strerror(r) << std::endl;
      return r;
    }
  }
  for (JournalScanner::EventMap::const_iterator i = scan.events.begin(); i != scan.events.end(); ++i) {
    bufferlist bin;
    std::stringstream filename;
    if (auto& le = i->second.log_event; le) {
      le->encode(bin, CEPH_FEATURES_SUPPORTED_DEFAULT);
      filename << "0x" << std::hex << i->first << std::dec << "_" << le->get_type_str() << ".bin";
    } else if (auto& pi = i->second.pi; pi) {
      pi->encode(bin);
      filename << "0x" << std::hex << i->first << std::dec << "_" << pi->get_type_str() << ".bin";
    }
    // NOTE(review): an entry with neither log_event nor pi would produce
    // an empty filename here — presumably cannot happen; confirm against
    // JournalScanner's event construction.
    std::string const file_path = path + std::string("/") + filename.str();
    std::ofstream bin_file(file_path.c_str(), std::ofstream::out | std::ofstream::binary);
    bin.write_stream(bin_file);
    bin_file.close();
    if (bin_file.fail()) {
      return -EIO;
    }
  }
  std::cerr << "Wrote output to binary files in directory '" << path << "'" << std::endl;
  return 0;
}
/**
 * Serialize all scanned events as a pretty-printed JSON array
 * ("journal") into the file at `path`.
 *
 * @return 0 on success, -EIO if the output stream reports failure.
 */
int EventOutput::json() const
{
  JSONFormatter jf(true);
  std::ofstream out_file(path.c_str(), std::ofstream::out);
  jf.open_array_section("journal");
  for (const auto& entry : scan.events) {
    if (auto& le = entry.second.log_event; le) {
      jf.open_object_section("log_event");
      le->dump(&jf);
      jf.close_section(); // log_event
    } else if (auto& pi = entry.second.pi; pi) {
      jf.open_object_section("purge_action");
      pi->dump(&jf);
      jf.close_section();
    }
  }
  jf.close_section(); // journal
  jf.flush(out_file);
  out_file.close();
  if (out_file.fail()) {
    return -EIO;
  }
  std::cerr << "Wrote output to JSON file '" << path << "'" << std::endl;
  return 0;
}
/**
 * Print a human-readable line per scanned event to stdout:
 * timestamp, journal offset, type, a detail string (the EUpdate `type`
 * field for update events), and any paths referenced by the metablob.
 *
 * Fixes: the original inner path loop re-declared `i`, shadowing the
 * outer event iterator (-Wshadow); loops are now range-based with
 * distinct names.
 */
void EventOutput::list() const
{
  for (const auto& entry : scan.events) {
    if (auto& le = entry.second.log_event; le) {
      std::vector<std::string> ev_paths;
      EMetaBlob const *emb = le->get_metablob();
      if (emb) {
        emb->get_paths(ev_paths);
      }
      std::string detail;
      if (le->get_type() == EVENT_UPDATE) {
        auto& eu = reinterpret_cast<EUpdate&>(*le);
        detail = eu.type;
      }
      std::cout << le->get_stamp() << " 0x"
                << std::hex << entry.first << std::dec << " "
                << le->get_type_str() << ": "
                << " (" << detail << ")" << std::endl;
      for (const auto& ev_path : ev_paths) {
        std::cout << " " << ev_path << std::endl;
      }
    } else if (auto& pi = entry.second.pi; pi) {
      std::cout << pi->stamp << " 0x"
                << std::hex << entry.first << std::dec << " "
                << pi->get_type_str() << std::endl;
    }
  }
}
/**
 * Print per-type event counts followed by the scan's error list to
 * stdout.
 *
 * Fixes: dropped the redundant `count()` check before incrementing —
 * std::map::operator[] value-initializes an absent int to 0, so a plain
 * pre-increment is equivalent and avoids a second lookup.
 */
void EventOutput::summary() const
{
  std::map<std::string, int> type_count;
  for (const auto& entry : scan.events) {
    std::string type;
    if (auto& le = entry.second.log_event; le)
      type = le->get_type_str();
    else if (auto& pi = entry.second.pi; pi)
      type = pi->get_type_str();
    // operator[] default-constructs missing counters to 0.
    ++type_count[type];
  }
  std::cout << "Events by type:" << std::endl;
  for (const auto& tc : type_count) {
    std::cout << " " << tc.first << ": " << tc.second << std::endl;
  }
  std::cout << "Errors: " << scan.errors.size() << std::endl;
  if (!scan.errors.empty()) {
    for (const auto& err : scan.errors) {
      std::cout << " 0x" << std::hex << err.first << std::dec
                << ": " << err.second.r << " "
                << err.second.description << std::endl;
    }
  }
}
| 4,601 | 28.883117 | 105 |
cc
|
null |
ceph-main/src/tools/cephfs/EventOutput.h
|
// -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* ceph - scalable distributed file system
*
* copyright (c) 2014 john spray <[email protected]>
*
* this is free software; you can redistribute it and/or
* modify it under the terms of the gnu lesser general public
* license version 2.1, as published by the free software
* foundation. see file copying.
*/
#ifndef EVENT_OUTPUT_H
#define EVENT_OUTPUT_H
#include <string>
class JournalScanner;
/**
* Different output formats for the results of a journal scan
*/
class EventOutput
{
  private:
  JournalScanner const &scan;  // completed scan whose events/errors we render
  std::string const path;      // output target: a file (json) or directory (binary)
  public:
  EventOutput(JournalScanner const &scan_, std::string const &path_)
    : scan(scan_), path(path_) {}
  // Print per-type event counts and scan errors to stdout.
  void summary() const;
  // Print one line per event (plus referenced paths) to stdout.
  void list() const;
  // Write all events as a JSON array to `path`; 0 or -EIO.
  int json() const;
  // Write each event as a binary file under directory `path`; 0 or -errno.
  int binary() const;
};
#endif // EVENT_OUTPUT_H
| 920 | 20.418605 | 71 |
h
|
null |
ceph-main/src/tools/cephfs/JournalFilter.cc
|
// -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* ceph - scalable distributed file system
*
* copyright (c) 2014 john spray <[email protected]>
*
* this is free software; you can redistribute it and/or
* modify it under the terms of the gnu lesser general public
* license version 2.1, as published by the free software
* foundation. see file copying.
*/
#include "JournalFilter.h"
#include "common/ceph_argparse.h"
#include "mds/events/ESession.h"
#include "mds/events/EUpdate.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
using namespace std;
const string JournalFilter::range_separator("..");
/**
 * Decide whether the purge-queue item at journal offset `pos` passes
 * the configured filters.  All conditions are ANDed: offset range,
 * purge action (when set) and inode (when set).
 *
 * @return true if the item should be included
 */
bool JournalFilter::apply(uint64_t pos, PurgeItem &pi) const
{
  // Cheapest test first: the journal offset window.
  if (pos < range_start || pos >= range_end) {
    return false;
  }
  // Action filter, only when one was requested.
  if (purge_action != PurgeItem::NONE && pi.action != purge_action) {
    return false;
  }
  // Inode filter, only when one was requested.
  if (inode && pi.ino != inode) {
    return false;
  }
  return true;
}
/*
* Return whether a LogEvent is to be included or excluded.
*
* The filter parameters are applied on an AND basis: if any
* condition is not met, the event is excluded. Try to do
* the fastest checks first.
*/
/**
 * Decide whether the log event at journal offset `pos` passes every
 * configured filter (offset range, event type, client, inode, dirfrag,
 * dentry name, path substring).  Conditions are ANDed; filters that
 * need a metablob exclude events that have none.
 *
 * @return true if the event should be included
 */
bool JournalFilter::apply(uint64_t pos, LogEvent &le) const
{
  /* Filtering by journal offset range */
  if (pos < range_start || pos >= range_end) {
    return false;
  }
  /* Filtering by event type */
  if (event_type != 0) {
    if (le.get_type() != event_type) {
      return false;
    }
  }
  /* Filtering by client */
  if (client_name.num()) {
    EMetaBlob const *metablob = le.get_metablob();
    if (metablob) {
      if (metablob->get_client_name() != client_name) {
        return false;
      }
    } else if (le.get_type() == EVENT_SESSION) {
      // Session events carry the client identity directly instead of
      // via a metablob.
      ESession *es = reinterpret_cast<ESession*>(&le);
      if (es->get_client_inst().name != client_name) {
        return false;
      }
    } else {
      // No way to attribute this event to a client: exclude it.
      return false;
    }
  }
  /* Filtering by inode */
  if (inode) {
    EMetaBlob const *metablob = le.get_metablob();
    if (metablob) {
      std::set<inodeno_t> inodes;
      metablob->get_inodes(inodes);
      bool match_any = false;
      for (std::set<inodeno_t>::iterator i = inodes.begin(); i != inodes.end(); ++i) {
        if (*i == inode) {
          match_any = true;
          break;
        }
      }
      if (!match_any) {
        return false;
      }
    } else {
      return false;
    }
  }
  /* Filtering by frag and dentry */
  if (!frag_dentry.empty() || frag.ino) {
    EMetaBlob const *metablob = le.get_metablob();
    if (metablob) {
      std::map<dirfrag_t, std::set<std::string> > dentries;
      metablob->get_dentries(dentries);
      // Dirfrag filter: the event must touch the requested fragment.
      if (frag.ino) {
        bool match_any = false;
        for (std::map<dirfrag_t, std::set<std::string> >::iterator i = dentries.begin();
             i != dentries.end(); ++i) {
          if (i->first == frag) {
            match_any = true;
            break;
          }
        }
        if (!match_any) {
          return false;
        }
      }
      // Dentry filter: any fragment may contain the requested name.
      if (!frag_dentry.empty()) {
        bool match_any = false;
        for (std::map<dirfrag_t, std::set<std::string> >::iterator i = dentries.begin();
             i != dentries.end() && !match_any; ++i) {
          std::set<std::string> const &names = i->second;
          for (std::set<std::string>::iterator j = names.begin();
               j != names.end() && !match_any; ++j) {
            if (*j == frag_dentry) {
              match_any = true;
            }
          }
        }
        if (!match_any) {
          return false;
        }
      }
    } else {
      return false;
    }
  }
  /* Filtering by file path */
  if (!path_expr.empty()) {
    EMetaBlob const *metablob = le.get_metablob();
    if (metablob) {
      std::vector<std::string> paths;
      metablob->get_paths(paths);
      bool match_any = false;
      // Substring match, not exact-path match.
      for (std::vector<std::string>::iterator p = paths.begin(); p != paths.end(); ++p) {
        if ((*p).find(path_expr) != std::string::npos) {
          match_any = true;
          break;
        }
      }
      if (!match_any) {
        return false;
      }
    } else {
      return false;
    }
  }
  return true;
}
/**
 * Consume filter-related options from the argument list, advancing
 * `arg` past everything this filter understands and stopping at the
 * first unrecognized argument (which is left for the caller).
 *
 * Recognized: --range A..B, --path, --inode, --type, --frag INO[.FRAG],
 * --dname, --client.  Several of these are rejected for purge_queue
 * journals, which have no metablob-derived attributes.
 *
 * @return 0 on success, -EINVAL on a malformed value
 */
int JournalFilter::parse_args(
  std::vector<const char*> &argv,
  std::vector<const char*>::iterator &arg)
{
  while(arg != argv.end()) {
    std::string arg_str;
    if (ceph_argparse_witharg(argv, arg, &arg_str, "--range", (char*)NULL)) {
      size_t sep_loc = arg_str.find(JournalFilter::range_separator);
      if (sep_loc == std::string::npos || arg_str.size() <= JournalFilter::range_separator.size()) {
        derr << "Invalid range '" << arg_str << "'" << dendl;
        return -EINVAL;
      }
      // We have a lower bound
      if (sep_loc > 0) {
        std::string range_start_str = arg_str.substr(0, sep_loc);
        std::string parse_err;
        range_start = strict_strtoll(range_start_str.c_str(), 0, &parse_err);
        if (!parse_err.empty()) {
          derr << "Invalid lower bound '" << range_start_str << "': " << parse_err << dendl;
          return -EINVAL;
        }
      }
      // We have an upper bound (text after the separator).
      if (sep_loc < arg_str.size() - JournalFilter::range_separator.size()) {
        std::string range_end_str = arg_str.substr(sep_loc + range_separator.size());
        std::string parse_err;
        range_end = strict_strtoll(range_end_str.c_str(), 0, &parse_err);
        if (!parse_err.empty()) {
          derr << "Invalid upper bound '" << range_end_str << "': " << parse_err << dendl;
          return -EINVAL;
        }
      }
    } else if (ceph_argparse_witharg(argv, arg, &arg_str, "--path", (char*)NULL)) {
      if (!type.compare("purge_queue")) {
        derr << "Invalid filter arguments: purge_queue doesn't take \"--path\"." << dendl;
        return -EINVAL;
      }
      dout(4) << "Filtering by path '" << arg_str << "'" << dendl;
      path_expr = arg_str;
    } else if (ceph_argparse_witharg(argv, arg, &arg_str, "--inode", (char*)NULL)) {
      dout(4) << "Filtering by inode '" << arg_str << "'" << dendl;
      std::string parse_err;
      inode = strict_strtoll(arg_str.c_str(), 0, &parse_err);
      if (!parse_err.empty()) {
        derr << "Invalid inode '" << arg_str << "': " << parse_err << dendl;
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(argv, arg, &arg_str, "--type", (char*)NULL)) {
      // --type means an event type for mdlog but a purge action for
      // purge_queue; both str_to_type calls throw on unknown names.
      try {
        if (!type.compare("mdlog")) {
          event_type = LogEvent::str_to_type(arg_str);
        } else if (!type.compare("purge_queue")) {
          purge_action = PurgeItem::str_to_type(arg_str);
        }
      } catch (const std::out_of_range&) {
        derr << "Invalid event type '" << arg_str << "'" << dendl;
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(argv, arg, &arg_str, "--frag", (char*)NULL)) {
      if (!type.compare("purge_queue")) {
        derr << "Invalid filter arguments: purge_queue doesn't take \"--frag\"." << dendl;
        return -EINVAL;
      }
      // Format is INO[.FRAG]; a missing frag part defaults to "0".
      std::string const frag_sep = ".";
      size_t sep_loc = arg_str.find(frag_sep);
      std::string inode_str;
      std::string frag_str;
      if (sep_loc != std::string::npos) {
        inode_str = arg_str.substr(0, sep_loc);
        frag_str = arg_str.substr(sep_loc + 1);
      } else {
        inode_str = arg_str;
        frag_str = "0";
      }
      std::string parse_err;
      inodeno_t frag_ino = strict_strtoll(inode_str.c_str(), 0, &parse_err);
      if (!parse_err.empty()) {
        derr << "Invalid inode '" << inode_str << "': " << parse_err << dendl;
        return -EINVAL;
      }
      uint32_t frag_enc = strict_strtoll(frag_str.c_str(), 0, &parse_err);
      if (!parse_err.empty()) {
        derr << "Invalid frag '" << frag_str << "': " << parse_err << dendl;
        return -EINVAL;
      }
      frag = dirfrag_t(frag_ino, frag_t(frag_enc));
      dout(4) << "dirfrag filter: '" << frag << "'" << dendl;
    } else if (ceph_argparse_witharg(argv, arg, &arg_str, "--dname", (char*)NULL)) {
      if (!type.compare("purge_queue")) {
        derr << "Invalid filter arguments: purge_queue doesn't take \"--dname\"." << dendl;
        return -EINVAL;
      }
      frag_dentry = arg_str;
      dout(4) << "dentry filter: '" << frag_dentry << "'" << dendl;
    } else if (ceph_argparse_witharg(argv, arg, &arg_str, "--client", (char*)NULL)) {
      if (!type.compare("purge_queue")) {
        derr << "Invalid filter arguments: purge_queue doesn't take \"--client\"." << dendl;
        return -EINVAL;
      }
      std::string parse_err;
      int64_t client_num = strict_strtoll(arg_str.c_str(), 0, &parse_err);
      if (!parse_err.empty()) {
        derr << "Invalid client number " << arg_str << dendl;
        return -EINVAL;
      }
      client_name = entity_name_t::CLIENT(client_num);
    } else {
      // We're done with args the filter understands
      break;
    }
  }
  return 0;
}
/**
* If the filter params are only range, then return
* true and set start & end. Else return false.
*
* Use this to discover if the user has requested a contiguous range
* rather than any per-event filtering.
*/
/**
 * If the only active filter is a journal offset range, report it.
 *
 * Lets callers stream a contiguous byte range instead of testing every
 * event individually.
 *
 * @param start [out] lower bound, set only when returning true
 * @param end   [out] upper bound, set only when returning true
 * @return true iff no per-event filter is set and the range is
 *         non-default
 */
bool JournalFilter::get_range(uint64_t &start, uint64_t &end) const
{
  // Any per-event condition rules out a plain range read.
  const bool has_event_filter =
    !path_expr.empty() || inode != 0 || event_type != 0 ||
    frag.ino != 0 || client_name.num() != 0;
  // The all-default range carries no information either.
  const bool default_range =
    (range_start == 0 && range_end == (uint64_t)(-1));
  if (has_event_filter || default_range) {
    return false;
  }
  start = range_start;
  end = range_end;
  return true;
}
| 9,467 | 28.867508 | 100 |
cc
|
null |
ceph-main/src/tools/cephfs/JournalFilter.h
|
// -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* ceph - scalable distributed file system
*
* copyright (c) 2014 john spray <[email protected]>
*
* this is free software; you can redistribute it and/or
* modify it under the terms of the gnu lesser general public
* license version 2.1, as published by the free software
* foundation. see file copying.
*/
#ifndef JOURNAL_FILTER_H
#define JOURNAL_FILTER_H
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/PurgeQueue.h"
/**
* A set of conditions for narrowing down a search through the journal
*/
class JournalFilter
{
  private:
  /* Filtering by journal offset range; defaults (0, UINT64_MAX) mean
   * "no range restriction" */
  uint64_t range_start;
  uint64_t range_end;
  // Token separating the bounds in a --range argument ("..")
  static const std::string range_separator;
  /* Filtering by file (sub) path (substring match) */
  std::string path_expr;
  /* Filtering by inode (0 = unset) */
  inodeno_t inode;
  /* Filtering by type (0 = unset); which one applies depends on `type` */
  LogEvent::EventType event_type;
  // Journal flavour this filter targets: "mdlog" or "purge_queue"
  std::string type;
  /* Filtering by PurgeItem::Action (NONE = unset) */
  PurgeItem::Action purge_action;
  /* Filtering by dirfrag (frag.ino == 0 = unset) */
  dirfrag_t frag;
  std::string frag_dentry; //< optional, filter dentry name within fragment
  /* Filtering by metablob client name (num() == 0 = unset) */
  entity_name_t client_name;
  public:
  JournalFilter(std::string t) :
    range_start(0),
    range_end(-1),
    inode(0),
    event_type(0),
    type(t),
    purge_action(PurgeItem::NONE) {}
  // True (and sets start/end) iff the only active filter is the range.
  bool get_range(uint64_t &start, uint64_t &end) const;
  // True iff the mdlog event at `pos` passes every active filter.
  bool apply(uint64_t pos, LogEvent &le) const;
  // True iff the purge-queue item at `pos` passes every active filter.
  bool apply(uint64_t pos, PurgeItem &pi) const;
  // Consume recognized filter options from argv; 0 or -EINVAL.
  int parse_args(
    std::vector<const char*> &argv,
    std::vector<const char*>::iterator &arg);
};
#endif // JOURNAL_FILTER_H
| 1,719 | 22.243243 | 76 |
h
|
null |
ceph-main/src/tools/cephfs/JournalScanner.cc
|
// -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* ceph - scalable distributed file system
*
* copyright (c) 2014 john spray <[email protected]>
*
* this is free software; you can redistribute it and/or
* modify it under the terms of the gnu lesser general public
* license version 2.1, as published by the free software
* foundation. see file copying.
*/
#include "include/rados/librados.hpp"
#include "mds/JournalPointer.h"
#include "mds/events/ESubtreeMap.h"
#include "mds/PurgeQueue.h"
#include "JournalScanner.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
/**
* Read journal header, followed by sequential scan through journal space.
*
* Return 0 on success, else error code. Note that success has the special meaning
* that we were able to apply our checks, it does *not* mean that the journal is
* healthy.
*/
/**
 * Read journal header, followed by sequential scan through journal
 * space when `full` is requested.
 *
 * Returns 0 when the checks could be applied — this does *not* imply
 * the journal is healthy; findings are recorded in the scanner's state.
 * A negative code means we could not even interrogate the journal.
 */
int JournalScanner::scan(bool const full)
{
  int r = set_journal_ino();
  if (r < 0) {
    return r;
  }
  // For mdlog the header sits behind the pointer, so only read it when
  // the pointer was found; purge queues have no pointer at all.
  if (!is_mdlog || pointer_present) {
    if ((r = scan_header()) < 0) {
      return r;
    }
  }
  // A full scan walks every event and needs header offsets to start.
  if (full && header_present) {
    if ((r = scan_events()) < 0) {
      return r;
    }
  }
  return 0;
}
/**
 * Resolve `ino` for the configured journal type: purge queues use a
 * fixed per-rank inode, mdlog journals read the JournalPointer object.
 *
 * @return 0 on success, or the error from scan_pointer()
 */
int JournalScanner::set_journal_ino()
{
  if (type == "purge_queue") {
    ino = MDS_INO_PURGE_QUEUE + rank;
    return 0;
  }
  if (type == "mdlog") {
    int r = scan_pointer();
    is_mdlog = true;
    return r;
  }
  ceph_abort(); // should not get here
  return 0;     // unreachable; keeps the return path well-formed
}
/**
 * Read and decode this rank's JournalPointer object, recording
 * pointer_present/pointer_valid and, on success, the log inode in
 * `ino`.
 *
 * An absent or corrupt pointer is a *finding*, not a failure: those
 * paths return 0.  Only an unexpected read error returns nonzero.
 */
int JournalScanner::scan_pointer()
{
  // Issue read
  std::string const pointer_oid = obj_name(MDS_INO_LOG_POINTER_OFFSET + rank, 0);
  bufferlist pointer_bl;
  int r = io.read(pointer_oid, pointer_bl, INT_MAX, 0);
  if (r == -ENOENT) {
    // 'Successfully' discovered the pointer is missing.
    derr << "Pointer " << pointer_oid << " is absent" << dendl;
    return 0;
  }
  if (r < 0) {
    // Error preventing us interrogating pointer
    derr << "Pointer " << pointer_oid << " is unreadable" << dendl;
    return r;
  }
  dout(4) << "Pointer " << pointer_oid << " is readable" << dendl;
  pointer_present = true;
  JournalPointer jp;
  try {
    auto q = pointer_bl.cbegin();
    jp.decode(q);
  } catch(buffer::error &e) {
    // Decodable failure is also a 'successful' finding.
    derr << "Pointer " << pointer_oid << " is corrupt: " << e.what() << dendl;
    return 0;
  }
  pointer_valid = true;
  ino = jp.front;
  return 0;
}
/**
 * Read and validate the journal header object (object 0), setting
 * header_present/header_valid and populating the raw-owned `header`
 * member on a decodable read.
 *
 * As with scan_pointer(), an unreadable/corrupt/inconsistent header is
 * a recorded finding and still returns 0; this function never currently
 * returns nonzero.
 */
int JournalScanner::scan_header()
{
  int r;
  bufferlist header_bl;
  std::string header_name = obj_name(0);
  dout(4) << "JournalScanner::scan: reading header object '" << header_name << "'" << dendl;
  r = io.read(header_name, header_bl, INT_MAX, 0);
  if (r < 0) {
    derr << "Header " << header_name << " is unreadable" << dendl;
    return 0; // "Successfully" found an error
  } else {
    header_present = true;
  }
  auto header_bl_i = header_bl.cbegin();
  // NOTE(review): raw new/delete; `header` is presumably freed by the
  // scanner's destructor on the success path — confirm before changing.
  header = new Journaler::Header();
  try
  {
    header->decode(header_bl_i);
  }
  catch (buffer::error &e)
  {
    derr << "Header is corrupt (" << e.what() << ")" << dendl;
    delete header;
    header = NULL;
    return 0; // "Successfully" found an error
  }
  if (header->magic != std::string(CEPH_FS_ONDISK_MAGIC)) {
    derr << "Header is corrupt (bad magic)" << dendl;
    return 0; // "Successfully" found an error
  }
  // Journal invariant: trimmed_pos <= expire_pos <= write_pos.
  if (!((header->trimmed_pos <= header->expire_pos) && (header->expire_pos <= header->write_pos))) {
    derr << "Header is invalid (inconsistent offsets)" << dendl;
    return 0; // "Successfully" found an error
  }
  header_valid = true;
  return 0;
}
int JournalScanner::scan_events()
{
uint64_t object_size = g_conf()->mds_log_segment_size;
if (object_size == 0) {
// Default layout object size
object_size = file_layout_t::get_default().object_size;
}
uint64_t read_offset = header->expire_pos;
dout(10) << std::hex << "Header 0x"
<< header->trimmed_pos << " 0x"
<< header->expire_pos << " 0x"
<< header->write_pos << std::dec << dendl;
dout(10) << "Starting journal scan from offset 0x" << std::hex << read_offset << std::dec << dendl;
// TODO also check for extraneous objects before the trimmed pos or after the write pos,
// which would indicate a bogus header.
bufferlist read_buf;
bool gap = false;
uint64_t gap_start = -1;
for (uint64_t obj_offset = (read_offset / object_size); ; obj_offset++) {
uint64_t offset_in_obj = 0;
if (obj_offset * object_size < header->expire_pos) {
// Skip up to expire_pos from start of the object
// (happens for the first object we read)
offset_in_obj = header->expire_pos - obj_offset * object_size;
}
// Read this journal segment
bufferlist this_object;
std::string const oid = obj_name(obj_offset);
int r = io.read(oid, this_object, INT_MAX, offset_in_obj);
// Handle absent journal segments
if (r < 0) {
if (obj_offset > (header->write_pos / object_size)) {
dout(4) << "Reached end of journal objects" << dendl;
break;
} else {
derr << "Missing object " << oid << dendl;
}
objects_missing.push_back(obj_offset);
if (!gap) {
gap_start = read_offset;
gap = true;
}
if (read_buf.length() > 0) {
read_offset += read_buf.length();
read_buf.clear();
}
read_offset += object_size - offset_in_obj;
continue;
} else {
dout(4) << "Read 0x" << std::hex << this_object.length() << std::dec
<< " bytes from " << oid << " gap=" << gap << dendl;
objects_valid.push_back(oid);
this_object.begin().copy(this_object.length(), read_buf);
}
if (gap) {
// No valid data at the current read offset, scan forward until we find something valid looking
// or have to drop out to load another object.
dout(4) << "Searching for sentinel from 0x" << std::hex << read_offset
<< ", 0x" << read_buf.length() << std::dec << " bytes available" << dendl;
do {
auto p = read_buf.cbegin();
uint64_t candidate_sentinel;
decode(candidate_sentinel, p);
dout(4) << "Data at 0x" << std::hex << read_offset << " = 0x" << candidate_sentinel << std::dec << dendl;
if (candidate_sentinel == JournalStream::sentinel) {
dout(4) << "Found sentinel at 0x" << std::hex << read_offset << std::dec << dendl;
ranges_invalid.push_back(Range(gap_start, read_offset));
gap = false;
break;
} else {
// No sentinel, discard this byte
read_buf.splice(0, 1);
read_offset += 1;
}
} while (read_buf.length() >= sizeof(JournalStream::sentinel));
dout(4) << "read_buf size is " << read_buf.length() << dendl;
}
{
dout(10) << "Parsing data, 0x" << std::hex << read_buf.length() << std::dec << " bytes available" << dendl;
while(true) {
// TODO: detect and handle legacy format journals: can do many things
// on them but on read errors have to give up instead of searching
// for sentinels.
JournalStream journal_stream(JOURNAL_FORMAT_RESILIENT);
bool readable = false;
try {
uint64_t need;
readable = journal_stream.readable(read_buf, &need);
} catch (buffer::error &e) {
readable = false;
dout(4) << "Invalid container encoding at 0x" << std::hex << read_offset << std::dec << dendl;
gap = true;
gap_start = read_offset;
read_buf.splice(0, 1);
read_offset += 1;
break;
}
if (!readable) {
// Out of data, continue to read next object
break;
}
bufferlist le_bl; //< Serialized LogEvent blob
dout(10) << "Attempting decode at 0x" << std::hex << read_offset << std::dec << dendl;
// This cannot fail to decode because we pre-checked that a serialized entry
// blob would be readable.
uint64_t start_ptr = 0;
uint64_t consumed = journal_stream.read(read_buf, &le_bl, &start_ptr);
dout(10) << "Consumed 0x" << std::hex << consumed << std::dec << " bytes" << dendl;
if (start_ptr != read_offset) {
derr << "Bad entry start ptr (0x" << std::hex << start_ptr << ") at 0x"
<< read_offset << std::dec << dendl;
gap = true;
gap_start = read_offset;
// FIXME: given that entry was invalid, should we be skipping over it?
// maybe push bytes back onto start of read_buf and just advance one byte
// to start scanning instead. e.g. if a bogus size value is found it can
// cause us to consume and thus skip a bunch of following valid events.
read_offset += consumed;
break;
}
bool valid_entry = true;
if (is_mdlog) {
auto le = LogEvent::decode_event(le_bl.cbegin());
if (le) {
dout(10) << "Valid entry at 0x" << std::hex << read_offset << std::dec << dendl;
if (le->get_type() == EVENT_SUBTREEMAP
|| le->get_type() == EVENT_SUBTREEMAP_TEST) {
auto&& sle = dynamic_cast<ESubtreeMap&>(*le);
if (sle.expire_pos > read_offset) {
errors.insert(std::make_pair(
read_offset, EventError(
-ERANGE,
"ESubtreeMap has expire_pos ahead of its own position")));
}
}
if (filter.apply(read_offset, *le)) {
events.insert_or_assign(read_offset, EventRecord(std::move(le), consumed));
}
} else {
valid_entry = false;
}
} else if (type == "purge_queue"){
auto pi = std::make_unique<PurgeItem>();
try {
auto q = le_bl.cbegin();
pi->decode(q);
if (filter.apply(read_offset, *pi)) {
events.insert_or_assign(read_offset, EventRecord(std::move(pi), consumed));
}
} catch (const buffer::error &err) {
valid_entry = false;
}
} else {
ceph_abort(); // should not get here
}
if (!valid_entry) {
dout(10) << "Invalid entry at 0x" << std::hex << read_offset << std::dec << dendl;
gap = true;
gap_start = read_offset;
read_offset += consumed;
break;
} else {
events_valid.push_back(read_offset);
read_offset += consumed;
}
}
}
}
if (gap) {
// Ended on a gap, assume it ran to end
ranges_invalid.push_back(Range(gap_start, -1));
}
dout(4) << "Scanned objects, " << objects_missing.size() << " missing, " << objects_valid.size() << " valid" << dendl;
dout(4) << "Events scanned, " << ranges_invalid.size() << " gaps" << dendl;
dout(4) << "Found " << events_valid.size() << " valid events" << dendl;
dout(4) << "Selected " << events.size() << " events events for processing" << dendl;
return 0;
}
JournalScanner::~JournalScanner()
{
  // `delete` on a null pointer is a no-op, so no guard is required.
  delete header;
  header = NULL;
  dout(4) << events.size() << " events" << dendl;
  events.clear();
}
/**
 * Whether the journal data looks valid and replayable
 */
bool JournalScanner::is_healthy() const
{
  // An mdlog journal additionally needs a readable, decodable pointer.
  if (is_mdlog && !(pointer_present && pointer_valid)) {
    return false;
  }
  if (!header_present || !header_valid) {
    return false;
  }
  // Healthy means no corrupt ranges and no missing objects.
  return ranges_invalid.empty() && objects_missing.empty();
}
/**
 * Whether the journal data can be read from RADOS
 */
bool JournalScanner::is_readable() const
{
  // Readable is weaker than healthy: corrupt ranges are tolerated,
  // but the header must decode and every object must be present.
  if (!header_present || !header_valid) {
    return false;
  }
  return objects_missing.empty();
}
/**
 * Calculate the object name for a given offset
 */
std::string JournalScanner::obj_name(inodeno_t ino, uint64_t offset) const
{
  // Journal objects are named "<ino hex>.<zero-padded object index hex>".
  char buf[60];
  snprintf(buf, sizeof(buf), "%llx.%08llx",
           (unsigned long long)(ino),
           (unsigned long long)offset);
  return std::string(buf);
}
// Convenience overload: name an object of this scanner's own journal inode.
std::string JournalScanner::obj_name(uint64_t offset) const
{
  return obj_name(ino, offset);
}
/*
 * Write a human readable summary of the journal health
 */
void JournalScanner::report(std::ostream &out) const
{
  out << "Overall journal integrity: " << (is_healthy() ? "OK" : "DAMAGED") << std::endl;

  // Pointer state is only meaningful for mdlog journals.
  if (is_mdlog) {
    if (!pointer_present) {
      out << "Pointer not found" << std::endl;
    } else if (!pointer_valid) {
      out << "Pointer could not be decoded" << std::endl;
    }
  }

  if (!header_present) {
    out << "Header not found" << std::endl;
  } else if (!header_valid) {
    out << "Header could not be decoded" << std::endl;
  }

  if (!objects_missing.empty()) {
    out << "Objects missing:" << std::endl;
    for (const auto &missing_obj : objects_missing) {
      out << " 0x" << std::hex << missing_obj << std::dec << std::endl;
    }
  }

  if (!ranges_invalid.empty()) {
    out << "Corrupt regions:" << std::endl;
    for (const auto &range : ranges_invalid) {
      out << " 0x" << std::hex << range.first << "-" << range.second << std::dec << std::endl;
    }
  }
}
| 13,240 | 29.161731 | 120 |
cc
|
null |
ceph-main/src/tools/cephfs/JournalScanner.h
|
// -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 John Spray <[email protected]>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
*/
#ifndef JOURNAL_SCANNER_H
#define JOURNAL_SCANNER_H
#include "include/rados/librados_fwd.hpp"
// For Journaler::Header, can't forward-declare nested classes
#include <osdc/Journaler.h>
#include "JournalFilter.h"
/**
* A simple sequential reader for metadata journals. Unlike
* the MDS Journaler class, this is written to detect, record,
* and read past corruptions and missing objects. It is also
* less efficient but more plainly written.
*/
class JournalScanner
{
  private:
  librados::IoCtx &io;

  // Input constraints
  const int rank;
  std::string type;
  JournalFilter const filter;

  void gap_advance();

  public:
  /// Scan with an explicitly supplied event filter.
  JournalScanner(
      librados::IoCtx &io_,
      int rank_,
      const std::string &type_,
      JournalFilter const &filter_) :
    io(io_),
    rank(rank_),
    type(type_),
    filter(filter_),
    is_mdlog(false),
    pointer_present(false),
    pointer_valid(false),
    header_present(false),
    header_valid(false),
    header(nullptr) {};

  /// Convenience constructor: derive a default filter from the journal
  /// type (delegates to the main constructor to avoid duplicating the
  /// member initializer list).
  JournalScanner(
      librados::IoCtx &io_,
      int rank_,
      const std::string &type_) :
    JournalScanner(io_, rank_, type_, JournalFilter(type_)) {};

  ~JournalScanner();

  int set_journal_ino();
  int scan(bool const full=true);
  int scan_pointer();
  int scan_header();
  int scan_events();
  void report(std::ostream &out) const;
  std::string obj_name(uint64_t offset) const;
  std::string obj_name(inodeno_t ino, uint64_t offset) const;

  // The results of the scan
  inodeno_t ino;  // Corresponds to journal ino according their type
  struct EventRecord {
    EventRecord(std::unique_ptr<LogEvent> le, uint32_t rs) : log_event(std::move(le)), raw_size(rs) {}
    EventRecord(std::unique_ptr<PurgeItem> p, uint32_t rs) : pi(std::move(p)), raw_size(rs) {}
    std::unique_ptr<LogEvent> log_event;
    std::unique_ptr<PurgeItem> pi;
    uint32_t raw_size = 0;  //< Size from start offset including all encoding overhead
  };

  class EventError {
    public:
    int r;
    std::string description;
    EventError(int r_, const std::string &desc_)
      : r(r_), description(desc_) {}
  };

  typedef std::map<uint64_t, EventRecord> EventMap;
  typedef std::map<uint64_t, EventError> ErrorMap;
  typedef std::pair<uint64_t, uint64_t> Range;
  bool is_mdlog;
  bool pointer_present; //mdlog specific
  bool pointer_valid;   //mdlog specific
  bool header_present;
  bool header_valid;
  Journaler::Header *header;  // owned; released in the destructor

  bool is_healthy() const;
  bool is_readable() const;
  std::vector<std::string> objects_valid;
  std::vector<uint64_t> objects_missing;
  std::vector<Range> ranges_invalid;
  std::vector<uint64_t> events_valid;
  EventMap events;

  // For events present in ::events (i.e. scanned successfully),
  // any subsequent errors handling them (e.g. replaying)
  ErrorMap errors;

  private:
  // Forbid copy construction: this class holds an owning raw pointer
  // (header) and a reference member.  Modernized from the C++03-style
  // private undefined declaration to an explicit delete.
  JournalScanner(const JournalScanner &rhs) = delete;
};
#endif // JOURNAL_SCANNER_H
| 3,519 | 25.268657 | 102 |
h
|
null |
ceph-main/src/tools/cephfs/JournalTool.cc
|
// -*- mode:c++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 John Spray <[email protected]>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
*/
#include <sstream>
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "osdc/Journaler.h"
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/InoTable.h"
#include "mds/events/ENoOp.h"
#include "mds/events/EUpdate.h"
#include "JournalScanner.h"
#include "EventOutput.h"
#include "Dumper.h"
#include "Resetter.h"
#include "JournalTool.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << __func__ << ": "
using namespace std;
/**
 * Print command-line usage for the cephfs-journal-tool, then the generic
 * client options via generic_client_usage().
 */
void JournalTool::usage()
{
  std::cout << "Usage: \n"
    << " cephfs-journal-tool [options] journal <command>\n"
    << " <command>:\n"
    << " inspect\n"
    << " import <path> [--force]\n"
    << " export <path>\n"
    << " reset [--force]\n"
    << " cephfs-journal-tool [options] header <get|set> <field> <value>\n"
    << " <field>: [trimmed_pos|expire_pos|write_pos|pool_id]\n"
    << " cephfs-journal-tool [options] event <effect> <selector> <output> [special options]\n"
    << " <selector>:\n"
    << " --range=<start>..<end>\n"
    << " --path=<substring>\n"
    << " --inode=<integer>\n"
    // Fixed stray trailing '<' that previously rendered as "...><"
    << " --type=<UPDATE|OPEN|SESSION...>\n"
    << " --frag=<ino>.<frag> [--dname=<dentry string>]\n"
    << " --client=<session id integer>\n"
    << " <effect>: [get|recover_dentries|splice]\n"
    << " <output>: [summary|list|binary|json] [--path <path>]\n"
    << "\n"
    << "General options:\n"
    << " --rank=filesystem:{mds-rank|all} journal rank or \"all\" ranks (mandatory)\n"
    << " --journal=<mdlog|purge_queue> Journal type (purge_queue means\n"
    << " this journal is used to queue for purge operation,\n"
    << " default is mdlog, and only mdlog support event mode)\n"
    << "\n"
    << "Special options\n"
    << " --alternate-pool <name> Alternative metadata pool to target\n"
    << " when using recover_dentries.\n";

  generic_client_usage();
}
/**
 * Handle arguments and hand off to journal/header/event mode
 *
 * Parses the common options (--rank, --journal), connects to RADOS,
 * resolves the metadata pool, then dispatches the remaining arguments to
 * main_journal/main_header/main_event once per selected rank.
 *
 * @param argv remaining CLI arguments; consumed options are erased.
 * @return 0 on success, negative error code on failure.
 */
int JournalTool::main(std::vector<const char*> &argv)
{
  int r;

  dout(10) << "JournalTool::main " << dendl;
  // Common arg parsing
  // ==================
  if (argv.empty()) {
    cerr << "missing positional argument" << std::endl;
    return -EINVAL;
  }

  std::vector<const char*>::iterator arg = argv.begin();

  std::string rank_str;
  if (!ceph_argparse_witharg(argv, arg, &rank_str, "--rank", (char*)NULL)) {
    derr << "missing mandatory \"--rank\" argument" << dendl;
    return -EINVAL;
  }

  if (!ceph_argparse_witharg(argv, arg, &type, "--journal", (char*)NULL)) {
    // Default is mdlog
    type = "mdlog";
  }

  r = validate_type(type);
  if (r != 0) {
    derr << "journal type is not correct." << dendl;
    return r;
  }

  r = role_selector.parse(*fsmap, rank_str, false);
  if (r != 0) {
    derr << "Couldn't determine MDS rank." << dendl;
    return r;
  }

  std::string mode;
  if (arg == argv.end()) {
    derr << "Missing mode [journal|header|event]" << dendl;
    return -EINVAL;
  }
  mode = std::string(*arg);
  arg = argv.erase(arg);

  // RADOS init
  // ==========
  r = rados.init_with_context(g_ceph_context);
  if (r < 0) {
    derr << "RADOS unavailable, cannot scan filesystem journal" << dendl;
    return r;
  }

  dout(4) << "JournalTool: connecting to RADOS..." << dendl;
  r = rados.connect();
  if (r < 0) {
    derr << "couldn't connect to cluster: " << cpp_strerror(r) << dendl;
    return r;
  }

  auto fs = fsmap->get_filesystem(role_selector.get_ns());
  ceph_assert(fs != nullptr);
  int64_t const pool_id = fs->mds_map.get_metadata_pool();
  dout(4) << "JournalTool: resolving pool " << pool_id << dendl;
  std::string pool_name;
  r = rados.pool_reverse_lookup(pool_id, &pool_name);
  if (r < 0) {
    derr << "Pool " << pool_id << " named in MDS map not found in RADOS!" << dendl;
    return r;
  }

  dout(4) << "JournalTool: creating IoCtx.." << dendl;
  r = rados.ioctx_create(pool_name.c_str(), input);
  ceph_assert(r == 0);
  output.dup(input);

  // Execution
  // =========
  // journal and header are general journal mode
  // event mode is only specific for mdlog
  auto roles = role_selector.get_roles();
  if (roles.size() > 1) {
    // Robustness fix: the mode token may have been the final argument, so
    // guard against indexing into an empty vector here (previously UB).
    if (argv.empty()) {
      derr << "missing command" << dendl;
      return -EINVAL;
    }
    const std::string &command = argv[0];
    bool allowed = can_execute_for_all_ranks(mode, command);
    if (!allowed) {
      derr << "operation not allowed for all ranks" << dendl;
      return -EINVAL;
    }

    all_ranks = true;
  }
  for (auto role : roles) {
    rank = role.rank;
    // Pass a fresh copy per rank: the mode handlers erase as they parse.
    std::vector<const char *> rank_argv(argv);
    dout(4) << "Executing for rank " << rank << dendl;
    if (mode == std::string("journal")) {
      r = main_journal(rank_argv);
    } else if (mode == std::string("header")) {
      r = main_header(rank_argv);
    } else if (mode == std::string("event")) {
      r = main_event(rank_argv);
    } else {
      cerr << "Bad command '" << mode << "'" << std::endl;
      return -EINVAL;
    }

    if (r != 0) {
      return r;
    }
  }

  return r;
}
// Validate the --journal argument: only the MDS log and the purge queue
// are supported journal types.  Returns 0 when valid, -1 otherwise.
int JournalTool::validate_type(const std::string &type)
{
  return (type == "mdlog" || type == "purge_queue") ? 0 : -1;
}
// Build the dump file path for the current rank.  With a single rank the
// prefix is used verbatim; when operating on all ranks the rank number is
// appended so per-rank dumps do not clobber each other.
std::string JournalTool::gen_dump_file_path(const std::string &prefix) {
  return all_ranks ? prefix + "." + std::to_string(rank) : prefix;
}
// Whether a command may be executed against every rank at once.  Importing
// a journal image is the single operation restricted to one rank.
bool JournalTool::can_execute_for_all_ranks(const std::string &mode,
                                            const std::string &command) {
  return !(mode == "journal" && command == "import");
}
/**
 * Handle arguments for 'journal' mode
 *
 * This is for operations that act on the journal as a whole.
 *
 * @param argv remaining arguments: inspect | export/import <path> [--force]
 *             | reset [--force]
 * @return 0 on success, negative error code on failure.
 */
int JournalTool::main_journal(std::vector<const char*> &argv)
{
  if (argv.empty()) {
    derr << "Missing journal command, please see help" << dendl;
    return -EINVAL;
  }

  std::string command = argv[0];
  if (command == "inspect") {
    return journal_inspect();
  } else if (command == "export" || command == "import") {
    bool force = false;
    if (argv.size() >= 2) {
      std::string const path = argv[1];
      if (argv.size() == 3) {
        if (std::string(argv[2]) == "--force") {
          force = true;
        } else {
          // Fix: report the argument that was actually rejected (argv[2]);
          // previously this printed the path in argv[1].
          std::cerr << "Unknown argument " << argv[2] << std::endl;
          return -EINVAL;
        }
      }
      return journal_export(path, command == "import", force);
    } else {
      derr << "Missing path" << dendl;
      return -EINVAL;
    }
  } else if (command == "reset") {
    bool force = false;
    if (argv.size() == 2) {
      if (std::string(argv[1]) == "--force") {
        force = true;
      } else {
        std::cerr << "Unknown argument " << argv[1] << std::endl;
        return -EINVAL;
      }
    } else if (argv.size() > 2) {
      std::cerr << "Too many arguments!" << std::endl;
      return -EINVAL;
    }
    return journal_reset(force);
  } else {
    derr << "Bad journal command '" << command << "'" << dendl;
    return -EINVAL;
  }
}
/**
 * Parse arguments and execute for 'header' mode
 *
 * This is for operations that act on the header only.
 */
int JournalTool::main_header(std::vector<const char*> &argv)
{
  JournalFilter filter(type);
  JournalScanner js(input, rank, type, filter);
  // Header-only scan (full=false): do not read the event stream.
  int r = js.scan(false);
  if (r < 0) {
    std::cerr << "Unable to scan journal" << std::endl;
    return r;
  }

  if (!js.header_present) {
    std::cerr << "Header object not found!" << std::endl;
    return -ENOENT;
  } else if (!js.header_valid && js.header == NULL) {
    // Can't do a read or a single-field write without a copy of the original
    derr << "Header could not be read!" << dendl;
    return -ENOENT;
  } else {
    // A partially-decoded header (header != NULL, header_valid false) is
    // still usable for get/set of individual fields.
    ceph_assert(js.header != NULL);
  }

  if (argv.empty()) {
    derr << "Missing header command, must be [get|set]" << dendl;
    return -EINVAL;
  }
  std::vector<const char *>::iterator arg = argv.begin();
  std::string const command = *arg;
  arg = argv.erase(arg);

  if (command == std::string("get")) {
    // Write JSON journal dump to stdout
    JSONFormatter jf(true);
    js.header->dump(&jf);
    jf.flush(std::cout);
    std::cout << std::endl;
  } else if (command == std::string("set")) {
    // Need two more args <key> <val>
    if (argv.size() != 2) {
      derr << "'set' requires two arguments <trimmed_pos|expire_pos|write_pos> <value>" << dendl;
      return -EINVAL;
    }

    std::string const field_name = *arg;
    arg = argv.erase(arg);

    std::string const value_str = *arg;
    arg = argv.erase(arg);
    ceph_assert(argv.empty());

    std::string parse_err;
    uint64_t new_val = strict_strtoll(value_str.c_str(), 0, &parse_err);
    if (!parse_err.empty()) {
      derr << "Invalid value '" << value_str << "': " << parse_err << dendl;
      return -EINVAL;
    }

    // Resolve the named field to a pointer into the in-memory header so a
    // single assignment below covers every supported field.
    uint64_t *field = NULL;
    if (field_name == "trimmed_pos") {
      field = &(js.header->trimmed_pos);
    } else if (field_name == "expire_pos") {
      field = &(js.header->expire_pos);
    } else if (field_name == "write_pos") {
      field = &(js.header->write_pos);
    } else if (field_name == "pool_id") {
      // NOTE(review): writes a signed pool_id through a uint64_t* alias --
      // assumes matching size/representation; confirm on all targets.
      field = (uint64_t*)(&(js.header->layout.pool_id));
    } else {
      derr << "Invalid field '" << field_name << "'" << dendl;
      return -EINVAL;
    }

    std::cout << "Updating " << field_name << std::hex << " 0x" << *field << " -> 0x" << new_val << std::dec << std::endl;
    *field = new_val;

    dout(4) << "Writing object..." << dendl;
    bufferlist header_bl;
    encode(*(js.header), header_bl);
    // Persist the modified header back to the journal's header object.
    output.write_full(js.obj_name(0), header_bl);
    dout(4) << "Write complete." << dendl;
    std::cout << "Successfully updated header." << std::endl;
  } else {
    derr << "Bad header command '" << command << "'" << dendl;
    return -EINVAL;
  }

  return 0;
}
/**
 * Parse arguments and execute for 'event' mode
 *
 * This is for operations that act on LogEvents within the log:
 * get (dump), recover_dentries (scavenge metadata back into the
 * backing store), and splice (erase events/regions).
 *
 * @param argv remaining arguments: <effect> <selector...> <output> [opts]
 * @return 0 on success, negative error code on failure.
 */
int JournalTool::main_event(std::vector<const char*> &argv)
{
  int r;

  if (argv.empty()) {
    derr << "Missing event command, please see help" << dendl;
    return -EINVAL;
  }

  std::vector<const char*>::iterator arg = argv.begin();
  bool dry_run = false;

  std::string command = *(arg++);
  if (command != "get" && command != "splice" && command != "recover_dentries") {
    derr << "Unknown argument '" << command << "'" << dendl;
    return -EINVAL;
  }

  if (command == "recover_dentries") {
    // recover_dentries only makes sense for the MDS log, and accepts an
    // optional --dry_run flag (scan and report, but write nothing).
    if (type != "mdlog") {
      derr << "journaler for " << type << " can't do \"recover_dentries\"." << dendl;
      return -EINVAL;
    } else {
      if (arg != argv.end() && ceph_argparse_flag(argv, arg, "--dry_run", (char*)NULL)) {
        dry_run = true;
      }
    }
  }

  if (arg == argv.end()) {
    derr << "Incomplete command line" << dendl;
    return -EINVAL;
  }

  // Parse filter options
  // ====================
  JournalFilter filter(type);
  r = filter.parse_args(argv, arg);
  if (r) {
    return r;
  }

  // Parse output options
  // ====================
  if (arg == argv.end()) {
    cerr << "Missing output command" << std::endl;
    return -EINVAL;
  }
  std::string output_style = *(arg++);
  if (output_style != "binary" && output_style != "json" &&
      output_style != "summary" && output_style != "list") {
    cerr << "Unknown argument: '" << output_style << "'" << std::endl;
    return -EINVAL;
  }

  std::string output_path = "dump";
  while(arg != argv.end()) {
    std::string arg_str;
    if (ceph_argparse_witharg(argv, arg, &arg_str, "--path", (char*)NULL)) {
      output_path = arg_str;
    } else if (ceph_argparse_witharg(argv, arg, &arg_str, "--alternate-pool",
                                     nullptr)) {
      // Redirect writes (recover_dentries) to a different metadata pool.
      dout(1) << "Using alternate pool " << arg_str << dendl;
      int r = rados.ioctx_create(arg_str.c_str(), output);
      ceph_assert(r == 0);
      other_pool = true;
    } else {
      cerr << "Unknown argument: '" << *arg << "'" << std::endl;
      return -EINVAL;
    }
  }

  const std::string dump_path = gen_dump_file_path(output_path);

  // Execute command
  // ===============
  JournalScanner js(input, rank, type, filter);
  if (command == "get") {
    r = js.scan();
    if (r) {
      derr << "Failed to scan journal (" << cpp_strerror(r) << ")" << dendl;
      return r;
    }
  } else if (command == "recover_dentries") {
    r = js.scan();
    if (r) {
      derr << "Failed to scan journal (" << cpp_strerror(r) << ")" << dendl;
      return r;
    }

    /**
     * Iterate over log entries, attempting to scavenge from each one
     */
    std::set<inodeno_t> consumed_inos;
    for (JournalScanner::EventMap::iterator i = js.events.begin();
         i != js.events.end(); ++i) {
      auto& le = i->second.log_event;
      EMetaBlob const *mb = le->get_metablob();
      if (mb) {
        int scav_r = recover_dentries(*mb, dry_run, &consumed_inos);
        if (scav_r) {
          dout(1) << "Error processing event 0x" << std::hex << i->first << std::dec
                  << ": " << cpp_strerror(scav_r) << ", continuing..." << dendl;
          if (r == 0) {
            r = scav_r;
          }
          // Our goal is to read all we can, so don't stop on errors, but
          // do record them for possible later output.
          // Fix: describe this event's own error (scav_r); previously the
          // first recorded error (r) was formatted instead.
          js.errors.insert(std::make_pair(i->first,
              JournalScanner::EventError(scav_r, cpp_strerror(scav_r))));
        }
      }
    }

    /**
     * Update InoTable to reflect any inode numbers consumed during scavenge
     */
    dout(4) << "consumed " << consumed_inos.size() << " inodes" << dendl;
    if (consumed_inos.size() && !dry_run) {
      int consume_r = consume_inos(consumed_inos);
      if (consume_r) {
        dout(1) << "Error updating InoTable for " << consumed_inos.size()
                << " consume inos: " << cpp_strerror(consume_r) << dendl;
        if (r == 0) {
          r = consume_r;
        }
      }
    }

    // Remove consumed dentries from lost+found.
    if (other_pool && !dry_run) {
      std::set<std::string> found;

      for (auto i : consumed_inos) {
        char s[20];
        snprintf(s, sizeof(s), "%llx_head", (unsigned long long) i);
        dout(20) << "removing " << s << dendl;
        found.insert(std::string(s));
      }

      object_t frag_oid;
      frag_oid = InodeStore::get_object_name(CEPH_INO_LOST_AND_FOUND,
                                             frag_t(), "");
      output.omap_rm_keys(frag_oid.name, found);
    }
  } else if (command == "splice") {
    r = js.scan();
    if (r) {
      derr << "Failed to scan journal (" << cpp_strerror(r) << ")" << dendl;
      return r;
    }

    uint64_t start, end;
    if (filter.get_range(start, end)) {
      // Special case for range filter: erase a numeric range in the log
      uint64_t range = end - start;
      int r = erase_region(js, start, range);
      if (r) {
        derr << "Failed to erase region 0x" << std::hex << start << "~0x" << range << std::dec
             << ": " << cpp_strerror(r) << dendl;
        return r;
      }
    } else {
      // General case: erase a collection of individual entries in the log
      for (JournalScanner::EventMap::iterator i = js.events.begin(); i != js.events.end(); ++i) {
        dout(4) << "Erasing offset 0x" << std::hex << i->first << std::dec << dendl;

        int r = erase_region(js, i->first, i->second.raw_size);
        if (r) {
          derr << "Failed to erase event 0x" << std::hex << i->first << std::dec
               << ": " << cpp_strerror(r) << dendl;
          return r;
        }
      }
    }
  } else {
    cerr << "Unknown argument '" << command << "'" << std::endl;
    return -EINVAL;
  }

  // Generate output
  // ===============
  EventOutput output(js, dump_path);
  int output_result = 0;
  if (output_style == "binary") {
    output_result = output.binary();
  } else if (output_style == "json") {
    output_result = output.json();
  } else if (output_style == "summary") {
    output.summary();
  } else if (output_style == "list") {
    output.list();
  } else {
    std::cerr << "Bad output command '" << output_style << "'" << std::endl;
    return -EINVAL;
  }

  if (output_result != 0) {
    std::cerr << "Error writing output: " << cpp_strerror(output_result) << std::endl;
  }

  return output_result;
}
/**
* Provide the user with information about the condition of the journal,
* especially indicating what range of log events is available and where
* any gaps or corruptions in the journal are.
*/
int JournalTool::journal_inspect()
{
int r;
JournalFilter filter(type);
JournalScanner js(input, rank, type, filter);
r = js.scan();
if (r) {
std::cerr << "Failed to scan journal (" << cpp_strerror(r) << ")" << std::endl;
return r;
}
js.report(std::cout);
return 0;
}
/**
 * Attempt to export a binary dump of the journal.
 *
 * This is allowed to fail if the header is malformed or there are
 * objects inaccessible, in which case the user would have to fall
 * back to manually listing RADOS objects and extracting them, which
 * they can do with the ``rados`` CLI.
 */
int JournalTool::journal_export(std::string const &path, bool import, bool force)
{
  JournalScanner js(input, rank, type);

  if (!import) {
    /*
     * Exports require a scan first: the header must be valid and no
     * objects missing before we attempt to dump.  Imports skip this,
     * since the dump file is the source of truth there.
     */
    int scan_r = js.scan();
    if (scan_r < 0) {
      derr << "Unable to scan journal, assuming badly damaged" << dendl;
      return scan_r;
    }
    if (!js.is_readable()) {
      derr << "Journal not readable, attempt object-by-object dump with `rados`" << dendl;
      return -EIO;
    }
  }

  // Journal data is cleanly readable (or we're importing); hand off to the
  // Dumper to move bytes between RADOS and the local file.
  Dumper dumper;
  int r = dumper.init(mds_role_t(role_selector.get_ns(), rank), type);
  if (r < 0) {
    derr << "dumper::init failed: " << cpp_strerror(r) << dendl;
    return r;
  }
  if (import) {
    return dumper.undump(path.c_str(), force);
  }
  const std::string ex_path = gen_dump_file_path(path);
  return dumper.dump(ex_path.c_str());
}
/**
 * Truncate journal and insert EResetJournal
 */
int JournalTool::journal_reset(bool hard)
{
  Resetter resetter;
  int r = resetter.init(mds_role_t(role_selector.get_ns(), rank), type, hard);
  if (r < 0) {
    derr << "resetter::init failed: " << cpp_strerror(r) << dendl;
    return r;
  }

  // A hard reset discards everything; a soft reset preserves what it can.
  return hard ? resetter.reset_hard() : resetter.reset();
}
/**
* Selective offline replay which only reads out dentries and writes
* them to the backing store iff their version is > what is currently
* in the backing store.
*
* In order to write dentries to the backing store, we may create the
* required enclosing dirfrag objects.
*
* Test this by running scavenge on an unflushed journal, then nuking
* it offline, then starting an MDS and seeing that the dentries are
* visible.
*
* @param metablob an EMetaBlob retrieved from the journal
* @param dry_run if true, do no writes to RADOS
* @param consumed_inos output, populated with any inos inserted
* @returns 0 on success, else negative error code
*/
int JournalTool::recover_dentries(
EMetaBlob const &metablob,
bool const dry_run,
std::set<inodeno_t> *consumed_inos)
{
ceph_assert(consumed_inos != NULL);
int r = 0;
// Replay fullbits (dentry+inode)
for (const auto& frag : metablob.lump_order) {
EMetaBlob::dirlump const &lump = metablob.lump_map.find(frag)->second;
lump._decode_bits();
object_t frag_oid = InodeStore::get_object_name(frag.ino, frag.frag, "");
dout(4) << "inspecting lump " << frag_oid.name << dendl;
// We will record old fnode version for use in hard link handling
// If we don't read an old fnode, take version as zero and write in
// all hardlinks we find.
version_t old_fnode_version = 0;
// Update fnode in omap header of dirfrag object
bool write_fnode = false;
bufferlist old_fnode_bl;
r = input.omap_get_header(frag_oid.name, &old_fnode_bl);
if (r == -ENOENT) {
// Creating dirfrag from scratch
dout(4) << "failed to read OMAP header from directory fragment "
<< frag_oid.name << " " << cpp_strerror(r) << dendl;
write_fnode = true;
// Note: creating the dirfrag *without* a backtrace, relying on
// MDS to regenerate backtraces on read or in FSCK
} else if (r == 0) {
// Conditionally update existing omap header
fnode_t old_fnode;
auto old_fnode_iter = old_fnode_bl.cbegin();
try {
old_fnode.decode(old_fnode_iter);
dout(4) << "frag " << frag_oid.name << " fnode old v" <<
old_fnode.version << " vs new v" << lump.fnode->version << dendl;
old_fnode_version = old_fnode.version;
write_fnode = old_fnode_version < lump.fnode->version;
} catch (const buffer::error &err) {
dout(1) << "frag " << frag_oid.name
<< " is corrupt, overwriting" << dendl;
write_fnode = true;
}
} else {
// Unexpected error
dout(4) << "failed to read OMAP header from directory fragment "
<< frag_oid.name << " " << cpp_strerror(r) << dendl;
return r;
}
if ((other_pool || write_fnode) && !dry_run) {
dout(4) << "writing fnode to omap header" << dendl;
bufferlist fnode_bl;
lump.fnode->encode(fnode_bl);
if (!other_pool || frag.ino >= MDS_INO_SYSTEM_BASE) {
r = output.omap_set_header(frag_oid.name, fnode_bl);
}
if (r != 0) {
derr << "Failed to write fnode for frag object "
<< frag_oid.name << dendl;
return r;
}
}
std::set<std::string> read_keys;
// Compose list of potentially-existing dentries we would like to fetch
for (const auto& fb : lump.get_dfull()) {
// Get a key like "foobar_head"
std::string key;
dentry_key_t dn_key(fb.dnlast, fb.dn.c_str());
dn_key.encode(key);
read_keys.insert(key);
}
for(const auto& rb : lump.get_dremote()) {
// Get a key like "foobar_head"
std::string key;
dentry_key_t dn_key(rb.dnlast, rb.dn.c_str());
dn_key.encode(key);
read_keys.insert(key);
}
for (const auto& nb : lump.get_dnull()) {
// Get a key like "foobar_head"
std::string key;
dentry_key_t dn_key(nb.dnlast, nb.dn.c_str());
dn_key.encode(key);
read_keys.insert(key);
}
// Perform bulk read of existing dentries
std::map<std::string, bufferlist> read_vals;
r = input.omap_get_vals_by_keys(frag_oid.name, read_keys, &read_vals);
if (r == -ENOENT && other_pool) {
r = output.omap_get_vals_by_keys(frag_oid.name, read_keys, &read_vals);
}
if (r != 0) {
derr << "unexpected error reading fragment object "
<< frag_oid.name << ": " << cpp_strerror(r) << dendl;
return r;
}
// Compose list of dentries we will write back
std::map<std::string, bufferlist> write_vals;
for (const auto& fb : lump.get_dfull()) {
// Get a key like "foobar_head"
std::string key;
dentry_key_t dn_key(fb.dnlast, fb.dn.c_str());
dn_key.encode(key);
dout(4) << "inspecting fullbit " << frag_oid.name << "/" << fb.dn
<< dendl;
bool write_dentry = false;
if (read_vals.find(key) == read_vals.end()) {
dout(4) << "dentry did not already exist, will create" << dendl;
write_dentry = true;
} else {
dout(4) << "dentry " << key << " existed already" << dendl;
dout(4) << "dentry exists, checking versions..." << dendl;
bufferlist &old_dentry = read_vals[key];
// Decode dentry+inode
auto q = old_dentry.cbegin();
snapid_t dnfirst;
decode(dnfirst, q);
char dentry_type;
decode(dentry_type, q);
if (dentry_type == 'L' || dentry_type == 'l') {
// leave write_dentry false, we have no version to
// compare with in a hardlink, so it's not safe to
// squash over it with what's in this fullbit
dout(10) << "Existing remote inode in slot to be (maybe) written "
<< "by a full inode from the journal dn '" << fb.dn.c_str()
<< "' with lump fnode version " << lump.fnode->version
<< "vs existing fnode version " << old_fnode_version << dendl;
write_dentry = old_fnode_version < lump.fnode->version;
} else if (dentry_type == 'I' || dentry_type == 'i') {
// Read out inode version to compare with backing store
InodeStore inode;
if (dentry_type == 'i') {
mempool::mds_co::string alternate_name;
DECODE_START(2, q);
if (struct_v >= 2)
decode(alternate_name, q);
inode.decode(q);
DECODE_FINISH(q);
} else {
inode.decode_bare(q);
}
dout(4) << "decoded embedded inode version "
<< inode.inode->version << " vs fullbit version "
<< fb.inode->version << dendl;
if (inode.inode->version < fb.inode->version) {
write_dentry = true;
}
} else {
dout(4) << "corrupt dentry in backing store, overwriting from "
"journal" << dendl;
write_dentry = true;
}
}
if ((other_pool || write_dentry) && !dry_run) {
dout(4) << "writing I dentry " << key << " into frag "
<< frag_oid.name << dendl;
// Compose: Dentry format is dnfirst, [I|L], InodeStore(bare=true)
bufferlist dentry_bl;
encode(fb.dnfirst, dentry_bl);
encode('I', dentry_bl);
encode_fullbit_as_inode(fb, true, &dentry_bl);
// Record for writing to RADOS
write_vals[key] = dentry_bl;
consumed_inos->insert(fb.inode->ino);
}
}
for(const auto& rb : lump.get_dremote()) {
// Get a key like "foobar_head"
std::string key;
dentry_key_t dn_key(rb.dnlast, rb.dn.c_str());
dn_key.encode(key);
dout(4) << "inspecting remotebit " << frag_oid.name << "/" << rb.dn
<< dendl;
bool write_dentry = false;
if (read_vals.find(key) == read_vals.end()) {
dout(4) << "dentry did not already exist, will create" << dendl;
write_dentry = true;
} else {
dout(4) << "dentry " << key << " existed already" << dendl;
dout(4) << "dentry exists, checking versions..." << dendl;
bufferlist &old_dentry = read_vals[key];
// Decode dentry+inode
auto q = old_dentry.cbegin();
snapid_t dnfirst;
decode(dnfirst, q);
char dentry_type;
decode(dentry_type, q);
if (dentry_type == 'L' || dentry_type == 'l') {
dout(10) << "Existing hardlink inode in slot to be (maybe) written "
<< "by a remote inode from the journal dn '" << rb.dn.c_str()
<< "' with lump fnode version " << lump.fnode->version
<< "vs existing fnode version " << old_fnode_version << dendl;
write_dentry = old_fnode_version < lump.fnode->version;
} else if (dentry_type == 'I' || dentry_type == 'i') {
dout(10) << "Existing full inode in slot to be (maybe) written "
<< "by a remote inode from the journal dn '" << rb.dn.c_str()
<< "' with lump fnode version " << lump.fnode->version
<< "vs existing fnode version " << old_fnode_version << dendl;
write_dentry = old_fnode_version < lump.fnode->version;
} else {
dout(4) << "corrupt dentry in backing store, overwriting from "
"journal" << dendl;
write_dentry = true;
}
}
if ((other_pool || write_dentry) && !dry_run) {
dout(4) << "writing L dentry " << key << " into frag "
<< frag_oid.name << dendl;
// Compose: Dentry format is dnfirst, [I|L], InodeStore(bare=true)
bufferlist dentry_bl;
encode(rb.dnfirst, dentry_bl);
encode('L', dentry_bl);
encode(rb.ino, dentry_bl);
encode(rb.d_type, dentry_bl);
// Record for writing to RADOS
write_vals[key] = dentry_bl;
consumed_inos->insert(rb.ino);
}
}
std::set<std::string> null_vals;
for (const auto& nb : lump.get_dnull()) {
std::string key;
dentry_key_t dn_key(nb.dnlast, nb.dn.c_str());
dn_key.encode(key);
dout(4) << "inspecting nullbit " << frag_oid.name << "/" << nb.dn
<< dendl;
auto it = read_vals.find(key);
if (it != read_vals.end()) {
dout(4) << "dentry exists, will remove" << dendl;
auto q = it->second.cbegin();
snapid_t dnfirst;
decode(dnfirst, q);
char dentry_type;
decode(dentry_type, q);
bool remove_dentry = false;
if (dentry_type == 'L' || dentry_type == 'l') {
dout(10) << "Existing hardlink inode in slot to be (maybe) removed "
<< "by null journal dn '" << nb.dn.c_str()
<< "' with lump fnode version " << lump.fnode->version
<< "vs existing fnode version " << old_fnode_version << dendl;
remove_dentry = old_fnode_version < lump.fnode->version;
} else if (dentry_type == 'I' || dentry_type == 'i') {
dout(10) << "Existing full inode in slot to be (maybe) removed "
<< "by null journal dn '" << nb.dn.c_str()
<< "' with lump fnode version " << lump.fnode->version
<< "vs existing fnode version " << old_fnode_version << dendl;
remove_dentry = old_fnode_version < lump.fnode->version;
} else {
dout(4) << "corrupt dentry in backing store, will remove" << dendl;
remove_dentry = true;
}
if (remove_dentry)
null_vals.insert(key);
}
}
// Write back any new/changed dentries
if (!write_vals.empty()) {
r = output.omap_set(frag_oid.name, write_vals);
if (r != 0) {
derr << "error writing dentries to " << frag_oid.name
<< ": " << cpp_strerror(r) << dendl;
return r;
}
}
// remove any null dentries
if (!null_vals.empty()) {
r = output.omap_rm_keys(frag_oid.name, null_vals);
if (r != 0) {
derr << "error removing dentries from " << frag_oid.name
<< ": " << cpp_strerror(r) << dendl;
return r;
}
}
}
/* Now that we've looked at the dirlumps, we finally pay attention to
* the roots (i.e. inodes without ancestry). This is necessary in order
* to pick up dirstat updates on ROOT_INO. dirstat updates are functionally
* important because clients use them to infer completeness
* of directories
*/
for (const auto& fb : metablob.roots) {
inodeno_t ino = fb.inode->ino;
dout(4) << "updating root 0x" << std::hex << ino << std::dec << dendl;
object_t root_oid = InodeStore::get_object_name(ino, frag_t(), ".inode");
dout(4) << "object id " << root_oid.name << dendl;
bool write_root_ino = false;
bufferlist old_root_ino_bl;
r = input.read(root_oid.name, old_root_ino_bl, (1<<22), 0);
if (r == -ENOENT) {
dout(4) << "root does not exist, will create" << dendl;
write_root_ino = true;
} else if (r >= 0) {
r = 0;
InodeStore old_inode;
dout(4) << "root exists, will modify (" << old_root_ino_bl.length()
<< ")" << dendl;
auto inode_bl_iter = old_root_ino_bl.cbegin();
std::string magic;
decode(magic, inode_bl_iter);
if (magic == CEPH_FS_ONDISK_MAGIC) {
dout(4) << "magic ok" << dendl;
old_inode.decode(inode_bl_iter);
if (old_inode.inode->version < fb.inode->version) {
write_root_ino = true;
}
} else {
dout(4) << "magic bad: '" << magic << "'" << dendl;
write_root_ino = true;
}
} else {
derr << "error reading root inode object " << root_oid.name
<< ": " << cpp_strerror(r) << dendl;
return r;
}
if (write_root_ino && !dry_run) {
dout(4) << "writing root ino " << root_oid.name
<< " version " << fb.inode->version << dendl;
// Compose: root ino format is magic,InodeStore(bare=false)
bufferlist new_root_ino_bl;
encode(std::string(CEPH_FS_ONDISK_MAGIC), new_root_ino_bl);
encode_fullbit_as_inode(fb, false, &new_root_ino_bl);
// Write to RADOS
r = output.write_full(root_oid.name, new_root_ino_bl);
if (r != 0) {
derr << "error writing inode object " << root_oid.name
<< ": " << cpp_strerror(r) << dendl;
return r;
}
}
}
return r;
}
/**
 * Erase a region of the log by overwriting it with ENoOp
 *
 * The region [pos, pos+length) is replaced by a single padded no-op
 * entry (an ENoOp for "mdlog", a padded PurgeItem for "purge_queue")
 * sized so the replacement occupies exactly `length` bytes, letting
 * readers skip the damaged span cleanly.
 *
 * @param js scanner, used only to map journal offsets to object names
 * @param pos byte offset of the region within the journal
 * @param length exact byte length of the region to overwrite
 * @returns 0 on success, negative error code on failure (-EINVAL when
 *          the region is too short to hold even an empty padded entry)
 */
int JournalTool::erase_region(JournalScanner const &js, uint64_t const pos, uint64_t const length)
{
  // To erase this region, we use our preamble, the encoding overhead
  // of an ENoOp, and our trailing start ptr. Calculate how much padding
  // is needed inside the ENoOp to make up the difference.
  bufferlist tmp;
  if (type == "mdlog") {
    ENoOp enoop(0);
    enoop.encode_with_header(tmp, CEPH_FEATURES_SUPPORTED_DEFAULT);
  } else if (type == "purge_queue") {
    PurgeItem pi;
    pi.encode(tmp);
  }
  dout(4) << "erase_region " << pos << " len=" << length << dendl;
  // FIXME: get the preamble/postamble length via JournalStream
  // NOTE(review): the subtracted sizeofs are assumed to be the per-entry
  // envelope added by JournalStream::write (u32 + u64 + u64) — confirm
  // against JournalStream if the resilient format ever changes.
  int32_t padding = length - tmp.length() - sizeof(uint32_t) - sizeof(uint64_t) - sizeof(uint64_t);
  dout(4) << "erase_region padding=0x" << std::hex << padding << std::dec << dendl;
  if (padding < 0) {
    derr << "Erase region " << length << " too short" << dendl;
    return -EINVAL;
  }
  bufferlist entry;
  if (type == "mdlog") {
    // Serialize an ENoOp with the correct amount of padding
    ENoOp enoop(padding);
    enoop.encode_with_header(entry, CEPH_FEATURES_SUPPORTED_DEFAULT);
  } else if (type == "purge_queue") {
    PurgeItem pi;
    pi.pad_size = padding;
    pi.encode(entry);
  }
  JournalStream stream(JOURNAL_FORMAT_RESILIENT);
  // Serialize region of log stream
  bufferlist log_data;
  stream.write(entry, &log_data, pos);
  dout(4) << "erase_region data length " << log_data.length() << dendl;
  // The envelope math above must produce exactly `length` bytes.
  ceph_assert(log_data.length() == length);
  // Write log stream region to RADOS
  // FIXME: get object size somewhere common to scan_events
  uint32_t object_size = g_conf()->mds_log_segment_size;
  if (object_size == 0) {
    // Default layout object size
    object_size = file_layout_t::get_default().object_size;
  }
  uint64_t write_offset = pos;
  uint64_t obj_offset = (pos / object_size);
  int r = 0;
  // Stream the replacement bytes out object by object, splitting writes
  // at journal-object boundaries.
  while(log_data.length()) {
    std::string const oid = js.obj_name(obj_offset);
    uint32_t offset_in_obj = write_offset % object_size;
    uint32_t write_len = min(log_data.length(), object_size - offset_in_obj);
    r = output.write(oid, log_data, write_len, offset_in_obj);
    if (r < 0) {
      return r;
    } else {
      dout(4) << "Wrote " << write_len << " bytes to " << oid << dendl;
      r = 0;
    }
    log_data.splice(0, write_len);
    write_offset += write_len;
    obj_offset++;
  }
  return r;
}
/**
 * Serialize the inode carried by an EMetaBlob::fullbit in the on-disk
 * InodeStore format (the format used by the metadata backing store).
 *
 * A distant cousin of EMetaBlob::fullbit::update_inode, but operating
 * on an offline InodeStore: fields are copied across verbatim with no
 * validation.
 *
 * @param fb a fullbit extracted from a journal entry
 * @param bare if true, omit the [EN|DE]CODE_START decoration
 * @param out_bl output, the serialized inode is appended here
 */
void JournalTool::encode_fullbit_as_inode(
  const EMetaBlob::fullbit &fb,
  const bool bare,
  bufferlist *out_bl)
{
  ceph_assert(out_bl != NULL);

  // Populate an InodeStore from the journal fullbit, field by field.
  InodeStore store;
  store.inode = fb.inode;
  store.xattrs = fb.xattrs;
  store.dirfragtree = fb.dirfragtree;
  store.snap_blob = fb.snapbl;
  store.symlink = fb.symlink;
  store.old_inodes = fb.old_inodes;

  // Emit in the requested encoding flavour.
  if (!bare) {
    store.encode(*out_bl, CEPH_FEATURES_SUPPORTED_DEFAULT);
  } else {
    store.encode_bare(*out_bl, CEPH_FEATURES_SUPPORTED_DEFAULT);
  }
}
/**
* Given a list of inode numbers known to be in use by
* inodes in the backing store, ensure that none of these
* numbers are listed as free in the InoTables in the
* backing store.
*
* Used after injecting inodes into the backing store, to
* ensure that the same inode numbers are not subsequently
* used for new files during ordinary operation.
*
* @param inos list of inode numbers to be removed from
* free lists in InoTables
* @returns 0 on success, else negative error code
*/
int JournalTool::consume_inos(const std::set<inodeno_t> &inos)
{
int r = 0;
// InoTable is a per-MDS structure, so iterate over assigned ranks
auto fs = fsmap->get_filesystem(role_selector.get_ns());
std::set<mds_rank_t> in_ranks;
fs->mds_map.get_mds_set(in_ranks);
for (std::set<mds_rank_t>::iterator rank_i = in_ranks.begin();
rank_i != in_ranks.end(); ++rank_i)
{
// Compose object name
std::ostringstream oss;
oss << "mds" << *rank_i << "_inotable";
object_t inotable_oid = object_t(oss.str());
// Read object
bufferlist inotable_bl;
int read_r = input.read(inotable_oid.name, inotable_bl, (1<<22), 0);
if (read_r < 0) {
// Things are really bad if we can't read inotable. Beyond our powers.
derr << "unable to read inotable '" << inotable_oid.name << "': "
<< cpp_strerror(read_r) << dendl;
r = r ? r : read_r;
continue;
}
// Deserialize InoTable
version_t inotable_ver;
auto q = inotable_bl.cbegin();
decode(inotable_ver, q);
InoTable ino_table(NULL);
ino_table.decode(q);
// Update InoTable in memory
bool inotable_modified = false;
for (std::set<inodeno_t>::iterator i = inos.begin();
i != inos.end(); ++i)
{
const inodeno_t ino = *i;
if (ino_table.force_consume(ino)) {
dout(4) << "Used ino 0x" << std::hex << ino << std::dec
<< " requires inotable update" << dendl;
inotable_modified = true;
}
}
// Serialize and write InoTable
if (inotable_modified) {
inotable_ver += 1;
dout(4) << "writing modified inotable version " << inotable_ver << dendl;
bufferlist inotable_new_bl;
encode(inotable_ver, inotable_new_bl);
ino_table.encode_state(inotable_new_bl);
int write_r = output.write_full(inotable_oid.name, inotable_new_bl);
if (write_r != 0) {
derr << "error writing modified inotable " << inotable_oid.name
<< ": " << cpp_strerror(write_r) << dendl;
r = r ? r : read_r;
continue;
}
}
}
return r;
}
| 39,303 | 30.02131 | 122 |
cc
|
null |
ceph-main/src/tools/cephfs/JournalTool.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "MDSUtility.h"
#include "RoleSelector.h"
#include <vector>
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/events/EMetaBlob.h"
#include "include/rados/librados.hpp"
#include "JournalFilter.h"
class JournalScanner;
/**
 * Command line tool for investigating and repairing filesystems
 * with damaged metadata logs
 */
class JournalTool : public MDSUtility
{
  private:
    MDSRoleSelector role_selector;
    // Bit hacky, use this `rank` member to control behaviour of the
    // various main_ functions.
    mds_rank_t rank;
    // when set, generate per rank dump file path
    bool all_ranks = false;
    // journal flavour being operated on ("mdlog" or "purge_queue",
    // see validate_type/erase_region)
    std::string type;
    // Entry points, one per top-level mode
    int main_journal(std::vector<const char*> &argv);
    int main_header(std::vector<const char*> &argv);
    int main_event(std::vector<const char*> &argv);
    // Shared functionality
    int recover_journal();
    // Journal operations
    int journal_inspect();
    int journal_export(std::string const &path, bool import, bool force);
    int journal_reset(bool hard);
    // Header operations
    int header_set();
    // I/O handles
    librados::Rados rados;
    librados::IoCtx input;   // pool journal/metadata is read from
    librados::IoCtx output;  // pool recovered data is written to
    bool other_pool;         // presumably true when output differs from input — confirm in setup code
    // Metadata backing store manipulation
    int read_lost_found(std::set<std::string> &lost);
    int recover_dentries(
        EMetaBlob const &metablob,
        bool const dry_run,
        std::set<inodeno_t> *consumed_inos);
    // Splicing: blank out a byte range of the journal with a no-op entry
    int erase_region(JournalScanner const &jp, uint64_t const pos, uint64_t const length);
    // Backing store helpers
    void encode_fullbit_as_inode(
        const EMetaBlob::fullbit &fb,
        const bool bare,
        bufferlist *out_bl);
    int consume_inos(const std::set<inodeno_t> &inos);
    //validate type
    int validate_type(const std::string &type);
    // generate output file path for dump/export
    std::string gen_dump_file_path(const std::string &prefix);
    // check if an operation (mode, command) is safe to be
    // executed on all ranks.
    bool can_execute_for_all_ranks(const std::string &mode,
                                   const std::string &command);
  public:
    static void usage();
    JournalTool() :
      rank(0), other_pool(false) {}
    int main(std::vector<const char*> &argv);
};
| 2,745 | 25.921569 | 90 |
h
|
null |
ceph-main/src/tools/cephfs/MDSUtility.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "MDSUtility.h"
#include "mon/MonClient.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
// Construct the shared client plumbing (monitor client, client
// messenger, FSMap cache, Objecter). Nothing is connected or started
// here; init() wires the pieces together and contacts the cluster.
MDSUtility::MDSUtility() :
  Dispatcher(g_ceph_context),
  objecter(NULL),
  finisher(g_ceph_context, "MDSUtility", "fn_mds_utility"),
  waiting_for_mds_map(NULL),
  inited(false)
{
  monc = new MonClient(g_ceph_context, poolctx);
  messenger = Messenger::create_client_messenger(g_ceph_context, "mds");
  fsmap = new FSMap();
  objecter = new Objecter(g_ceph_context, messenger, monc, poolctx);
}
// Tear down via shutdown() if init() completed, then release the
// heap-allocated components created in the constructor.
MDSUtility::~MDSUtility()
{
  if (inited) {
    shutdown();
  }
  delete objecter;
  delete monc;
  delete messenger;
  delete fsmap;
  // The init() waiter must have been consumed by handle_fs_map()
  // (init() blocks until it is), so it cannot leak here.
  ceph_assert(waiting_for_mds_map == NULL);
}
// Bring the utility online: start the messenger and objecter,
// authenticate with the monitors, then block until an initial FSMap
// arrives. On any failure the components started so far are unwound
// and a negative value is returned; on success, 0.
int MDSUtility::init()
{
  // Initialize Messenger
  poolctx.start(1);
  messenger->start();
  objecter->set_client_incarnation(0);
  objecter->init();
  // Connect dispatchers before starting objecter
  messenger->add_dispatcher_tail(objecter);
  messenger->add_dispatcher_tail(this);
  // Initialize MonClient
  if (monc->build_initial_monmap() < 0) {
    // No usable monmap — unwind the partially-started components.
    objecter->shutdown();
    messenger->shutdown();
    messenger->wait();
    return -1;
  }
  monc->set_want_keys(CEPH_ENTITY_TYPE_MON|CEPH_ENTITY_TYPE_OSD|CEPH_ENTITY_TYPE_MDS);
  monc->set_messenger(messenger);
  monc->init();
  int r = monc->authenticate();
  if (r < 0) {
    derr << "Authentication failed, did you specify an MDS ID with a valid keyring?" << dendl;
    monc->shutdown();
    objecter->shutdown();
    messenger->shutdown();
    messenger->wait();
    return r;
  }
  client_t whoami = monc->get_global_id();
  messenger->set_myname(entity_name_t::CLIENT(whoami.v));
  // Start Objecter and wait for OSD map
  objecter->start();
  objecter->wait_for_osd_map();
  // Prepare to receive MDS map and request it
  ceph::mutex init_lock = ceph::make_mutex("MDSUtility:init");
  ceph::condition_variable cond;
  bool done = false;
  ceph_assert(!fsmap->get_epoch());
  lock.lock();
  // handle_fs_map() (running under `lock` in ms_dispatch) completes
  // this context once the first FSMap is received.
  waiting_for_mds_map = new C_SafeCond(init_lock, cond, &done, NULL);
  lock.unlock();
  monc->sub_want("fsmap", 0, CEPH_SUBSCRIBE_ONETIME);
  monc->renew_subs();
  // Wait for MDS map
  dout(4) << "waiting for MDS map..." << dendl;
  {
    std::unique_lock locker{init_lock};
    cond.wait(locker, [&done] { return done; });
  }
  dout(4) << "Got MDS map " << fsmap->get_epoch() << dendl;
  finisher.start();
  inited = true;
  return 0;
}
// Stop everything started by init(), roughly in reverse order; called
// by the destructor when init() succeeded.
void MDSUtility::shutdown()
{
  finisher.stop();
  lock.lock();
  objecter->shutdown();
  lock.unlock();
  monc->shutdown();
  messenger->shutdown();
  messenger->wait();
  poolctx.finish();
}
// Dispatcher hook: consume FSMap updates, accept-and-ignore OSD maps,
// and decline everything else so other dispatchers may handle it.
bool MDSUtility::ms_dispatch(Message *m)
{
  std::lock_guard locker{lock};
  const int msg_type = m->get_type();
  if (msg_type == CEPH_MSG_FS_MAP) {
    handle_fs_map((MFSMap*)m);
  } else if (msg_type != CEPH_MSG_OSD_MAP) {
    // Not ours; leave the message for another dispatcher.
    return false;
  }
  m->put();
  return true;
}
// Store the newly received FSMap and, if init() is blocked waiting for
// the first map, wake it by completing (and consuming) the context.
void MDSUtility::handle_fs_map(MFSMap* m)
{
  *fsmap = m->get_fsmap();
  if (!waiting_for_mds_map)
    return;
  waiting_for_mds_map->complete(0);
  waiting_for_mds_map = NULL;
}
| 3,492 | 21.391026 | 94 |
cc
|
null |
ceph-main/src/tools/cephfs/MDSUtility.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef MDS_UTILITY_H_
#define MDS_UTILITY_H_
#include "osdc/Objecter.h"
#include "mds/FSMap.h"
#include "messages/MFSMap.h"
#include "msg/Dispatcher.h"
#include "msg/Messenger.h"
#include "auth/Auth.h"
#include "common/async/context_pool.h"
#include "common/Finisher.h"
#include "common/Timer.h"
/// MDS Utility
/**
 * This class is the parent for MDS utilities, i.e. classes that
 * need access the objects belonging to the MDS without actually
 * acting as an MDS daemon themselves.
 */
class MDSUtility : public Dispatcher {
protected:
  Objecter *objecter;    // owned: created in ctor, deleted in dtor
  FSMap *fsmap;          // owned: refreshed by handle_fs_map()
  Messenger *messenger;  // owned client messenger
  MonClient *monc;       // owned monitor client
  ceph::mutex lock = ceph::make_mutex("MDSUtility::lock");
  Finisher finisher;
  ceph::async::io_context_pool poolctx;
  Context *waiting_for_mds_map;  // non-null only while init() awaits the first FSMap
  bool inited;                   // set by init() on success; gates shutdown() in the dtor
public:
  MDSUtility();
  ~MDSUtility() override;
  void handle_fs_map(MFSMap* m);
  bool ms_dispatch(Message *m) override;
  bool ms_handle_reset(Connection *con) override { return false; }
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override { return false; }
  int init();
  void shutdown();
};
#endif /* MDS_UTILITY_H_ */
| 1,602 | 25.278689 | 70 |
h
|
null |
ceph-main/src/tools/cephfs/MetaTool.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <string.h>
#include <map>
#include <memory>
#include <sstream>
#include <fstream>
#include "include/types.h"
#include "common/Formatter.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "osdc/Journaler.h"
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/InoTable.h"
#include "mds/CDentry.h"
#include "mds/events/ENoOp.h"
#include "mds/events/EUpdate.h"
#include "mds/JournalPointer.h"
// #include "JournalScanner.h"
// #include "EventOutput.h"
// #include "Dumper.h"
// #include "Resetter.h"
// #include "JournalTool.h"
#include "MetaTool.h"
#include "type_helper.hpp"
#include "include/object.h"
WRITE_RAW_ENCODER(char)
WRITE_RAW_ENCODER(unsigned char)
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << __func__ << ": "
using namespace std;
// Free every cached inode_meta_t and drain the pending sub-op stack,
// deleting each owned pointer.
void MetaTool::meta_op::release()
{
  for (const auto& entry : inodes)
    delete entry.second;

  for (; !sub_ops.empty(); sub_ops.pop())
    delete sub_ops.top();
}
// Rebuild this inode_meta_t from the JSON previously produced by the
// "showm" dump: mandatory fields "snapid_t", "itype" and the embedded
// InodeStore under "store".
void MetaTool::inode_meta_t::decode_json(JSONObj *obj)
{
  unsigned long long scratch = 0;
  JSONDecoder::decode_json("snapid_t", scratch, obj, true);
  _f.val = scratch;

  JSONDecoder::decode_json("itype", scratch, obj, true);
  _t = scratch;

  // Allocate the InodeStore lazily; this may be a fresh object.
  if (_i == NULL) {
    _i = new InodeStore;
  }
  JSONDecoder::decode_json("store", *_i, obj, true);
}
// Print the generic client usage banner; MetaTool adds no extra text here.
void MetaTool::usage()
{
  generic_client_usage();
}
// Entry point after argument parsing: connect to RADOS, resolve the
// metadata/data pools and rank — either from the FSMap, or from the
// manual `minfo` triple "<metapool> <datapool> <rank>" — then run the
// requested sub-command for each selected rank via process().
int MetaTool::main(string& mode,
                   string& rank_str,
                   string& minfo,
                   string&ino,
                   string& out,
                   string& in,
                   bool confirm
                  )
{
  int r = 0;
  std::string manual_meta_pool;
  std::string manual_data_pool;
  std::string manual_rank_num;
  bool manual_mode = false;
  if (minfo != "") {
    // Manual mode bypasses the FSMap entirely; all three fields of
    // `minfo` must be present for it to engage.
    vector<string> v;
    string_split(minfo, v);
    manual_meta_pool = v.size() >= 1 ? v[0] : "";
    manual_data_pool = v.size() >= 2 ? v[1] : "";
    manual_rank_num = v.size() >= 3 ? v[2] : "";
    std::cout << "("<< minfo<< ")=>"
              << " mpool: " << manual_meta_pool
              << " dpool: " << manual_data_pool
              << " rank: " << manual_rank_num
              << std::endl;
    if (!manual_meta_pool.empty() && !manual_data_pool.empty() && !manual_rank_num.empty()) {
      std::cout << "you specify rank: " << manual_rank_num
                << " mpool: " << manual_meta_pool
                << " dpool: " << manual_data_pool
                << "\nstart manual mode!!"<< std::endl;
      manual_mode = true;
    }
  }
  // RADOS init
  r = rados.init_with_context(g_ceph_context);
  if (r < 0) {
    cerr << "RADOS unavailable" << std::endl;
    return r;
  }
  if (_debug)
    cout << "MetaTool: connecting to RADOS..." << std::endl;
  r = rados.connect();
  if (r < 0) {
    cerr << "couldn't connect to cluster: " << cpp_strerror(r) << std::endl;
    return r;
  }
  if (!manual_mode) {
    r = role_selector.parse(*fsmap, rank_str);
    if (r != 0) {
      cerr << "Couldn't determine MDS rank." << std::endl;
      return r;
    }
    auto fs = fsmap->get_filesystem(role_selector.get_ns());
    assert(fs != nullptr);
    // prepare io for meta pool
    int64_t const pool_id = fs->mds_map.get_metadata_pool();
    features = fs->mds_map.get_up_features();
    if (features == 0)
      features = CEPH_FEATURES_SUPPORTED_DEFAULT;
    else if (features != CEPH_FEATURES_SUPPORTED_DEFAULT) {
      // Only the default feature set is supported for re-encoding.
      cout << "I think we need to check the feature! : " << features << std::endl;
      return -1;
    }
    std::string pool_name;
    r = rados.pool_reverse_lookup(pool_id, &pool_name);
    if (r < 0) {
      cerr << "Pool " << pool_id << " named in MDS map not found in RADOS!" << std::endl;
      return r;
    }
    if (_debug)
      cout << "MetaTool: creating IoCtx.." << std::endl;
    r = rados.ioctx_create(pool_name.c_str(), io_meta);
    assert(r == 0);
    output.dup(io_meta);
    // prepare io for data pool
    for (const auto p : fs->mds_map.get_data_pools()) {
      r = rados.pool_reverse_lookup(p, &pool_name);
      if (r < 0) {
        // NOTE(review): this message prints pool_id (the metadata
        // pool), not the data pool `p` that failed — likely a bug.
        cerr << "Pool " << pool_id << " named in MDS map not found in RADOS!" << std::endl;
        return r;
      }
      // NOTE(review): io_data is heap-allocated and stored in
      // io_data_v — presumably released elsewhere; confirm ownership.
      librados::IoCtx* io_data = new librados::IoCtx;
      r = rados.ioctx_create(pool_name.c_str(), *io_data);
      assert(r == 0);
      io_data_v.push_back(io_data);
    }
    // Run the requested operation once per selected rank.
    for (auto role : role_selector.get_roles()) {
      rank = role.rank;
      r = process(mode, ino, out, in, confirm);
      cout << "executing for rank " << rank << " op[" <<mode<< "] ret : " << r << std::endl;
    }
  } else {
    features = CEPH_FEATURES_SUPPORTED_DEFAULT;
    r = rados.ioctx_create(manual_meta_pool.c_str(), io_meta);
    assert(r == 0);
    librados::IoCtx* io_data = new librados::IoCtx;
    r = rados.ioctx_create(manual_data_pool.c_str(), *io_data);
    assert(r == 0);
    io_data_v.push_back(io_data);
    rank = conv_t<int>(manual_rank_num);
    r = process(mode, ino, out, in, confirm);
    cout << "op[" << mode << "] ret : " << r << std::endl;
  }
  return r;
}
// Dispatch a top-level command ("showm", "showfn", "listc", "amend",
// "amendfn") to its handler; returns the handler's result, or -EINVAL
// for an unrecognized command.
int MetaTool::process(string& mode, string& ino, string out, string in, bool confirm)
{
  if (mode == "showm")
    return show_meta_info(ino, out);
  if (mode == "showfn")
    return show_fnode(ino, out);
  if (mode == "listc")
    return list_meta_info(ino, out);
  if (mode == "amend")
    return amend_meta_info(ino, in, confirm);
  if (mode == "amendfn")
    return amend_fnode(in, confirm);

  cerr << "bad command '" << mode << "'" << std::endl;
  return -EINVAL;
}
// Queue an OP_SHOW_FN sub-op for inode `ino` and run it, dumping the
// fragment fnodes to `out` (stdout when empty).
int MetaTool::show_fnode(string& ino, string& out)
{
  if (ino == "0") {
    cerr << "parameter error? : ino = " << ino << std::endl;
    return 0;
  }

  const inodeno_t target = std::stoull(ino.c_str(), nullptr, 0);
  meta_op op(_debug, out);
  auto* sop = new meta_op::sub_op(&op);  // owned by op; freed in release()
  sop->sub_op_t = meta_op::OP_SHOW_FN;
  sop->sub_ino_t = meta_op::INO_DIR;
  sop->ino = target;
  op.push_op(sop);
  return op_process(op);
}
// Queue an OP_AMEND_FN sub-op (rewrite dirfrag fnode headers from the
// JSON file `in`) and run it.
int MetaTool::amend_fnode(string& in, bool confirm)
{
  meta_op op(_debug, "", in, confirm);
  auto* sop = new meta_op::sub_op(&op);  // owned by op; freed in release()
  sop->ino = 0;
  sop->sub_op_t = meta_op::OP_AMEND_FN;
  sop->sub_ino_t = meta_op::INO_DIR;
  op.push_op(sop);
  return op_process(op);
}
// Queue an OP_AMEND sub-op for inode `ino`, taking the replacement
// metadata from the JSON file `in`; rejects missing parameters.
int MetaTool::amend_meta_info(string& ino, string& in, bool confirm)
{
  if (ino == "0" || in == "") {
    cerr << "parameter error? : ino = " << ino << std::endl;
    return 0;
  }

  const inodeno_t target = std::stoull(ino.c_str(), nullptr, 0);
  meta_op op(_debug, "", in, confirm);
  auto* sop = new meta_op::sub_op(&op);  // owned by op; freed in release()
  sop->sub_op_t = meta_op::OP_AMEND;
  sop->sub_ino_t = meta_op::INO_DIR;
  sop->ino = target;
  op.push_op(sop);
  return op_process(op);
}
// Queue an OP_LIST sub-op for inode `ino` (list its children/metadata)
// and run it, writing output to `out` (stdout when empty).
int MetaTool::list_meta_info(string& ino, string& out)
{
  if (ino == "0") {
    cerr << "parameter error? : ino = " << ino << std::endl;
    return 0;
  }

  const inodeno_t target = std::stoull(ino.c_str(), nullptr, 0);
  meta_op op(_debug, out);
  auto* sop = new meta_op::sub_op(&op);  // owned by op; freed in release()
  sop->sub_op_t = meta_op::OP_LIST;
  sop->sub_ino_t = meta_op::INO_DIR;
  sop->ino = target;
  op.push_op(sop);
  return op_process(op);
}
// Queue an OP_SHOW sub-op for inode `ino` (dump its metadata as JSON)
// and run it, writing output to `out` (stdout when empty).
int MetaTool::show_meta_info(string& ino, string& out)
{
  if (ino == "0") {
    cerr << "parameter error? : ino = " << ino << std::endl;
    return 0;
  }

  const inodeno_t target = std::stoull(ino.c_str(), nullptr, 0);
  meta_op op(_debug, out);
  auto* sop = new meta_op::sub_op(&op);  // owned by op; freed in release()
  sop->sub_op_t = meta_op::OP_SHOW;
  sop->sub_ino_t = meta_op::INO_DIR;
  sop->ino = target;
  op.push_op(sop);
  return op_process(op);
}
/**
 * Drive the sub-op stack until it drains. Each iteration dispatches the
 * top sub-op to its handler; a handler returns 0 (done — pop it),
 * > 0 (a prerequisite sub-op was pushed — process that first) or
 * < 0 (fatal — abandon all remaining sub-ops).
 *
 * @returns the last handler result (0 on clean completion)
 */
int MetaTool::op_process(meta_op& op)
{
  int r = 0;
  while (!op.no_sops()) {
    if (_debug)
      std::cout << "process : " << op.top_op()->detail() << std::endl;
    switch(op.top_op()->sub_op_t) {
    case meta_op::OP_LIST:
      r = list_meta(op);
      break;
    case meta_op::OP_LTRACE:
      r = file_meta(op);
      break;
    case meta_op::OP_SHOW:
      r = show_meta(op);
      break;
    case meta_op::OP_AMEND:
      r = amend_meta(op);
      break;
    case meta_op::OP_SHOW_FN:
      r = show_fn(op);
      break;
    case meta_op::OP_AMEND_FN:
      r = amend_fn(op);
      break;
    default:
      // fixed message typo ("unknow op")
      cerr << "unknown op" << std::endl;
    }
    if (r == 0)
      op.pop_op();
    else if (r < 0)
      op.clear_sops();
  }
  op.release();
  return r;
}
// Amend the inode targeted by the top sub-op. If its metadata and omap
// key have not been located yet, push an OP_LIST lookup and return 1 so
// the caller retries after it runs; 0 on success, -1 on failure.
int MetaTool::amend_meta(meta_op &op)
{
  meta_op::sub_op* cur = op.top_op();
  auto meta_it = op.inodes.find(cur->ino);
  auto key_it = op.okeys.find(cur->ino);

  if (meta_it == op.inodes.end() || key_it == op.okeys.end()) {
    if (!op.inodes.empty())
      return -1;
    // Nothing cached yet: schedule a listing pass to locate the inode.
    auto* lookup = new meta_op::sub_op(&op);
    lookup->sub_op_t = meta_op::OP_LIST;
    lookup->sub_ino_t = meta_op::INO_DIR;
    lookup->trace_level = 0;
    lookup->ino_c = cur->ino;
    op.push_op(lookup);
    return 1;
  }

  if (_amend_meta(key_it->second, *(meta_it->second), op.infile(), op) < 0)
    return -1;
  return 0;
}
// Serialize this inode_meta_t in the omap dentry-value layout:
// snapid (_f), item type byte (_t), then the bare InodeStore.
void MetaTool::inode_meta_t::encode(::ceph::bufferlist& bl, uint64_t features)
{
  ::encode(_f, bl);
  ::encode(_t, bl);
  _i->encode_bare(bl, features);
}
// Replace the on-disk dentry value keyed `k` with metadata parsed from
// the JSON file `fn`: decode the JSON into inode_meta, show what will
// be written, refuse unless the change was confirmed, then re-encode
// and write the value into the omap of the parent dirfrag object
// (located via the top sub-op's ancestor backtrace).
// Returns omap_set()'s result, or -1 on parse/confirmation/ancestry failure.
int MetaTool::_amend_meta(string& k, inode_meta_t& inode_meta, const string& fn, meta_op& op)
{
  JSONParser parser;
  if (!parser.parse(fn.c_str())) {
    cout << "Error parsing create user response" << std::endl;
    return -1;
  }
  try {
    inode_meta.decode_json(&parser);
  } catch (JSONDecoder::err& e) {
    cout << "failed to decode JSON input: " << e.what() << std::endl;
    return -1;
  }
  // Echo the pending change when unconfirmed or when debugging.
  if (!op.confirm_chg() || op.is_debug()) {
    cout << "you will amend info of inode ==>: " << std::endl;
    _show_meta(inode_meta, "");
  }
  if (!op.confirm_chg()) {
    cout << "warning: this operation is irreversibl!!!\n"
         << "         You must confirm that all logs of mds have been flushed!!!\n"
         << "         if you want amend it, please add --yes-i-really-really-mean-it!!!"
         << std::endl;
    return -1;
  }
  bufferlist bl;
  inode_meta.encode(bl, features);
  map<string, bufferlist> to_set;
  to_set[k].swap(bl);
  // Locate the parent dirfrag via the inode's first ancestor backtrace.
  inode_backpointer_t bp;
  if (!op.top_op()->get_ancestor(bp))
    return -1;
  frag_t frag;
  auto item = op.inodes.find(bp.dirino);
  if (item != op.inodes.end()) {
    frag = item->second->get_meta()->pick_dirfrag(bp.dname);
  }
  string oid = obj_name(bp.dirino, frag);
  int ret = io_meta.omap_set(oid, to_set);
  to_set.clear();
  return ret;
}
// Show the fragment fnodes of the inode targeted by the top sub-op.
// If the inode metadata is not cached yet, push an OP_LIST lookup and
// return 1 so the caller retries; 0 on success, -1 on failure.
int MetaTool::show_fn(meta_op &op)
{
  meta_op::sub_op* cur = op.top_op();
  auto found = op.inodes.find(cur->ino);

  if (found == op.inodes.end()) {
    if (!op.inodes.empty())
      return -1;
    // Schedule a listing pass to locate the inode first.
    auto* lookup = new meta_op::sub_op(&op);
    lookup->sub_op_t = meta_op::OP_LIST;
    lookup->sub_ino_t = meta_op::INO_DIR;
    lookup->trace_level = 0;
    lookup->ino_c = cur->ino;
    op.push_op(lookup);
    return 1;
  }

  return _show_fn(*(found->second), op.outfile()) < 0 ? -1 : 0;
}
/**
 * Dump the fnode header of every dirfrag of a directory inode as JSON
 * (one section per fragment object, plus an "oids" list usable by the
 * "amendfn" command).
 *
 * @param inode_meta the directory inode whose fragments to inspect
 * @param fn output file path; when empty, dump to stdout
 * @returns 0 on success, -1 if a fragment object is missing or corrupt
 */
int MetaTool::_show_fn(inode_meta_t& inode_meta, const string& fn)
{
  std::list<frag_t> frags;
  inode_meta.get_meta()->dirfragtree.get_leaves(frags);
  std::stringstream ds;
  std::string format = "json";
  std::string oids;
  // Own the formatter so it is released on every exit path — the
  // original raw pointer leaked on both the error returns and the
  // normal return.
  std::unique_ptr<Formatter> f(Formatter::create(format));
  f->enable_line_break();
  f->open_object_section("fnodes");
  for (const auto &frag : frags) {
    bufferlist hbl;
    string oid = obj_name(inode_meta.get_meta()->inode->ino, frag);
    // The fnode lives in the omap header of the fragment object.
    int ret = io_meta.omap_get_header(oid, &hbl);
    if (ret < 0) {
      std::cerr << __func__ << " : can't find oid("<< oid << ")" << std::endl;
      return -1;
    }
    {
      fnode_t got_fnode;
      try {
        auto p = hbl.cbegin();
        ::decode(got_fnode, p);
      } catch (const buffer::error &err) {
        cerr << "corrupt fnode header in " << oid
             << ": " << err.what() << std::endl;
        return -1;
      }
      if (!oids.empty())
        oids += ",";
      oids += oid;
      f->open_object_section(oid.c_str());
      got_fnode.dump(f.get());
      f->close_section();
    }
  }
  // Record the object ids so "amendfn" can write headers back.
  f->dump_string("oids", oids.c_str());
  f->close_section();
  f->flush(ds);
  if (fn != "") {
    ofstream o;
    o.open(fn);
    if (o) {
      o << ds.str();
      o.close();
    } else {
      cout << "out to file (" << fn << ") failed" << std::endl;
      cout << ds.str() << std::endl;
    }
  } else
    std::cout << ds.str() << std::endl;
  return 0;
}
// Thin wrapper: apply the fnode JSON from the op's input file,
// normalizing any negative result to -1.
int MetaTool::amend_fn(meta_op &op)
{
  return _amend_fn(op.infile(), op.confirm_chg()) < 0 ? -1 : 0;
}
int MetaTool::_amend_fn(const string& fn, bool confirm)
{
JSONParser parser;
if (!parser.parse(fn.c_str())) {
cout << "Error parsing create user response : " << fn << std::endl;
return -1;
}
if (!confirm) {
cout << "warning: this operation is irreversibl!!!\n"
<< " You must confirm that all logs of mds have been flushed!!!\n"
<< " if you want amend it, please add --yes-i-really-really-mean-it!!!"
<< std::endl;
return -1;
}
try {
string tmp;
JSONDecoder::decode_json("oids", tmp, &parser, true);
string::size_type pos1, pos2;
vector<string> v;
string c = ",";
pos2 = tmp.find(c);
pos1 = 0;
while (string::npos != pos2) {
v.push_back(tmp.substr(pos1, pos2-pos1));
pos1 = pos2 + c.size();
pos2 = tmp.find(c, pos1);
}
if (pos1 != tmp.length())
v.push_back(tmp.substr(pos1));
int ret = 0;
for (auto i : v) {
cout << "amend frag : " << i << "..." << std::endl;
fnode_t fnode;
JSONDecoder::decode_json(i.c_str(), fnode, &parser, true);
bufferlist bl;
fnode.encode(bl);
ret = io_meta.omap_set_header(i, bl);
if (ret < 0)
return ret;
}
} catch (JSONDecoder::err& e) {
cout << "failed to decode JSON input: " << e.what() << std::endl;
return -1;
}
return 0;
}
// Show the cached metadata of the inode targeted by the top sub-op.
// If no inodes have been cached yet, queue an OP_LIST sub-op to locate the
// inode first and return 1 so the caller re-drives the state machine.
// Returns 0 on success, -1 on error.
int MetaTool::show_meta(meta_op &op)
{
  meta_op::sub_op* cur = op.top_op();
  auto found = op.inodes.find(cur->ino);
  if (found != op.inodes.end())
    return _show_meta(*(found->second), op.outfile()) < 0 ? -1 : 0;
  if (!op.inodes.empty())
    return -1;  // inodes were loaded but the target is not among them
  // nothing cached yet: schedule a directory listing to find the inode
  meta_op::sub_op* lookup = new meta_op::sub_op(&op);
  lookup->sub_op_t = meta_op::OP_LIST;
  lookup->sub_ino_t = meta_op::INO_DIR;
  lookup->trace_level = 0;
  lookup->ino_c = cur->ino;
  op.push_op(lookup);
  return 1;
}
// Dump a single inode's InodeStore (plus its snap_blob, if any) as JSON,
// either to the file `fn` or to stdout when `fn` is empty.
// Returns 0 on success, -1 if the snap_blob fails to decode.
int MetaTool::_show_meta(inode_meta_t& inode_meta, const string& fn)
{
  std::stringstream ds;
  std::string format = "json";
  InodeStore& inode_data = *inode_meta.get_meta();
  Formatter* f = Formatter::create(format);
  f->enable_line_break();
  f->open_object_section("meta");
  f->dump_unsigned("snapid_t", inode_meta.get_snapid());
  f->dump_unsigned("itype", inode_meta.get_type());
  f->open_object_section("store");
  inode_data.dump(f);
  try {
    if (inode_data.snap_blob.length()) {
      sr_t srnode;
      auto p = inode_data.snap_blob.cbegin();
      decode(srnode, p);
      f->open_object_section("snap_blob");
      srnode.dump(f);
      f->close_section();
    }
  } catch (const buffer::error &err) {
    cerr << "corrupt decode in snap_blob"
         << ": " << err.what() << std::endl;
    delete f;  // was leaked on this error path
    return -1;
  }
  f->close_section();
  f->close_section();
  f->flush(ds);
  delete f;  // Formatter::create() returns a heap object we own; was leaked
  if (fn != "") {
    ofstream o;
    o.open(fn);
    if (o) {
      o << ds.str();
      o.close();
    } else {
      // fall back to stdout when the output file cannot be opened
      cout << "out to file (" << fn << ") failed" << std::endl;
      cout << ds.str() << std::endl;
    }
  } else
    std::cout << ds.str() << std::endl;
  return 0;
}
// Drive one step of scanning a dirfrag's dentries.
// Two modes:
//  - list mode (sop->ino_c == 0): dump every dentry of dirfrag
//    (sop->ino, sop->frag);
//  - lookup mode (sop->ino_c != 0): scan the parent dirfrag (found through
//    the cached ancestor backtrace) for the dentry of inode sop->ino_c.
// Returns 0 when finished, 1 when a follow-up sub-op was pushed onto the
// op stack (caller must re-drive), -1 on error.
int MetaTool::list_meta(meta_op &op)
{
  meta_op::sub_op* sop = op.top_op();
  bool list_all = false;
  string oid;
  inodeno_t ino = sop->ino_c;
  frag_t frag = sop->frag;
  if (sop->ino_c == 0) {
    // list mode: target the dirfrag object of sop->ino directly
    list_all = true;
    oid = obj_name(sop->ino, frag);
  } else {
    // lookup mode: locate the parent dirfrag via the cached ancestor
    if (_debug)
      std::cout << __func__ << " : " << sop->trace_level << " " << op.ancestors.size() << std::endl;
    inode_backpointer_t bp;
    if (sop->get_c_ancestor(bp)) {
      auto item = op.inodes.find(bp.dirino);
      if (item != op.inodes.end()) {
        // parent inode already cached: pick the dirfrag holding our dname
        frag = item->second->get_meta()->pick_dirfrag(bp.dname);
      }
      oid = obj_name(bp.dirino, frag);
    } else {
      // no ancestor known yet: queue a backtrace load for this inode first
      meta_op::sub_op* nsop = new meta_op::sub_op(&op);
      nsop->ino = sop->ino_c;
      nsop->sub_op_t = meta_op::OP_LTRACE;
      nsop->sub_ino_t = meta_op::INO_DIR;
      op.push_op(nsop);
      return 1;
    }
  }
  if (_debug)
    std::cout << __func__ << " : " << string(list_all?"listall ":"info ") << oid << " "<< ino << std::endl;
  bufferlist hbl;
  int ret = io_meta.omap_get_header(oid, &hbl);
  if (ret < 0) {
    std::cerr << __func__ << " : can't find it, maybe it (ino:"<< sop->ino<< ")isn't a normal dir!" << std::endl;
    return -1;
  }
  if (hbl.length() == 0) { // empty omap header: the dirfrag object has split
    if (list_all) {
      if (frag == frag_t()) {
        // root frag gone: re-list each leaf frag of the cached fragtree
        auto item = op.inodes.find(sop->ino);
        if (item != op.inodes.end()) {
          inodeno_t tmp = sop->ino;
          op.pop_op();
          std::list<frag_t> frags;
          item->second->get_meta()->dirfragtree.get_leaves(frags);
          for (const auto &frag : frags) {
            meta_op::sub_op* nsop = new meta_op::sub_op(&op);
            nsop->ino = tmp;
            nsop->sub_op_t = meta_op::OP_LIST;
            nsop->sub_ino_t = meta_op::INO_DIR;
            nsop->frag = frag;
            op.push_op(nsop);
          }
        } else {
          // inode not cached yet: look it up first, then retry the listing
          meta_op::sub_op* nsop = new meta_op::sub_op(&op);
          nsop->ino_c = sop->ino;
          nsop->sub_op_t = meta_op::OP_LIST;
          nsop->sub_ino_t = meta_op::INO_DIR;
          op.push_op(nsop);
        }
        return 1;
      } else {
        cerr << __func__ << " missing some data (" << oid << ")???" << std::endl;
        return -1;
      }
    } else {
      if (frag == frag_t()) {
        // climb one level further up the ancestor chain and retry
        inode_backpointer_t bp;
        if (sop->get_c_ancestor(bp)) {
          meta_op::sub_op* nsop = new meta_op::sub_op(&op);
          nsop->ino_c = bp.dirino;
          nsop->sub_op_t = meta_op::OP_LIST;
          nsop->sub_ino_t = meta_op::INO_DIR;
          nsop->trace_level = sop->trace_level + 1;
          op.push_op(nsop);
          return 1;
        } else {
          cerr << __func__ << "can't find obj(" << oid << ") ,miss ancestors or miss some objs??? " << std::endl;
          return -1;
        }
      } else {
        cerr << __func__ << "missing some objs(" << oid << ")??? " << std::endl;
        return -1;
      }
    }
  }
  // decode the dirfrag's fnode from the omap header
  fnode_t got_fnode;
  try {
    auto p = hbl.cbegin();
    ::decode(got_fnode, p);
  } catch (const buffer::error &err) {
    cerr << "corrupt fnode header in " << oid
         << ": " << err.what() << std::endl;
    return -1;
  }
  if (_debug) {
    std::string format = "json";
    Formatter* f = Formatter::create(format);
    f->enable_line_break();
    f->dump_string("type", "--fnode--");
    f->open_object_section("fnode");
    got_fnode.dump(f);
    f->close_section();
    f->flush(std::cout);
    std::cout << std::endl;
  }
  // walk the dentries in batches of max_vals omap entries
  std::map<string, bufferlist> out_vals;
  int max_vals = 5;
  io_meta.omap_get_vals(oid, "", max_vals, &out_vals);
  bool force_dirty = false;
  const set<snapid_t> *snaps = NULL;
  unsigned pos = out_vals.size() - 1;
  std::string last_dname;
  for (map<string, bufferlist>::iterator p = out_vals.begin();
       p != out_vals.end();
       ++p, --pos) {
    string dname;
    snapid_t last;
    dentry_key_t::decode_helper(p->first, dname, last);
    // NOTE(review): last_dname is only updated when _debug is set, yet the
    // pagination loop below relies on it — looks like a bug; confirm upstream.
    if (_debug)
      last_dname = dname;
    try {
      if (!list_all) {
        // lookup mode: stop as soon as the wanted child is found
        if (show_child(p->first, dname, last, p->second, pos, snaps,
                       &force_dirty, ino, &op) == 1) {
          return 0;
        }
      } else {
        cout << "dname : " << dname << " " << last << std::endl;
        if (show_child(p->first, dname, last, p->second, pos, snaps,
                       &force_dirty) == 1)
          return 0;
      }
    } catch (const buffer::error &err) {
      derr << "Corrupt dentry '" << dname << "' : "
           << err.what() << "(" << "" << ")" << dendl;
      return -1;
    }
  }
  // a full batch means there may be more entries; continue after last_dname
  while (out_vals.size() == (size_t)max_vals) {
    out_vals.clear();
    io_meta.omap_get_vals(oid, last_dname, max_vals, &out_vals);
    pos = out_vals.size() - 1;
    // NOTE(review): the first element of the new batch is skipped here; if
    // omap_get_vals uses start-after semantics this drops a dentry — confirm.
    for (map<string, bufferlist>::iterator p = (++out_vals.begin());
         p != out_vals.end();
         ++p, --pos) {
      string dname;
      snapid_t last;
      dentry_key_t::decode_helper(p->first, dname, last);
      last_dname = dname;
      try {
        if (!list_all) {
          if (show_child(p->first, dname, last, p->second, pos, snaps,
                         &force_dirty, ino, &op) == 1) {
            return 0;
          }
        } else {
          cout << "dname : " << dname << " " << last << std::endl;
          if (show_child(p->first, dname, last, p->second, pos, snaps,
                         &force_dirty) == 1)
            return 0;
        }
      } catch (const buffer::error &err) {
        derr << "Corrupt dentry '" << dname << "' : "
             << err.what() << "(" << "" << ")" << dendl;
        return -1;
      }
    }
  }
  if (!list_all) {
    // lookup mode exhausted every dentry without finding the child
    cerr << __func__ << "miss obj(ino:" << ino << ")??? " << std::endl;
    return -1;
  }
  return 0;
}
// Fetch backtrace/layout xattrs for the inode referenced by the top sub-op.
// Directory inodes are looked up in the metadata pool first; on a miss the
// sub-op is retargeted at the data pools (INO_F) and 1 is returned so the
// caller retries.  Returns 0 once an ancestor trace is recorded, -1 on error.
int MetaTool::file_meta(meta_op &op)
{
  meta_op::sub_op* sop = op.top_op();
  int r = 0;
  if (sop->sub_ino_t == meta_op::INO_DIR) {
    r = _file_meta(op, io_meta);
  } else if (sop->sub_ino_t == meta_op::INO_F) {
    // probe every data pool until one holds the object
    for (auto* io : io_data_v) {
      r = _file_meta(op, *io);
      if (r == 1)
        break;
    }
  }
  if (r == 1) {
    inode_backpointer_t bp;
    if (sop->get_ancestor(bp))
      return 0;
    std::cerr << "no trace for obj (ino:" << sop->ino <<")??" << std::endl;
    return -1;
  }
  if (sop->sub_ino_t == meta_op::INO_DIR) {
    // not in the metadata pool: retry as a regular file in the data pools
    std::cerr << "\tmaybe it's a file(ino:" << sop->ino << ")" << std::endl;
    sop->sub_ino_t = meta_op::INO_F;
    return 1;
  }
  std::cerr << "can't get (ino:" << sop->ino <<")trace??" << std::endl;
  return -1;
}
// Read the "parent" (backtrace) and "layout" xattrs of the first object of
// inode `op.top_op()->ino` in pool `io`.  On success the first ancestor is
// recorded in op.ancestors and 1 is returned; 0 means the object was not
// found, -1 means the xattrs were present but unusable.
int MetaTool::_file_meta(meta_op &op, librados::IoCtx& io)
{
  inodeno_t ino = op.top_op()->ino;
  std::string oid = obj_name(ino);
  bufferlist pointer_bl;
  std::map<std::string, bufferlist> attrset;
  int r = 0;
  bool have_data = false;
  r = io.getxattrs (oid.c_str(), attrset);
  if (0 == r) {
    std::stringstream ds;
    std::string format = "json";
    Formatter* f = Formatter::create(format);
    auto item = attrset.find("parent");
    if (item != attrset.end()) {
      inode_backtrace_t i_bt;
      try {
        bufferlist::const_iterator q = item->second.cbegin();
        i_bt.decode(q);
        f->open_array_section("info");
        have_data = true;
        if (i_bt.ancestors.size() > 0)
          op.ancestors[ino] = i_bt.ancestors[0];
        f->dump_string("type", "--i_bt--");
        f->open_object_section("parent");
        i_bt.dump(f);
        f->close_section();
      } catch (buffer::error &e) {
        cerr << "failed to decode parent of " << oid << std::endl;
        delete f;  // Formatter::create() returns a heap object; was leaked
        return -1;
      }
    } else {
      cerr << oid << " in " << io.get_pool_name() << " , but no parent" << std::endl;
      delete f;  // was leaked on this path
      return -1;
    }
    item = attrset.find("layout");
    if (item != attrset.end()) {
      file_layout_t layout;
      try {
        auto q = item->second.cbegin();
        layout.decode(q);
        f->dump_string("type", "--layout--");
        f->open_object_section("layout");
        layout.dump(f);
        f->close_section();
      } catch (buffer::error &e) {
        cerr << "failed to decode layout of " << oid << std::endl;
        delete f;  // was leaked on this path
        return -1;
      }
    } else {
      // missing layout is not fatal; the backtrace alone is still useful
      cerr << oid << " in " << io.get_pool_name() << " , but no layout" << std::endl;
    }
    if (have_data) {
      f->close_section();
      f->flush(ds);
      delete f;  // done formatting
      if (_debug)
        cout << ino << " : "<< ds.str() << std::endl;
      return 1;
    }
    delete f;  // was leaked when no usable data was found
  }
  return 0;
}
// Build the RADOS object name "<ino hex>.<offset hex>[suffix]" for a
// numbered object of an inode, e.g. "10000000000.00000000".
std::string MetaTool::obj_name(inodeno_t ino, uint64_t offset, const char *suffix) const
{
  char buf[60];
  const char *tail = suffix ? suffix : "";
  snprintf(buf, sizeof(buf), "%llx.%08llx%s",
           (long long unsigned)ino, (long long unsigned)offset, tail);
  return std::string(buf);
}
// Build the RADOS object name "<ino hex>.<frag hex>[suffix]" for a dirfrag
// object of a directory inode.
std::string MetaTool::obj_name(inodeno_t ino, frag_t fg, const char *suffix) const
{
  char buf[60];
  const char *tail = suffix ? suffix : "";
  snprintf(buf, sizeof(buf), "%llx.%08llx%s",
           (long long unsigned)ino, (long long unsigned)fg, tail);
  return std::string(buf);
}
// Build the RADOS object name "<ino>.<offset hex>[suffix]" from a
// caller-supplied inode string, lower-cased.
std::string MetaTool::obj_name(const char* ino, uint64_t offset, const char *suffix) const
{
  char name[60];
  snprintf(name, sizeof(name), "%s.%08llx%s", ino, (long long unsigned)offset, suffix ? suffix : "");
  std::string out = name;
  // cast through unsigned char: passing a negative plain char straight to
  // tolower() is undefined behavior
  transform(out.begin(), out.end(), out.begin(),
            [](unsigned char c) { return ::tolower(c); });
  return out;
}
int MetaTool::show_child(std::string_view key,
std::string_view dname,
const snapid_t last,
bufferlist &bl,
const int pos,
const std::set<snapid_t> *snaps,
bool *force_dirty,
inodeno_t sp_ino,
meta_op* op)
{
bufferlist::const_iterator q = bl.cbegin();
snapid_t first;
::decode(first, q);
// marker
char type;
::decode(type, q);
if (_debug)
std::cout << pos << " type '" << type << "' dname '" << dname
<< " [" << first << "," << last << "]"
<< std::endl;
// bool stale = false;
if (snaps && last != CEPH_NOSNAP) {
derr << "!!!! erro !!!!" << dendl;
return -1;
}
// CDentry *dn = NULL;
// look for existing dentry for _last_ snap, can't process snap of obj
//if *(stale)
// dn = lookup_exact_snap(dname, last);
//else
// dn = lookup(dname, last);
if (type == 'L' || type == 'l') {
// hard link
inodeno_t ino;
unsigned char d_type;
mempool::mds_co::string alternate_name;
CDentry::decode_remote(type, ino, d_type, alternate_name, q);
if (sp_ino > 0) {
if (sp_ino == ino) {
std::cout << "find hard link : " << ino << "," << d_type << std::endl;
return 1;
}
}
std::cout << "hard link : " << ino << "," << d_type << std::endl;
} else if (type == 'I' || type == 'i') {
// inode
// load inode data before lookuping up or constructing CInode
InodeStore& inode_data = *(new InodeStore);
if (type == 'i') {
mempool::mds_co::string alternate_name;
DECODE_START(2, q);
if (struct_v >= 2)
decode(alternate_name, q);
inode_data.decode(q);
DECODE_FINISH(q);
} else {
inode_data.decode_bare(q);
}
std::stringstream ds;
std::string format = "json";
Formatter* f = Formatter::create(format);
f->enable_line_break();
f->open_object_section("meta");
f->dump_unsigned("snapid_t", first);
f->dump_unsigned("itype", type);
f->open_object_section("store");
inode_data.dump(f);
try {
if (inode_data.snap_blob.length()) {
sr_t srnode;
auto p = inode_data.snap_blob.cbegin();
srnode.decode(p);
f->open_object_section("snap_blob");
srnode.dump(f);
f->close_section();
}
} catch (const buffer::error &err) {
cerr << "corrupt decode in snap_blob"
<< ": " << err.what() << std::endl;
}
f->close_section();
f->close_section();
f->flush(ds);
if (sp_ino > 0 && op != NULL && sp_ino == inode_data.inode->ino) {
inode_meta_t* tmp = new inode_meta_t(first, type, &inode_data);
op->inodes[inode_data.inode->ino] = tmp;
op->okeys[inode_data.inode->ino] = key.data();
return 1;
} else {
delete &inode_data;
}
if (sp_ino == 0) {
cout << ds.str() << std::endl;
}
} else {
std::cerr << __func__ << "unknow type : " << dname << "," << type << std::endl;
}
return 0;
}
| 28,460 | 27.432567 | 123 |
cc
|
null |
ceph-main/src/tools/cephfs/MetaTool.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef METATOOL_H__
#define METATOOL_H__
#include "MDSUtility.h"
#include "RoleSelector.h"
#include <vector>
#include <stack>
using std::stack;
#include "mds/mdstypes.h"
#include "mds/LogEvent.h"
#include "mds/events/EMetaBlob.h"
#include "include/rados/librados.hpp"
#include "common/ceph_json.h"
using ::ceph::bufferlist;
// Offline inspection/repair tool for CephFS metadata objects: walks dirfrag
// omaps and inode backtraces directly via librados, and can dump or amend
// InodeStores and dirfrag fnodes as JSON.
class MetaTool : public MDSUtility
{
public:
  // A decoded dentry value: snapid, type marker and, for primary dentries,
  // the heap-allocated InodeStore it refers to.
  class inode_meta_t {
  public:
    inode_meta_t(snapid_t f = CEPH_NOSNAP, char t = char(255), InodeStore* i = NULL):
      _f(f),_t(t),_i(i) {
    };
    snapid_t get_snapid() const {
      return _f;
    }
    // Only 'I'-typed entries carry an InodeStore; others return NULL.
    InodeStore* get_meta() const {
      if (_t == 'I')
        return _i;
      else
        return NULL;
    }
    int get_type() const {
      return _t;
    }
    void decode_json(JSONObj *obj);
    void encode(::ceph::bufferlist& bl, uint64_t features);
  private:
    snapid_t _f;     // first snapid of the dentry
    char _t;         // dentry type marker ('I' = primary inode)
    InodeStore* _i;  // decoded inode; only meaningful when _t == 'I'
  };
private:
  // A stack-driven operation: the tool pushes/pops sub_ops until the
  // requested action completes.
  class meta_op {
  public:
    meta_op(bool debug = false, std::string out = "", std::string in = "", bool confirm = false):
      _debug(debug),
      _out(out),
      _in(in),
      _confirm(confirm)
    {}
    void release();
    typedef enum {
      OP_LIST = 0,   // list/scan a dirfrag
      OP_LTRACE,     // load an inode's backtrace
      OP_SHOW,       // dump an InodeStore
      OP_AMEND,      // overwrite an InodeStore
      OP_SHOW_FN,    // dump dirfrag fnodes
      OP_AMEND_FN,   // overwrite dirfrag fnodes
      OP_NO          // no-op
    } op_type;
    typedef enum {
      INO_DIR = 0,   // object lives in the metadata pool
      INO_F          // object lives in a data pool
    } ino_type;
    // Human-readable name of an op_type, for debug/trace output.
    static std::string op_type_name(op_type& t) {
      std::string name;
      switch (t) {
      case OP_LIST:
        name = "list dir";
        break;
      case OP_LTRACE:
        name = "load trace";
        break;
      case OP_SHOW:
        name = "show info";
        break;
      case OP_AMEND:
        name = "amend info";
        break;
      case OP_SHOW_FN:
        name = "show fnode";
        break;
      case OP_AMEND_FN:
        name = "amend fnode";
        break;
      case OP_NO:
        name = "noop";
        break;
      default:
        name = "unknow op type";
      }
      return name;
    }
    // Human-readable name of an ino_type, for debug/trace output.
    static std::string ino_type_name(ino_type& t) {
      std::string name;
      switch (t) {
      case INO_DIR:
        name = "dir";
        break;
      case INO_F:
        name = "file";
        break;
      default:
        name = "unknow file type";
      }
      return name;
    }
    // One step of a meta_op: what to do (sub_op_t), where the object lives
    // (sub_ino_t) and which inode/frag it targets.
    class sub_op {
    public:
      sub_op(meta_op* mop):
        trace_level(0),
        _proc(false),
        _mop(mop)
      {}
      void print() {
        std::cout << detail() << std::endl;
      }
      std::string detail() {
        std::stringstream ds;
        ds << " [sub_op]" << op_type_name(sub_op_t) << "|"
           << ino_type_name(sub_ino_t) << "|"
           << ino << "|"
           << frag << "|"
           << ino_c << "|"
           << trace_level << "|"
           << name;
        return ds.str();
      }
      // Look up the cached ancestor backpointer of the child inode ino_c.
      bool get_c_ancestor(inode_backpointer_t& bp) {
        if (!_mop || !ino_c)
          return false;
        auto item = _mop->ancestors.find(ino_c);
        if (item != _mop->ancestors.end()) {
          bp = item->second;
          return true;
        } else
          return false;
      }
      // Look up the cached ancestor backpointer of the target inode ino.
      bool get_ancestor(inode_backpointer_t& bp) {
        if (!_mop || !ino)
          return false;
        auto item = _mop->ancestors.find(ino);
        if (item != _mop->ancestors.end()) {
          bp = item->second;
          return true;
        } else
          return false;
      }
      op_type sub_op_t;      // what to do
      ino_type sub_ino_t;    // metadata-pool or data-pool object
      inodeno_t ino;         // target inode
      frag_t frag;           // target dirfrag
      inodeno_t ino_c;       // child inode to search for (lookup mode)
      unsigned trace_level;  // how far up the ancestor chain we have climbed
      std::string name;
      bool _proc;
      meta_op* _mop;         // owning op (gives access to the caches below)
    };
    // Caches shared by all sub_ops of this operation:
    std::map<inodeno_t, inode_backpointer_t > ancestors;  // ino -> first ancestor
    std::map<inodeno_t, inode_meta_t* > inodes;           // ino -> decoded inode (owned)
    std::map<inodeno_t, std::string > okeys;              // ino -> omap key of its dentry
    void clear_sops() {
      while(!no_sops())
        pop_op();
    }
    bool no_sops() {
      return sub_ops.empty();
    }
    // Push a pending step; ownership of *sop passes to this op.
    void push_op(sub_op* sop) {
      if (_debug)
        std::cout << "<<====" << sop->detail() << std::endl;
      sub_ops.push(sop);
    }
    sub_op* top_op() {
      return sub_ops.top();
    }
    // Pop and destroy the top step.
    void pop_op() {
      sub_op* sop = sub_ops.top();
      if (_debug)
        std::cout << "====>>" << sop->detail() << std::endl;;
      delete sop;
      sub_ops.pop();
    }
    std::string outfile() {
      return _out;
    }
    std::string infile() {
      return _in;
    }
    bool is_debug() {
      return _debug;
    }
    bool confirm_chg() {
      return _confirm;
    }
  private:
    stack<sub_op*> sub_ops;  // pending steps, driven LIFO
    bool _debug;
    std::string _out;        // output file path ("" = stdout)
    std::string _in;         // input file path
    bool _confirm;           // --yes-i-really-really-mean-it was given
  };
  MDSRoleSelector role_selector;
  mds_rank_t rank;
  // I/O handles
  librados::Rados rados;
  librados::IoCtx io_meta;                  // metadata pool
  std::vector<librados::IoCtx*> io_data_v;  // one IoCtx per data pool
  librados::IoCtx output;
  bool _debug;
  uint64_t features;
  // RADOS object-name builders: "<ino>.<frag|offset>[suffix]".
  std::string obj_name(inodeno_t ino, frag_t fg = frag_t(), const char *suffix = NULL) const;
  std::string obj_name(inodeno_t ino, uint64_t offset, const char *suffix = NULL) const;
  std::string obj_name(const char* ino, uint64_t offset, const char *suffix = NULL) const;
  // show_child return codes:
  // 0 : continue to find
  // 1 : stop to find it
  int show_child(std::string_view key,
                 std::string_view dname,
                 const snapid_t last,
                 bufferlist &bl,
                 const int pos,
                 const std::set<snapid_t> *snaps,
                 bool *force_dirty,
                 inodeno_t sp_ino = 0,
                 meta_op* op = NULL
                 );
  int process(std::string& mode, std::string& ino, std::string out, std::string in, bool confirm);
  int show_meta_info(std::string& ino, std::string& out);
  int list_meta_info(std::string& ino, std::string& out);
  int amend_meta_info(std::string& ino, std::string& in, bool confirm);
  int show_fnode(std::string& ino, std::string& out);
  int amend_fnode(std::string& in, bool confirm);
  // op_process drives the sub_op stack; the handlers below implement the
  // individual op types.
  int op_process(meta_op &op);
  int list_meta(meta_op &op);
  int file_meta(meta_op &op);
  int show_meta(meta_op &op);
  int amend_meta(meta_op &op);
  int show_fn(meta_op &op);
  int amend_fn(meta_op &op);
public:
  int _file_meta(meta_op &op, librados::IoCtx& io);
  int _show_meta(inode_meta_t& i, const std::string& fn);
  int _amend_meta(std::string &k, inode_meta_t& i, const std::string& fn, meta_op& op);
  int _show_fn(inode_meta_t& i, const std::string& fn);
  int _amend_fn(const std::string& fn, bool confirm);
  void usage();
  MetaTool(bool debug=false):
    _debug(debug) {}
  ~MetaTool() {}
  int main(std::string& mode,
           std::string& rank_str,
           std::string& minfo,
           std::string&ino,
           std::string& out,
           std::string& in,
           bool confirm = false
           );
};
#endif // METATOOL_H__
| 6,916 | 24.336996 | 98 |
h
|
null |
ceph-main/src/tools/cephfs/PgFiles.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/errno.h"
#include "osdc/Striper.h"
#include "PgFiles.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << "pgeffects." << __func__ << ": "
// Create and initialize the libcephfs handle used for the scan.
// Returns 0 on success or a negative error from libcephfs.
int PgFiles::init()
{
  const int r = ceph_create_with_context(&cmount, g_ceph_context);
  return r != 0 ? r : ceph_init(cmount);
}
// Remember the target PGs and precompute the set of pools they belong to,
// so files whose layout targets an unrelated pool can be skipped cheaply.
PgFiles::PgFiles(Objecter *o, const std::set<pg_t> &pgs_)
  : objecter(o), pgs(pgs_)
{
  for (const auto &pg : pgs)
    pools.insert(pg.m_pool);
}
// Release the libcephfs handle.  cmount is only non-null after a successful
// init(); guard so destroying a never-initialized PgFiles does not pass
// nullptr to ceph_release().
PgFiles::~PgFiles()
{
  if (cmount != nullptr)
    ceph_release(cmount);
}
// Recursively walk `path` on the mounted filesystem, calling hit_file() on
// every regular file and recursing into subdirectories.  Errors on
// individual entries are logged and skipped so one bad inode does not abort
// the whole scan.
void PgFiles::hit_dir(std::string const &path)
{
  dout(10) << "entering " << path << dendl;
  ceph_dir_result *dr = nullptr;
  int r = ceph_opendir(cmount, path.c_str(), &dr);
  if (r != 0) {
    derr << "Failed to open path: " << cpp_strerror(r) << dendl;
    return;
  }
  struct dirent de;
  // a positive return means an entry was read; 0 means end of directory,
  // negative values are errors
  while((r = ceph_readdir_r(cmount, dr, &de)) != 0) {
    if (r < 0) {
      derr << "Error reading path " << path << ": " << cpp_strerror(r)
           << dendl;
      ceph_closedir(cmount, dr); // best effort, ignore r
      return;
    }
    // skip the self/parent pseudo-entries
    if (std::string(de.d_name) == "." || std::string(de.d_name) == "..") {
      continue;
    }
    struct ceph_statx stx;
    std::string de_path = (path + std::string("/") + de.d_name);
    r = ceph_statx(cmount, de_path.c_str(), &stx,
                   CEPH_STATX_INO|CEPH_STATX_SIZE, 0);
    if (r != 0) {
      derr << "Failed to stat path " << de_path << ": "
           << cpp_strerror(r) << dendl;
      // Don't hold up the whole process for one bad inode
      continue;
    }
    if (S_ISREG(stx.stx_mode)) {
      hit_file(de_path, stx);
    } else if (S_ISDIR(stx.stx_mode)) {
      // recurse into subdirectory
      hit_dir(de_path);
    } else {
      dout(20) << "Skipping non reg/dir file: " << de_path << dendl;
    }
  }
  r = ceph_closedir(cmount, dr);
  if (r != 0) {
    derr << "Error closing path " << path << ": " << cpp_strerror(r) << dendl;
    return;
  }
}
// For one regular file, enumerate the RADOS objects its data maps to
// (determined by the file layout and size) and print the path as soon as
// any of those objects hashes into one of the target PGs.
void PgFiles::hit_file(std::string const &path, const struct ceph_statx &stx)
{
  ceph_assert(S_ISREG(stx.stx_mode));
  dout(20) << "Hitting file '" << path << "'" << dendl;
  int l_stripe_unit = 0;
  int l_stripe_count = 0;
  int l_object_size = 0;
  int l_pool_id = 0;
  int r = ceph_get_path_layout(cmount, path.c_str(), &l_stripe_unit,
                               &l_stripe_count, &l_object_size,
                               &l_pool_id);
  if (r != 0) {
    derr << "Error reading layout on " << path << ": " << cpp_strerror(r)
         << dendl;
    return;
  }
  struct file_layout_t layout;
  layout.stripe_unit = l_stripe_unit;
  layout.stripe_count = l_stripe_count;
  layout.object_size = l_object_size;
  layout.pool_id = l_pool_id;
  // Avoid calculating PG if the layout targeted a completely different pool
  if (pools.count(layout.pool_id) == 0) {
    dout(20) << "Fast check missed: pool " << layout.pool_id << " not in "
                "target set" << dendl;
    return;
  }
  auto num_objects = Striper::get_num_objects(layout, stx.stx_size);
  // check every object of the file: "<ino hex>.<index hex>"
  for (uint64_t i = 0; i < num_objects; ++i) {
    char buf[32];
    snprintf(buf, sizeof(buf), "%llx.%08llx", (long long unsigned)stx.stx_ino,
             (long long unsigned int)i);
    dout(20) << " object " << std::string(buf) << dendl;
    pg_t target;
    object_t oid;
    object_locator_t loc;
    loc.pool = layout.pool_id;
    // NOTE(review): the object name is carried in loc.key while oid stays
    // empty — confirm this matches object_locator_to_pg's expectations.
    loc.key = std::string(buf);
    unsigned pg_num_mask = 0;
    unsigned pg_num = 0;
    int r = 0;
    objecter->with_osdmap([&r, oid, loc, &target, &pg_num_mask, &pg_num]
                          (const OSDMap &osd_map) {
      r = osd_map.object_locator_to_pg(oid, loc, target);
      if (r == 0) {
        auto pool = osd_map.get_pg_pool(loc.pool);
        pg_num_mask = pool->get_pg_num_mask();
        pg_num = pool->get_pg_num();
      }
    });
    if (r != 0) {
      // Can happen if layout pointed to pool not in osdmap, for example
      continue;
    }
    // fold the raw placement seed onto the pool's actual pg_num
    target.m_seed = ceph_stable_mod(target.ps(), pg_num, pg_num_mask);
    dout(20) << " target " << target << dendl;
    if (pgs.count(target)) {
      // one hit is enough: report the file and stop checking its objects
      std::cout << path << std::endl;
      return;
    }
  }
}
// Mount the filesystem root, scan `path` recursively for files that touch
// any of the target PGs, then unmount.  Returns 0 on success or the
// mount/unmount error code.
int PgFiles::scan_path(std::string const &path)
{
  int r = ceph_mount(cmount, "/");
  if (r != 0) {
    derr << "Failed to mount: " << cpp_strerror(r) << dendl;
    return r;
  }
  hit_dir(path);
  r = ceph_unmount(cmount);
  if (r != 0)
    derr << "Failed to unmount: " << cpp_strerror(r) << dendl;
  return r;
}
| 4,937 | 24.323077 | 78 |
cc
|
null |
ceph-main/src/tools/cephfs/PgFiles.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef PG_EFFECTS_H_
#define PG_EFFECTS_H_
#include "include/cephfs/libcephfs.h"
#include "osd/osd_types.h"
#include <set>
#include "osdc/Objecter.h"
/**
* This utility scans the files (via an online MDS) and works out
* which ones rely on named PGs. For use when someone has
* some bad/damaged PGs and wants to see which files might be
* affected.
*/
class PgFiles
{
private:
  Objecter *objecter;                        // OSDMap access (object -> PG mapping)
  struct ceph_mount_info *cmount = nullptr;  // libcephfs handle, created in init()
  std::set<pg_t> pgs;                        // the PGs we are searching for
  std::set<uint64_t> pools;                  // pools covered by `pgs` (fast pre-filter)
  // Check every RADOS object of one regular file against `pgs`.
  void hit_file(std::string const &path, const struct ceph_statx &stx);
  // Recursively scan a directory, calling hit_file on regular files.
  void hit_dir(std::string const &path);
public:
  PgFiles(Objecter *o, const std::set<pg_t> &pgs_);
  ~PgFiles();
  // Create/initialize the libcephfs handle; must precede scan_path().
  int init();
  // Mount, scan `path` for files touching the target PGs, unmount.
  int scan_path(std::string const &path);
};
#endif
| 1,161 | 21.346154 | 71 |
h
|
null |
ceph-main/src/tools/cephfs/Resetter.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2010 Greg Farnum <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <memory>
#include "common/errno.h"
#include "osdc/Journaler.h"
#include "mds/JournalPointer.h"
#include "mds/mdstypes.h"
#include "mds/MDCache.h"
#include "mon/MonClient.h"
#include "mds/events/EResetJournal.h"
#include "Resetter.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
using namespace std;
// Prepare to reset either the "mdlog" journal or the "purge_queue" of the
// given role.  For mdlog the journal inode is discovered through (or, with
// `hard`, forcibly written into) the on-disk JournalPointer; the purge
// queue inode is derived directly from the rank.
// Returns 0 on success or a negative error code.
int Resetter::init(mds_role_t role_, const std::string &type, bool hard)
{
  role = role_;
  int r = MDSUtility::init();
  if (r < 0) {
    return r;
  }
  auto fs = fsmap->get_filesystem(role.fscid);
  ceph_assert(nullptr != fs);
  is_mdlog = false;
  if (type == "mdlog") {
    JournalPointer jp(role.rank, fs->mds_map.get_metadata_pool());
    int rt = 0;
    if (hard) {
      // forcibly point the journal at its default inode, regardless of what
      // the pointer object referenced before
      jp.front = role.rank + MDS_INO_LOG_OFFSET;
      jp.back = 0;
      rt = jp.save(objecter);
      if (rt != 0) {
        derr << "Error writing journal pointer: " << cpp_strerror(rt) << dendl;
        return rt;
      }
      ino = jp.front; // only need to reset ino for mdlog
    } else {
      // normal path: read the existing pointer to find the journal inode
      rt = jp.load(objecter);
      if (rt != 0) {
        std::cerr << "Error loading journal: " << cpp_strerror(rt) <<
          ", pass --force to forcibly reset this journal" << std::endl;
        return rt;
      } else {
        ino = jp.front;
      }
    }
    is_mdlog = true;
  } else if (type == "purge_queue") {
    ino = MDS_INO_PURGE_QUEUE + role.rank;
  } else {
    ceph_abort(); // should not get here
  }
  return 0;
}
int Resetter::reset()
{
ceph::mutex mylock = ceph::make_mutex("Resetter::reset::lock");
ceph::condition_variable cond;
bool done;
int r;
auto fs = fsmap->get_filesystem(role.fscid);
ceph_assert(fs != nullptr);
Journaler journaler("resetter", ino,
fs->mds_map.get_metadata_pool(),
CEPH_FS_ONDISK_MAGIC,
objecter, 0, 0, &finisher);
{
std::lock_guard locker{lock};
journaler.recover(new C_SafeCond(mylock, cond, &done, &r));
}
{
std::unique_lock locker{mylock};
cond.wait(locker, [&done] { return done; });
}
if (r != 0) {
if (r == -ENOENT) {
cerr << "journal does not exist on-disk. Did you set a bad rank?"
<< std::endl;
std::cerr << "Error loading journal: " << cpp_strerror(r) <<
", pass --force to forcibly reset this journal" << std::endl;
return r;
} else {
cerr << "got error " << r << "from Journaler, failing" << std::endl;
return r;
}
}
lock.lock();
uint64_t old_start = journaler.get_read_pos();
uint64_t old_end = journaler.get_write_pos();
uint64_t old_len = old_end - old_start;
cout << "old journal was " << old_start << "~" << old_len << std::endl;
uint64_t new_start = round_up_to(old_end+1, journaler.get_layout_period());
cout << "new journal start will be " << new_start
<< " (" << (new_start - old_end) << " bytes past old end)" << std::endl;
journaler.set_read_pos(new_start);
journaler.set_write_pos(new_start);
journaler.set_expire_pos(new_start);
journaler.set_trimmed_pos(new_start);
journaler.set_writeable();
cout << "writing journal head" << std::endl;
journaler.write_head(new C_SafeCond(mylock, cond, &done, &r));
lock.unlock();
{
std::unique_lock locker{mylock};
cond.wait(locker, [&done] { return done; });
}
std::lock_guard l{lock};
if (r != 0) {
return r;
}
if (is_mdlog) {
r = _write_reset_event(&journaler); // reset envent is specific for mdlog journal
if (r != 0) {
return r;
}
}
cout << "done" << std::endl;
return 0;
}
// Recreate the journal from scratch: write a fresh header (and, for mdlog,
// an EResetJournal event) without reading any existing journal state.
// Used when the header/pointer is missing or unreadable.
int Resetter::reset_hard()
{
  auto fs = fsmap->get_filesystem(role.fscid);
  Journaler journaler("resetter", ino,
                      fs->mds_map.get_metadata_pool(),
                      CEPH_FS_ONDISK_MAGIC,
                      objecter, 0, 0, &finisher);
  journaler.set_writeable();
  // layout for the brand-new journal
  file_layout_t default_log_layout = MDCache::gen_default_log_layout(
      fsmap->get_filesystem(role.fscid)->mds_map);
  journaler.create(&default_log_layout, g_conf()->mds_journal_format);
  C_SaferCond cond;
  {
    std::lock_guard l{lock};
    journaler.write_head(&cond);
  }
  int r = cond.wait();
  if (r != 0) {
    derr << "Error writing journal header: " << cpp_strerror(r) << dendl;
    return r;
  }
  if (is_mdlog) // reset event is specific for mdlog journal
  {
    std::lock_guard l{lock};
    r = _write_reset_event(&journaler);
    if (r != 0) {
      derr << "Error writing EResetJournal: " << cpp_strerror(r) << dendl;
      return r;
    }
  }
  if (is_mdlog) {
    dout(4) << "Successfully wrote new journal pointer and header for rank "
            << role << dendl;
  } else {
    dout(4) << "Successfully wrote header for rank " << role << dendl;
  }
  return 0;
}
// Append an EResetJournal event to the (already reset) journal, flush it,
// and wait for outstanding journal prezero operations to complete.
// Returns 0 on success or the flush error code.
int Resetter::_write_reset_event(Journaler *journaler)
{
  ceph_assert(journaler != NULL);
  auto event = std::make_unique<EResetJournal>();
  bufferlist encoded;
  event->encode_with_header(encoded, CEPH_FEATURES_SUPPORTED_DEFAULT);
  cout << "writing EResetJournal entry" << std::endl;
  journaler->append_entry(encoded);
  int ret;
  {
    C_SaferCond flush_done;
    journaler->flush(&flush_done);
    ret = flush_done.wait();
  }
  if (ret < 0)
    return ret;
  {
    // wait until all journal prezero ops are done
    C_SaferCond prezero_done;
    journaler->wait_for_prezero(&prezero_done);
    prezero_done.wait();
  }
  return ret;
}
| 5,680 | 24.475336 | 85 |
cc
|
null |
ceph-main/src/tools/cephfs/Resetter.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2010 Greg Farnum <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef JOURNAL_RESETTER_H_
#define JOURNAL_RESETTER_H_
#include "MDSUtility.h"
class Journaler;
/**
* This class lets you reset an mds journal for troubleshooting or whatever.
*
* To use, create a Resetter, call init(), and then call reset() with the name
* of the file to dump to.
*/
class Resetter : public MDSUtility {
private:
  mds_role_t role;  // filesystem/rank whose journal we operate on
  inodeno_t ino;    // inode of the journal (mdlog or purge queue)
  bool is_mdlog;    // true when resetting "mdlog" (adds an EResetJournal event)
protected:
  // Append an EResetJournal event and flush it; mdlog journals only.
  int _write_reset_event(Journaler *journaler);
public:
  Resetter() {}
  ~Resetter() {}
  // Resolve the journal inode for role_/type; `hard` rewrites the journal
  // pointer instead of loading it.
  int init(mds_role_t role_, const std::string &type, bool hard);
  /**
   * For use when no journal header/pointer was present: write one
   * out from scratch.
   */
  int reset_hard();
  // Reset an existing journal by advancing all positions past its end.
  int reset();
};
#endif /* JOURNAL_RESETTER_H_ */
| 1,161 | 21.784314 | 78 |
h
|
null |
ceph-main/src/tools/cephfs/RoleSelector.cc
|
#include "RoleSelector.h"
// Resolve `str` ("all", "*", or a numeric rank) into one or more roles
// within the filesystem previously stored in `fscid`.
// Returns 0 on success, -EINVAL for a malformed rank, -ENOENT if the rank
// does not exist in the MDS map.
int MDSRoleSelector::parse_rank(
    const FSMap &fsmap,
    std::string const &str)
{
  if (str == "all" || str == "*") {
    // expand to every active rank of the filesystem
    const MDSMap &mds_map = fsmap.get_filesystem(fscid)->mds_map;
    std::set<mds_rank_t> in;
    mds_map.get_mds_set(in);
    for (const auto rank : in)
      roles.push_back(mds_role_t(fscid, rank));
    return 0;
  }
  std::string rank_err;
  mds_rank_t rank = strict_strtol(str.c_str(), 10, &rank_err);
  if (!rank_err.empty())
    return -EINVAL;
  if (fsmap.get_filesystem(fscid)->mds_map.is_dne(rank))
    return -ENOENT;
  roles.push_back(mds_role_t(fscid, rank));
  return 0;
}
// Parse "<fsname>:<rank>" or, when only one filesystem exists and
// allow_unqualified_rank is set, a bare "<rank>"/"all"/"*".
// Stores the resolved filesystem in fscid and the roles in `roles`.
int MDSRoleSelector::parse(const FSMap &fsmap, std::string const &str,
                           bool allow_unqualified_rank)
{
  const auto colon_pos = str.find(":");
  if (colon_pos == std::string::npos) {
    // An unqualified rank is only valid when exactly one filesystem exists.
    if (fsmap.filesystem_count() != 1 || !allow_unqualified_rank)
      return -EINVAL;
    fscid = fsmap.get_filesystem()->fscid;
    return parse_rank(fsmap, str);
  }
  if (colon_pos == 0 || colon_pos == str.size() - 1)
    return -EINVAL;  // empty filesystem name or empty rank part
  const std::string fs_name = str.substr(0, colon_pos);
  const std::string rank_str = str.substr(colon_pos + 1);
  std::shared_ptr<const Filesystem> fs_ptr;
  int r = fsmap.parse_filesystem(fs_name, &fs_ptr);
  if (r != 0)
    return r;
  fscid = fs_ptr->fscid;
  return parse_rank(fsmap, rank_str);
}
| 1,582 | 25.383333 | 70 |
cc
|
null |
ceph-main/src/tools/cephfs/RoleSelector.h
|
#ifndef ROLE_SELECTOR_H_
#define ROLE_SELECTOR_H_
#include <string>
#include <vector>
#include "mds/mdstypes.h"
#include "mds/FSMap.h"
/**
* When you want to let the user act on a single rank in a namespace,
* or all of them.
*/
class MDSRoleSelector
{
public:
  const std::vector<mds_role_t> &get_roles() const {return roles;}
  // Parse "<fs>:<rank>", "<fs>:all"/"<fs>:*" or (if allowed and exactly one
  // filesystem exists) a bare rank token.
  int parse(const FSMap &fsmap, std::string const &str,
            bool allow_unqualified_rank=true);
  MDSRoleSelector()
    : fscid(FS_CLUSTER_ID_NONE)
  {}
  // Filesystem selected by the last successful parse().
  fs_cluster_id_t get_ns() const
  {
    return fscid;
  }
protected:
  // Resolve a rank token ("all", "*", or a number) within `fscid`.
  int parse_rank(
      const FSMap &fsmap,
      std::string const &str);
  std::vector<mds_role_t> roles;  // all selected (fscid, rank) pairs
  fs_cluster_id_t fscid;          // filesystem the roles belong to
};
| 761 | 19.594595 | 69 |
h
|
null |
ceph-main/src/tools/cephfs/TableTool.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "mds/SessionMap.h"
#include "mds/InoTable.h"
#include "mds/SnapServer.h"
#include "TableTool.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix *_dout << __func__ << ": "
using namespace std;
// Print command-line usage for cephfs-table-tool.
// Fix: the two command synopses were previously concatenated onto a
// single output line because the first literal lacked a trailing "\n".
void TableTool::usage()
{
  std::cout << "Usage: \n"
    << "  cephfs-table-tool <all|[mds rank]> <reset|show> <session|snap|inode>\n"
    << "  cephfs-table-tool <all|[mds rank]> <take_inos> <max_ino>"
    << std::endl;

  generic_client_usage();
}
/**
* For a function that takes an MDS role as an argument and
* returns an error code, execute it on the roles specified
* by `role_selector`.
*/
/**
 * Run `fptr` once per role chosen by role_selector, recording each
 * per-rank result inside a "ranks" section of `f`.  Returns the first
 * non-zero result encountered, or 0 if every invocation succeeded.
 */
int TableTool::apply_role_fn(std::function<int(mds_role_t, Formatter *)> fptr, Formatter *f)
{
  ceph_assert(f != NULL);

  int first_err = 0;

  f->open_object_section("ranks");
  for (const auto &role : role_selector.get_roles()) {
    std::ostringstream section_name;
    section_name << role.rank;
    f->open_object_section(section_name.str().c_str());

    f->open_object_section("data");
    const int rank_result = fptr(role, f);
    f->close_section();  // data

    if (first_err == 0) {
      first_err = rank_result;
    }
    f->dump_int("result", rank_result);
    f->close_section();  // per-rank section
  }
  f->close_section();  // ranks

  return first_err;
}
/**
* This class wraps an MDS table class (SessionMap, SnapServer, InoTable)
* with offline load/store code such that we can do offline dumps and resets
* on those tables.
*/
template <typename A>
class TableHandler
{
protected:
  // The RADOS object ID for the table
  std::string object_name;

  // The role in question (may be NONE)
  mds_role_t role;

  // Whether this is an MDSTable subclass (i.e. has leading version field to decode)
  bool mds_table;

public:
  // Compose the backing object name: "mds<rank>_<name>", or
  // "mds_<name>" when the role is NONE (e.g. the snap table).
  TableHandler(mds_role_t r, std::string const &name, bool mds_table_)
    : role(r), mds_table(mds_table_)
  {
    // Compose object name of the table we will dump
    std::ostringstream oss;
    oss << "mds";
    if (!role.is_none()) {
      oss << role.rank;
    }
    oss << "_" << name;
    object_name = oss.str();
  }

  // Read the whole table object, decode it (version prefix first for
  // MDSTable subclasses) and dump its contents to `f`.
  // Returns 0, -EIO on decode failure, or the RADOS read error.
  int load_and_dump(librados::IoCtx *io, Formatter *f)
  {
    ceph_assert(io != NULL);
    ceph_assert(f != NULL);

    // Attempt read
    bufferlist table_bl;
    int read_r = io->read(object_name, table_bl, 0, 0);
    if (read_r >= 0) {
      auto q = table_bl.cbegin();
      try {
        if (mds_table) {
          version_t version;
          decode(version, q);
          f->dump_int("version", version);
        }
        A table_inst;
        table_inst.set_rank(role.rank);
        table_inst.decode(q);
        table_inst.dump(f);

        return 0;
      } catch (buffer::error &e) {
        derr << "table " << object_name << " is corrupt" << dendl;
        return -EIO;
      }
    } else {
      derr << "error reading table object " << object_name
           << ": " << cpp_strerror(read_r) << dendl;
      return read_r;
    }
  }

  // Overwrite the stored table with a freshly-initialized (blank) one.
  int reset(librados::IoCtx *io)
  {
    A table_inst;
    // Compose new (blank) table
    table_inst.set_rank(role.rank);
    table_inst.reset_state();

    // Write the table out
    return write(table_inst, io);
  }

protected:
  // Encode `table_inst` (with a version prefix of 1 for MDSTable
  // subclasses) and replace the backing object with it.
  int write(const A &table_inst, librados::IoCtx *io)
  {
    bufferlist new_bl;
    if (mds_table) {
      version_t version = 1;
      encode(version, new_bl);
    }
    table_inst.encode_state(new_bl);

    // Write out new table
    int r = io->write_full(object_name, new_bl);
    if (r != 0) {
      derr << "error writing table object " << object_name
           << ": " << cpp_strerror(r) << dendl;
      return r;
    }

    return r;
  }
};
// Like TableHandler, but for tables stored as an omap header plus
// key/value pairs (e.g. the session map) instead of a single blob.
template <typename A>
class TableHandlerOmap
{
private:
  // The RADOS object ID for the table
  std::string object_name;

  // The role (rank may be NONE)
  mds_role_t role;

  // Whether this is an MDSTable subclass (i.e. has leading version field to decode)
  bool mds_table;

public:
  // Compose the backing object name: "mds<rank>_<name>", or
  // "mds_<name>" when the role is NONE.
  TableHandlerOmap(mds_role_t r, std::string const &name, bool mds_table_)
    : role(r), mds_table(mds_table_)
  {
    // Compose object name of the table we will dump
    std::ostringstream oss;
    oss << "mds";
    if (!role.is_none()) {
      oss << role.rank;
    }
    oss << "_" << name;
    object_name = oss.str();
  }

  // Decode the omap header, then page through the omap values in
  // chunks of mds_sessionmap_keys_per_op (resuming after the last key
  // seen), feed each batch to decode_values(), and dump the result.
  int load_and_dump(librados::IoCtx *io, Formatter *f)
  {
    ceph_assert(io != NULL);
    ceph_assert(f != NULL);

    // Read in the header
    bufferlist header_bl;
    int r = io->omap_get_header(object_name, &header_bl);
    if (r != 0) {
      derr << "error reading header on '" << object_name << "': "
           << cpp_strerror(r) << dendl;
      return r;
    }

    // Decode the header
    A table_inst;
    table_inst.set_rank(role.rank);
    try {
      table_inst.decode_header(header_bl);
    } catch (buffer::error &e) {
      derr << "table " << object_name << " is corrupt" << dendl;
      return -EIO;
    }

    // Read and decode OMAP values in chunks
    std::string last_key = "";
    while(true) {
      std::map<std::string, bufferlist> values;
      int r = io->omap_get_vals(object_name, last_key,
          g_conf()->mds_sessionmap_keys_per_op, &values);

      if (r != 0) {
        derr << "error reading values: " << cpp_strerror(r) << dendl;
        return r;
      }

      if (values.empty()) {
        // No keys after last_key: pagination complete.
        break;
      }

      try {
        table_inst.decode_values(values);
      } catch (buffer::error &e) {
        derr << "table " << object_name << " is corrupt" << dendl;
        return -EIO;
      }
      last_key = values.rbegin()->first;
    }

    table_inst.dump(f);

    return 0;
  }

  // Replace the table with a blank one: clear the omap and write a
  // freshly-initialized header in a single write transaction.
  int reset(librados::IoCtx *io)
  {
    A table_inst;
    table_inst.set_rank(role.rank);
    table_inst.reset_state();
    bufferlist header_bl;
    table_inst.encode_header(&header_bl);

    // Compose a transaction to clear and write header
    librados::ObjectWriteOperation op;
    op.omap_clear();
    // FAILOK: omap_clear on a nonexistent object must not abort the op.
    op.set_op_flags2(LIBRADOS_OP_FLAG_FAILOK);
    op.omap_set_header(header_bl);

    return io->operate(object_name, &op);
  }
};
/**
 * TableHandler specialisation for the inode table, adding the ability
 * to mark every ino up to a given maximum as consumed.
 */
class InoTableHandler : public TableHandler<InoTable>
{
  public:
  explicit InoTableHandler(mds_role_t r)
    : TableHandler(r, "inotable", true)
  {}

  // Build a fresh table, consume all inos up to `max`, persist it if
  // that changed anything, then dump the resulting state to `f`.
  int take_inos(librados::IoCtx *io, inodeno_t max, Formatter *f)
  {
    InoTable table;
    table.set_rank(role.rank);
    table.reset_state();

    int result = 0;
    const bool changed = table.force_consume_to(max);
    if (changed) {
      result = write(table, io);
    }

    f->dump_int("version", table.get_version());
    table.dump(f);

    return result;
  }
};
int TableTool::main(std::vector<const char*> &argv)
{
int r;
dout(10) << __func__ << dendl;
// RADOS init
// ==========
r = rados.init_with_context(g_ceph_context);
if (r < 0) {
derr << "RADOS unavailable, cannot scan filesystem journal" << dendl;
return r;
}
dout(4) << "connecting to RADOS..." << dendl;
r = rados.connect();
if (r < 0) {
derr << "couldn't connect to cluster: " << cpp_strerror(r) << dendl;
return r;
}
// Require at least 3 args <rank> <mode> <arg> [args...]
if (argv.size() < 3) {
cerr << "missing required 3 arguments" << std::endl;
return -EINVAL;
}
const std::string role_str = std::string(argv[0]);
const std::string mode = std::string(argv[1]);
const std::string table = std::string(argv[2]);
r = role_selector.parse(*fsmap, role_str);
if (r < 0) {
derr << "Bad rank selection: " << role_str << "'" << dendl;
return r;
}
auto fs = fsmap->get_filesystem(role_selector.get_ns());
ceph_assert(fs != nullptr);
int64_t const pool_id = fs->mds_map.get_metadata_pool();
dout(4) << "resolving pool " << pool_id << dendl;
std::string pool_name;
r = rados.pool_reverse_lookup(pool_id, &pool_name);
if (r < 0) {
derr << "Pool " << pool_id << " identified in MDS map not found in RADOS!"
<< dendl;
return r;
}
dout(4) << "creating IoCtx.." << dendl;
r = rados.ioctx_create(pool_name.c_str(), io);
if (r != 0) {
return r;
}
JSONFormatter jf(true);
if (mode == "reset") {
const std::string table = std::string(argv[2]);
if (table == "session") {
r = apply_role_fn([this](mds_role_t rank, Formatter *f) -> int {
return TableHandlerOmap<SessionMapStore>(rank, "sessionmap", false).reset(&io);
}, &jf);
} else if (table == "inode") {
r = apply_role_fn([this](mds_role_t rank, Formatter *f) -> int {
return TableHandler<InoTable>(rank, "inotable", true).reset(&io);
}, &jf);
} else if (table == "snap") {
r = TableHandler<SnapServer>(mds_role_t(), "snaptable", true).reset(&io);
jf.open_object_section("reset_snap_status");
jf.dump_int("result", r);
jf.close_section();
} else {
cerr << "Invalid table '" << table << "'" << std::endl;
return -EINVAL;
}
} else if (mode == "show") {
const std::string table = std::string(argv[2]);
if (table == "session") {
r = apply_role_fn([this](mds_role_t rank, Formatter *f) -> int {
return TableHandlerOmap<SessionMapStore>(rank, "sessionmap", false).load_and_dump(&io, f);
}, &jf);
} else if (table == "inode") {
r = apply_role_fn([this](mds_role_t rank, Formatter *f) -> int {
return TableHandler<InoTable>(rank, "inotable", true).load_and_dump(&io, f);;
}, &jf);
} else if (table == "snap") {
jf.open_object_section("show_snap_table");
{
r = TableHandler<SnapServer>(
mds_role_t(), "snaptable", true).load_and_dump(&io, &jf);
jf.dump_int("result", r);
}
jf.close_section();
} else {
cerr << "Invalid table '" << table << "'" << std::endl;
return -EINVAL;
}
} else if (mode == "take_inos") {
const std::string ino_str = std::string(argv[2]);
std::string ino_err;
inodeno_t ino = strict_strtoll(ino_str.c_str(), 10, &ino_err);
if (!ino_err.empty()) {
derr << "Bad ino '" << ino_str << "'" << dendl;
return -EINVAL;
}
r = apply_role_fn([this, ino](mds_role_t rank, Formatter *f) -> int {
return InoTableHandler(rank).take_inos(&io, ino, f);
}, &jf);
} else {
cerr << "Invalid mode '" << mode << "'" << std::endl;
return -EINVAL;
}
// Subcommand should have written to formatter, flush it
jf.flush(std::cout);
std::cout << std::endl;
return r;
}
| 10,736 | 24.564286 | 98 |
cc
|
null |
ceph-main/src/tools/cephfs/TableTool.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "MDSUtility.h"
#include "RoleSelector.h"
#include "include/rados/librados.hpp"
/**
* Command line tool for debugging the backing store of
* MDSTable instances.
*/
class TableTool : public MDSUtility
{
  private:
    // Which filesystem/rank(s) the command operates on.
    MDSRoleSelector role_selector;

  // I/O handles
  librados::Rados rados;
  librados::IoCtx io;

  // Run `fptr` for every selected role, collecting per-rank results
  // into formatter `f`; returns the first non-zero result.
  int apply_role_fn(std::function<int(mds_role_t, Formatter *)> fptr, Formatter *f);

  public:
    static void usage();
    // Dispatch on positional args: <rank> <reset|show|take_inos> <arg>.
    int main(std::vector<const char*> &argv);
};
| 931 | 21.731707 | 86 |
h
|
null |
ceph-main/src/tools/cephfs/cephfs-data-scan.cc
|
#include "include/types.h"
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "global/global_init.h"
#include "DataScan.h"
using namespace std;
// cephfs-data-scan entry point: parse args, bring up the ceph client
// context, then hand control to DataScan.
int main(int argc, const char **argv)
{
  auto args = argv_to_vec(argc, argv);
  if (args.empty()) {
    cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    DataScan::usage();
    exit(0);
  }

  // Standard ceph utility bring-up.
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY, 0);
  common_init_finish(g_ceph_context);

  DataScan data_scan;

  // Connect to mon cluster, download MDS map etc
  if (int rc = data_scan.init(); rc != 0) {
    std::cerr << "Error in initialization: " << cpp_strerror(rc) << std::endl;
    return rc;
  }

  // Finally, execute the user's commands
  int rc = data_scan.main(args);
  if (rc != 0) {
    std::cerr << "Error (" << cpp_strerror(rc) << ")" << std::endl;
  }
  return rc;
}
| 1,012 | 20.553191 | 80 |
cc
|
null |
ceph-main/src/tools/cephfs/cephfs-journal-tool.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "include/types.h"
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "global/global_init.h"
#include "JournalTool.h"
// cephfs-journal-tool entry point: parse args, bring up the ceph
// client context, then hand control to JournalTool.
int main(int argc, const char **argv)
{
  auto args = argv_to_vec(argc, argv);
  if (args.empty()) {
    std::cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    JournalTool::usage();
    exit(0);
  }

  // Standard ceph utility bring-up.
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY, 0);
  common_init_finish(g_ceph_context);

  JournalTool jt;

  // Connect to mon cluster, download MDS map etc
  if (int rc = jt.init(); rc != 0) {
    std::cerr << "Error in initialization: " << cpp_strerror(rc) << std::endl;
    return rc;
  }

  // Finally, execute the user's commands
  int rc = jt.main(args);
  if (rc != 0) {
    std::cerr << "Error (" << cpp_strerror(rc) << ")" << std::endl;
  }
  return rc;
}
| 1,391 | 23 | 80 |
cc
|
null |
ceph-main/src/tools/cephfs/cephfs-meta-injection.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <include/types.h>
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "global/global_init.h"
#include "MetaTool.h"
#include <iostream>
#include <string>
#include <vector>
#include <boost/program_options.hpp>
namespace po = boost::program_options;
using std::string;
using namespace std;
static string version = "cephfs-meta-injection v1.1";
// cephfs-meta-injection entry point: parse options with
// boost::program_options, print help, then dispatch to MetaTool.
// Fixes: the help example previously showed `--yes-i-really-mean-it`,
// but the registered (and checked) option is
// `--yes-i-really-really-mean-it`; following the old example the flag
// was silently ignored (allow_unregistered) and amend would refuse to
// run.  Also, the catch-all parse-error path returned 0 (success) --
// it now returns -1 like the exception path.
int main(int argc, const char **argv)
{
  auto args = argv_to_vec(argc, argv);
  env_to_vec(args);

  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY, 0);
  common_init_finish(g_ceph_context);

  string rank_str, minfo, ino, out,in;
  po::options_description general("general options");
  general.add_options()
    ("help,h", "produce help message")
    ("debug", "show debug info")
    ("rank,r", po::value<string>(&rank_str), "the rank of cephfs, default(0) (e.g. -r cephfs_a:0)")
    ("minfo", po::value<string>(&minfo), "specify metapool, datapools and rank (e.g. cephfs_metadata_a:cephfs_data_a:0)")
    ("ino,i", po::value<string>(&ino), "specify inode. e.g. 1099511627776 or 0x10000000000, you can find it with cmd, 'ls -i'")
    ("out,o", po::value<string>(&out), "output file")
    ("in", po::value<string>(&in), "input file")
    ("yes-i-really-really-mean-it", "need by amend info")
    ;

  string mode;
  po::options_description modeoptions("mode options");
  modeoptions.add_options()
    ("mode", po::value<string>(&mode),
     "\tlistc : list all obj of dir\n" \
     "\tshowm : show the info of ino\n" \
     "\tshowfn : show the fnode of dir\n" \
     "\tamend : amend part of the meta data\n" \
     "\tamendfn : amend fnode from file\n"
    );

  // "mode" is the first positional argument.
  po::positional_options_description p;
  p.add("mode", 1);

  po::options_description all("all options");
  all.add(modeoptions).add(general);
  po::variables_map vm;
  try {
    po::store(po::command_line_parser(argc, argv).options(all).positional(p).allow_unregistered().run(), vm);
  } catch(exception &e) {
    cerr << "error : " << e.what() << std::endl;
    return -1;
  } catch(...) {
    cout << "param error" << std::endl;
    return -1;
  }

  boost::program_options::notify(vm);
  if (vm.count("help")) {
    std::cout << version << std::endl;
    std::cout << "usage : \n"
              << " cephfs-meta-injection <listc|showm|showfn|amend|amendfn> -r <fsname:rank> -i <ino>"
              << std::endl;
    std::cout << "example : \n"
              << " amend info of inode(1099531628828)\n"
              << " cephfs-meta-injection showm -r cephfs_a:0 -i 1099531628828 -o out\n"
              << " alter file\n"
              << " cephfs-meta-injection amend -r cephfs_a:0 -i 1099531628828 --in out --yes-i-really-really-mean-it"
              << std::endl;
    std::cout << all << std::endl;
    return 0;
  }

  MetaTool mt(vm.count("debug"));
  int rc = mt.init();
  if (rc != 0) {
    std::cerr << "error in initialization: " << cpp_strerror(rc) << std::endl;
    return rc;
  }
  rc = mt.main(mode, rank_str, minfo, ino, out, in, vm.count("yes-i-really-really-mean-it"));
  if (rc != 0) {
    std::cerr << "error (" << cpp_strerror(rc) << ")" << std::endl;
    return -1;
  }
  return rc;
}
| 3,394 | 34 | 129 |
cc
|
null |
ceph-main/src/tools/cephfs/cephfs-table-tool.cc
|
#include "include/types.h"
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "global/global_init.h"
#include "TableTool.h"
using namespace std;
// cephfs-table-tool entry point: parse args, bring up the ceph client
// context, then hand control to TableTool.
int main(int argc, const char **argv)
{
  auto args = argv_to_vec(argc, argv);
  if (args.empty()) {
    cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    TableTool::usage();
    exit(0);
  }

  // Standard ceph utility bring-up.
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY, 0);
  common_init_finish(g_ceph_context);

  TableTool tt;

  // Connect to mon cluster, download MDS map etc
  if (int rc = tt.init(); rc != 0) {
    std::cerr << "Error in initialization: " << cpp_strerror(rc) << std::endl;
    return rc;
  }

  // Finally, execute the user's commands
  int rc = tt.main(args);
  if (rc != 0) {
    std::cerr << "Error (" << cpp_strerror(rc) << ")" << std::endl;
  }
  return rc;
}
| 994 | 20.170213 | 80 |
cc
|
null |
ceph-main/src/tools/cephfs/type_helper.hpp
|
#ifndef TYPE_HELPER_HPP__
#define TYPE_HELPER_HPP__
// Convert between stream-formattable types by round-tripping the value
// through a std::stringstream (e.g. std::string -> int, int -> string).
template<typename T1, typename T2>
T1 conv_t(T2 s){
    std::stringstream buf;
    buf << s;
    T1 result;
    buf >> result;
    return result;
}
// Split `str` on `split`, appending each piece to `out`.  A trailing
// separator does not produce a trailing empty token (e.g. "a:" -> {"a"}).
// Fixes: removed leftover debug `std::cout` prints (a helper header
// must not write to stdout), and marked the function `inline` so this
// header can be included from multiple translation units without
// violating the one-definition rule.
inline void string_split(std::string str, std::vector<std::string>& out, std::string split = ":") {
    auto pos = str.find(split);
    while(pos != std::string::npos){
        out.push_back(str.substr(0, pos));
        if (str.size() > pos + split.size()){
            str = str.substr(pos + split.size());
            pos = str.find(split);
        }else
            return;
    }
    out.push_back(str.substr());
    return;
}
#endif // TYPE_HELPER_HPP__
| 745 | 24.724138 | 92 |
hpp
|
null |
ceph-main/src/tools/cephfs_mirror/ClusterWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <mutex>
#include <vector>
#include "common/ceph_context.h"
#include "common/debug.h"
#include "common/errno.h"
#include "mon/MonClient.h"
#include "ClusterWatcher.h"
#include "ServiceDaemon.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::ClusterWatcher " << __func__
namespace cephfs {
namespace mirror {
// Dispatcher that subscribes to FSMap updates from the monitors and
// notifies `listener` of mirroring/peer changes; per-filesystem status
// is mirrored into `service_daemon`.
ClusterWatcher::ClusterWatcher(CephContext *cct, MonClient *monc, ServiceDaemon *service_daemon,
                               Listener &listener)
  : Dispatcher(cct),
    m_monc(monc),
    m_service_daemon(service_daemon),
    m_listener(listener) {
}
// Nothing to release; shutdown() handles unsubscription.
ClusterWatcher::~ClusterWatcher() {
}
// Only FSMap messages are eligible for fast dispatch.
bool ClusterWatcher::ms_can_fast_dispatch2(const cref_t<Message> &m) const {
  return m->get_type() == CEPH_MSG_FS_MAP;
}
// Fast-dispatch path simply reuses the regular dispatch handler; it
// must succeed because ms_can_fast_dispatch2() pre-filtered the type.
void ClusterWatcher::ms_fast_dispatch2(const ref_t<Message> &m) {
  bool handled = ms_dispatch2(m);
  ceph_assert(handled);
}
// Consume FSMap messages (acting only on those that came from a
// monitor); any other message type is left for another dispatcher.
bool ClusterWatcher::ms_dispatch2(const ref_t<Message> &m) {
  if (m->get_type() != CEPH_MSG_FS_MAP) {
    return false;
  }
  if (m->get_connection()->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
    handle_fsmap(ref_cast<MFSMap>(m));
  }
  return true;
}
// Subscribe to FSMap updates from the monitors.  Returns 0 on success,
// -1 if the subscription request could not be registered.
int ClusterWatcher::init() {
  dout(20) << dendl;

  if (!m_monc->sub_want("fsmap", 0, 0)) {
    derr << ": failed subscribing to FSMap" << dendl;
    return -1;
  }

  m_monc->renew_subs();
  dout(10) << ": subscribed to FSMap" << dendl;
  return 0;
}
// Stop watching: mark stopping (handle_fsmap() checks this under the
// same lock) and drop the FSMap subscription.
void ClusterWatcher::shutdown() {
  dout(20) << dendl;
  std::scoped_lock locker(m_lock);
  m_stopping = true;
  m_monc->sub_unwant("fsmap");
}
// Process a new FSMap: diff it against the tracked state to find which
// filesystems had mirroring enabled/disabled and which peers were
// added/removed, then notify the service daemon and the listener.
// The diff is computed under m_lock; listener callbacks run unlocked.
void ClusterWatcher::handle_fsmap(const cref_t<MFSMap> &m) {
  dout(20) << dendl;

  auto fsmap = m->get_fsmap();
  auto filesystems = fsmap.get_filesystems();

  std::vector<Filesystem> mirroring_enabled;
  std::vector<Filesystem> mirroring_disabled;
  std::map<Filesystem, Peers> peers_added;
  std::map<Filesystem, Peers> peers_removed;
  std::map<Filesystem, uint64_t> fs_metadata_pools;
  {
    std::scoped_lock locker(m_lock);
    if (m_stopping) {
      return;
    }

    // deleted filesystems are considered mirroring disabled
    for (auto it = m_filesystem_peers.begin(); it != m_filesystem_peers.end();) {
      if (!fsmap.filesystem_exists(it->first.fscid)) {
        mirroring_disabled.emplace_back(it->first);
        it = m_filesystem_peers.erase(it);
        continue;
      }
      ++it;
    }

    for (auto &filesystem : filesystems) {
      auto fs = Filesystem{filesystem->fscid,
                           std::string(filesystem->mds_map.get_fs_name())};
      auto pool_id = filesystem->mds_map.get_metadata_pool();
      auto &mirror_info = filesystem->mirror_info;

      if (!mirror_info.is_mirrored()) {
        // mirroring switched off for a filesystem we were tracking
        auto it = m_filesystem_peers.find(fs);
        if (it != m_filesystem_peers.end()) {
          mirroring_disabled.emplace_back(fs);
          m_filesystem_peers.erase(it);
        }
      } else {
        auto [fspeersit, enabled] = m_filesystem_peers.emplace(fs, Peers{});
        auto &peers = fspeersit->second;
        if (enabled) {
          // first map in which this filesystem has mirroring enabled
          mirroring_enabled.emplace_back(fs);
          fs_metadata_pools.emplace(fs, pool_id);
        }

        // peers added
        Peers added;
        std::set_difference(mirror_info.peers.begin(), mirror_info.peers.end(),
                            peers.begin(), peers.end(), std::inserter(added, added.end()));

        // peers removed
        Peers removed;
        std::set_difference(peers.begin(), peers.end(),
                            mirror_info.peers.begin(), mirror_info.peers.end(),
                            std::inserter(removed, removed.end()));

        // update set
        if (!added.empty()) {
          peers_added.emplace(fs, added);
          peers.insert(added.begin(), added.end());
        }
        if (!removed.empty()) {
          peers_removed.emplace(fs, removed);
          for (auto &p : removed) {
            peers.erase(p);
          }
        }
      }
    }
  }

  dout(5) << ": mirroring enabled=" << mirroring_enabled << ", mirroring_disabled="
          << mirroring_disabled << dendl;
  for (auto &fs : mirroring_enabled) {
    m_service_daemon->add_filesystem(fs.fscid, fs.fs_name);
    m_listener.handle_mirroring_enabled(FilesystemSpec(fs, fs_metadata_pools.at(fs)));
  }
  for (auto &fs : mirroring_disabled) {
    m_service_daemon->remove_filesystem(fs.fscid);
    m_listener.handle_mirroring_disabled(fs);
  }

  dout(5) << ": peers added=" << peers_added << ", peers removed=" << peers_removed << dendl;
  for (auto &[fs, peers] : peers_added) {
    for (auto &peer : peers) {
      m_service_daemon->add_peer(fs.fscid, peer);
      m_listener.handle_peers_added(fs, peer);
    }
  }
  for (auto &[fs, peers] : peers_removed) {
    for (auto &peer : peers) {
      m_service_daemon->remove_peer(fs.fscid, peer);
      m_listener.handle_peers_removed(fs, peer);
    }
  }

  // Acknowledge the epoch so the monitors send the next update.
  std::scoped_lock locker(m_lock);
  if (!m_stopping) {
    m_monc->sub_got("fsmap", fsmap.get_epoch());
  } // else we have already done a sub_unwant()
}
} // namespace mirror
} // namespace cephfs
| 5,262 | 27.759563 | 96 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/ClusterWatcher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_CLUSTER_WATCHER_H
#define CEPHFS_MIRROR_CLUSTER_WATCHER_H
#include <map>
#include "common/ceph_mutex.h"
#include "common/async/context_pool.h"
#include "messages/MFSMap.h"
#include "msg/Dispatcher.h"
#include "Types.h"
class MonClient;
namespace cephfs {
namespace mirror {
class ServiceDaemon;
// watch peer changes for filesystems via FSMap updates
class ClusterWatcher : public Dispatcher {
public:
  // Callback interface invoked (outside ClusterWatcher's lock) when a
  // new FSMap changes mirroring state or peer membership.
  struct Listener {
    virtual ~Listener() {
    }

    virtual void handle_mirroring_enabled(const FilesystemSpec &spec) = 0;
    virtual void handle_mirroring_disabled(const Filesystem &filesystem) = 0;

    virtual void handle_peers_added(const Filesystem &filesystem, const Peer &peer) = 0;
    virtual void handle_peers_removed(const Filesystem &filesystem, const Peer &peer) = 0;
  };

  ClusterWatcher(CephContext *cct, MonClient *monc, ServiceDaemon *service_daemon,
                 Listener &listener);
  ~ClusterWatcher();

  // Dispatcher interface: only FSMap messages are consumed (see .cc).
  bool ms_can_fast_dispatch_any() const override {
    return true;
  }
  bool ms_can_fast_dispatch2(const cref_t<Message> &m) const override;
  void ms_fast_dispatch2(const ref_t<Message> &m) override;
  bool ms_dispatch2(const ref_t<Message> &m) override;

  // No special connection lifecycle handling is required.
  void ms_handle_connect(Connection *c) override {
  }
  bool ms_handle_reset(Connection *c) override {
    return false;
  }
  void ms_handle_remote_reset(Connection *c) override {
  }
  bool ms_handle_refused(Connection *c) override {
    return false;
  }

  // Subscribe to / unsubscribe from FSMap updates.
  int init();
  void shutdown();

private:
  ceph::mutex m_lock = ceph::make_mutex("cephfs::mirror::cluster_watcher");
  MonClient *m_monc;
  ServiceDaemon *m_service_daemon;
  Listener &m_listener;

  // Set by shutdown(); checked under m_lock in handle_fsmap().
  bool m_stopping = false;
  // Last-known peer set per mirrored filesystem, used to diff maps.
  std::map<Filesystem, Peers> m_filesystem_peers;

  void handle_fsmap(const cref_t<MFSMap> &m);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_CLUSTER_WATCHER_H
| 2,000 | 24.653846 | 90 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/FSMirror.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/admin_socket.h"
#include "common/ceph_argparse.h"
#include "common/ceph_context.h"
#include "common/common_init.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/WorkQueue.h"
#include "include/stringify.h"
#include "msg/Messenger.h"
#include "FSMirror.h"
#include "PeerReplayer.h"
#include "aio_utils.h"
#include "ServiceDaemon.h"
#include "Utils.h"
#include "common/Cond.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::FSMirror " << __func__
using namespace std;
namespace cephfs {
namespace mirror {
namespace {

// service-daemon attribute keys published per filesystem / per peer
const std::string SERVICE_DAEMON_DIR_COUNT_KEY("directory_count");
const std::string SERVICE_DAEMON_PEER_INIT_FAILED_KEY("peer_init_failed");

// Base class for admin-socket commands registered by FSMirror.
class MirrorAdminSocketCommand {
public:
  virtual ~MirrorAdminSocketCommand() {
  }
  virtual int call(Formatter *f) = 0;
};

// "fs mirror status" command: dumps the mirror's status via `f`.
class StatusCommand : public MirrorAdminSocketCommand {
public:
  explicit StatusCommand(FSMirror *fs_mirror)
    : fs_mirror(fs_mirror) {
  }

  int call(Formatter *f) override {
    fs_mirror->mirror_status(f);
    return 0;
  }

private:
  FSMirror *fs_mirror;  // non-owning; the FSMirror outlives its hook
};

} // anonymous namespace
// Registers the per-filesystem "fs mirror status <name>@<fscid>"
// admin-socket command and routes invocations to StatusCommand.
class MirrorAdminSocketHook : public AdminSocketHook {
public:
  MirrorAdminSocketHook(CephContext *cct, const Filesystem &filesystem, FSMirror *fs_mirror)
    : admin_socket(cct->get_admin_socket()) {
    int r;
    std::string cmd;

    // mirror status format is name@fscid
    cmd = "fs mirror status " + stringify(filesystem.fs_name) + "@" + stringify(filesystem.fscid);
    r = admin_socket->register_command(
      cmd, this, "get filesystem mirror status");
    if (r == 0) {
      // only track commands that were actually registered
      commands[cmd] = new StatusCommand(fs_mirror);
    }
  }

  ~MirrorAdminSocketHook() override {
    admin_socket->unregister_commands(this);
    for (auto &[command, cmdptr] : commands) {
      delete cmdptr;
    }
  }

  // Look up the command object by name and execute it.
  int call(std::string_view command, const cmdmap_t& cmdmap,
           const bufferlist&,
           Formatter *f, std::ostream &errss, bufferlist &out) override {
    auto p = commands.at(std::string(command));
    return p->call(f);
  }

private:
  typedef std::map<std::string, MirrorAdminSocketCommand*, std::less<>> Commands;

  AdminSocket *admin_socket;
  Commands commands;
};
// Mirrors one filesystem (metadata pool `pool_id`) to its configured
// peers; registers the admin-socket hook and seeds the directory-count
// service-daemon attribute to 0.
FSMirror::FSMirror(CephContext *cct, const Filesystem &filesystem, uint64_t pool_id,
                   ServiceDaemon *service_daemon, std::vector<const char*> args,
                   ContextWQ *work_queue)
  : m_cct(cct),
    m_filesystem(filesystem),
    m_pool_id(pool_id),
    m_service_daemon(service_daemon),
    m_args(args),
    m_work_queue(work_queue),
    m_snap_listener(this),
    m_asok_hook(new MirrorAdminSocketHook(cct, filesystem, this)) {
  m_service_daemon->add_or_update_fs_attribute(m_filesystem.fscid, SERVICE_DAEMON_DIR_COUNT_KEY,
                                               (uint64_t)0);
}
FSMirror::~FSMirror() {
  dout(20) << dendl;

  {
    std::scoped_lock locker(m_lock);
    delete m_instance_watcher;
    delete m_mirror_watcher;
  }
  // outside the lock so that in-progress commands can acquire
  // lock and finish executing.
  delete m_asok_hook;
}
// Initialize a peer replayer; caller must hold m_lock.
int FSMirror::init_replayer(PeerReplayer *peer_replayer) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  return peer_replayer->init();
}
// Stop a peer replayer (invoked without m_lock held -- see shutdown path).
void FSMirror::shutdown_replayer(PeerReplayer *peer_replayer) {
  peer_replayer->shutdown();
}
// Tear down the client mount and RADOS handles created by init().
void FSMirror::cleanup() {
  dout(20) << dendl;
  ceph_unmount(m_mount);
  ceph_release(m_mount);
  m_ioctx.close();
  m_cluster.reset();
}
// Reopen log files (e.g. after rotation) on our cluster handle and on
// every peer replayer.
void FSMirror::reopen_logs() {
  std::scoped_lock locker(m_lock);

  if (m_cluster) {
    reinterpret_cast<CephContext *>(m_cluster->cct())->reopen_logs();
  }
  for (auto &[peer, replayer] : m_peer_replayers) {
    replayer->reopen_logs();
  }
}
// Asynchronous init: connect to the cluster, open an ioctx on the
// metadata pool, mount the filesystem, then start the instance
// watcher.  Each failure path unwinds what was set up so far and
// completes `on_finish` with the error.
void FSMirror::init(Context *on_finish) {
  dout(20) << dendl;

  std::scoped_lock locker(m_lock);
  int r = connect(g_ceph_context->_conf->name.to_str(),
                  g_ceph_context->_conf->cluster, &m_cluster, "", "", m_args);
  if (r < 0) {
    m_init_failed = true;
    on_finish->complete(r);
    return;
  }
  r = m_cluster->ioctx_create2(m_pool_id, m_ioctx);
  if (r < 0) {
    m_init_failed = true;
    m_cluster.reset();
    derr << ": error accessing local pool (id=" << m_pool_id << "): "
         << cpp_strerror(r) << dendl;
    on_finish->complete(r);
    return;
  }

  r = mount(m_cluster, m_filesystem, true, &m_mount);
  if (r < 0) {
    m_init_failed = true;
    m_ioctx.close();
    m_cluster.reset();
    on_finish->complete(r);
    return;
  }

  m_addrs = m_cluster->get_addrs();
  dout(10) << ": rados addrs=" << m_addrs << dendl;

  // continue asynchronously; on_finish completes via m_on_init_finish
  init_instance_watcher(on_finish);
}
// Asynchronous shutdown.  If init is still in flight (m_on_init_finish
// set), defer: install a shim m_on_shutdown_finish that the init
// completion invokes; on init failure (r < 0) it completes `on_finish`
// immediately, otherwise it re-arms on_finish and starts the real
// teardown.  When init is not in progress, tear down right away.
void FSMirror::shutdown(Context *on_finish) {
  dout(20) << dendl;

  {
    std::scoped_lock locker(m_lock);
    m_stopping = true;
    if (m_on_init_finish != nullptr) {
      dout(10) << ": delaying shutdown -- init in progress" << dendl;
      m_on_shutdown_finish = new LambdaContext([this, on_finish](int r) {
          if (r < 0) {
            // init failed: nothing to tear down beyond what init undid
            on_finish->complete(0);
            return;
          }
          m_on_shutdown_finish = on_finish;
          shutdown_peer_replayers();
        });
      return;
    }

    m_on_shutdown_finish = on_finish;
  }

  // invoked without holding m_lock (replayer shutdown can block)
  shutdown_peer_replayers();
}
// Stop and drop every peer replayer, then continue teardown with the
// mirror watcher.
void FSMirror::shutdown_peer_replayers() {
  dout(20) << dendl;

  for (auto &[peer, peer_replayer] : m_peer_replayers) {
    dout(5) << ": shutting down replayer for peer=" << peer << dendl;
    shutdown_replayer(peer_replayer.get());
  }
  m_peer_replayers.clear();

  shutdown_mirror_watcher();
}
// First stage of async init: install m_on_init_finish (which records
// failure, completes the caller, and chains into a pending shutdown if
// one was queued meanwhile), then create and start the instance
// watcher.
void FSMirror::init_instance_watcher(Context *on_finish) {
  dout(20) << dendl;

  m_on_init_finish = new LambdaContext([this, on_finish](int r) {
      {
        std::scoped_lock locker(m_lock);
        if (r < 0) {
          m_init_failed = true;
        }
      }
      on_finish->complete(r);
      if (m_on_shutdown_finish != nullptr) {
        // a shutdown was requested while init was running
        m_on_shutdown_finish->complete(r);
      }
    });

  Context *ctx = new C_CallbackAdapter<
    FSMirror, &FSMirror::handle_init_instance_watcher>(this);
  m_instance_watcher = InstanceWatcher::create(m_ioctx, m_snap_listener, m_work_queue);
  m_instance_watcher->init(ctx);
}
// Instance watcher init completed: on failure, fire the init callback
// with the error; on success, continue with the mirror watcher.
void FSMirror::handle_init_instance_watcher(int r) {
  dout(20) << ": r=" << r << dendl;

  Context *on_init_finish = nullptr;
  {
    std::scoped_lock locker(m_lock);
    if (r < 0) {
      std::swap(on_init_finish, m_on_init_finish);
    }
  }

  if (on_init_finish != nullptr) {
    on_init_finish->complete(r);
    return;
  }

  init_mirror_watcher();
}
// Second stage of async init: create and start the mirror watcher.
void FSMirror::init_mirror_watcher() {
  dout(20) << dendl;

  std::scoped_lock locker(m_lock);
  Context *ctx = new C_CallbackAdapter<
    FSMirror, &FSMirror::handle_init_mirror_watcher>(this);
  m_mirror_watcher = MirrorWatcher::create(m_ioctx, this, m_work_queue);
  m_mirror_watcher->init(ctx);
}
// Mirror watcher init completed: on success, fire the init callback;
// on failure, remember the error in m_retval and unwind by shutting
// down the already-initialized instance watcher (the init callback
// fires later from handle_shutdown_instance_watcher()).
void FSMirror::handle_init_mirror_watcher(int r) {
  dout(20) << ": r=" << r << dendl;

  Context *on_init_finish = nullptr;
  {
    std::scoped_lock locker(m_lock);
    if (r == 0) {
      std::swap(on_init_finish, m_on_init_finish);
    }
  }

  if (on_init_finish != nullptr) {
    on_init_finish->complete(r);
    return;
  }

  m_retval = r; // save errcode for init context callback
  shutdown_instance_watcher();
}
// Teardown: stop the mirror watcher, then the instance watcher.
void FSMirror::shutdown_mirror_watcher() {
  dout(20) << dendl;

  std::scoped_lock locker(m_lock);
  Context *ctx = new C_CallbackAdapter<
    FSMirror, &FSMirror::handle_shutdown_mirror_watcher>(this);
  m_mirror_watcher->shutdown(ctx);
}
// Mirror watcher stopped; continue with the instance watcher.
void FSMirror::handle_shutdown_mirror_watcher(int r) {
  dout(20) << ": r=" << r << dendl;

  shutdown_instance_watcher();
}
// Stop the instance watcher; the completion is bounced through the
// work queue so the final callback runs outside the watcher's context.
void FSMirror::shutdown_instance_watcher() {
  dout(20) << dendl;

  std::scoped_lock locker(m_lock);
  Context *ctx = new C_CallbackAdapter<
    FSMirror, &FSMirror::handle_shutdown_instance_watcher>(this);
  m_instance_watcher->shutdown(new C_AsyncCallback<ContextWQ>(m_work_queue, ctx));
}
// Final teardown step: release the mount/ioctx/cluster, then complete
// whichever callbacks are pending -- the init callback (with the error
// stashed in m_retval when init unwound) and/or the shutdown callback.
void FSMirror::handle_shutdown_instance_watcher(int r) {
  dout(20) << ": r=" << r << dendl;

  cleanup();

  Context *on_init_finish = nullptr;
  Context *on_shutdown_finish = nullptr;

  {
    std::scoped_lock locker(m_lock);
    std::swap(on_init_finish, m_on_init_finish);
    std::swap(on_shutdown_finish, m_on_shutdown_finish);
  }

  if (on_init_finish != nullptr) {
    on_init_finish->complete(m_retval);
  }
  if (on_shutdown_finish != nullptr) {
    on_shutdown_finish->complete(r);
  }
}
// InstanceWatcher listener callback: a directory has been assigned to this
// daemon.  Record it, refresh the per-fs dir count in the service daemon, and
// fan the path out to every peer replayer.
void FSMirror::handle_acquire_directory(string_view dir_path) {
  dout(5) << ": dir_path=" << dir_path << dendl;
  {
    std::scoped_lock locker(m_lock);
    m_directories.emplace(dir_path);
    m_service_daemon->add_or_update_fs_attribute(m_filesystem.fscid, SERVICE_DAEMON_DIR_COUNT_KEY,
                                                 m_directories.size());
    for (auto &[peer, peer_replayer] : m_peer_replayers) {
      dout(10) << ": peer=" << peer << dendl;
      peer_replayer->add_directory(dir_path);
    }
  }
}
// InstanceWatcher listener callback: a directory has been taken away from
// this daemon.  Releasing a path that was never acquired is a silent no-op.
void FSMirror::handle_release_directory(string_view dir_path) {
  dout(5) << ": dir_path=" << dir_path << dendl;
  {
    std::scoped_lock locker(m_lock);
    auto it = m_directories.find(dir_path);
    if (it != m_directories.end()) {
      m_directories.erase(it);
      m_service_daemon->add_or_update_fs_attribute(m_filesystem.fscid, SERVICE_DAEMON_DIR_COUNT_KEY,
                                                   m_directories.size());
      for (auto &[peer, peer_replayer] : m_peer_replayers) {
        dout(10) << ": peer=" << peer << dendl;
        peer_replayer->remove_directory(dir_path);
      }
    }
  }
}
// Register a peer and spin up its replayer.  Re-adding an already-running
// peer is a no-op.  A replayer that fails to init is dropped (not retained in
// m_peer_replayers) and the failure is recorded via the service daemon.
void FSMirror::add_peer(const Peer &peer) {
  dout(10) << ": peer=" << peer << dendl;
  std::scoped_lock locker(m_lock);
  m_all_peers.emplace(peer);
  if (m_peer_replayers.find(peer) != m_peer_replayers.end()) {
    return;
  }
  auto replayer = std::make_unique<PeerReplayer>(
    m_cct, this, m_cluster, m_filesystem, peer, m_directories, m_mount, m_service_daemon);
  int r = init_replayer(replayer.get());
  if (r < 0) {
    m_service_daemon->add_or_update_peer_attribute(m_filesystem.fscid, peer,
                                                   SERVICE_DAEMON_PEER_INIT_FAILED_KEY,
                                                   true);
    return;
  }
  m_peer_replayers.emplace(peer, std::move(replayer));
  ceph_assert(m_peer_replayers.size() == 1); // support only a single peer
}
void FSMirror::remove_peer(const Peer &peer) {
dout(10) << ": peer=" << peer << dendl;
std::unique_ptr<PeerReplayer> replayer;
{
std::scoped_lock locker(m_lock);
m_all_peers.erase(peer);
auto it = m_peer_replayers.find(peer);
if (it != m_peer_replayers.end()) {
replayer = std::move(it->second);
m_peer_replayers.erase(it);
}
}
if (replayer) {
dout(5) << ": shutting down replayers for peer=" << peer << dendl;
shutdown_replayer(replayer.get());
}
}
// Admin-socket helper: dump mirror state (failed / blocklisted / running) and,
// when running, the rados instance address, configured peers and the number
// of tracked snapshot directories.
void FSMirror::mirror_status(Formatter *f) {
  std::scoped_lock locker(m_lock);
  f->open_object_section("status");
  if (m_init_failed) {
    f->dump_string("state", "failed");
  } else if (is_blocklisted(locker)) {
    f->dump_string("state", "blocklisted");
  } else {
    // dump rados addr for blocklist test
    f->dump_string("rados_inst", m_addrs);
    f->open_object_section("peers");
    // only the peer key is dumped; the replayer itself is unused here
    for ([[maybe_unused]] auto &[peer, peer_replayer] : m_peer_replayers) {
      peer.dump(f);
    }
    f->close_section(); // peers
    f->open_object_section("snap_dirs");
    f->dump_int("dir_count", m_directories.size());
    f->close_section(); // snap_dirs
  }
  f->close_section(); // status
}
} // namespace mirror
} // namespace cephfs
| 12,467 | 27.017978 | 100 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/FSMirror.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_FS_MIRROR_H
#define CEPHFS_MIRROR_FS_MIRROR_H
#include "common/Formatter.h"
#include "common/Thread.h"
#include "mds/FSMap.h"
#include "Types.h"
#include "InstanceWatcher.h"
#include "MirrorWatcher.h"
class ContextWQ;
namespace cephfs {
namespace mirror {
class MirrorAdminSocketHook;
class PeerReplayer;
class ServiceDaemon;
// handle mirroring for a filesystem to a set of peers
class FSMirror {
public:
  FSMirror(CephContext *cct, const Filesystem &filesystem, uint64_t pool_id,
           ServiceDaemon *service_daemon, std::vector<const char*> args,
           ContextWQ *work_queue);
  ~FSMirror();
  // asynchronous init/teardown -- on_finish fires when the state machine
  // (instance watcher + mirror watcher) completes
  void init(Context *on_finish);
  void shutdown(Context *on_finish);
  void add_peer(const Peer &peer);
  void remove_peer(const Peer &peer);
  bool is_stopping() {
    std::scoped_lock locker(m_lock);
    return m_stopping;
  }
  bool is_init_failed() {
    std::scoped_lock locker(m_lock);
    return m_init_failed;
  }
  // failed if init failed or either watcher has failed
  bool is_failed() {
    std::scoped_lock locker(m_lock);
    return m_init_failed ||
           m_instance_watcher->is_failed() ||
           m_mirror_watcher->is_failed();
  }
  bool is_blocklisted() {
    std::scoped_lock locker(m_lock);
    return is_blocklisted(locker);
  }
  Peers get_peers() {
    std::scoped_lock locker(m_lock);
    return m_all_peers;
  }
  std::string get_instance_addr() {
    std::scoped_lock locker(m_lock);
    return m_addrs;
  }
  // admin socket helpers
  void mirror_status(Formatter *f);
  void reopen_logs();
private:
  // locked variant -- the scoped_lock argument documents that m_lock is held
  bool is_blocklisted(const std::scoped_lock<ceph::mutex> &locker) const {
    bool blocklisted = false;
    if (m_instance_watcher) {
      blocklisted = m_instance_watcher->is_blocklisted();
    }
    if (m_mirror_watcher) {
      blocklisted |= m_mirror_watcher->is_blocklisted();
    }
    return blocklisted;
  }
  // forwards directory acquire/release notifications from the instance
  // watcher into this FSMirror
  struct SnapListener : public InstanceWatcher::Listener {
    FSMirror *fs_mirror;
    SnapListener(FSMirror *fs_mirror)
      : fs_mirror(fs_mirror) {
    }
    void acquire_directory(std::string_view dir_path) override {
      fs_mirror->handle_acquire_directory(dir_path);
    }
    void release_directory(std::string_view dir_path) override {
      fs_mirror->handle_release_directory(dir_path);
    }
  };
  CephContext *m_cct;
  Filesystem m_filesystem;
  uint64_t m_pool_id;
  ServiceDaemon *m_service_daemon;
  std::vector<const char *> m_args;
  ContextWQ *m_work_queue;
  ceph::mutex m_lock = ceph::make_mutex("cephfs::mirror::fs_mirror");
  SnapListener m_snap_listener;
  // directories currently assigned to this daemon (heterogeneous lookup via
  // std::less<> so string_view finds work without a temporary)
  std::set<std::string, std::less<>> m_directories;
  Peers m_all_peers;
  std::map<Peer, std::unique_ptr<PeerReplayer>> m_peer_replayers;
  RadosRef m_cluster;
  std::string m_addrs;
  librados::IoCtx m_ioctx;
  InstanceWatcher *m_instance_watcher = nullptr;
  MirrorWatcher *m_mirror_watcher = nullptr;
  // saved init error; delivered to the init context during teardown
  int m_retval = 0;
  bool m_stopping = false;
  bool m_init_failed = false;
  Context *m_on_init_finish = nullptr;
  Context *m_on_shutdown_finish = nullptr;
  MirrorAdminSocketHook *m_asok_hook = nullptr;
  MountRef m_mount;
  int init_replayer(PeerReplayer *peer_replayer);
  void shutdown_replayer(PeerReplayer *peer_replayer);
  void cleanup();
  void init_instance_watcher(Context *on_finish);
  void handle_init_instance_watcher(int r);
  void init_mirror_watcher();
  void handle_init_mirror_watcher(int r);
  void shutdown_peer_replayers();
  void shutdown_mirror_watcher();
  void handle_shutdown_mirror_watcher(int r);
  void shutdown_instance_watcher();
  void handle_shutdown_instance_watcher(int r);
  void handle_acquire_directory(std::string_view dir_path);
  void handle_release_directory(std::string_view dir_path);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_FS_MIRROR_H
| 3,867 | 23.327044 | 76 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/InstanceWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_context.h"
#include "common/ceph_json.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/WorkQueue.h"
#include "cls/cephfs/cls_cephfs_client.h"
#include "include/stringify.h"
#include "aio_utils.h"
#include "InstanceWatcher.h"
#include "Types.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::InstanceWatcher " << __func__
using namespace std;
namespace cephfs {
namespace mirror {
namespace {
// Per-daemon rados object name: "<mirror object>.<instance id>".
std::string instance_oid(const std::string &instance_id) {
  std::string oid = CEPHFS_MIRROR_OBJECT;
  oid += ".";
  oid += instance_id;
  return oid;
}
} // anonymous namespace
// The watched object is named after this rados client's instance id, so every
// cephfs-mirror daemon watches its own private object.
InstanceWatcher::InstanceWatcher(librados::IoCtx &ioctx,
                                 Listener &listener, ContextWQ *work_queue)
  : Watcher(ioctx, instance_oid(stringify(ioctx.get_instance_id())), work_queue),
    m_ioctx(ioctx),
    m_listener(listener),
    m_work_queue(work_queue),
    m_lock(ceph::make_mutex("cephfs::mirror::instance_watcher")) {
}
// no-op: teardown is driven asynchronously via shutdown()
InstanceWatcher::~InstanceWatcher() {
}
// Start async init (create object, then register watch).  The init context is
// wrapped so that a shutdown() which arrived while init was in flight gets
// unblocked once init completes (see shutdown()'s delaying branch).
void InstanceWatcher::init(Context *on_finish) {
  dout(20) << dendl;
  {
    std::scoped_lock locker(m_lock);
    ceph_assert(m_on_init_finish == nullptr);
    m_on_init_finish = new LambdaContext([this, on_finish](int r) {
                                           on_finish->complete(r);
                                           if (m_on_shutdown_finish != nullptr) {
                                             m_on_shutdown_finish->complete(0);
                                           }
                                         });
  }
  create_instance();
}
// Start async teardown (unregister watch, then remove object).  If init is
// still in progress, park a context that re-invokes shutdown() after the init
// callback fires.
void InstanceWatcher::shutdown(Context *on_finish) {
  dout(20) << dendl;
  {
    std::scoped_lock locker(m_lock);
    ceph_assert(m_on_shutdown_finish == nullptr);
    if (m_on_init_finish != nullptr) {
      dout(10) << ": delaying shutdown -- init in progress" << dendl;
      m_on_shutdown_finish = new LambdaContext([this, on_finish](int r) {
                                                 m_on_shutdown_finish = nullptr;
                                                 shutdown(on_finish);
                                               });
      return;
    }
    m_on_shutdown_finish = on_finish;
  }
  unregister_watcher();
}
// Watch/notify callback: decode a {dir_path, mode} JSON payload and dispatch
// the directory acquire/release to the listener.  The notify is always
// acknowledged, even on a malformed payload, so the notifier is not left
// waiting for a timeout.
void InstanceWatcher::handle_notify(uint64_t notify_id, uint64_t handle,
                                    uint64_t notifier_id, bufferlist& bl) {
  dout(20) << dendl;
  std::string dir_path;
  std::string mode;
  try {
    JSONDecoder jd(bl);
    JSONDecoder::decode_json("dir_path", dir_path, &jd.parser, true);
    JSONDecoder::decode_json("mode", mode, &jd.parser, true);
  } catch (const JSONDecoder::err &e) {
    derr << ": failed to decode notify json: " << e.what() << dendl;
    // don't hand a partially decoded payload to the listener -- previously
    // this fell through and logged a misleading "unknown mode" error.
    bufferlist outbl;
    acknowledge_notify(notify_id, handle, outbl);
    return;
  }
  dout(20) << ": notifier_id=" << notifier_id << ", dir_path=" << dir_path
           << ", mode=" << mode << dendl;
  if (mode == "acquire") {
    m_listener.acquire_directory(dir_path);
  } else if (mode == "release") {
    m_listener.release_directory(dir_path);
  } else {
    derr << ": unknown mode" << dendl;
  }
  bufferlist outbl;
  acknowledge_notify(notify_id, handle, outbl);
}
// Rewatch completion: record blocklisting or failure so is_blocklisted() /
// is_failed() report them.  Both flags are read under m_lock by their
// accessors, so both writes must also hold the lock -- the original locked
// only the m_blocklisted write, leaving the m_failed writes racy.
void InstanceWatcher::handle_rewatch_complete(int r) {
  dout(5) << ": r=" << r << dendl;
  if (r == -EBLOCKLISTED) {
    dout(0) << ": client blocklisted" <<dendl;
    std::scoped_lock locker(m_lock);
    m_blocklisted = true;
  } else if (r == -ENOENT) {
    derr << ": mirroring object deleted" << dendl;
    std::scoped_lock locker(m_lock);
    m_failed = true;
  } else if (r < 0) {
    derr << ": rewatch error: " << cpp_strerror(r) << dendl;
    std::scoped_lock locker(m_lock);
    m_failed = true;
  }
}
// Asynchronously create the per-instance rados object.  create(false) is
// non-exclusive, so an already-existing object is not an error.
void InstanceWatcher::create_instance() {
  dout(20) << dendl;
  std::scoped_lock locker(m_lock);
  librados::ObjectWriteOperation op;
  op.create(false);
  librados::AioCompletion *aio_comp =
    librados::Rados::aio_create_completion(
      this, &rados_callback<InstanceWatcher, &InstanceWatcher::handle_create_instance>);
  int r = m_ioctx.aio_operate(m_oid, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Object-create completion: on failure complete the init context with the
// error; on success continue the init chain by registering the watch.
void InstanceWatcher::handle_create_instance(int r) {
  dout(20) << ": r=" << r << dendl;
  Context *on_init_finish = nullptr;
  {
    std::scoped_lock locker(m_lock);
    if (r < 0) {
      // failure: claim the init context so it completes outside m_lock
      std::swap(on_init_finish, m_on_init_finish);
    }
  }
  if (on_init_finish != nullptr) {
    on_init_finish->complete(r);
    return;
  }
  register_watcher();
}
// Register a rados watch on the instance object; completion lands in
// handle_register_watcher().
void InstanceWatcher::register_watcher() {
  dout(20) << dendl;
  std::scoped_lock locker(m_lock);
  register_watch(new C_CallbackAdapter<
      InstanceWatcher, &InstanceWatcher::handle_register_watcher>(this));
}
// Watch-register completion: on success complete the init context; on failure
// roll back by removing the instance object (the init context then fires from
// handle_remove_instance() with the remove's return code).
void InstanceWatcher::handle_register_watcher(int r) {
  dout(20) << ": r=" << r << dendl;
  Context *on_init_finish = nullptr;
  {
    std::scoped_lock locker(m_lock);
    if (r == 0) {
      std::swap(on_init_finish, m_on_init_finish);
    }
  }
  if (on_init_finish != nullptr) {
    on_init_finish->complete(r);
    return;
  }
  remove_instance();
}
// First step of teardown: drop the rados watch.  The completion is bounced
// through the work queue via C_AsyncCallback rather than run inline.
void InstanceWatcher::unregister_watcher() {
  dout(20) << dendl;
  std::scoped_lock locker(m_lock);
  Context *on_finish = new C_CallbackAdapter<
    InstanceWatcher, &InstanceWatcher::handle_unregister_watcher>(this);
  unregister_watch(new C_AsyncCallback<ContextWQ>(m_work_queue, on_finish));
}
// Watch-unregister completion: on failure complete the shutdown context with
// the error; on success continue teardown by removing the instance object.
void InstanceWatcher::handle_unregister_watcher(int r) {
  dout(20) << ": r=" << r << dendl;
  Context *on_shutdown_finish = nullptr;
  {
    std::scoped_lock locker(m_lock);
    if (r < 0) {
      std::swap(on_shutdown_finish, m_on_shutdown_finish);
    }
  }
  if (on_shutdown_finish != nullptr) {
    on_shutdown_finish->complete(r);
    return;
  }
  remove_instance();
}
// Asynchronously delete the per-instance rados object.  Reached from both the
// failed-init rollback and the normal shutdown path.
void InstanceWatcher::remove_instance() {
  dout(20) << dendl;
  std::scoped_lock locker(m_lock);
  librados::ObjectWriteOperation op;
  op.remove();
  librados::AioCompletion *aio_comp =
    librados::Rados::aio_create_completion(
      this, &rados_callback<InstanceWatcher, &InstanceWatcher::handle_remove_instance>);
  int r = m_ioctx.aio_operate(m_oid, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Object-remove completion: terminal state for both init-rollback and
// shutdown -- complete whichever context is pending with the remove's result.
void InstanceWatcher::handle_remove_instance(int r) {
  dout(20) << ": r=" << r << dendl;
  Context *on_init_finish = nullptr;
  Context *on_shutdown_finish = nullptr;
  {
    std::scoped_lock locker(m_lock);
    std::swap(on_init_finish, m_on_init_finish);
    std::swap(on_shutdown_finish, m_on_shutdown_finish);
  }
  if (on_init_finish != nullptr) {
    on_init_finish->complete(r);
  }
  if (on_shutdown_finish != nullptr) {
    on_shutdown_finish->complete(r);
  }
}
} // namespace mirror
} // namespace cephfs
| 6,748 | 25.570866 | 88 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/InstanceWatcher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_INSTANCE_WATCHER_H
#define CEPHFS_MIRROR_INSTANCE_WATCHER_H
#include <string_view>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "Watcher.h"
class ContextWQ;
namespace cephfs {
namespace mirror {
// watch directory update notifications via per daemon rados
// object and invoke listener callback.
class InstanceWatcher : public Watcher {
public:
  // callback interface for directory assignment notifications
  struct Listener {
    virtual ~Listener() {
    }
    virtual void acquire_directory(std::string_view dir_path) = 0;
    virtual void release_directory(std::string_view dir_path) = 0;
  };
  static InstanceWatcher *create(librados::IoCtx &ioctx,
                                 Listener &listener, ContextWQ *work_queue) {
    return new InstanceWatcher(ioctx, listener, work_queue);
  }
  InstanceWatcher(librados::IoCtx &ioctx, Listener &listener, ContextWQ *work_queue);
  ~InstanceWatcher();
  // async init (create object + register watch) / shutdown (unregister +
  // remove object)
  void init(Context *on_finish);
  void shutdown(Context *on_finish);
  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist& bl) override;
  void handle_rewatch_complete(int r) override;
  bool is_blocklisted() {
    std::scoped_lock locker(m_lock);
    return m_blocklisted;
  }
  bool is_failed() {
    std::scoped_lock locker(m_lock);
    return m_failed;
  }
private:
  librados::IoCtx &m_ioctx;
  Listener &m_listener;
  ContextWQ *m_work_queue;
  ceph::mutex m_lock;
  Context *m_on_init_finish = nullptr;
  Context *m_on_shutdown_finish = nullptr;
  bool m_blocklisted = false;
  bool m_failed = false;
  void create_instance();
  void handle_create_instance(int r);
  void register_watcher();
  void handle_register_watcher(int r);
  void remove_instance();
  void handle_remove_instance(int r);
  void unregister_watcher();
  void handle_unregister_watcher(int r);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_INSTANCE_WATCHER_H
| 2,071 | 23.093023 | 85 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/Mirror.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_argparse.h"
#include "common/ceph_context.h"
#include "common/common_init.h"
#include "common/Cond.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "include/types.h"
#include "mon/MonClient.h"
#include "msg/Messenger.h"
#include "aio_utils.h"
#include "Mirror.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::Mirror " << __func__
namespace cephfs {
namespace mirror {
namespace {
const std::string SERVICE_DAEMON_MIRROR_ENABLE_FAILED_KEY("mirroring_failed");
// CephContext-registered timer singleton; owns its lock and self-initializes
// on construction (safe_callbacks=true).
class SafeTimerSingleton : public CommonSafeTimer<ceph::mutex> {
public:
  ceph::mutex timer_lock = ceph::make_mutex("cephfs::mirror::timer_lock");
  explicit SafeTimerSingleton(CephContext *cct)
    : SafeTimer(cct, timer_lock, true) {
    init();
  }
};
// CephContext-registered single-thread pool; exposes a work queue used to
// defer context completions off watcher/callback threads.
class ThreadPoolSingleton : public ThreadPool {
public:
  ContextWQ *work_queue = nullptr;
  explicit ThreadPoolSingleton(CephContext *cct)
    : ThreadPool(cct, "Mirror::thread_pool", "tp_mirror", 1) {
    work_queue = new ContextWQ("Mirror::work_queue", ceph::make_timespan(60), this);
    start();
  }
};
} // anonymous namespace
// Queued action: start an FSMirror for a filesystem and report the result via
// handle_enable_mirroring().  complete() is overridden so the context is not
// deleted by the base class -- it deletes itself once the async chain ends.
struct Mirror::C_EnableMirroring : Context {
  Mirror *mirror;
  Filesystem filesystem;
  uint64_t pool_id;
  C_EnableMirroring(Mirror *mirror, const Filesystem &filesystem, uint64_t pool_id)
    : mirror(mirror),
      filesystem(filesystem),
      pool_id(pool_id) {
  }
  void finish(int r) override {
    enable_mirroring();
  }
  void enable_mirroring() {
    Context *ctx = new C_CallbackAdapter<C_EnableMirroring,
                                         &C_EnableMirroring::handle_enable_mirroring>(this);
    mirror->enable_mirroring(filesystem, pool_id, ctx);
  }
  void handle_enable_mirroring(int r) {
    mirror->handle_enable_mirroring(filesystem, r);
    delete this;
  }
  // context needs to live post completion
  void complete(int r) override {
    finish(r);
  }
};
// Queued action: shut down a filesystem's FSMirror and report the result via
// handle_disable_mirroring().  Self-deleting; see complete() override.
struct Mirror::C_DisableMirroring : Context {
  Mirror *mirror;
  Filesystem filesystem;
  C_DisableMirroring(Mirror *mirror, const Filesystem &filesystem)
    : mirror(mirror),
      filesystem(filesystem) {
  }
  void finish(int r) override {
    disable_mirroring();
  }
  void disable_mirroring() {
    Context *ctx = new C_CallbackAdapter<C_DisableMirroring,
                                         &C_DisableMirroring::handle_disable_mirroring>(this);
    mirror->disable_mirroring(filesystem, ctx);
  }
  void handle_disable_mirroring(int r) {
    mirror->handle_disable_mirroring(filesystem, r);
    delete this;
  }
  // context needs to live post completion
  void complete(int r) override {
    finish(r);
  }
};
// Queued action: add or remove a peer on a filesystem's FSMirror.  The
// two-argument constructor adds; the `remove` flag selects removal.
struct Mirror::C_PeerUpdate : Context {
  Mirror *mirror;
  Filesystem filesystem;
  Peer peer;
  bool remove = false;
  C_PeerUpdate(Mirror *mirror, const Filesystem &filesystem,
               const Peer &peer)
    : mirror(mirror),
      filesystem(filesystem),
      peer(peer) {
  }
  C_PeerUpdate(Mirror *mirror, const Filesystem &filesystem,
               const Peer &peer, bool remove)
    : mirror(mirror),
      filesystem(filesystem),
      peer(peer),
      remove(remove) {
  }
  void finish(int r) override {
    if (remove) {
      mirror->remove_peer(filesystem, peer);
    } else {
      mirror->add_peer(filesystem, peer);
    }
  }
};
// Restart action for a failed/blocklisted FSMirror: disable, then re-enable
// with is_restart=true, then re-attach the previously configured peers.
// Self-deleting; see complete() override.
struct Mirror::C_RestartMirroring : Context {
  Mirror *mirror;
  Filesystem filesystem;
  uint64_t pool_id;
  Peers peers;
  C_RestartMirroring(Mirror *mirror, const Filesystem &filesystem,
                     uint64_t pool_id, const Peers &peers)
    : mirror(mirror),
      filesystem(filesystem),
      pool_id(pool_id),
      peers(peers) {
  }
  void finish(int r) override {
    disable_mirroring();
  }
  void disable_mirroring() {
    Context *ctx = new C_CallbackAdapter<C_RestartMirroring,
                                         &C_RestartMirroring::handle_disable_mirroring>(this);
    mirror->disable_mirroring(filesystem, ctx);
  }
  void handle_disable_mirroring(int r) {
    enable_mirroring();
  }
  void enable_mirroring() {
    // enable_mirroring() asserts m_lock is held, so take it here
    std::scoped_lock locker(mirror->m_lock);
    Context *ctx = new C_CallbackAdapter<C_RestartMirroring,
                                         &C_RestartMirroring::handle_enable_mirroring>(this);
    mirror->enable_mirroring(filesystem, pool_id, ctx, true);
  }
  void handle_enable_mirroring(int r) {
    mirror->handle_enable_mirroring(filesystem, peers, r);
    delete this;
  }
  // context needs to live post completion
  void complete(int r) override {
    finish(r);
  }
};
// Wire up the shared thread pool and timer singletons from the CephContext
// registry and arm the periodic fs-mirror update task.
Mirror::Mirror(CephContext *cct, const std::vector<const char*> &args,
               MonClient *monc, Messenger *msgr)
  : m_cct(cct),
    m_args(args),
    m_monc(monc),
    m_msgr(msgr),
    m_listener(this),
    m_last_blocklist_check(ceph_clock_now()),
    m_last_failure_check(ceph_clock_now()),
    m_local(new librados::Rados()) {
  auto thread_pool = &(cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
    "cephfs::mirror::thread_pool", false, cct));
  auto safe_timer = &(cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
    "cephfs::mirror::safe_timer", false, cct));
  m_thread_pool = thread_pool;
  m_work_queue = thread_pool->work_queue;
  m_timer = safe_timer;
  m_timer_lock = &safe_timer->timer_lock;
  // schedule_mirror_update_task() asserts the timer lock is held
  std::scoped_lock timer_lock(*m_timer_lock);
  schedule_mirror_update_task();
}
// Tear down the timer, drain and free the work queue, then stop the pool.
Mirror::~Mirror() {
  dout(10) << dendl;
  {
    std::scoped_lock timer_lock(*m_timer_lock);
    m_timer->shutdown();
  }
  m_work_queue->drain();
  delete m_work_queue;
  {
    std::scoped_lock locker(m_lock);
    m_thread_pool->stop();
  }
}
// Bring up the monitor client: attach it to the messenger, authenticate
// (bounded by client_mount_timeout), and adopt the assigned global id as this
// client's entity name.  Returns 0 on success, negative errno otherwise.
int Mirror::init_mon_client() {
  dout(20) << dendl;
  m_monc->set_messenger(m_msgr);
  m_msgr->add_dispatcher_head(m_monc);
  m_monc->set_want_keys(CEPH_ENTITY_TYPE_MON);
  int r = m_monc->init();
  if (r < 0) {
    derr << ": failed to init mon client: " << cpp_strerror(r) << dendl;
    return r;
  }
  r = m_monc->authenticate(std::chrono::duration<double>(m_cct->_conf.get_val<std::chrono::seconds>("client_mount_timeout")).count());
  if (r < 0) {
    derr << ": failed to authenticate to monitor: " << cpp_strerror(r) << dendl;
    return r;
  }
  client_t me = m_monc->get_global_id();
  m_msgr->set_myname(entity_name_t::CLIENT(me.v));
  return 0;
}
// Synchronous startup: connect the local rados handle, register the service
// daemon, then initialize the mon client.  NOTE: `reason` is accepted for the
// caller's interface but never populated by this implementation.
int Mirror::init(std::string &reason) {
  dout(20) << dendl;
  std::scoped_lock locker(m_lock);
  int r = m_local->init_with_context(m_cct);
  if (r < 0) {
    derr << ": could not initialize rados handler" << dendl;
    return r;
  }
  r = m_local->connect();
  if (r < 0) {
    derr << ": error connecting to local cluster" << dendl;
    return r;
  }
  m_service_daemon = std::make_unique<ServiceDaemon>(m_cct, m_local);
  r = m_service_daemon->init();
  if (r < 0) {
    derr << ": error registering service daemon: " << cpp_strerror(r) << dendl;
    return r;
  }
  r = init_mon_client();
  if (r < 0) {
    return r;
  }
  return 0;
}
// Flag the stop, shut down the cluster watcher and wake run() out of its
// wait.  Callers (handle_signal) invoke this with m_lock held.
void Mirror::shutdown() {
  dout(20) << dendl;
  m_stopping = true;
  m_cluster_watcher->shutdown();
  m_cond.notify_all();
}
// Reopen log files for every active FSMirror and the global context (SIGHUP
// handling).  Called from handle_signal() with m_lock already held.
void Mirror::reopen_logs() {
  for (auto &[filesystem, mirror_action] : m_mirror_actions) {
    mirror_action.fs_mirror->reopen_logs();
  }
  g_ceph_context->reopen_logs();
}
// Signal dispatcher: SIGHUP reopens logs, SIGINT/SIGTERM trigger shutdown;
// any other signal is a programming error and aborts.
void Mirror::handle_signal(int signum) {
  dout(10) << ": signal=" << signum << dendl;
  std::scoped_lock locker(m_lock);
  switch (signum) {
  case SIGHUP:
    reopen_logs();
    break;
  case SIGINT:
  case SIGTERM:
    shutdown();
    break;
  default:
    ceph_abort_msgf("unexpected signal %d", signum);
  }
}
// Restart-path completion (C_RestartMirroring): clear the in-progress flag,
// wake waiters, then on success re-add the peers captured before the restart;
// on failure record the init failure with the service daemon.
void Mirror::handle_enable_mirroring(const Filesystem &filesystem,
                                     const Peers &peers, int r) {
  dout(20) << ": filesystem=" << filesystem << ", peers=" << peers
           << ", r=" << r << dendl;
  std::scoped_lock locker(m_lock);
  auto &mirror_action = m_mirror_actions.at(filesystem);
  ceph_assert(mirror_action.action_in_progress);
  mirror_action.action_in_progress = false;
  m_cond.notify_all();
  if (r < 0) {
    derr << ": failed to initialize FSMirror for filesystem=" << filesystem
         << ": " << cpp_strerror(r) << dendl;
    m_service_daemon->add_or_update_fs_attribute(filesystem.fscid,
                                                 SERVICE_DAEMON_MIRROR_ENABLE_FAILED_KEY,
                                                 true);
    return;
  }
  for (auto &peer : peers) {
    mirror_action.fs_mirror->add_peer(peer);
  }
  dout(10) << ": Initialized FSMirror for filesystem=" << filesystem << dendl;
}
// First-enable completion (C_EnableMirroring): clear the in-progress flag,
// wake waiters, and record an init failure with the service daemon if any.
void Mirror::handle_enable_mirroring(const Filesystem &filesystem, int r) {
  dout(20) << ": filesystem=" << filesystem << ", r=" << r << dendl;
  std::scoped_lock locker(m_lock);
  auto &mirror_action = m_mirror_actions.at(filesystem);
  ceph_assert(mirror_action.action_in_progress);
  mirror_action.action_in_progress = false;
  m_cond.notify_all();
  if (r < 0) {
    derr << ": failed to initialize FSMirror for filesystem=" << filesystem
         << ": " << cpp_strerror(r) << dendl;
    m_service_daemon->add_or_update_fs_attribute(filesystem.fscid,
                                                 SERVICE_DAEMON_MIRROR_ENABLE_FAILED_KEY,
                                                 true);
    return;
  }
  dout(10) << ": Initialized FSMirror for filesystem=" << filesystem << dendl;
}
// Construct a fresh FSMirror for the filesystem and start its async init;
// on_finish is bounced through the work queue.  is_restart tolerates (and
// drops) an existing FSMirror instance.  Requires m_lock held.
void Mirror::enable_mirroring(const Filesystem &filesystem, uint64_t local_pool_id,
                              Context *on_finish, bool is_restart) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto &mirror_action = m_mirror_actions.at(filesystem);
  if (is_restart) {
    mirror_action.fs_mirror.reset();
  } else {
    ceph_assert(!mirror_action.action_in_progress);
  }
  ceph_assert(!mirror_action.fs_mirror);
  dout(10) << ": starting FSMirror: filesystem=" << filesystem << dendl;
  mirror_action.action_in_progress = true;
  mirror_action.fs_mirror = std::make_unique<FSMirror>(m_cct, filesystem, local_pool_id,
                                                       m_service_daemon.get(), m_args, m_work_queue);
  mirror_action.fs_mirror->init(new C_AsyncCallback<ContextWQ>(m_work_queue, on_finish));
}
// ClusterWatcher callback: queue an enable action for the filesystem; the
// timer-driven update_fs_mirrors() dispatches it later.  Ignored once
// shutdown has begun.
void Mirror::mirroring_enabled(const Filesystem &filesystem, uint64_t local_pool_id) {
  dout(10) << ": filesystem=" << filesystem << ", pool_id=" << local_pool_id << dendl;
  std::scoped_lock locker(m_lock);
  if (m_stopping) {
    return;
  }
  auto p = m_mirror_actions.emplace(filesystem, MirrorAction(local_pool_id));
  auto &mirror_action = p.first->second;
  mirror_action.action_ctxs.push_back(new C_EnableMirroring(this, filesystem, local_pool_id));
}
// Disable completion: clear the in-progress flag (unless the mirror never
// initialized, in which case no action was started), drop the FSMirror and,
// if no actions remain queued, forget the filesystem entirely.
void Mirror::handle_disable_mirroring(const Filesystem &filesystem, int r) {
  dout(10) << ": filesystem=" << filesystem << ", r=" << r << dendl;
  std::scoped_lock locker(m_lock);
  auto &mirror_action = m_mirror_actions.at(filesystem);
  if (!mirror_action.fs_mirror->is_init_failed()) {
    ceph_assert(mirror_action.action_in_progress);
    mirror_action.action_in_progress = false;
    m_cond.notify_all();
  }
  if (!m_stopping) {
    mirror_action.fs_mirror.reset();
    if (mirror_action.action_ctxs.empty()) {
      dout(10) << ": no pending actions for filesystem=" << filesystem << dendl;
      m_mirror_actions.erase(filesystem);
    }
  }
}
// Start shutting down the filesystem's FSMirror.  A mirror whose init failed
// has nothing to shut down -- complete on_finish with -EINVAL via the work
// queue instead.  Requires m_lock held.
void Mirror::disable_mirroring(const Filesystem &filesystem, Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto &mirror_action = m_mirror_actions.at(filesystem);
  ceph_assert(mirror_action.fs_mirror);
  ceph_assert(!mirror_action.action_in_progress);
  if (mirror_action.fs_mirror->is_init_failed()) {
    dout(10) << ": init failed for filesystem=" << filesystem << dendl;
    m_work_queue->queue(on_finish, -EINVAL);
    return;
  }
  mirror_action.action_in_progress = true;
  mirror_action.fs_mirror->shutdown(new C_AsyncCallback<ContextWQ>(m_work_queue, on_finish));
}
// ClusterWatcher callback: queue a disable action for the filesystem;
// ignored once shutdown has begun.
void Mirror::mirroring_disabled(const Filesystem &filesystem) {
  dout(10) << ": filesystem=" << filesystem << dendl;
  std::scoped_lock locker(m_lock);
  if (m_stopping) {
    dout(5) << "shutting down" << dendl;
    return;
  }
  auto &mirror_action = m_mirror_actions.at(filesystem);
  mirror_action.action_ctxs.push_back(new C_DisableMirroring(this, filesystem));
}
// Apply a queued peer-add to the filesystem's FSMirror (C_PeerUpdate path).
// Requires m_lock held and no action in flight.
void Mirror::add_peer(const Filesystem &filesystem, const Peer &peer) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto &mirror_action = m_mirror_actions.at(filesystem);
  ceph_assert(mirror_action.fs_mirror);
  ceph_assert(!mirror_action.action_in_progress);
  mirror_action.fs_mirror->add_peer(peer);
}
// ClusterWatcher callback: queue a peer-add action; ignored once shutdown
// has begun.
void Mirror::peer_added(const Filesystem &filesystem, const Peer &peer) {
  dout(20) << ": filesystem=" << filesystem << ", peer=" << peer << dendl;
  std::scoped_lock locker(m_lock);
  if (m_stopping) {
    dout(5) << "shutting down" << dendl;
    return;
  }
  auto &mirror_action = m_mirror_actions.at(filesystem);
  mirror_action.action_ctxs.push_back(new C_PeerUpdate(this, filesystem, peer));
}
// Apply a queued peer-remove to the filesystem's FSMirror (C_PeerUpdate
// path).  Requires m_lock held and no action in flight.
void Mirror::remove_peer(const Filesystem &filesystem, const Peer &peer) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  auto &mirror_action = m_mirror_actions.at(filesystem);
  ceph_assert(mirror_action.fs_mirror);
  ceph_assert(!mirror_action.action_in_progress);
  mirror_action.fs_mirror->remove_peer(peer);
}
// ClusterWatcher callback: queue a peer-remove action; ignored once shutdown
// has begun.
void Mirror::peer_removed(const Filesystem &filesystem, const Peer &peer) {
  dout(20) << ": filesystem=" << filesystem << ", peer=" << peer << dendl;
  std::scoped_lock locker(m_lock);
  if (m_stopping) {
    dout(5) << "shutting down" << dendl;
    return;
  }
  auto &mirror_action = m_mirror_actions.at(filesystem);
  mirror_action.action_ctxs.push_back(new C_PeerUpdate(this, filesystem, peer, true));
}
// Periodic timer task body: restart failed/blocklisted FSMirror instances
// when their respective check intervals have elapsed, dispatch at most one
// queued action per idle filesystem, then re-arm the timer.
void Mirror::update_fs_mirrors() {
  dout(20) << dendl;
  auto now = ceph_clock_now();
  double blocklist_interval = g_ceph_context->_conf.get_val<std::chrono::seconds>
    ("cephfs_mirror_restart_mirror_on_blocklist_interval").count();
  bool check_blocklist = blocklist_interval > 0 && ((now - m_last_blocklist_check) >= blocklist_interval);
  double failed_interval = g_ceph_context->_conf.get_val<std::chrono::seconds>
    ("cephfs_mirror_restart_mirror_on_failure_interval").count();
  bool check_failure = failed_interval > 0 && ((now - m_last_failure_check) >= failed_interval);
  {
    std::scoped_lock locker(m_lock);
    for (auto &[filesystem, mirror_action] : m_mirror_actions) {
      auto failed = mirror_action.fs_mirror && mirror_action.fs_mirror->is_failed();
      auto blocklisted = mirror_action.fs_mirror && mirror_action.fs_mirror->is_blocklisted();
      if (check_failure && !mirror_action.action_in_progress && failed) {
        // about to restart failed mirror instance -- nothing
        // should interfere
        dout(5) << ": filesystem=" << filesystem << " failed mirroring -- restarting" << dendl;
        auto peers = mirror_action.fs_mirror->get_peers();
        auto ctx = new C_RestartMirroring(this, filesystem, mirror_action.pool_id, peers);
        ctx->complete(0);
      } else if (check_blocklist && !mirror_action.action_in_progress && blocklisted) {
        // about to restart blocklisted mirror instance -- nothing
        // should interfere
        dout(5) << ": filesystem=" << filesystem << " is blocklisted -- restarting" << dendl;
        auto peers = mirror_action.fs_mirror->get_peers();
        auto ctx = new C_RestartMirroring(this, filesystem, mirror_action.pool_id, peers);
        ctx->complete(0);
      }
      if (!failed && !blocklisted && !mirror_action.action_ctxs.empty()
          && !mirror_action.action_in_progress) {
        // dispatch one queued action (the pointless std::move on the raw
        // Context* was dropped -- moving a pointer is just a copy)
        auto ctx = mirror_action.action_ctxs.front();
        mirror_action.action_ctxs.pop_front();
        ctx->complete(0);
      }
    }
    if (check_blocklist) {
      m_last_blocklist_check = now;
    }
    if (check_failure) {
      m_last_failure_check = now;
    }
  }
  schedule_mirror_update_task();
}
// Arm the next update_fs_mirrors() run after cephfs_mirror_action_update_interval
// seconds.  Requires the timer lock; the previous task must already be clear.
void Mirror::schedule_mirror_update_task() {
  ceph_assert(m_timer_task == nullptr);
  ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
  m_timer_task = new LambdaContext([this](int _) {
    m_timer_task = nullptr;
    update_fs_mirrors();
  });
  double after = g_ceph_context->_conf.get_val<std::chrono::seconds>
    ("cephfs_mirror_action_update_interval").count();
  dout(20) << ": scheduling fs mirror update (" << m_timer_task << ") after "
           << after << " seconds" << dendl;
  m_timer->add_event_after(after, m_timer_task);
}
// Main loop: start the cluster watcher, block until shutdown() flips
// m_stopping, cancel any pending timer task, then shut down every FSMirror
// synchronously (waiting out any in-flight action first).
void Mirror::run() {
  dout(20) << dendl;
  std::unique_lock locker(m_lock);
  m_cluster_watcher.reset(new ClusterWatcher(m_cct, m_monc, m_service_daemon.get(), m_listener));
  m_msgr->add_dispatcher_tail(m_cluster_watcher.get());
  m_cluster_watcher->init();
  m_cond.wait(locker, [this]{return m_stopping;});
  // drop m_lock while cancelling the timer to respect lock ordering with
  // m_timer_lock
  locker.unlock();
  {
    std::scoped_lock timer_lock(*m_timer_lock);
    if (m_timer_task != nullptr) {
      dout(10) << ": canceling timer task=" << m_timer_task << dendl;
      m_timer->cancel_event(m_timer_task);
      m_timer_task = nullptr;
    }
  }
  locker.lock();
  for (auto &[filesystem, mirror_action] : m_mirror_actions) {
    dout(10) << ": trying to shutdown filesystem=" << filesystem << dendl;
    // wait for in-progress action and shutdown
    m_cond.wait(locker, [&mirror_action=mirror_action]
                {return !mirror_action.action_in_progress;});
    if (mirror_action.fs_mirror &&
        !mirror_action.fs_mirror->is_stopping() &&
        !mirror_action.fs_mirror->is_init_failed()) {
      C_SaferCond cond;
      mirror_action.fs_mirror->shutdown(new C_AsyncCallback<ContextWQ>(m_work_queue, &cond));
      int r = cond.wait();
      dout(10) << ": shutdown filesystem=" << filesystem << ", r=" << r << dendl;
    }
    mirror_action.fs_mirror.reset();
  }
}
} // namespace mirror
} // namespace cephfs
| 18,187 | 29.162521 | 134 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/Mirror.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_H
#define CEPHFS_MIRROR_H
#include <map>
#include <set>
#include <vector>
#include "common/ceph_mutex.h"
#include "common/WorkQueue.h"
#include "mds/FSMap.h"
#include "ClusterWatcher.h"
#include "FSMirror.h"
#include "ServiceDaemon.h"
#include "Types.h"
class Messenger;
class MonClient;
class ContextWQ;
namespace cephfs {
namespace mirror {
// this wraps up ClusterWatcher and FSMirrors to implement mirroring
// for ceph filesystems.
class Mirror {
public:
  Mirror(CephContext *cct, const std::vector<const char*> &args,
         MonClient *monc, Messenger *msgr);
  ~Mirror();
  // bootstrap the daemon; on failure `reason` carries a human-readable cause
  int init(std::string &reason);
  void shutdown();
  // main loop -- drives the periodic mirror-update task until shutdown
  void run();
  void handle_signal(int signum);
private:
  // mgr module that must be enabled for mirroring to function
  static constexpr std::string_view MIRRORING_MODULE = "mirroring";
  // async helper contexts (defined in the .cc)
  struct C_EnableMirroring;
  struct C_DisableMirroring;
  struct C_PeerUpdate;
  struct C_RestartMirroring;
  // routes ClusterWatcher callbacks to the owning Mirror instance
  struct ClusterListener : ClusterWatcher::Listener {
    Mirror *mirror;
    ClusterListener(Mirror *mirror)
      : mirror(mirror) {
    }
    void handle_mirroring_enabled(const FilesystemSpec &spec) override {
      mirror->mirroring_enabled(spec.filesystem, spec.pool_id);
    }
    void handle_mirroring_disabled(const Filesystem &filesystem) override {
      mirror->mirroring_disabled(filesystem);
    }
    void handle_peers_added(const Filesystem &filesystem, const Peer &peer) override {
      mirror->peer_added(filesystem, peer);
    }
    void handle_peers_removed(const Filesystem &filesystem, const Peer &peer) override {
      mirror->peer_removed(filesystem, peer);
    }
  };
  // per-filesystem mirror state plus queued enable/disable/peer operations
  struct MirrorAction {
    MirrorAction(uint64_t pool_id) :
      pool_id(pool_id) {
    }
    uint64_t pool_id; // for restarting blocklisted mirror instance
    bool action_in_progress = false; // serializes execution of action_ctxs
    std::list<Context *> action_ctxs; // pending actions for this filesystem
    std::unique_ptr<FSMirror> fs_mirror; // active mirror instance (if any)
  };
  ceph::mutex m_lock = ceph::make_mutex("cephfs::mirror::Mirror");
  ceph::condition_variable m_cond;
  CephContext *m_cct;
  std::vector<const char *> m_args;
  MonClient *m_monc;
  Messenger *m_msgr;
  ClusterListener m_listener;
  ThreadPool *m_thread_pool = nullptr;
  ContextWQ *m_work_queue = nullptr;
  SafeTimer *m_timer = nullptr;
  ceph::mutex *m_timer_lock = nullptr;
  Context *m_timer_task = nullptr; // scheduled mirror-update task
  bool m_stopping = false;
  std::unique_ptr<ClusterWatcher> m_cluster_watcher;
  std::map<Filesystem, MirrorAction> m_mirror_actions;
  utime_t m_last_blocklist_check;
  utime_t m_last_failure_check;
  RadosRef m_local;
  std::unique_ptr<ServiceDaemon> m_service_daemon;
  int init_mon_client();
  // called via listener
  void mirroring_enabled(const Filesystem &filesystem, uint64_t local_pool_id);
  void mirroring_disabled(const Filesystem &filesystem);
  void peer_added(const Filesystem &filesystem, const Peer &peer);
  void peer_removed(const Filesystem &filesystem, const Peer &peer);
  // mirror enable callback
  void enable_mirroring(const Filesystem &filesystem, uint64_t local_pool_id,
                        Context *on_finish, bool is_restart=false);
  void handle_enable_mirroring(const Filesystem &filesystem, int r);
  void handle_enable_mirroring(const Filesystem &filesystem, const Peers &peers, int r);
  // mirror disable callback
  void disable_mirroring(const Filesystem &filesystem, Context *on_finish);
  void handle_disable_mirroring(const Filesystem &filesystem, int r);
  // peer update callback
  void add_peer(const Filesystem &filesystem, const Peer &peer);
  void remove_peer(const Filesystem &filesystem, const Peer &peer);
  void schedule_mirror_update_task();
  void update_fs_mirrors();
  void reopen_logs();
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_H
| 3,834 | 26.198582 | 88 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/MirrorWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_context.h"
#include "common/ceph_json.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/WorkQueue.h"
#include "include/stringify.h"
#include "msg/Messenger.h"
#include "aio_utils.h"
#include "MirrorWatcher.h"
#include "FSMirror.h"
#include "Types.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::MirrorWatcher " << __func__
namespace cephfs {
namespace mirror {
// Construct a watcher on the CEPHFS_MIRROR_OBJECT in the metadata pool;
// m_instance_id caches this client's rados instance id for reporting.
MirrorWatcher::MirrorWatcher(librados::IoCtx &ioctx, FSMirror *fs_mirror,
                             ContextWQ *work_queue)
  : Watcher(ioctx, CEPHFS_MIRROR_OBJECT, work_queue),
    m_ioctx(ioctx),
    m_fs_mirror(fs_mirror),
    m_work_queue(work_queue),
    m_lock(ceph::make_mutex("cephfs::mirror::mirror_watcher")),
    m_instance_id(stringify(m_ioctx.get_instance_id())) {
}
// No owned resources to release; watch teardown happens via shutdown().
MirrorWatcher::~MirrorWatcher() {
}
// Register the watch. The completion is wrapped so that a shutdown()
// issued while init is still in flight is completed right after the
// init callback fires (see shutdown() for the matching deferral).
void MirrorWatcher::init(Context *on_finish) {
  dout(20) << dendl;
  {
    std::scoped_lock locker(m_lock);
    ceph_assert(m_on_init_finish == nullptr);
    m_on_init_finish = new LambdaContext([this, on_finish](int r) {
      on_finish->complete(r);
      // a shutdown was requested while init was in progress -- run it now
      if (m_on_shutdown_finish != nullptr) {
        m_on_shutdown_finish->complete(0);
      }
    });
  }
  register_watcher();
}
// Unregister the watch. If init has not completed yet, defer: park a
// context that re-invokes shutdown() once init's wrapper completes it.
void MirrorWatcher::shutdown(Context *on_finish) {
  dout(20) << dendl;
  {
    std::scoped_lock locker(m_lock);
    ceph_assert(m_on_shutdown_finish == nullptr);
    if (m_on_init_finish != nullptr) {
      dout(10) << ": delaying shutdown -- init in progress" << dendl;
      m_on_shutdown_finish = new LambdaContext([this, on_finish](int r) {
        m_on_shutdown_finish = nullptr;
        shutdown(on_finish);
      });
      return;
    }
    m_on_shutdown_finish = on_finish;
  }
  unregister_watcher();
}
// Notification handler: acknowledge with this daemon's rados instance
// address as JSON ({"addr": ...}); the manager module uses the address
// to blocklist this instance when required.
void MirrorWatcher::handle_notify(uint64_t notify_id, uint64_t handle,
                                  uint64_t notifier_id, bufferlist& bl) {
  dout(20) << dendl;
  JSONFormatter f;
  f.open_object_section("info");
  encode_json("addr", m_fs_mirror->get_instance_addr(), &f);
  f.close_section();
  bufferlist outbl;
  f.flush(outbl);
  acknowledge_notify(notify_id, handle, outbl);
}
// Rewatch-completion callback: record a blocklist (-EBLOCKLISTED),
// a deleted mirroring object (-ENOENT) or any other rewatch error as
// terminal state, picked up by is_blocklisted()/is_failed().
//
// Fix: m_failed was previously written without holding m_lock while
// is_failed() reads it under m_lock -- take the lock on all branches
// for consistent synchronization.
void MirrorWatcher::handle_rewatch_complete(int r) {
  dout(5) << ": r=" << r << dendl;
  if (r == -EBLOCKLISTED) {
    dout(0) << ": client blocklisted" <<dendl;
    std::scoped_lock locker(m_lock);
    m_blocklisted = true;
  } else if (r == -ENOENT) {
    derr << ": mirroring object deleted" << dendl;
    std::scoped_lock locker(m_lock);
    m_failed = true;
  } else if (r < 0) {
    derr << ": rewatch error: " << cpp_strerror(r) << dendl;
    std::scoped_lock locker(m_lock);
    m_failed = true;
  }
}
// Kick off watch registration; completion lands in handle_register_watcher().
void MirrorWatcher::register_watcher() {
  dout(20) << dendl;
  std::scoped_lock locker(m_lock);
  Context *on_finish = new C_CallbackAdapter<
    MirrorWatcher, &MirrorWatcher::handle_register_watcher>(this);
  register_watch(on_finish);
}
// Completion of watch registration: fire (and clear) the stored init
// callback outside the lock.
void MirrorWatcher::handle_register_watcher(int r) {
  dout(20) << ": r=" << r << dendl;
  Context *on_init_finish = nullptr;
  {
    std::scoped_lock locker(m_lock);
    std::swap(on_init_finish, m_on_init_finish);
  }
  on_init_finish->complete(r);
}
// Kick off watch unregistration; the callback is bounced through the
// work queue to avoid completing inline in the rados callback thread.
void MirrorWatcher::unregister_watcher() {
  dout(20) << dendl;
  std::scoped_lock locker(m_lock);
  Context *on_finish = new C_CallbackAdapter<
    MirrorWatcher, &MirrorWatcher::handle_unregister_watcher>(this);
  unregister_watch(new C_AsyncCallback<ContextWQ>(m_work_queue, on_finish));
}
// Completion of watch unregistration: fire (and clear) the stored
// shutdown callback outside the lock.
void MirrorWatcher::handle_unregister_watcher(int r) {
  dout(20) << ": r=" << r << dendl;
  Context *on_shutdown_finish = nullptr;
  {
    std::scoped_lock locker(m_lock);
    std::swap(on_shutdown_finish, m_on_shutdown_finish);
  }
  on_shutdown_finish->complete(r);
}
} // namespace mirror
} // namespace cephfs
| 4,185 | 27.09396 | 81 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/MirrorWatcher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_MIRROR_WATCHER_H
#define CEPHFS_MIRROR_MIRROR_WATCHER_H
#include <string_view>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "Watcher.h"
class ContextWQ;
class Messenger;
namespace cephfs {
namespace mirror {
class FSMirror;
// watch for notifications via cephfs_mirror object (in metadata
// pool). this is used sending keepalived with keepalive payload
// being the rados instance address (used by the manager module
// to blocklist when needed).
class MirrorWatcher : public Watcher {
public:
  static MirrorWatcher *create(librados::IoCtx &ioctx, FSMirror *fs_mirror,
                               ContextWQ *work_queue) {
    return new MirrorWatcher(ioctx, fs_mirror, work_queue);
  }
  MirrorWatcher(librados::IoCtx &ioctx, FSMirror *fs_mirror,
                ContextWQ *work_queue);
  ~MirrorWatcher();
  // register/unregister the watch; both complete asynchronously
  void init(Context *on_finish);
  void shutdown(Context *on_finish);
  // replies with this daemon's instance address (JSON)
  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist& bl) override;
  void handle_rewatch_complete(int r) override;
  // true once the rados client has been blocklisted
  bool is_blocklisted() {
    std::scoped_lock locker(m_lock);
    return m_blocklisted;
  }
  // true once the watch failed terminally (object gone, rewatch error)
  bool is_failed() {
    std::scoped_lock locker(m_lock);
    return m_failed;
  }
private:
  librados::IoCtx &m_ioctx;
  FSMirror *m_fs_mirror;
  ContextWQ *m_work_queue;
  ceph::mutex m_lock; // guards the callbacks/flags below
  std::string m_instance_id;
  Context *m_on_init_finish = nullptr;
  Context *m_on_shutdown_finish = nullptr;
  bool m_blocklisted = false;
  bool m_failed = false;
  void register_watcher();
  void handle_register_watcher(int r);
  void unregister_watcher();
  void handle_unregister_watcher(int r);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_MIRROR_WATCHER_H
| 1,937 | 23.225 | 75 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/PeerReplayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <stack>
#include <fcntl.h>
#include <algorithm>
#include <sys/time.h>
#include <sys/file.h>
#include <boost/scope_exit.hpp>
#include "common/admin_socket.h"
#include "common/ceph_context.h"
#include "common/debug.h"
#include "common/errno.h"
#include "FSMirror.h"
#include "PeerReplayer.h"
#include "Utils.h"
#include "json_spirit/json_spirit.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::PeerReplayer(" \
<< m_peer.uuid << ") " << __func__
using namespace std;
namespace cephfs {
namespace mirror {
namespace {
const std::string PEER_CONFIG_KEY_PREFIX = "cephfs/mirror/peer";
// Path of the snapshot directory under `path` (e.g. "<path>/.snap",
// per the client_snapdir configurable).
std::string snapshot_dir_path(CephContext *cct, const std::string &path) {
  return path + "/" + cct->_conf->client_snapdir;
}
// Join a snapshot directory path and a snapshot name.
std::string snapshot_path(const std::string &snap_dir, const std::string &snap_name) {
  std::string path = snap_dir;
  path.append("/").append(snap_name);
  return path;
}
// Full path of snapshot `snap_name` for directory `path`
// (e.g. "<path>/.snap/<snap_name>").
std::string snapshot_path(CephContext *cct, const std::string &path, const std::string &snap_name) {
  return path + "/" + cct->_conf->client_snapdir + "/" + snap_name;
}
// Join a directory path and an entry name.
std::string entry_path(const std::string &dir, const std::string &name) {
  std::string path = dir;
  path += '/';
  path += name;
  return path;
}
// Convert a libcephfs snap_metadata key/value array into a std::map.
std::map<std::string, std::string> decode_snap_metadata(snap_metadata *snap_metadata,
                                                        size_t nr_snap_metadata) {
  std::map<std::string, std::string> metadata;
  size_t idx = 0;
  while (idx < nr_snap_metadata) {
    metadata.emplace(snap_metadata[idx].key, snap_metadata[idx].value);
    ++idx;
  }
  return metadata;
}
// Build the config-key store key that holds the peer connection info:
//   cephfs/mirror/peer/<fs_name>/<uuid>
std::string peer_config_key(const std::string &fs_name, const std::string &uuid) {
  std::string key = PEER_CONFIG_KEY_PREFIX;
  key += "/";
  key += fs_name;
  key += "/";
  key += uuid;
  return key;
}
// Interface for admin-socket commands registered against a peer replayer.
class PeerAdminSocketCommand {
public:
  virtual ~PeerAdminSocketCommand() {
  }
  // execute the command, writing output through `f`
  virtual int call(Formatter *f) = 0;
};
// "fs mirror peer status" admin-socket command: dumps the replayer's
// per-directory sync status.
class StatusCommand : public PeerAdminSocketCommand {
public:
  explicit StatusCommand(PeerReplayer *peer_replayer)
    : peer_replayer(peer_replayer) {
  }
  int call(Formatter *f) override {
    peer_replayer->peer_status(f);
    return 0;
  }
private:
  PeerReplayer *peer_replayer; // not owned
};
// helper to open a directory relative to a file descriptor
// On success *dirp holds the open directory stream; the intermediate
// fd is always closed (ceph_fdopendir keeps its own reference).
// Returns 0 or a negative errno.
int opendirat(MountRef mnt, int dirfd, const std::string &relpath, int flags,
              ceph_dir_result **dirp) {
  int r = ceph_openat(mnt, dirfd, relpath.c_str(), flags, 0);
  if (r < 0) {
    return r;
  }
  int fd = r;
  r = ceph_fdopendir(mnt, fd, dirp);
  ceph_close(mnt, fd);
  return r;
}
} // anonymous namespace
// Registers per-peer admin-socket commands ("fs mirror peer status
// <fs>@<id> <peer-uuid>") and dispatches them to the replayer.
class PeerReplayerAdminSocketHook : public AdminSocketHook {
public:
  PeerReplayerAdminSocketHook(CephContext *cct, const Filesystem &filesystem,
                              const Peer &peer, PeerReplayer *peer_replayer)
    : admin_socket(cct->get_admin_socket()) {
    int r;
    std::string cmd;
    // mirror peer status format is name@id uuid
    cmd = "fs mirror peer status "
          + stringify(filesystem.fs_name) + "@" + stringify(filesystem.fscid)
          + " "
          + stringify(peer.uuid);
    r = admin_socket->register_command(
      cmd, this, "get peer mirror status");
    if (r == 0) {
      // only track the command if registration succeeded
      commands[cmd] = new StatusCommand(peer_replayer);
    }
  }
  ~PeerReplayerAdminSocketHook() override {
    admin_socket->unregister_commands(this);
    for (auto &[command, cmdptr] : commands) {
      delete cmdptr;
    }
  }
  int call(std::string_view command, const cmdmap_t& cmdmap,
           const bufferlist&,
           Formatter *f, std::ostream &errss, bufferlist &out) override {
    auto p = commands.at(std::string(command));
    return p->call(f);
  }
private:
  // transparent comparator allows lookup by string_view without a copy
  typedef std::map<std::string, PeerAdminSocketCommand*, std::less<>> Commands;
  AdminSocket *admin_socket;
  Commands commands;
};
// Construct a replayer for one (filesystem, peer) pair. `directories`
// seeds the set of directory roots to mirror; `mount` is the local
// filesystem mount (not owned).
PeerReplayer::PeerReplayer(CephContext *cct, FSMirror *fs_mirror,
                           RadosRef local_cluster, const Filesystem &filesystem,
                           const Peer &peer, const std::set<std::string, std::less<>> &directories,
                           MountRef mount, ServiceDaemon *service_daemon)
  : m_cct(cct),
    m_fs_mirror(fs_mirror),
    m_local_cluster(local_cluster),
    m_filesystem(filesystem),
    m_peer(peer),
    m_directories(directories.begin(), directories.end()),
    m_local_mount(mount),
    m_service_daemon(service_daemon),
    m_asok_hook(new PeerReplayerAdminSocketHook(cct, filesystem, peer, this)),
    m_lock(ceph::make_mutex("cephfs::mirror::PeerReplayer::" + stringify(peer.uuid))) {
  // reset sync stats sent via service daemon
  m_service_daemon->add_or_update_peer_attribute(m_filesystem.fscid, m_peer,
                                                 SERVICE_DAEMON_FAILED_DIR_COUNT_KEY, (uint64_t)0);
  m_service_daemon->add_or_update_peer_attribute(m_filesystem.fscid, m_peer,
                                                 SERVICE_DAEMON_RECOVERED_DIR_COUNT_KEY, (uint64_t)0);
}
// Tear down the admin-socket hook; shutdown() must already have
// stopped the replayer threads and unmounted the remote.
PeerReplayer::~PeerReplayer() {
  delete m_asok_hook;
}
// Connect to the remote peer cluster (credentials fetched from the
// local config-key store), mount the remote filesystem, and spawn the
// snapshot replayer threads. Returns 0 or a negative errno.
int PeerReplayer::init() {
  dout(20) << ": initial dir list=[" << m_directories << "]" << dendl;
  for (auto &dir_root : m_directories) {
    m_snap_sync_stats.emplace(dir_root, SnapSyncStat());
  }
  auto &remote_client = m_peer.remote.client_name;
  auto &remote_cluster = m_peer.remote.cluster_name;
  auto remote_filesystem = Filesystem{0, m_peer.remote.fs_name};
  // peer connection info (mon host + cephx key) is stored under a
  // well-known config-key; absence (-ENOENT) is tolerated -- connect()
  // then falls back to local config/keyring lookup.
  std::string key = peer_config_key(m_filesystem.fs_name, m_peer.uuid);
  std::string cmd =
    "{"
      "\"prefix\": \"config-key get\", "
      "\"key\": \"" + key + "\""
    "}";
  bufferlist in_bl;
  bufferlist out_bl;
  int r = m_local_cluster->mon_command(cmd, in_bl, &out_bl, nullptr);
  dout(5) << ": mon command r=" << r << dendl;
  if (r < 0 && r != -ENOENT) {
    return r;
  }
  std::string mon_host;
  std::string cephx_key;
  if (!r) {
    json_spirit::mValue root;
    if (!json_spirit::read(out_bl.to_str(), root)) {
      derr << ": invalid config-key JSON" << dendl;
      return -EBADMSG;
    }
    try {
      auto &root_obj = root.get_obj();
      mon_host = root_obj.at("mon_host").get_str();
      cephx_key = root_obj.at("key").get_str();
      dout(0) << ": remote monitor host=" << mon_host << dendl;
    } catch (std::runtime_error&) {
      derr << ": unexpected JSON received" << dendl;
      return -EBADMSG;
    }
  }
  r = connect(remote_client, remote_cluster, &m_remote_cluster, mon_host, cephx_key);
  if (r < 0) {
    derr << ": error connecting to remote cluster: " << cpp_strerror(r)
         << dendl;
    return r;
  }
  r = mount(m_remote_cluster, remote_filesystem, false, &m_remote_mount);
  if (r < 0) {
    m_remote_cluster.reset();
    derr << ": error mounting remote filesystem=" << remote_filesystem << dendl;
    return r;
  }
  std::scoped_lock locker(m_lock);
  auto nr_replayers = g_ceph_context->_conf.get_val<uint64_t>(
    "cephfs_mirror_max_concurrent_directory_syncs");
  dout(20) << ": spawning " << nr_replayers << " snapshot replayer(s)" << dendl;
  while (nr_replayers-- > 0) {
    std::unique_ptr<SnapshotReplayerThread> replayer(
      new SnapshotReplayerThread(this));
    std::string name("replayer-" + stringify(nr_replayers));
    replayer->create(name.c_str());
    m_replayers.push_back(std::move(replayer));
  }
  return 0;
}
// Stop the replayer threads (they observe m_stopping), then unmount
// and disconnect from the remote cluster.
void PeerReplayer::shutdown() {
  dout(20) << dendl;
  {
    std::scoped_lock locker(m_lock);
    ceph_assert(!m_stopping);
    m_stopping = true;
    m_cond.notify_all();
  }
  for (auto &replayer : m_replayers) {
    replayer->join();
  }
  m_replayers.clear();
  ceph_unmount(m_remote_mount);
  ceph_release(m_remote_mount);
  m_remote_mount = nullptr;
  m_remote_cluster.reset();
}
// Add a directory root to mirror and wake the replayer threads.
void PeerReplayer::add_directory(string_view dir_root) {
  dout(20) << ": dir_root=" << dir_root << dendl;
  std::scoped_lock locker(m_lock);
  m_directories.emplace_back(dir_root);
  m_snap_sync_stats.emplace(dir_root, SnapSyncStat());
  m_cond.notify_all();
}
// Remove a directory root from mirroring. If a replayer is currently
// syncing it, flag the registry canceled instead of dropping the stats;
// unregister_directory() reaps the stats once the replayer lets go.
void PeerReplayer::remove_directory(string_view dir_root) {
  dout(20) << ": dir_root=" << dir_root << dendl;
  auto _dir_root = std::string(dir_root);
  std::scoped_lock locker(m_lock);
  auto it = std::find(m_directories.begin(), m_directories.end(), _dir_root);
  if (it != m_directories.end()) {
    m_directories.erase(it);
  }
  auto it1 = m_registered.find(_dir_root);
  if (it1 == m_registered.end()) {
    m_snap_sync_stats.erase(_dir_root);
  } else {
    it1->second.canceled = true;
  }
  m_cond.notify_all();
}
// Pick the next unclaimed directory root to sync, skipping roots whose
// last sync failed less than the retry interval ago. The list is
// rotated after each pick so directories are serviced round-robin.
// Returns boost::none when nothing is eligible. Caller holds m_lock.
boost::optional<std::string> PeerReplayer::pick_directory() {
  dout(20) << dendl;
  auto now = clock::now();
  auto retry_timo = g_ceph_context->_conf.get_val<uint64_t>(
    "cephfs_mirror_retry_failed_directories_interval");
  boost::optional<std::string> candidate;
  for (auto &dir_root : m_directories) {
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    if (sync_stat.failed) {
      std::chrono::duration<double> d = now - *sync_stat.last_failed;
      if (d.count() < retry_timo) {
        continue;
      }
    }
    if (!m_registered.count(dir_root)) {
      candidate = dir_root;
      break;
    }
  }
  // guard: rotating an empty range with begin()+1 is undefined behavior
  if (!m_directories.empty()) {
    std::rotate(m_directories.begin(), m_directories.begin() + 1, m_directories.end());
  }
  return candidate;
}
// Claim dir_root for `replayer` by taking the remote advisory lock;
// on success the registry is recorded in m_registered.
int PeerReplayer::register_directory(const std::string &dir_root,
                                     SnapshotReplayerThread *replayer) {
  dout(20) << ": dir_root=" << dir_root << dendl;
  ceph_assert(m_registered.find(dir_root) == m_registered.end());
  DirRegistry registry;
  int r = try_lock_directory(dir_root, replayer, &registry);
  if (r < 0) {
    return r;
  }
  dout(5) << ": dir_root=" << dir_root << " registered with replayer="
          << replayer << dendl;
  m_registered.emplace(dir_root, std::move(registry));
  return 0;
}
// Release the remote lock and drop the registry entry. Stats are only
// erased if the directory was removed while this replayer held it
// (see remove_directory()).
void PeerReplayer::unregister_directory(const std::string &dir_root) {
  dout(20) << ": dir_root=" << dir_root << dendl;
  auto it = m_registered.find(dir_root);
  ceph_assert(it != m_registered.end());
  unlock_directory(it->first, it->second);
  m_registered.erase(it);
  if (std::find(m_directories.begin(), m_directories.end(), dir_root) == m_directories.end()) {
    m_snap_sync_stats.erase(dir_root);
  }
}
// Take a non-blocking exclusive flock on the remote dir_root (creating
// the directory if absent) so only one cephfs-mirror instance syncs it.
// On success, registry holds the open fd and owning replayer.
// Returns 0, -EWOULDBLOCK if another instance holds the lock, or a
// negative errno.
int PeerReplayer::try_lock_directory(const std::string &dir_root,
                                     SnapshotReplayerThread *replayer, DirRegistry *registry) {
  dout(20) << ": dir_root=" << dir_root << dendl;
  int r = ceph_open(m_remote_mount, dir_root.c_str(), O_RDONLY | O_DIRECTORY, 0);
  if (r < 0 && r != -ENOENT) {
    derr << ": failed to open remote dir_root=" << dir_root << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }
  if (r == -ENOENT) {
    // we snap under dir_root, so mode does not matter much
    r = ceph_mkdirs(m_remote_mount, dir_root.c_str(), 0755);
    if (r < 0) {
      derr << ": failed to create remote directory=" << dir_root << ": " << cpp_strerror(r)
           << dendl;
      return r;
    }
    r = ceph_open(m_remote_mount, dir_root.c_str(), O_RDONLY | O_DIRECTORY, 0);
    if (r < 0) {
      derr << ": failed to open remote dir_root=" << dir_root << ": " << cpp_strerror(r)
           << dendl;
      return r;
    }
  }
  int fd = r;
  // lock owner is keyed by thread id so unlock must use the same owner
  r = ceph_flock(m_remote_mount, fd, LOCK_EX | LOCK_NB, (uint64_t)replayer->get_thread_id());
  if (r != 0) {
    if (r == -EWOULDBLOCK) {
      dout(5) << ": dir_root=" << dir_root << " is locked by cephfs-mirror, "
              << "will retry again" << dendl;
    } else {
      derr << ": failed to lock dir_root=" << dir_root << ": " << cpp_strerror(r)
           << dendl;
    }
    if (ceph_close(m_remote_mount, fd) < 0) {
      derr << ": failed to close (cleanup) remote dir_root=" << dir_root << ": "
           << cpp_strerror(r) << dendl;
    }
    return r;
  }
  dout(10) << ": dir_root=" << dir_root << " locked" << dendl;
  registry->fd = fd;
  registry->replayer = replayer;
  return 0;
}
// Drop the remote flock taken by try_lock_directory() and close the fd.
// Errors are logged but not propagated (best-effort cleanup).
void PeerReplayer::unlock_directory(const std::string &dir_root, const DirRegistry &registry) {
  dout(20) << ": dir_root=" << dir_root << dendl;
  int r = ceph_flock(m_remote_mount, registry.fd, LOCK_UN,
                     (uint64_t)registry.replayer->get_thread_id());
  if (r < 0) {
    derr << ": failed to unlock remote dir_root=" << dir_root << ": " << cpp_strerror(r)
         << dendl;
    return;
  }
  r = ceph_close(m_remote_mount, registry.fd);
  if (r < 0) {
    derr << ": failed to close remote dir_root=" << dir_root << ": " << cpp_strerror(r)
         << dendl;
  }
  dout(10) << ": dir_root=" << dir_root << " unlocked" << dendl;
}
// Build snap-id -> snap-name map for dir_root. For a remote mount the
// id is recovered from the snapshot's PRIMARY_SNAP_ID_KEY metadata (set
// when the snapshot was synced); for a local mount the snapshot's own
// id is used. Snapshots whose name starts with '_' are skipped.
// Returns 0 or a negative errno; a missing remote snap dir is not an
// error (nothing synced yet).
int PeerReplayer::build_snap_map(const std::string &dir_root,
                                 std::map<uint64_t, std::string> *snap_map, bool is_remote) {
  auto snap_dir = snapshot_dir_path(m_cct, dir_root);
  dout(20) << ": dir_root=" << dir_root << ", snap_dir=" << snap_dir
           << ", is_remote=" << is_remote << dendl;
  auto lr_str = is_remote ? "remote" : "local";
  auto mnt = is_remote ? m_remote_mount : m_local_mount;
  ceph_dir_result *dirp = nullptr;
  int r = ceph_opendir(mnt, snap_dir.c_str(), &dirp);
  if (r < 0) {
    if (is_remote && r == -ENOENT) {
      return 0;
    }
    derr << ": failed to open " << lr_str << " snap directory=" << snap_dir
         << ": " << cpp_strerror(r) << dendl;
    return r;
  }
  std::set<std::string> snaps;
  auto entry = ceph_readdir(mnt, dirp);
  while (entry != NULL) {
    auto d_name = std::string(entry->d_name);
    dout(20) << ": entry=" << d_name << dendl;
    if (d_name != "." && d_name != ".." && d_name.rfind("_", 0) != 0) {
      snaps.emplace(d_name);
    }
    entry = ceph_readdir(mnt, dirp);
  }
  int rv = 0;
  for (auto &snap : snaps) {
    snap_info info;
    auto snap_path = snapshot_path(snap_dir, snap);
    r = ceph_get_snap_info(mnt, snap_path.c_str(), &info);
    if (r < 0) {
      derr << ": failed to fetch " << lr_str << " snap info for snap_path=" << snap_path
           << ": " << cpp_strerror(r) << dendl;
      rv = r;
      break;
    }
    uint64_t snap_id;
    if (is_remote) {
      if (!info.nr_snap_metadata) {
        derr << ": snap_path=" << snap_path << " has invalid metadata in remote snapshot"
             << dendl;
        rv = -EINVAL;
      } else {
        auto metadata = decode_snap_metadata(info.snap_metadata, info.nr_snap_metadata);
        dout(20) << ": snap_path=" << snap_path << ", metadata=" << metadata << dendl;
        auto it = metadata.find(PRIMARY_SNAP_ID_KEY);
        if (it == metadata.end()) {
          derr << ": snap_path=" << snap_path << " has missing \"" << PRIMARY_SNAP_ID_KEY
               << "\" in metadata" << dendl;
          rv = -EINVAL;
        } else {
          // NOTE(review): std::stoull throws on non-numeric metadata --
          // presumably the writer always stores a numeric id; confirm.
          snap_id = std::stoull(it->second);
        }
        ceph_free_snap_info_buffer(&info);
      }
    } else {
      snap_id = info.id;
    }
    if (rv != 0) {
      break;
    }
    snap_map->emplace(snap_id, snap);
  }
  r = ceph_closedir(mnt, dirp);
  if (r < 0) {
    derr << ": failed to close " << lr_str << " snap directory=" << snap_dir
         << ": " << cpp_strerror(r) << dendl;
  }
  dout(10) << ": " << lr_str << " snap_map=" << *snap_map << dendl;
  return rv;
}
// Delete, on the remote filesystem, snapshots under dir_root that have
// been removed locally. Stops at the first failure.
// Returns 0 or a negative errno.
int PeerReplayer::propagate_snap_deletes(const std::string &dir_root,
                                         const std::set<std::string> &snaps) {
  dout(5) << ": dir_root=" << dir_root << ", deleted snapshots=" << snaps << dendl;
  for (auto &snap : snaps) {
    dout(20) << ": deleting dir_root=" << dir_root << ", snapshot=" << snap
             << dendl;
    int r = ceph_rmsnap(m_remote_mount, dir_root.c_str(), snap.c_str());
    if (r < 0) {
      // report the snapshot that actually failed, not the whole set
      derr << ": failed to delete remote snap dir_root=" << dir_root
           << ", snapshot=" << snap << ": " << cpp_strerror(r) << dendl;
      return r;
    }
    inc_deleted_snap(dir_root);
  }
  return 0;
}
// Rename remote snapshots to match local renames; `snaps` holds
// (old_name, new_name) pairs. Stops at the first failure.
// Returns 0 or a negative errno.
int PeerReplayer::propagate_snap_renames(
    const std::string &dir_root,
    const std::set<std::pair<std::string,std::string>> &snaps) {
  dout(10) << ": dir_root=" << dir_root << ", renamed snapshots=" << snaps << dendl;
  for (auto &snapp : snaps) {
    auto from = snapshot_path(m_cct, dir_root, snapp.first);
    auto to = snapshot_path(m_cct, dir_root, snapp.second);
    dout(20) << ": renaming dir_root=" << dir_root << ", snapshot from="
             << from << ", to=" << to << dendl;
    int r = ceph_rename(m_remote_mount, from.c_str(), to.c_str());
    if (r < 0) {
      derr << ": failed to rename remote snap dir_root=" << dir_root
           << ", snapshot from =" << from << ", to=" << to << ": "
           << cpp_strerror(r) << dendl;
      return r;
    }
    inc_renamed_snap(dir_root);
  }
  return 0;
}
// Create directory `epath` (relative to the remote dir_root fd) and
// replicate ownership, permission bits and [am]times from the local
// statx `stx`. An already-existing directory is not an error.
// Returns 0 or a negative errno.
int PeerReplayer::remote_mkdir(const std::string &epath, const struct ceph_statx &stx,
                               const FHandles &fh) {
  dout(10) << ": remote epath=" << epath << dendl;
  // NOTE(review): mask is ~S_IFDIR, so non-directory format bits could
  // leak into the mkdir mode; chmodat below re-applies ~S_IFMT -- confirm
  // intentional.
  int r = ceph_mkdirat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), stx.stx_mode & ~S_IFDIR);
  if (r < 0 && r != -EEXIST) {
    derr << ": failed to create remote directory=" << epath << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }
  r = ceph_chownat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), stx.stx_uid, stx.stx_gid,
                   AT_SYMLINK_NOFOLLOW);
  if (r < 0) {
    derr << ": failed to chown remote directory=" << epath << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }
  r = ceph_chmodat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), stx.stx_mode & ~S_IFMT,
                   AT_SYMLINK_NOFOLLOW);
  if (r < 0) {
    derr << ": failed to chmod remote directory=" << epath << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }
  struct timespec times[] = {{stx.stx_atime.tv_sec, stx.stx_atime.tv_nsec},
                             {stx.stx_mtime.tv_sec, stx.stx_mtime.tv_nsec}};
  r = ceph_utimensat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), times, AT_SYMLINK_NOFOLLOW);
  if (r < 0) {
    derr << ": failed to change [am]time on remote directory=" << epath << ": "
         << cpp_strerror(r) << dendl;
    return r;
  }
  return 0;
}
#define NR_IOVECS 8 // # iovecs
#define IOVEC_SIZE (8 * 1024 * 1024) // buffer size for each iovec
// Copy a regular file from the local snapshot to the remote, streaming
// through NR_IOVECS * IOVEC_SIZE iovec buffers, then fsync the remote
// file. Aborts early if should_backoff() signals cancel/blocklist.
// goto-based cleanup: labels close the remote/local fds in reverse
// order of acquisition. Returns 0 or a negative errno.
int PeerReplayer::copy_to_remote(const std::string &dir_root, const std::string &epath,
                                 const struct ceph_statx &stx, const FHandles &fh) {
  dout(10) << ": dir_root=" << dir_root << ", epath=" << epath << dendl;
  int l_fd;
  int r_fd;
  void *ptr;
  struct iovec iov[NR_IOVECS];
  int r = ceph_openat(m_local_mount, fh.c_fd, epath.c_str(), O_RDONLY | O_NOFOLLOW, 0);
  if (r < 0) {
    derr << ": failed to open local file path=" << epath << ": "
         << cpp_strerror(r) << dendl;
    return r;
  }
  l_fd = r;
  r = ceph_openat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(),
                  O_CREAT | O_TRUNC | O_WRONLY | O_NOFOLLOW, stx.stx_mode);
  if (r < 0) {
    derr << ": failed to create remote file path=" << epath << ": "
         << cpp_strerror(r) << dendl;
    goto close_local_fd;
  }
  r_fd = r;
  ptr = malloc(NR_IOVECS * IOVEC_SIZE);
  if (!ptr) {
    r = -ENOMEM;
    derr << ": failed to allocate memory" << dendl;
    goto close_remote_fd;
  }
  while (true) {
    if (should_backoff(dir_root, &r)) {
      dout(0) << ": backing off r=" << r << dendl;
      break;
    }
    // reset iovec lengths -- the tail iovec may have been shortened below
    for (int i = 0; i < NR_IOVECS; ++i) {
      iov[i].iov_base = (char*)ptr + IOVEC_SIZE*i;
      iov[i].iov_len = IOVEC_SIZE;
    }
    r = ceph_preadv(m_local_mount, l_fd, iov, NR_IOVECS, -1);
    if (r < 0) {
      derr << ": failed to read local file path=" << epath << ": "
           << cpp_strerror(r) << dendl;
      break;
    }
    if (r == 0) {
      break;
    }
    // trim the iovec list to exactly the bytes read before writing
    int iovs = (int)(r / IOVEC_SIZE);
    int t = r % IOVEC_SIZE;
    if (t) {
      iov[iovs].iov_len = t;
      ++iovs;
    }
    r = ceph_pwritev(m_remote_mount, r_fd, iov, iovs, -1);
    if (r < 0) {
      derr << ": failed to write remote file path=" << epath << ": "
           << cpp_strerror(r) << dendl;
      break;
    }
  }
  if (r == 0) {
    r = ceph_fsync(m_remote_mount, r_fd, 0);
    if (r < 0) {
      derr << ": failed to sync data for file path=" << epath << ": "
           << cpp_strerror(r) << dendl;
    }
  }
  free(ptr);
close_remote_fd:
  if (ceph_close(m_remote_mount, r_fd) < 0) {
    derr << ": failed to close remote fd path=" << epath << ": " << cpp_strerror(r)
         << dendl;
    return -EINVAL;
  }
close_local_fd:
  if (ceph_close(m_local_mount, l_fd) < 0) {
    derr << ": failed to close local fd path=" << epath << ": " << cpp_strerror(r)
         << dendl;
    return -EINVAL;
  }
  return r == 0 ? 0 : r;
}
// Sync one non-directory entry to the remote: copy file data (regular
// files) or recreate the symlink when need_data_sync is set, then
// replicate uid/gid, mode and [am]times when need_attr_sync is set.
// Entries of unsupported types are skipped. Returns 0 or a negative
// errno.
//
// Fix: the symlink-exists check compared against positive EEXIST while
// libcephfs returns negative errnos, so a pre-existing symlink was
// treated as a failure; compare against -EEXIST.
int PeerReplayer::remote_file_op(const std::string &dir_root, const std::string &epath,
                                 const struct ceph_statx &stx, const FHandles &fh,
                                 bool need_data_sync, bool need_attr_sync) {
  dout(10) << ": dir_root=" << dir_root << ", epath=" << epath << ", need_data_sync=" << need_data_sync
           << ", need_attr_sync=" << need_attr_sync << dendl;
  int r;
  if (need_data_sync) {
    if (S_ISREG(stx.stx_mode)) {
      r = copy_to_remote(dir_root, epath, stx, fh);
      if (r < 0) {
        derr << ": failed to copy path=" << epath << ": " << cpp_strerror(r) << dendl;
        return r;
      }
    } else if (S_ISLNK(stx.stx_mode)) {
      // free the remote link before relinking
      r = ceph_unlinkat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), 0);
      if (r < 0 && r != -ENOENT) {
        derr << ": failed to remove remote symlink=" << epath << dendl;
        return r;
      }
      char *target = (char *)alloca(stx.stx_size+1);
      r = ceph_readlinkat(m_local_mount, fh.c_fd, epath.c_str(), target, stx.stx_size);
      if (r < 0) {
        derr << ": failed to readlink local path=" << epath << ": " << cpp_strerror(r)
             << dendl;
        return r;
      }
      target[stx.stx_size] = '\0';
      r = ceph_symlinkat(m_remote_mount, target, fh.r_fd_dir_root, epath.c_str());
      if (r < 0 && r != -EEXIST) {
        derr << ": failed to symlink remote path=" << epath << " to target=" << target
             << ": " << cpp_strerror(r) << dendl;
        return r;
      }
    } else {
      dout(5) << ": skipping entry=" << epath << ": unsupported mode=" << stx.stx_mode
              << dendl;
      return 0;
    }
  }
  if (need_attr_sync) {
    r = ceph_chownat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), stx.stx_uid, stx.stx_gid,
                     AT_SYMLINK_NOFOLLOW);
    if (r < 0) {
      derr << ": failed to chown remote directory=" << epath << ": " << cpp_strerror(r)
           << dendl;
      return r;
    }
    r = ceph_chmodat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), stx.stx_mode & ~S_IFMT,
                     AT_SYMLINK_NOFOLLOW);
    if (r < 0) {
      derr << ": failed to chmod remote directory=" << epath << ": " << cpp_strerror(r)
           << dendl;
      return r;
    }
    struct timespec times[] = {{stx.stx_atime.tv_sec, stx.stx_atime.tv_nsec},
                               {stx.stx_mtime.tv_sec, stx.stx_mtime.tv_nsec}};
    r = ceph_utimensat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), times, AT_SYMLINK_NOFOLLOW);
    if (r < 0) {
      derr << ": failed to change [am]time on remote directory=" << epath << ": "
           << cpp_strerror(r) << dendl;
      return r;
    }
  }
  return 0;
}
// Recursively remove `epath` (relative to the remote dir_root fd) via
// an explicit stack instead of recursion: a directory is popped (and
// rmdir'ed) only after all its children have been removed. Aborts early
// on should_backoff(); any directory streams still on the stack are
// closed before returning. Returns 0 or a negative errno.
int PeerReplayer::cleanup_remote_dir(const std::string &dir_root,
                                     const std::string &epath, const FHandles &fh) {
  dout(20) << ": dir_root=" << dir_root << ", epath=" << epath
           << dendl;
  struct ceph_statx tstx;
  int r = ceph_statxat(m_remote_mount, fh.r_fd_dir_root, epath.c_str(), &tstx,
                       CEPH_STATX_MODE | CEPH_STATX_UID | CEPH_STATX_GID |
                       CEPH_STATX_SIZE | CEPH_STATX_ATIME | CEPH_STATX_MTIME,
                       AT_STATX_DONT_SYNC | AT_SYMLINK_NOFOLLOW);
  if (r < 0) {
    derr << ": failed to stat remote directory=" << epath << ": "
         << cpp_strerror(r) << dendl;
    return r;
  }
  ceph_dir_result *tdirp;
  r = opendirat(m_remote_mount, fh.r_fd_dir_root, epath, AT_SYMLINK_NOFOLLOW,
                &tdirp);
  if (r < 0) {
    derr << ": failed to open remote directory=" << epath << ": "
         << cpp_strerror(r) << dendl;
    return r;
  }
  std::stack<SyncEntry> rm_stack;
  rm_stack.emplace(SyncEntry(epath, tdirp, tstx));
  while (!rm_stack.empty()) {
    if (should_backoff(dir_root, &r)) {
      dout(0) << ": backing off r=" << r << dendl;
      break;
    }
    dout(20) << ": " << rm_stack.size() << " entries in stack" << dendl;
    std::string e_name;
    auto &entry = rm_stack.top();
    dout(20) << ": top of stack path=" << entry.epath << dendl;
    if (entry.is_directory()) {
      // scan for the next child entry; r == 0 below means the directory
      // has been fully drained and can be removed itself
      struct ceph_statx stx;
      struct dirent de;
      while (true) {
        r = ceph_readdirplus_r(m_remote_mount, entry.dirp, &de, &stx,
                               CEPH_STATX_MODE, AT_STATX_DONT_SYNC | AT_SYMLINK_NOFOLLOW, NULL);
        if (r < 0) {
          derr << ": failed to read remote directory=" << entry.epath << dendl;
          break;
        }
        if (r == 0) {
          break;
        }
        auto d_name = std::string(de.d_name);
        if (d_name != "." && d_name != "..") {
          e_name = d_name;
          break;
        }
      }
      if (r == 0) {
        r = ceph_unlinkat(m_remote_mount, fh.r_fd_dir_root, entry.epath.c_str(), AT_REMOVEDIR);
        if (r < 0) {
          derr << ": failed to remove remote directory=" << entry.epath << ": "
               << cpp_strerror(r) << dendl;
          break;
        }
        dout(10) << ": done for remote directory=" << entry.epath << dendl;
        if (ceph_closedir(m_remote_mount, entry.dirp) < 0) {
          derr << ": failed to close remote directory=" << entry.epath << dendl;
        }
        rm_stack.pop();
        continue;
      }
      if (r < 0) {
        break;
      }
      // push the child: directories carry an open stream, files do not
      auto epath = entry_path(entry.epath, e_name);
      if (S_ISDIR(stx.stx_mode)) {
        ceph_dir_result *dirp;
        r = opendirat(m_remote_mount, fh.r_fd_dir_root, epath, AT_SYMLINK_NOFOLLOW,
                      &dirp);
        if (r < 0) {
          derr << ": failed to open remote directory=" << epath << ": "
               << cpp_strerror(r) << dendl;
          break;
        }
        rm_stack.emplace(SyncEntry(epath, dirp, stx));
      } else {
        rm_stack.emplace(SyncEntry(epath, stx));
      }
    } else {
      r = ceph_unlinkat(m_remote_mount, fh.r_fd_dir_root, entry.epath.c_str(), 0);
      if (r < 0) {
        derr << ": failed to remove remote directory=" << entry.epath << ": "
             << cpp_strerror(r) << dendl;
        break;
      }
      dout(10) << ": done for remote file=" << entry.epath << dendl;
      rm_stack.pop();
    }
  }
  // drain the stack on early exit, closing any open directory streams
  while (!rm_stack.empty()) {
    auto &entry = rm_stack.top();
    if (entry.is_directory()) {
      dout(20) << ": closing remote directory=" << entry.epath << dendl;
      if (ceph_closedir(m_remote_mount, entry.dirp) < 0) {
        derr << ": failed to close remote directory=" << entry.epath << dendl;
      }
    }
    rm_stack.pop();
  }
  return r;
}
// Decide whether an entry needs its data and/or attributes synced by
// comparing the current snapshot's statx `cstx` against the same entry
// in the previous snapshot (fh.p_mnt/fh.p_fd). An entry missing from
// the previous snapshot, or one whose file type changed, needs both.
// Otherwise size/mtime changes imply data sync and ctime changes imply
// attr sync. Returns 0 or a negative errno.
int PeerReplayer::should_sync_entry(const std::string &epath, const struct ceph_statx &cstx,
                                    const FHandles &fh, bool *need_data_sync, bool *need_attr_sync) {
  dout(10) << ": epath=" << epath << dendl;
  *need_data_sync = false;
  *need_attr_sync = false;
  struct ceph_statx pstx;
  int r = ceph_statxat(fh.p_mnt, fh.p_fd, epath.c_str(), &pstx,
                       CEPH_STATX_MODE | CEPH_STATX_UID | CEPH_STATX_GID |
                       CEPH_STATX_SIZE | CEPH_STATX_CTIME | CEPH_STATX_MTIME,
                       AT_STATX_DONT_SYNC | AT_SYMLINK_NOFOLLOW);
  if (r < 0 && r != -ENOENT && r != -ENOTDIR) {
    derr << ": failed to stat prev entry= " << epath << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }
  if (r < 0) {
    // inode does not exist in prev snapshot or file type has changed
    // (file was S_IFREG earlier, S_IFDIR now).
    dout(5) << ": entry=" << epath << ", r=" << r << dendl;
    *need_data_sync = true;
    *need_attr_sync = true;
    return 0;
  }
  dout(10) << ": local cur statx: mode=" << cstx.stx_mode << ", uid=" << cstx.stx_uid
           << ", gid=" << cstx.stx_gid << ", size=" << cstx.stx_size << ", ctime="
           << cstx.stx_ctime << ", mtime=" << cstx.stx_mtime << dendl;
  dout(10) << ": local prev statx: mode=" << pstx.stx_mode << ", uid=" << pstx.stx_uid
           << ", gid=" << pstx.stx_gid << ", size=" << pstx.stx_size << ", ctime="
           << pstx.stx_ctime << ", mtime=" << pstx.stx_mtime << dendl;
  if ((cstx.stx_mode & S_IFMT) != (pstx.stx_mode & S_IFMT)) {
    dout(5) << ": entry=" << epath << " has mode mismatch" << dendl;
    *need_data_sync = true;
    *need_attr_sync = true;
  } else {
    *need_data_sync = (cstx.stx_size != pstx.stx_size) || (cstx.stx_mtime != pstx.stx_mtime);
    *need_attr_sync = (cstx.stx_ctime != pstx.stx_ctime);
  }
  return 0;
}
// Walk the "previous" snapshot (or remote dir_root) directory @epath and
// delete, on the remote filesystem, entries that no longer exist in the
// current local snapshot (or whose inode type changed).
//
// Fix: the original returned directly from inside the scan loop on stat or
// cleanup errors, leaking the ceph_dir_result opened by opendirat(). All
// error paths now break out of the loops so ceph_closedir() always runs.
int PeerReplayer::propagate_deleted_entries(const std::string &dir_root,
                                            const std::string &epath, const FHandles &fh) {
  dout(10) << ": dir_root=" << dir_root << ", epath=" << epath << dendl;

  ceph_dir_result *dirp;
  int r = opendirat(fh.p_mnt, fh.p_fd, epath, AT_SYMLINK_NOFOLLOW, &dirp);
  if (r < 0) {
    // symlinks and non-directories get their mode synced when the parent
    // directory is traversed -- nothing to do here.
    if (r == -ELOOP) {
      dout(5) << ": epath=" << epath << " is a symbolic link -- mode sync"
              << " done when traversing parent" << dendl;
      return 0;
    }
    if (r == -ENOTDIR) {
      dout(5) << ": epath=" << epath << " is not a directory -- mode sync"
              << " done when traversing parent" << dendl;
      return 0;
    }
    if (r == -ENOENT) {
      dout(5) << ": epath=" << epath << " missing in previous-snap/remote dir-root"
              << dendl;
    }
    return r;
  }

  struct dirent *dire = (struct dirent *)alloca(512 * sizeof(struct dirent));
  while (true) {
    if (should_backoff(dir_root, &r)) {
      dout(0) << ": backing off r=" << r << dendl;
      break;
    }

    int len = ceph_getdents(fh.p_mnt, dirp, (char *)dire, 512);
    if (len < 0) {
      derr << ": failed to read directory entries: " << cpp_strerror(len) << dendl;
      r = len;
      // flip errno to signal that we got an err (possible the
      // snapshot getting deleted in midst).
      if (r == -ENOENT) {
        r = -EINVAL;
      }
      break;
    }
    if (len == 0) {
      dout(10) << ": reached EOD" << dendl;
      break;
    }

    int nr = len / sizeof(struct dirent);
    for (int i = 0; i < nr; ++i) {
      if (should_backoff(dir_root, &r)) {
        dout(0) << ": backing off r=" << r << dendl;
        break;
      }
      std::string d_name = std::string(dire[i].d_name);
      if (d_name == "." || d_name == "..") {
        continue;
      }

      struct ceph_statx pstx;
      auto dpath = entry_path(epath, d_name);
      r = ceph_statxat(fh.p_mnt, fh.p_fd, dpath.c_str(), &pstx,
                       CEPH_STATX_MODE, AT_STATX_DONT_SYNC | AT_SYMLINK_NOFOLLOW);
      if (r < 0) {
        derr << ": failed to stat (prev) directory=" << dpath << ": "
             << cpp_strerror(r) << dendl;
        // flip errno to signal that we got an err (possible the
        // snapshot getting deleted in midst).
        if (r == -ENOENT) {
          r = -EINVAL;
        }
        break;  // was: return r -- leaked dirp
      }

      struct ceph_statx cstx;
      r = ceph_statxat(m_local_mount, fh.c_fd, dpath.c_str(), &cstx,
                       CEPH_STATX_MODE, AT_STATX_DONT_SYNC | AT_SYMLINK_NOFOLLOW);
      if (r < 0 && r != -ENOENT) {
        derr << ": failed to stat local (cur) directory=" << dpath << ": "
             << cpp_strerror(r) << dendl;
        break;  // was: return r -- leaked dirp
      }

      bool purge_remote = true;
      if (r == 0) {
        // directory entry present in both snapshots -- check inode
        // type
        if ((pstx.stx_mode & S_IFMT) == (cstx.stx_mode & S_IFMT)) {
          dout(5) << ": mode matches for entry=" << d_name << dendl;
          purge_remote = false;
        } else {
          dout(5) << ": mode mismatch for entry=" << d_name << dendl;
        }
      } else {
        dout(5) << ": entry=" << d_name << " missing in current snapshot" << dendl;
      }

      if (purge_remote) {
        dout(5) << ": purging remote entry=" << dpath << dendl;
        if (S_ISDIR(pstx.stx_mode)) {
          r = cleanup_remote_dir(dir_root, dpath, fh);
        } else {
          r = ceph_unlinkat(m_remote_mount, fh.r_fd_dir_root, dpath.c_str(), 0);
        }
        if (r < 0 && r != -ENOENT) {
          derr << ": failed to cleanup remote entry=" << d_name << ": "
               << cpp_strerror(r) << dendl;
          break;  // was: return r -- leaked dirp
        }
      }
    }

    // abort the scan on a hard error; -ENOENT is tolerated above (entry
    // vanished / already purged) and must not terminate the walk.
    if (r < 0 && r != -ENOENT) {
      break;
    }
  }

  // always release the directory handle, including on error paths
  ceph_closedir(fh.p_mnt, dirp);
  return r;
}
// Open @dir_path on @mnt as a directory. When @snap_id is supplied, verify
// that the snapshot's id matches the expectation; a mismatch (snapshot
// deleted and recreated with the same name) closes the fd and returns
// -EINVAL. Returns the open fd on success, negative error otherwise.
int PeerReplayer::open_dir(MountRef mnt, const std::string &dir_path,
                           boost::optional<uint64_t> snap_id) {
  dout(20) << ": dir_path=" << dir_path << dendl;
  if (snap_id) {
    dout(20) << ": expected snapshot id=" << *snap_id << dendl;
  }

  int dir_fd = ceph_open(mnt, dir_path.c_str(), O_DIRECTORY | O_RDONLY, 0);
  if (dir_fd < 0) {
    derr << ": cannot open dir_path=" << dir_path << ": " << cpp_strerror(dir_fd)
         << dendl;
    return dir_fd;
  }

  if (snap_id) {
    snap_info si;
    int rv = ceph_get_snap_info(mnt, dir_path.c_str(), &si);
    if (rv < 0) {
      derr << ": failed to fetch snap_info for path=" << dir_path
           << ": " << cpp_strerror(rv) << dendl;
      ceph_close(mnt, dir_fd);
      return rv;
    }
    if (si.id != *snap_id) {
      dout(5) << ": got mismatching snapshot id for path=" << dir_path << " (" << si.id
              << " vs " << *snap_id << ") -- possible recreate" << dendl;
      ceph_close(mnt, dir_fd);
      return -EINVAL;
    }
  }

  return dir_fd;
}
// Open the file-descriptor "triplet" (see FHandles) used to synchronize
// snapshot @current of @dir_root:
//   - c_fd: the current local snapshot directory,
//   - p_fd/p_mnt: the previous local snapshot when @prev is usable
//     (local incremental compare), otherwise dir_root on the remote
//     (remote incremental compare),
//   - r_fd_dir_root: the already-registered remote dir_root fd.
// On failure every fd opened so far is closed and a negative error is
// returned.
int PeerReplayer::pre_sync_check_and_open_handles(
    const std::string &dir_root,
    const Snapshot &current, boost::optional<Snapshot> prev,
    FHandles *fh) {
  dout(20) << ": dir_root=" << dir_root << ", current=" << current << dendl;
  if (prev) {
    dout(20) << ": prev=" << prev << dendl;
  }

  // open (and id-validate) the current snapshot
  auto cur_snap_path = snapshot_path(m_cct, dir_root, current.first);
  auto fd = open_dir(m_local_mount, cur_snap_path, current.second);
  if (fd < 0) {
    return fd;
  }

  // current snapshot file descriptor
  fh->c_fd = fd;

  MountRef mnt;
  if (prev) {
    mnt = m_local_mount;
    auto prev_snap_path = snapshot_path(m_cct, dir_root, (*prev).first);
    fd = open_dir(mnt, prev_snap_path, (*prev).second);
  } else {
    mnt = m_remote_mount;
    fd = open_dir(mnt, dir_root, boost::none);
  }

  if (fd < 0) {
    if (!prev || fd != -ENOENT) {
      ceph_close(m_local_mount, fh->c_fd);
      return fd;
    }

    // ENOENT of previous snap -- fall back to comparing against the
    // remote dir_root instead of the (missing) local previous snapshot.
    dout(5) << ": previous snapshot=" << *prev << " missing" << dendl;
    mnt = m_remote_mount;
    fd = open_dir(mnt, dir_root, boost::none);
    if (fd < 0) {
      ceph_close(m_local_mount, fh->c_fd);
      return fd;
    }
  }

  // "previous" snapshot or dir_root file descriptor
  fh->p_fd = fd;
  fh->p_mnt = mnt;

  {
    // the remote dir_root fd was opened when the directory was registered;
    // it is owned by m_registered and closed in unregister_directory().
    std::scoped_lock locker(m_lock);
    auto it = m_registered.find(dir_root);
    ceph_assert(it != m_registered.end());
    fh->r_fd_dir_root = it->second.fd;
  }

  dout(5) << ": using " << ((fh->p_mnt == m_local_mount) ? "local (previous) snapshot" : "remote dir_root")
          << " for incremental transfer" << dendl;
  return 0;
}
// Sync the mode bits of the remote @path with those of the local @path.
// Only CEPH_STATX_MODE is propagated here (not uid/gid).
// Returns 0 on success, negative error otherwise.
int PeerReplayer::sync_perms(const std::string& path) {
  struct ceph_statx tstx;
  int rv = ceph_statx(m_local_mount, path.c_str(), &tstx, CEPH_STATX_MODE,
                      AT_STATX_DONT_SYNC | AT_SYMLINK_NOFOLLOW);
  if (rv < 0) {
    derr << ": failed to fetch stat for local path: "
         << cpp_strerror(rv) << dendl;
    return rv;
  }

  rv = ceph_chmod(m_remote_mount, path.c_str(), tstx.stx_mode);
  if (rv < 0) {
    derr << ": failed to set mode for remote path: "
         << cpp_strerror(rv) << dendl;
    return rv;
  }

  return 0;
}
// Close the per-sync file descriptors opened by
// pre_sync_check_and_open_handles().
void PeerReplayer::post_sync_close_handles(const FHandles &fh) {
  dout(20) << dendl;

  // @FHandles.r_fd_dir_root is closed in @unregister_directory since
  // it's used to acquire an exclusive lock on remote dir_root.
  ceph_close(m_local_mount, fh.c_fd);
  ceph_close(fh.p_mnt, fh.p_fd);
}
// Synchronize snapshot @current of @dir_root to the remote filesystem via
// an iterative (stack-based) depth-first traversal of the local snapshot.
// When @prev is set, the previous snapshot is used for incremental
// comparison; otherwise entries are compared against the remote dir_root.
// Returns 0 on success or the first hard error encountered.
int PeerReplayer::do_synchronize(const std::string &dir_root, const Snapshot &current,
                                 boost::optional<Snapshot> prev) {
  dout(20) << ": dir_root=" << dir_root << ", current=" << current << dendl;
  if (prev) {
    dout(20) << ": incremental sync check from prev=" << prev << dendl;
  }

  FHandles fh;
  int r = pre_sync_check_and_open_handles(dir_root, current, prev, &fh);
  if (r < 0) {
    dout(5) << ": cannot proceeed with sync: " << cpp_strerror(r) << dendl;
    return r;
  }

  // close c_fd/p_fd on every exit path from this function
  BOOST_SCOPE_EXIT_ALL( (this)(&fh) ) {
    post_sync_close_handles(fh);
  };

  // record that we are going to "dirty" the data under this
  // directory root
  auto snap_id_str{stringify(current.second)};
  r = ceph_fsetxattr(m_remote_mount, fh.r_fd_dir_root, "ceph.mirror.dirty_snap_id",
                     snap_id_str.c_str(), snap_id_str.size(), 0);
  if (r < 0) {
    derr << ": error setting \"ceph.mirror.dirty_snap_id\" on dir_root=" << dir_root
         << ": " << cpp_strerror(r) << dendl;
    return r;
  }

  struct ceph_statx tstx;
  r = ceph_fstatx(m_local_mount, fh.c_fd, &tstx,
                  CEPH_STATX_MODE | CEPH_STATX_UID | CEPH_STATX_GID |
                  CEPH_STATX_SIZE | CEPH_STATX_ATIME | CEPH_STATX_MTIME,
                  AT_STATX_DONT_SYNC | AT_SYMLINK_NOFOLLOW);
  if (r < 0) {
    derr << ": failed to stat snap=" << current.first << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }

  ceph_dir_result *tdirp;
  r = ceph_fdopendir(m_local_mount, fh.c_fd, &tdirp);
  if (r < 0) {
    derr << ": failed to open local snap=" << current.first << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }

  // seed the traversal with the snapshot root ("."); directory entries
  // carry an open ceph_dir_result so traversal can resume mid-directory.
  std::stack<SyncEntry> sync_stack;
  sync_stack.emplace(SyncEntry(".", tdirp, tstx));
  while (!sync_stack.empty()) {
    if (should_backoff(dir_root, &r)) {
      dout(0) << ": backing off r=" << r << dendl;
      break;
    }

    dout(20) << ": " << sync_stack.size() << " entries in stack" << dendl;
    std::string e_name;
    auto &entry = sync_stack.top();
    dout(20) << ": top of stack path=" << entry.epath << dendl;
    if (entry.is_directory()) {
      // entry is a directory -- propagate deletes for missing entries
      // (and changed inode types) to the remote filesystem.
      if (!entry.needs_remote_sync()) {
        r = propagate_deleted_entries(dir_root, entry.epath, fh);
        if (r < 0 && r != -ENOENT) {
          derr << ": failed to propagate missing dirs: " << cpp_strerror(r) << dendl;
          break;
        }
        entry.set_remote_synced();
      }

      // fetch the next real child (skipping "." and "..") of this directory
      struct ceph_statx stx;
      struct dirent de;
      while (true) {
        r = ceph_readdirplus_r(m_local_mount, entry.dirp, &de, &stx,
                               CEPH_STATX_MODE | CEPH_STATX_UID | CEPH_STATX_GID |
                               CEPH_STATX_SIZE | CEPH_STATX_ATIME | CEPH_STATX_MTIME,
                               AT_STATX_DONT_SYNC | AT_SYMLINK_NOFOLLOW, NULL);
        if (r < 0) {
          derr << ": failed to local read directory=" << entry.epath << dendl;
          break;
        }
        if (r == 0) {
          break;
        }

        auto d_name = std::string(de.d_name);
        if (d_name != "." && d_name != "..") {
          e_name = d_name;
          break;
        }
      }

      // r == 0: directory exhausted -- close it and pop
      if (r == 0) {
        dout(10) << ": done for directory=" << entry.epath << dendl;
        if (ceph_closedir(m_local_mount, entry.dirp) < 0) {
          derr << ": failed to close local directory=" << entry.epath << dendl;
        }
        sync_stack.pop();
        continue;
      }
      if (r < 0) {
        break;
      }

      auto epath = entry_path(entry.epath, e_name);
      if (S_ISDIR(stx.stx_mode)) {
        // create the directory remotely, then descend into it
        r = remote_mkdir(epath, stx, fh);
        if (r < 0) {
          break;
        }
        ceph_dir_result *dirp;
        r = opendirat(m_local_mount, fh.c_fd, epath, AT_SYMLINK_NOFOLLOW, &dirp);
        if (r < 0) {
          derr << ": failed to open local directory=" << epath << ": "
               << cpp_strerror(r) << dendl;
          break;
        }
        sync_stack.emplace(SyncEntry(epath, dirp, stx));
      } else {
        sync_stack.emplace(SyncEntry(epath, stx));
      }
    } else {
      // non-directory entry: figure out whether data and/or attributes
      // changed relative to the "previous" view and sync accordingly
      bool need_data_sync = true;
      bool need_attr_sync = true;
      r = should_sync_entry(entry.epath, entry.stx, fh,
                            &need_data_sync, &need_attr_sync);
      if (r < 0) {
        break;
      }

      dout(5) << ": entry=" << entry.epath << ", data_sync=" << need_data_sync
              << ", attr_sync=" << need_attr_sync << dendl;
      if (need_data_sync || need_attr_sync) {
        r = remote_file_op(dir_root, entry.epath, entry.stx, fh, need_data_sync,
                           need_attr_sync);
        if (r < 0) {
          break;
        }
      }
      dout(10) << ": done for epath=" << entry.epath << dendl;
      sync_stack.pop();
    }
  }

  // drain the stack on early exit, closing any directory handles still open
  while (!sync_stack.empty()) {
    auto &entry = sync_stack.top();
    if (entry.is_directory()) {
      dout(20) << ": closing local directory=" << entry.epath << dendl;
      if (ceph_closedir(m_local_mount, entry.dirp) < 0) {
        derr << ": failed to close local directory=" << entry.epath << dendl;
      }
    }
    sync_stack.pop();
  }

  return r;
}
// Synchronize snapshot @current of @dir_root, choosing between a local
// incremental sync (when the remote's "ceph.mirror.dirty_snap_id" xattr
// matches @prev or @current) and a remote-scan incremental sync, then
// create the matching snapshot on the remote tagged with the primary
// snap-id.
int PeerReplayer::synchronize(const std::string &dir_root, const Snapshot &current,
                              boost::optional<Snapshot> prev) {
  dout(20) << ": dir_root=" << dir_root << ", current=" << current << dendl;
  if (prev) {
    dout(20) << ": prev=" << prev << dendl;
  }

  // probe the xattr length first (nullptr/0 buffer)
  int r = ceph_getxattr(m_remote_mount, dir_root.c_str(), "ceph.mirror.dirty_snap_id", nullptr, 0);
  if (r < 0 && r != -ENODATA) {
    derr << ": failed to fetch primary_snap_id length from dir_root=" << dir_root
         << ": " << cpp_strerror(r) << dendl;
    return r;
  }

  // no xattr, can't determine which snap the data belongs to!
  if (r < 0) {
    dout(5) << ": missing \"ceph.mirror.dirty_snap_id\" xattr on remote -- using"
            << " incremental sync with remote scan" << dendl;
    r = do_synchronize(dir_root, current, boost::none);
  } else {
    size_t xlen = r;
    char *val = (char *)alloca(xlen+1);
    r = ceph_getxattr(m_remote_mount, dir_root.c_str(), "ceph.mirror.dirty_snap_id", (void*)val, xlen);
    if (r < 0) {
      derr << ": failed to fetch \"dirty_snap_id\" for dir_root: " << dir_root
           << ": " << cpp_strerror(r) << dendl;
      return r;
    }

    // xattr value is a decimal snap-id string (not NUL-terminated)
    val[xlen] = '\0';
    uint64_t dirty_snap_id = atoll(val);

    dout(20) << ": dirty_snap_id: " << dirty_snap_id << " vs (" << current.second
             << "," << (prev ? stringify((*prev).second) : "~") << ")" << dendl;
    if (prev && (dirty_snap_id == (*prev).second || dirty_snap_id == current.second)) {
      dout(5) << ": match -- using incremental sync with local scan" << dendl;
      r = do_synchronize(dir_root, current, prev);
    } else {
      dout(5) << ": mismatch -- using incremental sync with remote scan" << dendl;
      r = do_synchronize(dir_root, current, boost::none);
    }
  }

  // snap sync failed -- bail out!
  if (r < 0) {
    return r;
  }

  // tag the remote snapshot with the primary snap-id so future syncs can
  // map it back to the local snapshot it mirrors
  auto cur_snap_id_str{stringify(current.second)};
  snap_metadata snap_meta[] = {{PRIMARY_SNAP_ID_KEY.c_str(), cur_snap_id_str.c_str()}};
  r = ceph_mksnap(m_remote_mount, dir_root.c_str(), current.first.c_str(), 0755,
                  snap_meta, sizeof(snap_meta)/sizeof(snap_metadata));
  if (r < 0) {
    derr << ": failed to snap remote directory dir_root=" << dir_root
         << ": " << cpp_strerror(r) << dendl;
  }

  return r;
}
// Reconcile snapshots of @dir_root between the local and remote
// filesystems: propagate snapshot deletes/renames inferred from the two
// snap maps, then synchronize (up to the configured per-cycle limit)
// local snapshots newer than the last one already mirrored.
int PeerReplayer::do_sync_snaps(const std::string &dir_root) {
  dout(20) << ": dir_root=" << dir_root << dendl;

  // both maps are keyed by *primary* snap-id (remote map entries carry the
  // id recorded in PRIMARY_SNAP_ID_KEY at mksnap time)
  std::map<uint64_t, std::string> local_snap_map;
  std::map<uint64_t, std::string> remote_snap_map;

  int r = build_snap_map(dir_root, &local_snap_map);
  if (r < 0) {
    derr << ": failed to build local snap map" << dendl;
    return r;
  }

  r = build_snap_map(dir_root, &remote_snap_map, true);
  if (r < 0) {
    derr << ": failed to build remote snap map" << dendl;
    return r;
  }

  // infer deleted and renamed snapshots from local and remote
  // snap maps
  std::set<std::string> snaps_deleted;
  std::set<std::pair<std::string,std::string>> snaps_renamed;
  for (auto &[primary_snap_id, snap_name] : remote_snap_map) {
    auto it = local_snap_map.find(primary_snap_id);
    if (it == local_snap_map.end()) {
      snaps_deleted.emplace(snap_name);
    } else if (it->second != snap_name) {
      snaps_renamed.emplace(std::make_pair(snap_name, it->second));
    }
  }

  r = propagate_snap_deletes(dir_root, snaps_deleted);
  if (r < 0) {
    derr << ": failed to propgate deleted snapshots" << dendl;
    return r;
  }
  r = propagate_snap_renames(dir_root, snaps_renamed);
  if (r < 0) {
    derr << ": failed to propgate renamed snapshots" << dendl;
    return r;
  }

  // start mirroring snapshots from the last snap-id synchronized
  uint64_t last_snap_id = 0;
  std::string last_snap_name;
  if (!remote_snap_map.empty()) {
    auto last = remote_snap_map.rbegin();
    last_snap_id = last->first;
    last_snap_name = last->second;
    set_last_synced_snap(dir_root, last_snap_id, last_snap_name);
  }

  dout(5) << ": last snap-id transferred=" << last_snap_id << dendl;
  auto it = local_snap_map.upper_bound(last_snap_id);
  if (it == local_snap_map.end()) {
    dout(20) << ": nothing to synchronize" << dendl;
    return 0;
  }

  auto snaps_per_cycle = g_ceph_context->_conf.get_val<uint64_t>(
    "cephfs_mirror_max_snapshot_sync_per_cycle");

  dout(10) << ": synchronizing from snap-id=" << it->first << dendl;
  for (; it != local_snap_map.end(); ++it) {
    set_current_syncing_snap(dir_root, it->first, it->second);
    auto start = clock::now();
    // Snapshot is (name, id); use the previous snapshot for incremental
    // transfer when one has been synced already
    boost::optional<Snapshot> prev = boost::none;
    if (last_snap_id != 0) {
      prev = std::make_pair(last_snap_name, last_snap_id);
    }
    r = synchronize(dir_root, std::make_pair(it->second, it->first), prev);
    if (r < 0) {
      derr << ": failed to synchronize dir_root=" << dir_root
           << ", snapshot=" << it->second << dendl;
      clear_current_syncing_snap(dir_root);
      return r;
    }
    std::chrono::duration<double> duration = clock::now() - start;
    set_last_synced_stat(dir_root, it->first, it->second, duration.count());
    // NOTE(review): a configured value of 0 would underflow this uint64_t
    // and effectively disable the cap -- presumably the config option
    // enforces a minimum of 1; confirm.
    if (--snaps_per_cycle == 0) {
      break;
    }

    last_snap_name = it->second;
    last_snap_id = it->first;
  }

  return 0;
}
// Run a full snapshot-sync pass for @dir_root with m_lock dropped (the
// caller passes its unique_lock), then update the failure/recovery
// counters under the reacquired lock.
void PeerReplayer::sync_snaps(const std::string &dir_root,
                              std::unique_lock<ceph::mutex> &locker) {
  dout(20) << ": dir_root=" << dir_root << dendl;

  // the actual sync is long-running -- do it unlocked
  locker.unlock();
  const int rv = do_sync_snaps(dir_root);
  if (rv < 0) {
    derr << ": failed to sync snapshots for dir_root=" << dir_root << dendl;
  }
  locker.lock();

  // bookkeeping requires m_lock
  (rv < 0) ? _inc_failed_count(dir_root)
           : _reset_failed_count(dir_root);
}
// Main loop of a snapshot replayer thread: wake up every second (or on
// shutdown), and -- once per configured scan interval -- pick a directory,
// register it, sync its permissions and snapshots, then unregister it.
void PeerReplayer::run(SnapshotReplayerThread *replayer) {
  dout(10) << ": snapshot replayer=" << replayer << dendl;

  time last_directory_scan = clock::zero();
  auto scan_interval = g_ceph_context->_conf.get_val<uint64_t>(
    "cephfs_mirror_directory_scan_interval");

  std::unique_lock locker(m_lock);
  while (true) {
    // do not check if client is blocklisted under lock
    m_cond.wait_for(locker, 1s, [this]{return is_stopping();});
    if (is_stopping()) {
      dout(5) << ": exiting" << dendl;
      break;
    }

    // is_blocklisted() is queried with m_lock released; note that a break
    // here leaves the loop with the lock dropped (unique_lock handles it)
    locker.unlock();
    if (m_fs_mirror->is_blocklisted()) {
      dout(5) << ": exiting as client is blocklisted" << dendl;
      break;
    }
    locker.lock();

    auto now = clock::now();
    std::chrono::duration<double> timo = now - last_directory_scan;
    if (timo.count() >= scan_interval && m_directories.size()) {
      dout(20) << ": trying to pick from " << m_directories.size() << " directories" << dendl;
      auto dir_root = pick_directory();
      if (dir_root) {
        dout(5) << ": picked dir_root=" << *dir_root << dendl;
        int r = register_directory(*dir_root, replayer);
        if (r == 0) {
          // sync mode first; only attempt snapshot sync if that worked
          // (sync_snaps temporarily releases the lock)
          r = sync_perms(*dir_root);
          if (r < 0) {
            _inc_failed_count(*dir_root);
          } else {
            sync_snaps(*dir_root, locker);
          }
          unregister_directory(*dir_root);
        }
      }

      last_directory_scan = now;
    }
  }
}
// Dump per-directory replication status (admin socket "peer status"):
// state (failed/idle/syncing), current and last synced snapshots, and
// synced/deleted/renamed snapshot counters.
void PeerReplayer::peer_status(Formatter *f) {
  std::scoped_lock locker(m_lock);
  f->open_object_section("stats");
  for (auto &[dir_root, sync_stat] : m_snap_sync_stats) {
    f->open_object_section(dir_root);
    if (sync_stat.failed) {
      f->dump_string("state", "failed");
    } else if (!sync_stat.current_syncing_snap) {
      f->dump_string("state", "idle");
    } else {
      f->dump_string("state", "syncing");
      // NOTE(review): "current_sycning_snap" is a typo'd JSON key
      // ("syncing"); external consumers may already parse it, so renaming
      // is a compatibility-affecting change -- confirm before fixing.
      f->open_object_section("current_sycning_snap");
      f->dump_unsigned("id", (*sync_stat.current_syncing_snap).first);
      f->dump_string("name", (*sync_stat.current_syncing_snap).second);
      f->close_section();
    }
    if (sync_stat.last_synced_snap) {
      f->open_object_section("last_synced_snap");
      f->dump_unsigned("id", (*sync_stat.last_synced_snap).first);
      f->dump_string("name", (*sync_stat.last_synced_snap).second);
      if (sync_stat.last_sync_duration) {
        f->dump_float("sync_duration", *sync_stat.last_sync_duration);
        f->dump_stream("sync_time_stamp") << sync_stat.last_synced;
      }
      f->close_section();
    }
    f->dump_unsigned("snaps_synced", sync_stat.synced_snap_count);
    f->dump_unsigned("snaps_deleted", sync_stat.deleted_snap_count);
    f->dump_unsigned("snaps_renamed", sync_stat.renamed_snap_count);
    f->close_section(); // dir_root
  }
  f->close_section(); // stats
}
// Reopen log files of the remote cluster connection (e.g. after log
// rotation).
void PeerReplayer::reopen_logs() {
  std::scoped_lock locker(m_lock);

  if (m_remote_cluster) {
    // cct() returns an opaque handle; cast back to CephContext to reopen
    reinterpret_cast<CephContext *>(m_remote_cluster->cct())->reopen_logs();
  }
}
} // namespace mirror
} // namespace cephfs
| 50,497 | 30.920354 | 107 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/PeerReplayer.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_PEER_REPLAYER_H
#define CEPHFS_MIRROR_PEER_REPLAYER_H
#include "common/Formatter.h"
#include "common/Thread.h"
#include "mds/FSMap.h"
#include "ServiceDaemon.h"
#include "Types.h"
namespace cephfs {
namespace mirror {
class FSMirror;
class PeerReplayerAdminSocketHook;
// Replays snapshots of configured directory roots from the local (primary)
// filesystem to a remote (peer) filesystem.
class PeerReplayer {
public:
  PeerReplayer(CephContext *cct, FSMirror *fs_mirror,
               RadosRef local_cluster, const Filesystem &filesystem,
               const Peer &peer, const std::set<std::string, std::less<>> &directories,
               MountRef mount, ServiceDaemon *service_daemon);
  ~PeerReplayer();

  // initialize replayer for a peer
  int init();

  // shutdown replayer for a peer
  void shutdown();

  // add a directory to mirror queue
  void add_directory(std::string_view dir_root);

  // remove a directory from queue
  void remove_directory(std::string_view dir_root);

  // admin socket helpers
  void peer_status(Formatter *f);

  // reopen logs
  void reopen_logs();

private:
  // snap-metadata key recording which primary snap-id a remote snapshot
  // was synchronized from
  inline static const std::string PRIMARY_SNAP_ID_KEY = "primary_snap_id";

  // service-daemon attribute keys for failure/recovery counters
  inline static const std::string SERVICE_DAEMON_FAILED_DIR_COUNT_KEY = "failure_count";
  inline static const std::string SERVICE_DAEMON_RECOVERED_DIR_COUNT_KEY = "recovery_count";

  // (snapshot name, snapshot id)
  using Snapshot = std::pair<std::string, uint64_t>;

  // file descriptor "triplet" for synchronizing a snapshot
  // w/ an added MountRef for accessing "previous" snapshot.
  struct FHandles {
    // open file descriptor on the snap directory for snapshot
    // currently being synchronized. Always use this fd with
    // @m_local_mount.
    int c_fd;

    // open file descriptor on the "previous" snapshot or on
    // dir_root on remote filesystem (based on if the snapshot
    // can be used for incremental transfer). Always use this
    // fd with p_mnt which either points to @m_local_mount (
    // for local incremental comparison) or @m_remote_mount (
    // for remote incremental comparison).
    int p_fd;
    MountRef p_mnt;

    // open file descriptor on dir_root on remote filesystem.
    // Always use this fd with @m_remote_mount.
    int r_fd_dir_root;
  };

  // true once shutdown has been requested
  bool is_stopping() {
    return m_stopping;
  }

  struct Replayer;

  // worker thread; simply drives PeerReplayer::run()
  class SnapshotReplayerThread : public Thread {
  public:
    SnapshotReplayerThread(PeerReplayer *peer_replayer)
      : m_peer_replayer(peer_replayer) {
    }

    void *entry() override {
      m_peer_replayer->run(this);
      return 0;
    }

  private:
    PeerReplayer *m_peer_replayer;
  };

  // per-directory registration state while a sync is in flight
  struct DirRegistry {
    int fd;                            // remote dir_root fd (lock holder)
    bool canceled = false;             // sync should abort (-ECANCELED)
    SnapshotReplayerThread *replayer;  // thread owning this registration
  };

  // one entry (file or directory) on the DFS traversal stack during sync
  struct SyncEntry {
    std::string epath;
    ceph_dir_result *dirp; // valid for directories
    struct ceph_statx stx;
    // set by incremental sync _after_ ensuring missing entries
    // in the currently synced snapshot have been propagated to
    // the remote filesystem.
    bool remote_synced = false;

    SyncEntry(std::string_view path,
              const struct ceph_statx &stx)
      : epath(path),
        stx(stx) {
    }
    SyncEntry(std::string_view path,
              ceph_dir_result *dirp,
              const struct ceph_statx &stx)
      : epath(path),
        dirp(dirp),
        stx(stx) {
    }

    bool is_directory() const {
      return S_ISDIR(stx.stx_mode);
    }

    // NOTE(review): returns the remote_synced flag directly, so the name
    // reads inverted -- callers test !needs_remote_sync() before
    // propagating deletes (see do_synchronize()).
    bool needs_remote_sync() const {
      return remote_synced;
    }
    void set_remote_synced() {
      remote_synced = true;
    }
  };

  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;

  // stats sent to service daemon
  struct ServiceDaemonStats {
    uint64_t failed_dir_count = 0;
    uint64_t recovered_dir_count = 0;
  };

  // per-directory synchronization statistics (exposed via peer_status())
  struct SnapSyncStat {
    uint64_t nr_failures = 0; // number of consecutive failures
    boost::optional<time> last_failed; // last failed timestamp
    bool failed = false; // hit upper cap for consecutive failures
    boost::optional<std::pair<uint64_t, std::string>> last_synced_snap;
    boost::optional<std::pair<uint64_t, std::string>> current_syncing_snap;
    uint64_t synced_snap_count = 0;
    uint64_t deleted_snap_count = 0;
    uint64_t renamed_snap_count = 0;
    time last_synced = clock::zero();
    boost::optional<double> last_sync_duration;
  };

  // bump the consecutive-failure count for a directory; marks it "failed"
  // (and notifies the service daemon) once the configured cap is hit.
  // caller must hold m_lock.
  void _inc_failed_count(const std::string &dir_root) {
    auto max_failures = g_ceph_context->_conf.get_val<uint64_t>(
      "cephfs_mirror_max_consecutive_failures_per_directory");
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.last_failed = clock::now();
    if (++sync_stat.nr_failures >= max_failures && !sync_stat.failed) {
      sync_stat.failed = true;
      ++m_service_daemon_stats.failed_dir_count;
      m_service_daemon->add_or_update_peer_attribute(m_filesystem.fscid, m_peer,
                                                     SERVICE_DAEMON_FAILED_DIR_COUNT_KEY,
                                                     m_service_daemon_stats.failed_dir_count);
    }
  }
  // clear failure state after a successful sync; bumps the recovery
  // counter if the directory had been marked failed. caller must hold m_lock.
  void _reset_failed_count(const std::string &dir_root) {
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    if (sync_stat.failed) {
      ++m_service_daemon_stats.recovered_dir_count;
      m_service_daemon->add_or_update_peer_attribute(m_filesystem.fscid, m_peer,
                                                     SERVICE_DAEMON_RECOVERED_DIR_COUNT_KEY,
                                                     m_service_daemon_stats.recovered_dir_count);
    }
    sync_stat.nr_failures = 0;
    sync_stat.failed = false;
    sync_stat.last_failed = boost::none;
  }

  // record the last synchronized snapshot and clear the in-progress one.
  // caller must hold m_lock.
  void _set_last_synced_snap(const std::string &dir_root, uint64_t snap_id,
                             const std::string &snap_name) {
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.last_synced_snap = std::make_pair(snap_id, snap_name);
    sync_stat.current_syncing_snap = boost::none;
  }
  // locked wrapper around _set_last_synced_snap()
  void set_last_synced_snap(const std::string &dir_root, uint64_t snap_id,
                            const std::string &snap_name) {
    std::scoped_lock locker(m_lock);
    _set_last_synced_snap(dir_root, snap_id, snap_name);
  }
  // mark which snapshot is currently being synchronized
  void set_current_syncing_snap(const std::string &dir_root, uint64_t snap_id,
                                const std::string &snap_name) {
    std::scoped_lock locker(m_lock);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.current_syncing_snap = std::make_pair(snap_id, snap_name);
  }
  void clear_current_syncing_snap(const std::string &dir_root) {
    std::scoped_lock locker(m_lock);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.current_syncing_snap = boost::none;
  }
  void inc_deleted_snap(const std::string &dir_root) {
    std::scoped_lock locker(m_lock);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    ++sync_stat.deleted_snap_count;
  }
  void inc_renamed_snap(const std::string &dir_root) {
    std::scoped_lock locker(m_lock);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    ++sync_stat.renamed_snap_count;
  }
  // record a completed snapshot sync along with its wall-clock duration
  void set_last_synced_stat(const std::string &dir_root, uint64_t snap_id,
                            const std::string &snap_name, double duration) {
    std::scoped_lock locker(m_lock);
    _set_last_synced_snap(dir_root, snap_id, snap_name);
    auto &sync_stat = m_snap_sync_stats.at(dir_root);
    sync_stat.last_synced = clock::now();
    sync_stat.last_sync_duration = duration;
    ++sync_stat.synced_snap_count;
  }

  // returns true (with *retval set) if the in-flight sync for dir_root
  // should abort: client blocklisted, replayer shutting down, or the
  // directory's registration canceled.
  bool should_backoff(const std::string &dir_root, int *retval) {
    if (m_fs_mirror->is_blocklisted()) {
      *retval = -EBLOCKLISTED;
      return true;
    }

    std::scoped_lock locker(m_lock);
    if (is_stopping()) {
      // ceph defines EBLOCKLISTED to ESHUTDOWN (108). so use
      // EINPROGRESS to identify shutdown.
      *retval = -EINPROGRESS;
      return true;
    }
    auto &dr = m_registered.at(dir_root);
    if (dr.canceled) {
      *retval = -ECANCELED;
      return true;
    }

    *retval = 0;
    return false;
  }

  typedef std::vector<std::unique_ptr<SnapshotReplayerThread>> SnapshotReplayers;

  CephContext *m_cct;
  FSMirror *m_fs_mirror;
  RadosRef m_local_cluster;
  Filesystem m_filesystem;
  Peer m_peer;
  // probably need to be encapsulated when supporting cancelations
  std::map<std::string, DirRegistry> m_registered;
  std::vector<std::string> m_directories;
  std::map<std::string, SnapSyncStat> m_snap_sync_stats;
  MountRef m_local_mount;
  ServiceDaemon *m_service_daemon;
  PeerReplayerAdminSocketHook *m_asok_hook = nullptr;

  // m_lock guards the maps/flags above and the stats helpers
  ceph::mutex m_lock;
  ceph::condition_variable m_cond;
  RadosRef m_remote_cluster;
  MountRef m_remote_mount;
  bool m_stopping = false;
  SnapshotReplayers m_replayers;

  ServiceDaemonStats m_service_daemon_stats;

  // replayer thread main loop
  void run(SnapshotReplayerThread *replayer);

  boost::optional<std::string> pick_directory();
  int register_directory(const std::string &dir_root, SnapshotReplayerThread *replayer);
  void unregister_directory(const std::string &dir_root);
  int try_lock_directory(const std::string &dir_root, SnapshotReplayerThread *replayer,
                         DirRegistry *registry);
  void unlock_directory(const std::string &dir_root, const DirRegistry &registry);
  void sync_snaps(const std::string &dir_root, std::unique_lock<ceph::mutex> &locker);

  // build a (primary snap-id -> snap name) map for dir_root
  int build_snap_map(const std::string &dir_root, std::map<uint64_t, std::string> *snap_map,
                     bool is_remote=false);

  int propagate_snap_deletes(const std::string &dir_root, const std::set<std::string> &snaps);
  int propagate_snap_renames(const std::string &dir_root,
                             const std::set<std::pair<std::string,std::string>> &snaps);
  int propagate_deleted_entries(const std::string &dir_root, const std::string &epath,
                                const FHandles &fh);
  int cleanup_remote_dir(const std::string &dir_root, const std::string &epath,
                         const FHandles &fh);

  int should_sync_entry(const std::string &epath, const struct ceph_statx &cstx,
                        const FHandles &fh, bool *need_data_sync, bool *need_attr_sync);

  int open_dir(MountRef mnt, const std::string &dir_path, boost::optional<uint64_t> snap_id);
  int pre_sync_check_and_open_handles(const std::string &dir_root, const Snapshot &current,
                                      boost::optional<Snapshot> prev, FHandles *fh);
  void post_sync_close_handles(const FHandles &fh);

  int do_synchronize(const std::string &dir_root, const Snapshot &current,
                     boost::optional<Snapshot> prev);
  int synchronize(const std::string &dir_root, const Snapshot &current,
                  boost::optional<Snapshot> prev);
  int do_sync_snaps(const std::string &dir_root);

  int remote_mkdir(const std::string &epath, const struct ceph_statx &stx, const FHandles &fh);
  int remote_file_op(const std::string &dir_root, const std::string &epath, const struct ceph_statx &stx,
                     const FHandles &fh, bool need_data_sync, bool need_attr_sync);
  int copy_to_remote(const std::string &dir_root, const std::string &epath, const struct ceph_statx &stx,
                     const FHandles &fh);
  int sync_perms(const std::string& path);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_PEER_REPLAYER_H
| 11,372 | 34.429907 | 105 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/ServiceDaemon.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/debug.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "include/stringify.h"
#include "ServiceDaemon.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::ServiceDaemon: " << this << " " \
<< __func__
namespace cephfs {
namespace mirror {
namespace {
// Dumps a variant AttributeValue (bool / uint64_t / string) into a
// Formatter under the given attribute name; used by update_status().
struct AttributeDumpVisitor : public boost::static_visitor<void> {
  ceph::Formatter *f;
  std::string name;

  AttributeDumpVisitor(ceph::Formatter *f, std::string_view name)
    : f(f), name(name) {
  }

  void operator()(bool val) const {
    f->dump_bool(name.c_str(), val);
  }
  void operator()(uint64_t val) const {
    f->dump_unsigned(name.c_str(), val);
  }
  void operator()(const std::string &val) const {
    f->dump_string(name.c_str(), val);
  }
};
} // anonymous namespace
// Construct the service-daemon helper and start the timer used to
// coalesce status updates (see schedule_update_status()).
ServiceDaemon::ServiceDaemon(CephContext *cct, RadosRef rados)
  : m_cct(cct),
    m_rados(rados),
    m_timer(new SafeTimer(cct, m_timer_lock, true)) {
  m_timer->init();
}
// Cancel any pending status-update task, shut the timer down (both under
// the timer lock), then free it.
ServiceDaemon::~ServiceDaemon() {
  dout(10) << dendl;

  {
    std::scoped_lock timer_lock(m_timer_lock);
    if (m_timer_ctx != nullptr) {
      dout(5) << ": canceling timer task=" << m_timer_ctx << dendl;
      m_timer->cancel_event(m_timer_ctx);
    }
    m_timer->shutdown();
  }

  delete m_timer;
}
int ServiceDaemon::init() {
dout(20) << dendl;
std::string id = m_cct->_conf->name.get_id();
if (id.find(CEPHFS_MIRROR_AUTH_ID_PREFIX) == 0) {
id = id.substr(CEPHFS_MIRROR_AUTH_ID_PREFIX.size());
}
std::string instance_id = stringify(m_rados->get_instance_id());
std::map<std::string, std::string> service_metadata = {{"id", id},
{"instance_id", instance_id}};
int r = m_rados->service_daemon_register("cephfs-mirror", instance_id,
service_metadata);
if (r < 0) {
return r;
}
return 0;
}
// Start tracking a filesystem and schedule a status publish.
void ServiceDaemon::add_filesystem(fs_cluster_id_t fscid, std::string_view fs_name) {
  dout(10) << ": fscid=" << fscid << ", fs_name=" << fs_name << dendl;

  {
    std::scoped_lock locker(m_lock);
    m_filesystems.emplace(fscid, Filesystem(fs_name));
  }
  schedule_update_status();
}
// Stop tracking a filesystem and schedule a status publish.
void ServiceDaemon::remove_filesystem(fs_cluster_id_t fscid) {
  dout(10) << ": fscid=" << fscid << dendl;

  {
    std::scoped_lock locker(m_lock);
    m_filesystems.erase(fscid);
  }
  schedule_update_status();
}
// Attach a peer (with an empty attribute set) to a tracked filesystem.
// Unknown fscid is ignored; a status publish is scheduled only when the
// filesystem exists.
void ServiceDaemon::add_peer(fs_cluster_id_t fscid, const Peer &peer) {
  dout(10) << ": peer=" << peer << dendl;

  bool known_fs = false;
  {
    std::scoped_lock locker(m_lock);
    if (auto it = m_filesystems.find(fscid); it != m_filesystems.end()) {
      it->second.peer_attributes.emplace(peer, Attributes{});
      known_fs = true;
    }
  }
  if (known_fs) {
    schedule_update_status();
  }
}
// Detach a peer from a tracked filesystem. Unknown fscid is ignored; a
// status publish is scheduled only when the filesystem exists.
void ServiceDaemon::remove_peer(fs_cluster_id_t fscid, const Peer &peer) {
  dout(10) << ": peer=" << peer << dendl;

  bool known_fs = false;
  {
    std::scoped_lock locker(m_lock);
    if (auto it = m_filesystems.find(fscid); it != m_filesystems.end()) {
      it->second.peer_attributes.erase(peer);
      known_fs = true;
    }
  }
  if (known_fs) {
    schedule_update_status();
  }
}
// Upsert a status attribute for a filesystem.  Unknown filesystems are
// silently ignored.
void ServiceDaemon::add_or_update_fs_attribute(fs_cluster_id_t fscid, std::string_view key,
                                               AttributeValue value) {
  dout(10) << ": fscid=" << fscid << dendl;

  {
    std::scoped_lock guard(m_lock);
    auto it = m_filesystems.find(fscid);
    if (it == m_filesystems.end()) {
      return;
    }

    // overwrite-or-insert, equivalent to operator[] assignment
    it->second.fs_attributes.insert_or_assign(std::string(key), value);
  }
  schedule_update_status();
}
// Upsert a status attribute for a peer of a filesystem.  Unknown
// filesystems or peers are silently ignored.
void ServiceDaemon::add_or_update_peer_attribute(fs_cluster_id_t fscid, const Peer &peer,
                                                 std::string_view key, AttributeValue value) {
  dout(10) << ": fscid=" << fscid << dendl;

  {
    std::scoped_lock guard(m_lock);
    auto fs_it = m_filesystems.find(fscid);
    if (fs_it == m_filesystems.end()) {
      return;
    }

    auto &peers = fs_it->second.peer_attributes;
    auto peer_it = peers.find(peer);
    if (peer_it == peers.end()) {
      return;
    }

    // overwrite-or-insert, equivalent to operator[] assignment
    peer_it->second.insert_or_assign(std::string(key), value);
  }
  schedule_update_status();
}
// Coalesce status updates: arm a one-shot 1s timer that invokes
// update_status().  If an event is already pending this is a no-op, so
// bursts of attribute changes result in a single status push.
void ServiceDaemon::schedule_update_status() {
  dout(10) << dendl;

  std::scoped_lock timer_lock(m_timer_lock);
  if (m_timer_ctx != nullptr) {
    // an update is already scheduled
    return;
  }

  // NOTE: the lambda runs from the SafeTimer thread, which holds
  // m_timer_lock while dispatching events, so clearing m_timer_ctx
  // here is serialized with the check above.
  m_timer_ctx = new LambdaContext([this] {
      m_timer_ctx = nullptr;
      update_status();
    });
  m_timer->add_event_after(1, m_timer_ctx);
}
void ServiceDaemon::update_status() {
dout(20) << ": " << m_filesystems.size() << " filesystem(s)" << dendl;
ceph::JSONFormatter f;
{
std::scoped_lock locker(m_lock);
f.open_object_section("filesystems");
for (auto &[fscid, filesystem] : m_filesystems) {
f.open_object_section(stringify(fscid).c_str());
f.dump_string("name", filesystem.fs_name);
for (auto &[attr_name, attr_value] : filesystem.fs_attributes) {
AttributeDumpVisitor visitor(&f, attr_name);
boost::apply_visitor(visitor, attr_value);
}
f.open_object_section("peers");
for (auto &[peer, attributes] : filesystem.peer_attributes) {
f.open_object_section(peer.uuid);
f.dump_object("remote", peer.remote);
f.open_object_section("stats");
for (auto &[attr_name, attr_value] : attributes) {
AttributeDumpVisitor visitor(&f, attr_name);
boost::apply_visitor(visitor, attr_value);
}
f.close_section(); // stats
f.close_section(); // peer.uuid
}
f.close_section(); // peers
f.close_section(); // fscid
}
f.close_section(); // filesystems
}
std::stringstream ss;
f.flush(ss);
int r = m_rados->service_daemon_update_status({{"status_json", ss.str()}});
if (r < 0) {
derr << ": failed to update service daemon status: " << cpp_strerror(r)
<< dendl;
}
}
} // namespace mirror
} // namespace cephfs
| 6,223 | 26.539823 | 94 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/ServiceDaemon.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_SERVICE_DAEMON_H
#define CEPHFS_MIRROR_SERVICE_DAEMON_H
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "mds/FSMap.h"
#include "Types.h"
namespace cephfs {
namespace mirror {
// Publishes cephfs-mirror state to the cluster service map.  Mutators
// record per-filesystem / per-peer attributes under m_lock; the actual
// status push is coalesced via a one-shot timer (schedule_update_status).
class ServiceDaemon {
public:
  ServiceDaemon(CephContext *cct, RadosRef rados);
  ~ServiceDaemon();

  // register this daemon instance in the cluster service map
  int init();

  // track/untrack a mirrored filesystem
  void add_filesystem(fs_cluster_id_t fscid, std::string_view fs_name);
  void remove_filesystem(fs_cluster_id_t fscid);

  // track/untrack a peer under an already-added filesystem
  void add_peer(fs_cluster_id_t fscid, const Peer &peer);
  void remove_peer(fs_cluster_id_t fscid, const Peer &peer);

  // upsert a status attribute for a filesystem / one of its peers
  void add_or_update_fs_attribute(fs_cluster_id_t fscid, std::string_view key,
                                  AttributeValue value);
  void add_or_update_peer_attribute(fs_cluster_id_t fscid, const Peer &peer,
                                    std::string_view key, AttributeValue value);

private:
  // per-filesystem tracking record
  struct Filesystem {
    std::string fs_name;
    Attributes fs_attributes;                    // filesystem-level attributes
    std::map<Peer, Attributes> peer_attributes;  // per-peer attributes

    Filesystem(std::string_view fs_name)
      : fs_name(fs_name) {
    }
  };

  // auth-id prefix stripped from the client name when registering
  const std::string CEPHFS_MIRROR_AUTH_ID_PREFIX = "cephfs-mirror.";

  CephContext *m_cct;
  RadosRef m_rados;
  SafeTimer *m_timer;  // drives the delayed status update
  ceph::mutex m_timer_lock = ceph::make_mutex("cephfs::mirror::ServiceDaemon");

  ceph::mutex m_lock = ceph::make_mutex("cephfs::mirror::service_daemon");
  Context *m_timer_ctx = nullptr;  // pending update event (nullptr if none)
  std::map<fs_cluster_id_t, Filesystem> m_filesystems;  // guarded by m_lock

  void schedule_update_status();
  void update_status();
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_SERVICE_DAEMON_H
| 1,713 | 26.206349 | 80 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/Types.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Types.h"
namespace cephfs {
namespace mirror {
// Render a Filesystem as "{fscid=<id>, fs_name=<name>}" for logging.
std::ostream& operator<<(std::ostream& out, const Filesystem &filesystem) {
  return out << "{fscid=" << filesystem.fscid
             << ", fs_name=" << filesystem.fs_name << "}";
}
// Render a FilesystemSpec as "{filesystem=<fs>, pool_id=<id>}" for logging.
std::ostream& operator<<(std::ostream& out, const FilesystemSpec &spec) {
  return out << "{filesystem=" << spec.filesystem
             << ", pool_id=" << spec.pool_id << "}";
}
} // namespace mirror
} // namespace cephfs
| 555 | 24.272727 | 84 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/Types.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_TYPES_H
#define CEPHFS_MIRROR_TYPES_H
#include <set>
#include <iostream>
#include <string_view>
#include "include/rados/librados.hpp"
#include "include/cephfs/libcephfs.h"
#include "mds/mdstypes.h"
namespace cephfs {
namespace mirror {
static const std::string CEPHFS_MIRROR_OBJECT("cephfs_mirror");
typedef boost::variant<bool, uint64_t, std::string> AttributeValue;
typedef std::map<std::string, AttributeValue> Attributes;
// distinct filesystem identifier
// Distinct filesystem identifier: the cluster-assigned fscid plus the
// human-readable name.  Ordering compares fscid first, then name.
struct Filesystem {
  fs_cluster_id_t fscid;
  std::string fs_name;

  bool operator==(const Filesystem &rhs) const {
    return fscid == rhs.fscid && fs_name == rhs.fs_name;
  }

  bool operator!=(const Filesystem &rhs) const {
    return !(*this == rhs);
  }

  bool operator<(const Filesystem &rhs) const {
    if (fscid == rhs.fscid) {
      return fs_name < rhs.fs_name;
    }
    return fscid < rhs.fscid;
  }
};
// specification of a filesystem -- pool id the metadata pool id.
// Specification of a filesystem: its identifier plus the metadata pool
// id.  Ordering compares the filesystem first, then the pool id.
struct FilesystemSpec {
  FilesystemSpec() = default;
  FilesystemSpec(const Filesystem &filesystem, uint64_t pool_id)
    : filesystem(filesystem),
      pool_id(pool_id) {
  }
  FilesystemSpec(fs_cluster_id_t fscid, std::string_view fs_name, uint64_t pool_id)
    : filesystem(Filesystem{fscid, std::string(fs_name)}),
      pool_id(pool_id) {
  }

  Filesystem filesystem;
  // default-initialize: the defaulted constructor previously left
  // pool_id indeterminate, so comparing or copying a default-constructed
  // spec read an uninitialized value.
  uint64_t pool_id = 0;

  bool operator==(const FilesystemSpec &rhs) const {
    return (filesystem == rhs.filesystem &&
            pool_id == rhs.pool_id);
  }

  bool operator<(const FilesystemSpec &rhs) const {
    if (filesystem != rhs.filesystem) {
      return filesystem < rhs.filesystem;
    }
    return pool_id < rhs.pool_id;
  }
};
std::ostream& operator<<(std::ostream& out, const Filesystem &filesystem);
std::ostream& operator<<(std::ostream& out, const FilesystemSpec &spec);
typedef std::shared_ptr<librados::Rados> RadosRef;
typedef std::shared_ptr<librados::IoCtx> IoCtxRef;
// not a shared_ptr since the type is incomplete
typedef ceph_mount_info *MountRef;
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_TYPES_H
| 2,221 | 24.25 | 83 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/Utils.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_argparse.h"
#include "common/ceph_context.h"
#include "common/common_init.h"
#include "common/debug.h"
#include "common/errno.h"
#include "Utils.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::Utils " << __func__
namespace cephfs {
namespace mirror {
// Establish a librados connection to `cluster_name` as `client_name`.
// Non-empty `mon_host`/`cephx_key` override the monitor address and key
// (used when the peer cluster's conf file is not available locally).
// On success *cluster holds a connected handle; returns 0 or a negative
// errno.
int connect(std::string_view client_name, std::string_view cluster_name,
            RadosRef *cluster, std::string_view mon_host, std::string_view cephx_key,
            std::vector<const char *> args) {
  dout(20) << ": connecting to cluster=" << cluster_name << ", client=" << client_name
           << ", mon_host=" << mon_host << dendl;

  CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
  if (client_name.empty() || !iparams.name.from_str(client_name)) {
    derr << ": error initializing cluster handle for " << cluster_name << dendl;
    return -EINVAL;
  }

  CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY,
                                    CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS);
  if (mon_host.empty()) {
    cct->_conf->cluster = cluster_name;
  }

  int r = cct->_conf.parse_config_files(nullptr, nullptr, 0);
  if (r < 0 && r != -ENOENT) {
    derr << ": could not read ceph conf: " << ": " << cpp_strerror(r) << dendl;
    // release the context -- this error path previously leaked it
    cct->put();
    return r;
  }

  cct->_conf.parse_env(cct->get_module_type());

  if (!args.empty()) {
    r = cct->_conf.parse_argv(args);
    if (r < 0) {
      derr << ": could not parse command line args: " << cpp_strerror(r) << dendl;
      cct->put();
      return r;
    }
  }
  cct->_conf.parse_env(cct->get_module_type());

  if (!mon_host.empty()) {
    r = cct->_conf.set_val("mon_host", std::string(mon_host));
    if (r < 0) {
      derr << "failed to set mon_host config: " << cpp_strerror(r) << dendl;
      cct->put();
      return r;
    }
  }
  if (!cephx_key.empty()) {
    r = cct->_conf.set_val("key", std::string(cephx_key));
    if (r < 0) {
      derr << "failed to set key config: " << cpp_strerror(r) << dendl;
      cct->put();
      return r;
    }
  }

  dout(10) << ": using mon addr=" << cct->_conf.get_val<std::string>("mon_host") << dendl;

  cluster->reset(new librados::Rados());

  r = (*cluster)->init_with_context(cct);
  ceph_assert(r == 0);
  // the Rados handle holds its own reference now
  cct->put();

  r = (*cluster)->connect();
  if (r < 0) {
    derr << ": error connecting to " << cluster_name << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }

  dout(10) << ": connected to cluster=" << cluster_name << " using client="
           << client_name << dendl;

  return 0;
}
// Mount `filesystem` using an already-connected cluster handle.  When
// `cross_check_fscid` is set, fail with -EINVAL if the mounted fscid
// does not match (filesystem deleted and recreated with the same name).
// On success *mount holds the mounted handle; returns 0 or a negative
// errno.  The mount handle is released on every failure path (the
// original leaked it on pre-mount errors).
int mount(RadosRef cluster, const Filesystem &filesystem, bool cross_check_fscid,
          MountRef *mount) {
  dout(20) << ": filesystem=" << filesystem << dendl;

  ceph_mount_info *cmi;
  int r = ceph_create_with_context(&cmi, reinterpret_cast<CephContext*>(cluster->cct()));
  if (r < 0) {
    // creation failed -- nothing to release
    derr << ": mount error: " << cpp_strerror(r) << dendl;
    return r;
  }

  r = ceph_conf_set(cmi, "client_mount_uid", "0");
  if (r < 0) {
    derr << ": mount error: " << cpp_strerror(r) << dendl;
    ceph_release(cmi);
    return r;
  }

  r = ceph_conf_set(cmi, "client_mount_gid", "0");
  if (r < 0) {
    derr << ": mount error: " << cpp_strerror(r) << dendl;
    ceph_release(cmi);
    return r;
  }

  // mount timeout applies for local and remote mounts.
  auto mount_timeout = g_ceph_context->_conf.get_val<std::chrono::seconds>
    ("cephfs_mirror_mount_timeout").count();
  r = ceph_set_mount_timeout(cmi, mount_timeout);
  if (r < 0) {
    derr << ": mount error: " << cpp_strerror(r) << dendl;
    ceph_release(cmi);
    return r;
  }

  r = ceph_init(cmi);
  if (r < 0) {
    derr << ": mount error: " << cpp_strerror(r) << dendl;
    ceph_release(cmi);
    return r;
  }

  r = ceph_select_filesystem(cmi, filesystem.fs_name.c_str());
  if (r < 0) {
    derr << ": mount error: " << cpp_strerror(r) << dendl;
    ceph_release(cmi);
    return r;
  }

  r = ceph_mount(cmi, NULL);
  if (r < 0) {
    derr << ": mount error: " << cpp_strerror(r) << dendl;
    ceph_release(cmi);
    return r;
  }

  auto fs_id = ceph_get_fs_cid(cmi);
  if (cross_check_fscid && fs_id != filesystem.fscid) {
    // this can happen in the most remotest possibility when a
    // filesystem is deleted and recreated with the same name.
    // since all this is driven asynchronously, we were able to
    // mount the recreated filesystem. so bubble up the error.
    // cleanup will eventually happen since a mirror disable event
    // would have been queued.
    derr << ": filesystem-id mismatch " << fs_id << " vs " << filesystem.fscid
         << dendl;
    // ignore errors, we are shutting down anyway.
    ceph_unmount(cmi);
    ceph_release(cmi);
    return -EINVAL;
  }

  dout(10) << ": mounted filesystem=" << filesystem << dendl;

  *mount = cmi;
  return 0;
}
} // namespace mirror
} // namespace cephfs
| 4,870 | 28.167665 | 90 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/Utils.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_UTILS_H
#define CEPHFS_MIRROR_UTILS_H
#include "Types.h"
namespace cephfs {
namespace mirror {
// Connect to cluster `cluster_name` as `client_name`.  Non-empty
// `mon_host`/`cephx_key` override the monitor address and key.  On
// success *cluster holds a connected handle; returns 0 or -errno.
int connect(std::string_view client_name, std::string_view cluster_name,
            RadosRef *cluster, std::string_view mon_host={}, std::string_view cephx_key={},
            std::vector<const char *> args={});

// Mount `filesystem` via an established cluster connection.  When
// `cross_check_fscid` is set, fail with -EINVAL if the mounted fscid
// differs from filesystem.fscid.  Returns 0 or -errno.
int mount(RadosRef cluster, const Filesystem &filesystem, bool cross_check_fscid,
          MountRef *mount);
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_UTILS_H
| 621 | 26.043478 | 91 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/Watcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_context.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/WorkQueue.h"
#include "include/stringify.h"
#include "aio_utils.h"
#include "watcher/RewatchRequest.h"
#include "Watcher.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::Watcher " << __func__
using cephfs::mirror::watcher::RewatchRequest;
namespace cephfs {
namespace mirror {
namespace {
// Two-stage completion used by Watcher::unregister_watch(): the first
// complete() call (unwatch result) kicks off an async watch flush that
// re-completes this same context; the second invokes the wrapped
// on_finish with the first error observed and deletes this.  Holds its
// own librados::Rados (built from the IoCtx) so the cluster connection
// stays alive across the flush.
struct C_UnwatchAndFlush : public Context {
  librados::Rados rados;
  Context *on_finish;
  bool flushing = false;  // false: awaiting unwatch; true: awaiting flush
  int ret_val = 0;        // first error observed across both stages

  C_UnwatchAndFlush(librados::IoCtx &ioctx, Context *on_finish)
    : rados(ioctx), on_finish(on_finish) {
  }

  // complete() (not finish()) is overridden because this context is
  // completed twice and must not self-delete after the first pass.
  void complete(int r) override {
    if (ret_val == 0 && r < 0) {
      ret_val = r;
    }

    if (!flushing) {
      flushing = true;

      librados::AioCompletion *aio_comp =
        librados::Rados::aio_create_completion(
          this, &rados_callback<Context, &Context::complete>);
      r = rados.aio_watch_flush(aio_comp);
      ceph_assert(r == 0);
      aio_comp->release();
      return;
    }

    // ensure our reference to the RadosClient is released prior
    // to completing the callback to avoid racing an explicit
    // librados shutdown
    Context *ctx = on_finish;
    r = ret_val;

    delete this;
    ctx->complete(r);
  }

  // unused: all work happens in complete()
  void finish(int r) override {
  }
};
} // anonymous namespace
// Initialize watcher state for object `oid`; the watch itself is not
// established until register_watch() is invoked.
Watcher::Watcher(librados::IoCtx &ioctx, std::string_view oid, ContextWQ *work_queue)
  : m_oid(oid),
    m_ioctx(ioctx),
    m_work_queue(work_queue),
    m_lock(ceph::make_shared_mutex("cephfs::mirror::snap_watcher")),
    m_state(STATE_IDLE),
    m_watch_ctx(*this) {
}
// Out-of-line (the class declares a virtual destructor); no cleanup is
// required here -- callers must unregister_watch() before destruction.
Watcher::~Watcher() = default;
// Asynchronously establish the watch on m_oid.  The caller's callback
// is wrapped so handle_register_watch() runs first to update state.
void Watcher::register_watch(Context *on_finish) {
  dout(20) << dendl;

  std::scoped_lock locker(m_lock);
  m_state = STATE_REGISTERING;

  on_finish = new C_RegisterWatch(this, on_finish);
  librados::AioCompletion *aio_comp =
    librados::Rados::aio_create_completion(on_finish, &rados_callback<Context, &Context::complete>);
  int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_watch_handle, &m_watch_ctx);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Completion of register_watch().  Resolves races under the lock: a
// pending unregister request wins; a notify error observed during
// registration triggers an immediate re-register; otherwise record
// blocklisting.  Callbacks are invoked outside the lock.
void Watcher::handle_register_watch(int r, Context *on_finish) {
  dout(20) << ": r=" << r << dendl;

  bool watch_error = false;
  Context *unregister_watch_ctx = nullptr;
  {
    std::scoped_lock locker(m_lock);
    ceph_assert(m_state == STATE_REGISTERING);

    m_state = STATE_IDLE;
    if (r < 0) {
      derr << ": failed to register watch: " << cpp_strerror(r) << dendl;
      m_watch_handle = 0;
    }

    if (m_unregister_watch_ctx != nullptr) {
      // an unregister arrived mid-registration -- run it below
      std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
    } else if (r == 0 && m_watch_error) {
      // a watch error arrived while registering -- rewatch
      derr << ": re-registering after watch error" << dendl;
      m_state = STATE_REGISTERING;
      watch_error = true;
    } else {
      m_watch_blocklisted = (r == -EBLOCKLISTED);
    }
  }

  on_finish->complete(r);

  if (unregister_watch_ctx != nullptr) {
    unregister_watch_ctx->complete(0);
  } else if (watch_error) {
    rewatch();
  }
}
// Tear the watch down.  If a register/rewatch is in flight, defer by
// stashing a retry context in m_unregister_watch_ctx; otherwise issue
// an async unwatch+flush (C_UnwatchAndFlush).  Completes on_finish
// immediately when nothing is registered.
void Watcher::unregister_watch(Context *on_finish) {
  dout(20) << dendl;

  {
    std::scoped_lock locker(m_lock);
    if (m_state != STATE_IDLE) {
      dout(10) << ": delaying unregister -- watch register in progress" << dendl;
      ceph_assert(m_unregister_watch_ctx == nullptr);
      // retried from handle_register_watch()/handle_rewatch_callback()
      m_unregister_watch_ctx = new LambdaContext([this, on_finish](int r) {
          unregister_watch(on_finish);
        });
      return;
    } else if (is_registered()) {
      // watch is registered -- unwatch
      librados::AioCompletion *aio_comp =
        librados::Rados::aio_create_completion(new C_UnwatchAndFlush(m_ioctx, on_finish),
                                               &rados_callback<Context, &Context::complete>);
      int r = m_ioctx.aio_unwatch(m_watch_handle, aio_comp);
      ceph_assert(r == 0);
      aio_comp->release();

      m_watch_handle = 0;
      m_watch_blocklisted = false;
      return;
    }
  }

  // nothing registered -- complete immediately
  on_finish->complete(0);
}
// librados watch-error callback (via WatchCtx::handle_error).  Flags
// the error and, when a watch is established, transitions to
// STATE_REWATCHING and queues the rewatch on the work queue.
void Watcher::handle_error(uint64_t handle, int err) {
  derr << ": handle=" << handle << ": " << cpp_strerror(err) << dendl;

  std::scoped_lock locker(m_lock);
  m_watch_error = true;

  if (is_registered()) {
    m_state = STATE_REWATCHING;
    if (err == -EBLOCKLISTED) {
      m_watch_blocklisted = true;
    }
    m_work_queue->queue(new LambdaContext([this] {
      rewatch();
    }), 0);
  }
}
// Re-establish the watch via RewatchRequest, unless an unregister
// request arrived in the meantime -- in that case drop back to idle
// and run the deferred unregister instead.
void Watcher::rewatch() {
  dout(20) << dendl;

  Context *unregister_watch_ctx = nullptr;
  {
    std::unique_lock locker(m_lock);
    ceph_assert(m_state == STATE_REWATCHING);

    if (m_unregister_watch_ctx != nullptr) {
      m_state = STATE_IDLE;
      std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
    } else {
      m_watch_error = false;
      // RewatchRequest deletes itself and completes ctx
      Context *ctx = new C_CallbackAdapter<Watcher, &Watcher::handle_rewatch>(this);
      auto req = RewatchRequest::create(m_ioctx, m_oid, m_lock,
                                        &m_watch_ctx, &m_watch_handle, ctx);
      req->send();
      return;
    }
  }

  unregister_watch_ctx->complete(0);
}
// Completion of the RewatchRequest.  Retries on transient failures,
// records blocklisting/ENOENT, honors a deferred unregister, and
// otherwise queues handle_rewatch_callback() on the work queue.
void Watcher::handle_rewatch(int r) {
  dout(20) << ": r=" << r << dendl;

  bool watch_error = false;
  Context *unregister_watch_ctx = nullptr;
  {
    std::scoped_lock locker(m_lock);
    ceph_assert(m_state == STATE_REWATCHING);

    m_watch_blocklisted = false;
    if (m_unregister_watch_ctx != nullptr) {
      dout(10) << ": skipping rewatch -- unregistering" << dendl;
      m_state = STATE_IDLE;
      std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
    } else if (r == -EBLOCKLISTED) {
      // terminal for now -- no retry
      m_watch_blocklisted = true;
      derr << ": client blocklisted" << dendl;
    } else if (r == -ENOENT) {
      // watched object is gone -- nothing to retry against
      dout(5) << ": object " << m_oid << " does not exist" << dendl;
    } else if (r < 0) {
      derr << ": failed to rewatch: " << cpp_strerror(r) << dendl;
      watch_error = true;
    } else if (m_watch_error) {
      // another error raced in while rewatching -- go again
      derr << ": re-registering watch after error" << dendl;
      watch_error = true;
    }
  }

  if (unregister_watch_ctx != nullptr) {
    unregister_watch_ctx->complete(0);
    return;
  } else if (watch_error) {
    rewatch();
    return;
  }

  Context *ctx = new C_CallbackAdapter<Watcher, &Watcher::handle_rewatch_callback>(this);
  m_work_queue->queue(ctx, r);
}
// Runs on the work queue after handle_rewatch(): notifies the subclass
// via handle_rewatch_complete(), then settles the state machine --
// back to idle on success/terminal errors, another rewatch on
// transient failure, or the deferred unregister if one is pending.
void Watcher::handle_rewatch_callback(int r) {
  dout(10) << ": r=" << r << dendl;
  handle_rewatch_complete(r);

  bool watch_error = false;
  Context *unregister_watch_ctx = nullptr;
  {
    std::scoped_lock locker(m_lock);
    ceph_assert(m_state == STATE_REWATCHING);

    if (m_unregister_watch_ctx != nullptr) {
      m_state = STATE_IDLE;
      std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
    } else if (r == -EBLOCKLISTED || r == -ENOENT) {
      m_state = STATE_IDLE;
    } else if (r < 0 || m_watch_error) {
      watch_error = true;
    } else {
      m_state = STATE_IDLE;
    }
  }

  if (unregister_watch_ctx != nullptr) {
    unregister_watch_ctx->complete(0);
  } else if (watch_error) {
    rewatch();
  }
}
// Ack a notification received via handle_notify() so the notifier's
// notify call can complete.
void Watcher::acknowledge_notify(uint64_t notify_id, uint64_t handle, bufferlist &bl) {
  m_ioctx.notify_ack(m_oid, notify_id, handle, bl);
}
// librados notify entry point -- forwards to the owning Watcher's
// (pure virtual) handle_notify().
void Watcher::WatchCtx::handle_notify(uint64_t notify_id, uint64_t handle,
                                      uint64_t notifier_id, bufferlist& bl) {
  dout(20) << ": notify_id=" << notify_id << ", handle=" << handle
           << ", notifier_id=" << notifier_id << dendl;
  watcher.handle_notify(notify_id, handle, notifier_id, bl);
}
// librados watch-error entry point -- forwards to the owning Watcher.
void Watcher::WatchCtx::handle_error(uint64_t handle, int err) {
  dout(20) << dendl;
  watcher.handle_error(handle, err);
}
} // namespace mirror
} // namespace cephfs
| 8,024 | 27.059441 | 100 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/Watcher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_WATCHER_H
#define CEPHFS_MIRROR_WATCHER_H
#include <string_view>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
class ContextWQ;
namespace cephfs {
namespace mirror {
// generic watcher class -- establish watch on a given rados object
// and invoke handle_notify() when notified. On notify error, try
// to re-establish the watch. Errors during rewatch are notified via
// handle_rewatch_complete().
class Watcher {
public:
  Watcher(librados::IoCtx &ioctx, std::string_view oid, ContextWQ *work_queue);
  virtual ~Watcher();

  // asynchronously establish / tear down the watch; on_finish receives
  // the librados result code
  void register_watch(Context *on_finish);
  void unregister_watch(Context *on_finish);

protected:
  std::string m_oid;

  // ack a notification delivered via handle_notify().
  // (fixed declaration parameter name: was misspelled "notify_if"; the
  // definition uses "notify_id")
  void acknowledge_notify(uint64_t notify_id, uint64_t handle, bufferlist &bl);

  bool is_registered() const {
    return m_state == STATE_IDLE && m_watch_handle != 0;
  }
  bool is_unregistered() const {
    return m_state == STATE_IDLE && m_watch_handle == 0;
  }

  // invoked (on the work queue) after a rewatch attempt settles
  virtual void handle_rewatch_complete(int r) { }

private:
  enum State {
    STATE_IDLE,
    STATE_REGISTERING,
    STATE_REWATCHING
  };

  // librados callback adapter forwarding to the owning Watcher
  struct WatchCtx : public librados::WatchCtx2 {
    Watcher &watcher;

    WatchCtx(Watcher &parent) : watcher(parent) {}

    void handle_notify(uint64_t notify_id,
                       uint64_t handle,
                       uint64_t notifier_id,
                       bufferlist& bl) override;
    void handle_error(uint64_t handle, int err) override;
  };

  // runs handle_register_watch() before the caller's callback
  struct C_RegisterWatch : public Context {
    Watcher *watcher;
    Context *on_finish;

    C_RegisterWatch(Watcher *watcher, Context *on_finish)
      : watcher(watcher),
        on_finish(on_finish) {
    }

    void finish(int r) override {
      watcher->handle_register_watch(r, on_finish);
    }
  };

  librados::IoCtx &m_ioctx;
  ContextWQ *m_work_queue;

  mutable ceph::shared_mutex m_lock;   // guards all state below
  State m_state;
  bool m_watch_error = false;          // notify error seen; rewatch needed
  bool m_watch_blocklisted = false;
  uint64_t m_watch_handle;             // 0 when no watch is established
  WatchCtx m_watch_ctx;
  Context *m_unregister_watch_ctx = nullptr;  // deferred unregister request

  virtual void handle_notify(uint64_t notify_id, uint64_t handle,
                             uint64_t notifier_id, bufferlist& bl) = 0;
  void handle_error(uint64_t handle, int err);

  void rewatch();
  void handle_rewatch(int r);
  void handle_rewatch_callback(int r);
  void handle_register_watch(int r, Context *on_finish);
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_WATCHER_H
| 2,590 | 24.15534 | 79 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/aio_utils.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_AIO_UTILS_H
#define CEPHFS_MIRROR_AIO_UTILS_H
#include "include/rados/librados.hpp"
namespace cephfs {
namespace mirror {
// Trampoline adapting a C rados completion callback to member function
// `MF` on the object passed through `arg`.
template <typename T, void(T::*MF)(int)>
void rados_callback(rados_completion_t c, void *arg) {
  // static_cast is the idiomatic (and sufficient) cast from void*
  T *obj = static_cast<T*>(arg);
  int r = rados_aio_get_return_value(c);
  (obj->*MF)(r);
}
// Context adapter: completing this context invokes member function `MF`
// on `obj` with the result code.  Deleted by Context::complete() as
// usual after finish() runs.
template <typename T, void (T::*MF)(int)>
class C_CallbackAdapter : public Context {
  T *obj;
public:
  C_CallbackAdapter(T *obj)
    : obj(obj) {
  }

protected:
  void finish(int r) override {
    (obj->*MF)(r);
  }
};
// Defers completion of `on_finish` to the given work queue (breaks
// callback recursion / moves execution off the caller's thread).  If
// this context is destroyed without finish() running, on_finish is
// deleted rather than leaked.
template <typename WQ>
struct C_AsyncCallback : public Context {
  WQ *op_work_queue;
  Context *on_finish;

  C_AsyncCallback(WQ *op_work_queue, Context *on_finish)
    : op_work_queue(op_work_queue), on_finish(on_finish) {
  }
  ~C_AsyncCallback() override {
    delete on_finish;
  }
  void finish(int r) override {
    op_work_queue->queue(on_finish, r);
    // ownership transferred to the work queue
    on_finish = nullptr;
  }
};
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_AIO_UTILS_H
| 1,137 | 20.074074 | 70 |
h
|
null |
ceph-main/src/tools/cephfs_mirror/main.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/async/context_pool.h"
#include "common/Preforker.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "mon/MonClient.h"
#include "msg/Messenger.h"
#include "Mirror.h"
#include <vector>
using namespace std;
// Print command-line help for the daemon, then the generic server usage
// epilogue (which exits).
void usage() {
  std::cout << "usage: cephfs-mirror [options...]" << std::endl
            << "options:\n"
               "  --mon-host monaddress[:port] connect to specified monitor\n"
               "  --keyring=<path> path to keyring for local cluster\n"
               "  --log-file=<logfile> file to log debug output\n"
               "  --debug-cephfs-mirror=<log-level>/<memory-level> set cephfs-mirror debug level\n";
  generic_server_usage();
}
cephfs::mirror::Mirror *mirror = nullptr;
// Async signal handler trampoline -- forwards to the global mirror
// instance when one exists.
static void handle_signal(int signum) {
  if (mirror == nullptr) {
    return;
  }
  mirror->handle_signal(signum);
}
// Daemon entry point: global init, optional daemonization via
// Preforker, signal handler registration, monitor client + messenger
// setup, then run the Mirror instance until signalled.
int main(int argc, const char **argv) {
  auto args = argv_to_vec(argc, argv);
  if (args.empty()) {
    cerr << argv[0] << ": -h or --help for usage" << std::endl;
    ::exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    usage();
    ::exit(0);
  }

  auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_DAEMON,
                         CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS);

  Preforker forker;
  if (global_init_prefork(g_ceph_context) >= 0) {
    std::string err;
    int r = forker.prefork(err);
    if (r < 0) {
      cerr << err << std::endl;
      return r;
    }
    if (forker.is_parent()) {
      // parent: wait for the child to signal readiness, then exit
      g_ceph_context->_log->start();
      if (forker.parent_wait(err) != 0) {
        return -ENXIO;
      }
      return 0;
    }
    global_init_postfork_start(g_ceph_context);
  }

  common_init_finish(g_ceph_context);

  bool daemonize = g_conf().get_val<bool>("daemonize");
  if (daemonize) {
    global_init_postfork_finish(g_ceph_context);
    forker.daemonize();
  }

  init_async_signal_handler();
  register_async_signal_handler(SIGHUP, handle_signal);
  register_async_signal_handler_oneshot(SIGINT, handle_signal);
  register_async_signal_handler_oneshot(SIGTERM, handle_signal);

  auto cmd_args = argv_to_vec(argc, argv);

  Messenger *msgr = Messenger::create_client_messenger(g_ceph_context, "client");
  msgr->set_default_policy(Messenger::Policy::lossy_client(0));

  std::string reason;
  ceph::async::io_context_pool ctxpool(1);
  // direct-initialize; the original copy-from-temporary form relied on
  // guaranteed copy elision and obscured intent
  MonClient monc(g_ceph_context, ctxpool);
  int r = monc.build_initial_monmap();
  if (r < 0) {
    cerr << "failed to generate initial monmap" << std::endl;
    goto cleanup_messenger;
  }

  msgr->start();

  mirror = new cephfs::mirror::Mirror(g_ceph_context, cmd_args, &monc, msgr);
  r = mirror->init(reason);
  if (r < 0) {
    std::cerr << "failed to initialize cephfs-mirror: " << reason << std::endl;
    goto cleanup;
  }

  mirror->run();

cleanup:
  // free the instance on both the success and init-failure path (the
  // failure path previously leaked it); clear the global so a late
  // signal cannot touch freed memory
  delete mirror;
  mirror = nullptr;
  monc.shutdown();
cleanup_messenger:
  msgr->shutdown();
  msgr->wait();
  delete msgr;

  unregister_async_signal_handler(SIGHUP, handle_signal);
  unregister_async_signal_handler(SIGINT, handle_signal);
  unregister_async_signal_handler(SIGTERM, handle_signal);
  shutdown_async_signal_handler();

  return forker.signal_exit(r);
}
| 3,429 | 26.44 | 101 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/watcher/RewatchRequest.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_mutex.h"
#include "common/debug.h"
#include "common/errno.h"
#include "include/Context.h"
#include "tools/cephfs_mirror/aio_utils.h"
#include "RewatchRequest.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_cephfs_mirror
#undef dout_prefix
#define dout_prefix *_dout << "cephfs::mirror::watcher:RewatchRequest " << __func__
namespace cephfs {
namespace mirror {
namespace watcher {
// Capture the target object, the lock guarding *watch_handle, the
// callback context to re-register, and the completion to invoke when
// the unwatch+rewatch sequence finishes.
RewatchRequest::RewatchRequest(librados::IoCtx &ioctx, const std::string &oid,
                               ceph::shared_mutex &watch_lock,
                               librados::WatchCtx2 *watch_ctx,
                               uint64_t *watch_handle, Context *on_finish)
  : m_ioctx(ioctx), m_oid(oid), m_lock(watch_lock),
    m_watch_ctx(watch_ctx), m_watch_handle(watch_handle),
    m_on_finish(on_finish) {
}
// Kick off the sequence: unwatch the stale handle (if any), then
// re-establish the watch.  Caller must hold m_lock for write -- this is
// asserted in unwatch().
void RewatchRequest::send() {
  unwatch();
}
// Stage 1: tear down the existing watch handle (if any), zeroing
// *m_watch_handle while the write lock is still held so readers never
// observe the stale handle.  Skips straight to rewatch() when there is
// no handle.
void RewatchRequest::unwatch() {
  ceph_assert(ceph_mutex_is_wlocked(m_lock));
  if (*m_watch_handle == 0) {
    rewatch();
    return;
  }

  dout(10) << dendl;

  uint64_t watch_handle = 0;
  std::swap(*m_watch_handle, watch_handle);

  librados::AioCompletion *aio_comp =
    librados::Rados::aio_create_completion(
      this, &rados_callback<RewatchRequest, &RewatchRequest::handle_unwatch>);
  int r = m_ioctx.aio_unwatch(watch_handle, aio_comp);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Stage-1 completion.  Blocklisting is terminal: finish immediately
// with the error.  Any other unwatch failure is logged but does not
// stop the rewatch attempt.
void RewatchRequest::handle_unwatch(int r) {
  dout(20) << ": r=" << r << dendl;

  if (r == -EBLOCKLISTED) {
    derr << ": client blocklisted" << dendl;
    finish(r);
    return;
  }

  if (r < 0) {
    derr << ": failed to unwatch: " << cpp_strerror(r) << dendl;
  }
  rewatch();
}
// Stage 2: asynchronously establish a new watch; the new handle lands
// in m_rewatch_handle and is published in handle_rewatch().
void RewatchRequest::rewatch() {
  dout(20) << dendl;

  librados::AioCompletion *aio_comp =
    librados::Rados::aio_create_completion(
      this, &rados_callback<RewatchRequest, &RewatchRequest::handle_rewatch>);
  int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_rewatch_handle, m_watch_ctx);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Stage-2 completion: publish the new handle (0 on failure) to the
// caller's *m_watch_handle under the write lock, then finish with the
// watch result.
void RewatchRequest::handle_rewatch(int r) {
  dout(20) << ": r=" << r << dendl;
  if (r < 0) {
    derr << ": failed to watch object: " << cpp_strerror(r) << dendl;
    m_rewatch_handle = 0;
  }

  {
    std::unique_lock locker(m_lock);
    *m_watch_handle = m_rewatch_handle;
  }

  finish(r);
}
// Complete the caller's context and self-destruct -- RewatchRequest
// objects are heap-allocated via create() and own their own lifetime.
void RewatchRequest::finish(int r) {
  dout(20) << ": r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}
} // namespace watcher
} // namespace mirror
} // namespace cephfs
| 2,617 | 24.417476 | 83 |
cc
|
null |
ceph-main/src/tools/cephfs_mirror/watcher/RewatchRequest.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPHFS_MIRROR_WATCHER_REWATCH_REQUEST_H
#define CEPHFS_MIRROR_WATCHER_REWATCH_REQUEST_H
#include "common/ceph_mutex.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
struct Context;
namespace cephfs {
namespace mirror {
namespace watcher {
// Rewatch an existing watch -- the watch can be in an operational
// or error state.
class RewatchRequest {
public:
  // allocate a request; it deletes itself after invoking on_finish
  static RewatchRequest *create(librados::IoCtx &ioctx, const std::string &oid,
                                ceph::shared_mutex &watch_lock,
                                librados::WatchCtx2 *watch_ctx,
                                uint64_t *watch_handle, Context *on_finish) {
    return new RewatchRequest(ioctx, oid, watch_lock, watch_ctx, watch_handle,
                              on_finish);
  }

  RewatchRequest(librados::IoCtx &ioctx, const std::string &oid,
                 ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx,
                 uint64_t *watch_handle, Context *on_finish);

  // start the unwatch+rewatch sequence; the caller must hold watch_lock
  // for write (asserted in unwatch())
  void send();

private:
  librados::IoCtx& m_ioctx;
  std::string m_oid;
  ceph::shared_mutex &m_lock;    // guards *m_watch_handle
  librados::WatchCtx2 *m_watch_ctx;
  uint64_t *m_watch_handle;      // in/out: current watch handle
  Context *m_on_finish;

  uint64_t m_rewatch_handle = 0; // handle returned by the new watch

  void unwatch();
  void handle_unwatch(int r);

  void rewatch();
  void handle_rewatch(int r);

  void finish(int r);
};
} // namespace watcher
} // namespace mirror
} // namespace cephfs
#endif // CEPHFS_MIRROR_WATCHER_REWATCH_REQUEST_H
| 1,581 | 24.934426 | 80 |
h
|
null |
ceph-main/src/tools/erasure-code/ceph-erasure-code-tool.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/buffer.h"
#include "include/stringify.h"
#include "common/ceph_argparse.h"
#include "common/config_proxy.h"
#include "common/errno.h"
#include "erasure-code/ErasureCode.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "osd/ECUtil.h"
#include <iostream>
#include <map>
#include <string>
#include <vector>
#include <boost/algorithm/string.hpp>
using namespace std;
// Profile parameters that validate-profile can display; also listed in
// the usage text.
std::vector<std::string> display_params = {
  "chunk_count", "data_chunk_count", "coding_chunk_count"
};
// Print an optional error `message` followed by the tool's usage text
// to `out`.  (Fixed two help-text typos: "read form" -> "read from".)
void usage(const std::string message, ostream &out) {
  if (!message.empty()) {
    out << message << std::endl;
    out << "" << std::endl;
  }
  out << "usage: ceph-erasure-code-tool test-plugin-exists <plugin>" << std::endl;
  out << "       ceph-erasure-code-tool validate-profile <profile> [<display-param> ...]" << std::endl;
  out << "       ceph-erasure-code-tool calc-chunk-size <profile> <object_size>" << std::endl;
  out << "       ceph-erasure-code-tool encode <profile> <stripe_unit> <want_to_encode> <fname>" << std::endl;
  out << "       ceph-erasure-code-tool decode <profile> <stripe_unit> <want_to_decode> <fname>" << std::endl;
  out << "" << std::endl;
  out << "  plugin - plugin name" << std::endl;
  out << "  profile - comma separated list of erasure-code profile settings" << std::endl;
  out << "            example: plugin=jerasure,technique=reed_sol_van,k=3,m=2" << std::endl;
  out << "  display-param - parameter to display (display all if empty)" << std::endl;
  out << "                  may be: " << display_params << std::endl;
  out << "  object_size - object size" << std::endl;
  out << "  stripe_unit - stripe unit" << std::endl;
  out << "  want_to_encode - comma separated list of shards to encode" << std::endl;
  out << "  want_to_decode - comma separated list of shards to decode" << std::endl;
  out << "  fname - name for input/output files" << std::endl;
  out << "          when encoding input is read from {fname} file," << std::endl;
  out << "          result is stored in {fname}.{shard} files" << std::endl;
  out << "          when decoding input is read from {fname}.{shard} files," << std::endl;
  out << "          result is stored in {fname} file" << std::endl;
}
// Build an erasure-code implementation from "profile_str", a comma/space
// separated list of key=value settings (must include "plugin"), and - when
// "sinfo" is non-null - a stripe layout derived from "stripe_unit_str" and
// the profile's "k" value.
//
// Returns 0 on success; on failure prints a usage message and returns a
// POSITIVE value (1), so callers must treat any non-zero result as an error.
int ec_init(const std::string &profile_str,
            const std::string &stripe_unit_str,
            ceph::ErasureCodeInterfaceRef *ec_impl,
            std::unique_ptr<ECUtil::stripe_info_t> *sinfo) {
  ceph::ErasureCodeProfile profile;
  std::vector<std::string> opts;
  boost::split(opts, profile_str, boost::is_any_of(", "));
  for (auto &opt_str : opts) {
    std::vector<std::string> opt;
    boost::split(opt, opt_str, boost::is_any_of("="));
    if (opt.size() <= 1) {
      usage("invalid profile", std::cerr);
      return 1;
    }
    profile[opt[0]] = opt[1];
  }
  auto plugin = profile.find("plugin");
  if (plugin == profile.end()) {
    usage("invalid profile: plugin not specified", std::cerr);
    return 1;
  }
  stringstream ss;
  ceph::ErasureCodePluginRegistry::instance().factory(
    plugin->second, g_conf().get_val<std::string>("erasure_code_dir"),
    profile, ec_impl, &ss);
  if (!*ec_impl) {
    usage("invalid profile: " + ss.str(), std::cerr);
    return 1;
  }
  if (sinfo == nullptr) {
    return 0;
  }
  // Parse into a signed value first: assigning a negative atoi() result
  // directly to uint64_t wraps to a huge number, and "<= 0" on an unsigned
  // value only ever catches 0, so negative inputs used to slip through.
  int64_t stripe_unit = atoll(stripe_unit_str.c_str());
  if (stripe_unit <= 0) {
    usage("invalid stripe unit", std::cerr);
    return 1;
  }
  // Same signedness consideration for "k" before the assert.
  int64_t k = atoll(profile["k"].c_str());
  ceph_assert(k > 0);
  uint64_t stripe_size = k;
  uint64_t stripe_width = stripe_size * stripe_unit;
  sinfo->reset(new ECUtil::stripe_info_t(stripe_size, stripe_width));
  return 0;
}
// "test-plugin-exists" command: try to load the erasure-code plugin named in
// args[0] and return the registry's load status (0 on success).  Any message
// produced while loading is echoed to stderr.
int do_test_plugin_exists(const std::vector<const char*> &args) {
  if (args.size() < 1) {
    // typo fix: "enought" -> "enough"
    usage("not enough arguments", std::cerr);
    return 1;
  }
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  ErasureCodePlugin *plugin;
  stringstream ss;
  // The registry requires its lock to be held across load().
  std::lock_guard l{instance.lock};
  int r = instance.load(
    args[0], g_conf().get_val<std::string>("erasure_code_dir"), &plugin, &ss);
  std::cerr << ss.str() << endl;
  return r;
}
// "validate-profile" command: build an erasure-code implementation from the
// profile in args[0] and print the requested display parameters (all of them
// when none are named).  Returns 0 on success, non-zero on error.
int do_validate_profile(const std::vector<const char*> &args) {
  if (args.size() < 1) {
    // typo fix: "enought" -> "enough"
    usage("not enough arguments", std::cerr);
    return 1;
  }
  ceph::ErasureCodeInterfaceRef ec_impl;
  int r = ec_init(args[0], {}, &ec_impl, nullptr);
  if (r != 0) {
    // ec_init reports failure with a positive value (1); the previous
    // "r < 0" test let errors through and then dereferenced a null ec_impl.
    return r;
  }
  if (args.size() > 1) {
    // Restrict output to the explicitly requested parameters, after
    // validating each against the known set.
    std::set<std::string> valid_params(display_params.begin(),
                                       display_params.end());
    display_params.clear();
    for (size_t i = 1; i < args.size(); i++) {
      if (!valid_params.count(args[i])) {
        usage("invalid display param: " + std::string(args[i]), std::cerr);
        return 1;
      }
      display_params.push_back(args[i]);
    }
  }
  for (auto &param : display_params) {
    if (display_params.size() > 1) {
      std::cout << param << ": ";
    }
    if (param == "chunk_count") {
      std::cout << ec_impl->get_chunk_count() << std::endl;
    } else if (param == "data_chunk_count") {
      std::cout << ec_impl->get_data_chunk_count() << std::endl;
    } else if (param == "coding_chunk_count") {
      std::cout << ec_impl->get_coding_chunk_count() << std::endl;
    } else {
      ceph_abort_msgf("unknown display_param: %s", param.c_str());
    }
  }
  return 0;
}
// "calc-chunk-size" command: print the per-chunk size the erasure code
// described by profile args[0] would use for an object of size args[1].
int do_calc_chunk_size(const std::vector<const char*> &args) {
  if (args.size() < 2) {
    // typo fix: "enought" -> "enough"
    usage("not enough arguments", std::cerr);
    return 1;
  }
  ceph::ErasureCodeInterfaceRef ec_impl;
  int r = ec_init(args[0], {}, &ec_impl, nullptr);
  if (r != 0) {
    // ec_init returns a positive value on error; "r < 0" missed it and the
    // null ec_impl was then dereferenced below.
    return r;
  }
  // Parse signed first: a negative atoi() assigned to uint64_t wraps to a
  // huge value and the old unsigned "<= 0" check could not reject it.
  int64_t object_size = atoll(args[1]);
  if (object_size <= 0) {
    usage("invalid object size", std::cerr);
    return 1;
  }
  std::cout << ec_impl->get_chunk_size(object_size) << std::endl;
  return 0;
}
// "encode" command: read plain data from file args[3], zero-pad it to a
// multiple of the stripe width, erasure-encode the shards listed in args[2]
// (comma separated) and write each shard to "{fname}.{shard}".
int do_encode(const std::vector<const char*> &args) {
  if (args.size() < 4) {
    // typo fix: "enought" -> "enough"
    usage("not enough arguments", std::cerr);
    return 1;
  }
  ceph::ErasureCodeInterfaceRef ec_impl;
  std::unique_ptr<ECUtil::stripe_info_t> sinfo;
  int r = ec_init(args[0], args[1], &ec_impl, &sinfo);
  if (r != 0) {
    // ec_init returns a positive value on error; the old "r < 0" check let
    // failures through with ec_impl/sinfo unset.
    return r;
  }
  std::set<int> want;
  std::vector<std::string> shards;
  boost::split(shards, args[2], boost::is_any_of(","));
  for (auto &shard : shards) {
    want.insert(atoi(shard.c_str()));
  }
  ceph::bufferlist decoded_data;
  std::string fname = args[3];
  std::string error;
  r = decoded_data.read_file(fname.c_str(), &error);
  if (r < 0) {
    std::cerr << "failed to read " << fname << ": " << error << std::endl;
    return 1;
  }
  // Pad the input with zeroes so its length is stripe-width aligned.
  uint64_t stripe_width = sinfo->get_stripe_width();
  if (decoded_data.length() % stripe_width != 0) {
    uint64_t pad = stripe_width - decoded_data.length() % stripe_width;
    decoded_data.append_zero(pad);
  }
  std::map<int, ceph::bufferlist> encoded_data;
  r = ECUtil::encode(*sinfo, ec_impl, decoded_data, want, &encoded_data);
  if (r < 0) {
    std::cerr << "failed to encode: " << cpp_strerror(r) << std::endl;
    return 1;
  }
  // One output file per requested shard: "{fname}.{shard}".
  for (auto &[shard, bl] : encoded_data) {
    std::string name = fname + "." + stringify(shard);
    r = bl.write_file(name.c_str());
    if (r < 0) {
      std::cerr << "failed to write " << name << ": " << cpp_strerror(r)
                << std::endl;
      return 1;
    }
  }
  return 0;
}
// "decode" command: read the shards listed in args[2] from files
// "{fname}.{shard}", erasure-decode them and write the reconstructed data to
// file args[3].
int do_decode(const std::vector<const char*> &args) {
  if (args.size() < 4) {
    // typo fix: "enought" -> "enough"
    usage("not enough arguments", std::cerr);
    return 1;
  }
  ceph::ErasureCodeInterfaceRef ec_impl;
  std::unique_ptr<ECUtil::stripe_info_t> sinfo;
  int r = ec_init(args[0], args[1], &ec_impl, &sinfo);
  if (r != 0) {
    // ec_init returns a positive value on error; the old "r < 0" check let
    // failures through with ec_impl/sinfo unset.
    return r;
  }
  std::map<int, ceph::bufferlist> encoded_data;
  std::vector<std::string> shards;
  boost::split(shards, args[2], boost::is_any_of(","));
  for (auto &shard : shards) {
    encoded_data[atoi(shard.c_str())] = {};
  }
  ceph::bufferlist decoded_data;
  std::string fname = args[3];
  // Load every requested shard file before attempting the decode.
  for (auto &[shard, bl] : encoded_data) {
    std::string name = fname + "." + stringify(shard);
    std::string error;
    r = bl.read_file(name.c_str(), &error);
    if (r < 0) {
      std::cerr << "failed to read " << name << ": " << error << std::endl;
      return 1;
    }
  }
  r = ECUtil::decode(*sinfo, ec_impl, encoded_data, &decoded_data);
  if (r < 0) {
    std::cerr << "failed to decode: " << cpp_strerror(r) << std::endl;
    return 1;
  }
  r = decoded_data.write_file(fname.c_str());
  if (r < 0) {
    std::cerr << "failed to write " << fname << ": " << cpp_strerror(r)
              << std::endl;
    return 1;
  }
  return 0;
}
// Entry point: bring up a minimal Ceph context (no mon config) and dispatch
// to the requested sub-command.  "-h"/"--help" or no arguments prints usage.
int main(int argc, const char **argv) {
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_MON_CONFIG);
  if (args.empty() || args[0] == std::string("-h") ||
      args[0] == std::string("--help")) {
    usage("", std::cout);
    return 0;
  }
  // args is known non-empty here, so args[0] is the sub-command.  (A former
  // second "args.size() < 1" check was unreachable and has been removed.)
  std::string cmd = args[0];
  std::vector<const char*> cmd_args(args.begin() + 1, args.end());
  if (cmd == "test-plugin-exists") {
    return do_test_plugin_exists(cmd_args);
  } else if (cmd == "validate-profile") {
    return do_validate_profile(cmd_args);
  } else if (cmd == "calc-chunk-size") {
    return do_calc_chunk_size(cmd_args);
  } else if (cmd == "encode") {
    return do_encode(cmd_args);
  } else if (cmd == "decode") {
    return do_decode(cmd_args);
  }
  usage("invalid command: " + cmd, std::cerr);
  return 1;
}
| 9,905 | 29.574074 | 110 |
cc
|
null |
ceph-main/src/tools/immutable_object_cache/CacheClient.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/bind/bind.hpp>
#include "CacheClient.h"
#include "common/Cond.h"
#include "common/version.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_immutable_obj_cache
#undef dout_prefix
#define dout_prefix *_dout << "ceph::cache::CacheClient: " << this << " " \
<< __func__ << ": "
namespace ceph {
namespace immutable_obj_cache {
// Construct a client for the immutable-object-cache daemon listening on the
// unix domain socket "file".  Optionally spins up a pool of dedicated worker
// threads (sized by the "..._dedicated_thread_num" config option) used to run
// reply callbacks off the I/O thread.
CacheClient::CacheClient(const std::string& file, CephContext* ceph_ctx)
  : m_cct(ceph_ctx), m_io_service_work(m_io_service),
    m_dm_socket(m_io_service), m_ep(stream_protocol::endpoint(file)),
    m_io_thread(nullptr), m_session_work(false), m_writing(false),
    m_reading(false), m_sequence_id(0) {
  m_worker_thread_num =
    m_cct->_conf.get_val<uint64_t>(
      "immutable_object_cache_client_dedicated_thread_num");

  if (m_worker_thread_num != 0) {
    // Dedicated worker io_service; the work guard keeps run() from returning
    // while there is nothing queued.
    m_worker = new boost::asio::io_service();
    m_worker_io_service_work = new boost::asio::io_service::work(*m_worker);
    for (uint64_t i = 0; i < m_worker_thread_num; i++) {
      std::thread* thd = new std::thread([this](){m_worker->run();});
      m_worker_threads.push_back(thd);
    }
  }
  // Reusable buffer for reading fixed-size message headers.
  m_bp_header = buffer::create(get_header_size());
}
// Destructor: stop() joins the I/O thread and tears down the worker pool.
CacheClient::~CacheClient() {
  stop();
}
// Start the client's I/O thread, which drives the asio event loop.
void CacheClient::run() {
   m_io_thread.reset(new std::thread([this](){m_io_service.run(); }));
}
// Report whether the session with the cache daemon is currently usable
// (set after a successful register handshake, cleared on fault/stop).
bool CacheClient::is_session_work() {
  // atomic<bool>::load() already yields the flag; no "== true" needed.
  return m_session_work.load();
}
// Stop the client: mark the session unusable, halt the asio loop, join the
// I/O thread, then tear down the dedicated worker pool (if configured).
// Always returns 0.
int CacheClient::stop() {
  m_session_work.store(false);
  m_io_service.stop();

  if (m_io_thread != nullptr) {
    m_io_thread->join();
  }

  if (m_worker_thread_num != 0) {
    m_worker->stop();
    for (auto thd : m_worker_threads) {
      thd->join();
      delete thd;
    }
    // Release the work guard before the io_service itself.
    delete m_worker_io_service_work;
    delete m_worker;
  }
  return 0;
}
// close domain socket
// Close the domain socket.  Errors from close() are only logged; per asio,
// the descriptor is closed even when an error is reported.
void CacheClient::close() {
  m_session_work.store(false);
  boost::system::error_code close_ec;
  m_dm_socket.close(close_ec);
  if (close_ec) {
     ldout(m_cct, 20) << "close: " << close_ec.message() << dendl;
  }
}
// sync connect
// Synchronous connect: wraps the async overload and blocks on a condition
// until the connect handler completes.  Returns 0 on success, -1 on failure.
int CacheClient::connect() {
  int ret = -1;
  C_SaferCond cond;
  Context* on_finish = new LambdaContext([&cond, &ret](int err) {
    ret = err;
    cond.complete(err);
  });

  connect(on_finish);
  cond.wait();

  return ret;
}
// async connect
// Asynchronous connect to the daemon's domain socket; "on_finish" is
// completed from handle_connect with 0 on success or -1 on failure.
void CacheClient::connect(Context* on_finish) {
  m_dm_socket.async_connect(m_ep,
    boost::bind(&CacheClient::handle_connect, this,
                on_finish, boost::asio::placeholders::error));
}
// Completion handler for the async connect: report the outcome through
// "on_finish" (0 on success, -1 on error) and record faults.
void CacheClient::handle_connect(Context* on_finish,
                                 const boost::system::error_code& err) {
  if (err) {
    ldout(m_cct, 20) << "fails to connect to cache server. error : "
                     << err.message() << dendl;
    fault(ASIO_ERROR_CONNECT, err);
    on_finish->complete(-1);
    return;
  }

  ldout(m_cct, 20) << "successfully connected to cache server." << dendl;
  on_finish->complete(0);
}
// Queue an RBDSC_READ lookup for the given object and kick the send/receive
// state machines.  "on_finish" is stored on the request and invoked when the
// matching reply (keyed by sequence id) arrives.
void CacheClient::lookup_object(std::string pool_nspace, uint64_t pool_id,
                                uint64_t snap_id, uint64_t object_size,
                                std::string oid,
                                CacheGenContextURef&& on_finish) {
  ldout(m_cct, 20) << dendl;
  ObjectCacheRequest* req = new ObjectCacheReadData(RBDSC_READ,
                                    ++m_sequence_id, 0, 0, pool_id,
                                    snap_id, object_size, oid, pool_nspace);
  req->process_msg = std::move(on_finish);
  req->encode();

  {
    std::lock_guard locker{m_lock};
    // Append the wire bytes and index the request by its sequence id so the
    // reply can be matched up in process().
    m_outcoming_bl.append(req->get_payload_bufferlist());
    ceph_assert(m_seq_to_req.find(req->seq) == m_seq_to_req.end());
    m_seq_to_req[req->seq] = req;
  }

  // try to send message to server.
  try_send();

  // try to receive ack from server.
  try_receive();
}
// Start the writer state machine if it is not already running.
void CacheClient::try_send() {
  ldout(m_cct, 20) << dendl;
  // Atomically claim the writer role.  The previous load()-then-store()
  // sequence left a window where two threads could both observe
  // m_writing == false and both call send_message(); exchange() closes it.
  if (!m_writing.exchange(true)) {
    send_message();
  }
}
// Writer state machine: swap out the pending outgoing buffer under the lock
// and asynchronously write all of it.  On completion, either clear m_writing
// (nothing more queued) or loop to send bytes queued in the meantime.
void CacheClient::send_message() {
  ldout(m_cct, 20) << dendl;
  bufferlist bl;
  {
    std::lock_guard locker{m_lock};
    // Take ownership of the queued bytes; new requests append to a fresh
    // (empty) m_outcoming_bl while this write is in flight.
    bl.swap(m_outcoming_bl);
    ceph_assert(m_outcoming_bl.length() == 0);
  }

  // send bytes as many as possible.
  // NOTE: the lambda captures bl by copy, which keeps the buffer alive for
  // the duration of the async write.
  boost::asio::async_write(m_dm_socket,
      boost::asio::buffer(bl.c_str(), bl.length()),
      boost::asio::transfer_exactly(bl.length()),
      [this, bl](const boost::system::error_code& err, size_t cb) {
    if (err || cb != bl.length()) {
      fault(ASIO_ERROR_WRITE, err);
      return;
    }

    ceph_assert(cb == bl.length());

    {
       std::lock_guard locker{m_lock};
       if (m_outcoming_bl.length() == 0) {
         m_writing.store(false);
         return;
       }
    }

    // still have left bytes, continue to send.
    send_message();
  });
  try_receive();
}
// Start the reader state machine if it is not already running.
void CacheClient::try_receive() {
  ldout(m_cct, 20) << dendl;
  // Atomically claim the reader role; exchange() avoids the race the old
  // load()-then-store() sequence allowed, where two callers could both see
  // m_reading == false and both start receive_message().
  if (!m_reading.exchange(true)) {
    receive_message();
  }
}
// Reader state machine entry: must only run while m_reading is set; kicks
// off the header read for the next reply.
void CacheClient::receive_message() {
  ldout(m_cct, 20) << dendl;
  ceph_assert(m_reading.load());
  read_reply_header();
}
// Asynchronously read exactly one fixed-size reply header into a freshly
// allocated buffer (a new buffer per reply, so handlers can own it).
void CacheClient::read_reply_header() {
  ldout(m_cct, 20) << dendl;
  /* create new head buffer for every reply */
  bufferptr bp_head(buffer::create(get_header_size()));
  auto raw_ptr  = bp_head.c_str();

  boost::asio::async_read(m_dm_socket,
    boost::asio::buffer(raw_ptr, get_header_size()),
    boost::asio::transfer_exactly(get_header_size()),
    boost::bind(&CacheClient::handle_reply_header,
                this, bp_head,
                boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred));
}
// Header-read completion: on success, extract the payload length from the
// header and chain into the data read; on error, fault the session.
void CacheClient::handle_reply_header(bufferptr bp_head,
                                      const boost::system::error_code& ec,
                                      size_t bytes_transferred) {
  ldout(m_cct, 20) << dendl;
  if (ec || bytes_transferred != get_header_size()) {
    fault(ASIO_ERROR_READ, ec);
    return;
  }

  ceph_assert(bytes_transferred == bp_head.length());

  uint32_t data_len = get_data_len(bp_head.c_str());

  bufferptr bp_data(buffer::create(data_len));
  read_reply_data(std::move(bp_head), std::move(bp_data), data_len);
}
// Asynchronously read exactly data_len payload bytes into bp_data; both the
// header and data buffers are forwarded to the completion handler.
void CacheClient::read_reply_data(bufferptr&& bp_head,
                                  bufferptr&& bp_data,
                                  const uint64_t data_len) {
  ldout(m_cct, 20) << dendl;
  auto raw_ptr = bp_data.c_str();
  boost::asio::async_read(m_dm_socket, boost::asio::buffer(raw_ptr, data_len),
    boost::asio::transfer_exactly(data_len),
    boost::bind(&CacheClient::handle_reply_data,
                this, std::move(bp_head), std::move(bp_data), data_len,
                boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred));
}
// Data-read completion: decode the full reply (header + payload), dispatch
// it to the waiting request, then decide whether to keep the reader state
// machine running.
void CacheClient::handle_reply_data(bufferptr bp_head,
                                    bufferptr bp_data,
                                    const uint64_t data_len,
                                    const boost::system::error_code& ec,
                                    size_t bytes_transferred) {
  ldout(m_cct, 20) << dendl;
  if (ec || bytes_transferred != data_len) {
    // This is a read completion, so report a read fault.  It previously
    // passed ASIO_ERROR_WRITE, which fault() treats as impossible on the
    // client and answers with ceph_assert(0) - crashing on any read error.
    fault(ASIO_ERROR_READ, ec);
    return;
  }
  ceph_assert(bp_data.length() == data_len);

  bufferlist data_buffer;
  data_buffer.append(std::move(bp_head));
  data_buffer.append(std::move(bp_data));

  ObjectCacheRequest* reply = decode_object_cache_request(data_buffer);
  data_buffer.clear();
  ceph_assert(data_buffer.length() == 0);

  process(reply, reply->seq);

  {
    std::lock_guard locker{m_lock};
    // NOTE(review): this stops the reader when no requests are outstanding
    // but bytes are still queued for sending - presumably to let the writer
    // restart reading; confirm the intended condition.
    if (m_seq_to_req.size() == 0 && m_outcoming_bl.length()) {
      m_reading.store(false);
      return;
    }
  }
  if (is_session_work()) {
    receive_message();
  }
}
// Match a decoded reply to its pending request by sequence id, then run the
// request's callback - on a dedicated worker thread when a pool is
// configured, otherwise inline on the I/O thread.
void CacheClient::process(ObjectCacheRequest* reply, uint64_t seq_id) {
  ldout(m_cct, 20) << dendl;
  ObjectCacheRequest* current_request = nullptr;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_seq_to_req.find(seq_id) != m_seq_to_req.end());
    current_request = m_seq_to_req[seq_id];
    m_seq_to_req.erase(seq_id);
  }

  ceph_assert(current_request != nullptr);
  auto process_reply = new LambdaContext([current_request, reply]
    (bool dedicated) {
     if (dedicated) {
       // dedicated thrad to execute this context.
     }
     // release() hands ownership of the callback to complete(); the request
     // and reply objects are freed here once the callback has run.
     current_request->process_msg.release()->complete(reply);
     delete current_request;
     delete reply;
  });

  if (m_worker_thread_num != 0) {
    m_worker->post([process_reply]() {
      process_reply->complete(true);
    });
  } else {
    process_reply->complete(false);
  }
}
// if there is one request fails, just execute fault, then shutdown RO.
// Central error handler: on any asio failure, shut down the session and
// re-dispatch every pending lookup to RADOS (by rewriting its type to
// RBDSC_READ_RADOS and completing it).  Connect errors are handled specially
// because no session exists yet.
void CacheClient::fault(const int err_type,
                        const boost::system::error_code& ec) {
  ldout(m_cct, 20) << "fault." << ec.message() << dendl;

  if (err_type == ASIO_ERROR_CONNECT) {
     ceph_assert(!m_session_work.load());
     if (ec == boost::asio::error::connection_refused) {
       ldout(m_cct, 20) << "Connecting RO daenmon fails : "<< ec.message()
                        << ". Immutable-object-cache daemon is down ? "
                        << "Data will be read from ceph cluster " << dendl;
     } else {
       ldout(m_cct, 20) << "Connecting RO daemon fails : "
                        << ec.message() << dendl;
     }

     if (m_dm_socket.is_open()) {
       // Set to indicate what error occurred, if any.
       // Note that, even if the function indicates an error,
       // the underlying descriptor is closed.
       boost::system::error_code close_ec;
       m_dm_socket.close(close_ec);
       if (close_ec) {
         ldout(m_cct, 20) << "close: " << close_ec.message() << dendl;
       }
    }
    return;
  }

  if (!m_session_work.load()) {
    return;
  }

  /* when current session doesn't work, ASIO won't receive any new request
   * from hook. On the other hand, for pending requests in ASIO, cancel
   * them, then call their callbacks. Requests cancelled by this method
   * will be re-dispatched to the RADOS layer.
   * Make sure just one thread executes the code below. */
  m_session_work.store(false);

  if (err_type == ASIO_ERROR_MSG_INCOMPLETE) {
     ldout(m_cct, 20) << "ASIO In-complete message." << ec.message() << dendl;
     ceph_assert(0);
  }

  if (err_type == ASIO_ERROR_READ) {
     ldout(m_cct, 20) << "ASIO async read fails : " << ec.message() << dendl;
  }

  if (err_type == ASIO_ERROR_WRITE) {
     ldout(m_cct, 20) << "ASIO asyn write fails : " << ec.message() << dendl;
     // CacheClient should not occur this error.
     ceph_assert(0);
  }

  // currently, for any asio error, just shutdown RO.
  close();

  /* all pending request, which have entered into ASIO,
   * will be re-dispatched to RADOS.*/
  {
    std::lock_guard locker{m_lock};
    for (auto it : m_seq_to_req) {
      it.second->type = RBDSC_READ_RADOS;
      it.second->process_msg->complete(it.second);
    }
    m_seq_to_req.clear();
  }

  ldout(m_cct, 20) << "Because ASIO domain socket fails, just shutdown RO.\
                      Later all reading will be re-dispatched RADOS layer"
                   << ec.message() << dendl;
}
// TODO : re-implement this method
int CacheClient::register_client(Context* on_finish) {
ObjectCacheRequest* reg_req = new ObjectCacheRegData(RBDSC_REGISTER,
m_sequence_id++,
ceph_version_to_str());
reg_req->encode();
bufferlist bl;
bl.append(reg_req->get_payload_bufferlist());
uint64_t ret;
boost::system::error_code ec;
ret = boost::asio::write(m_dm_socket,
boost::asio::buffer(bl.c_str(), bl.length()), ec);
if (ec || ret != bl.length()) {
fault(ASIO_ERROR_WRITE, ec);
return -1;
}
delete reg_req;
ret = boost::asio::read(m_dm_socket,
boost::asio::buffer(m_bp_header.c_str(), get_header_size()), ec);
if (ec || ret != get_header_size()) {
fault(ASIO_ERROR_READ, ec);
return -1;
}
uint64_t data_len = get_data_len(m_bp_header.c_str());
bufferptr bp_data(buffer::create(data_len));
ret = boost::asio::read(m_dm_socket, boost::asio::buffer(bp_data.c_str(),
data_len), ec);
if (ec || ret != data_len) {
fault(ASIO_ERROR_READ, ec);
return -1;
}
bufferlist data_buffer;
data_buffer.append(m_bp_header);
data_buffer.append(std::move(bp_data));
ObjectCacheRequest* req = decode_object_cache_request(data_buffer);
if (req->type == RBDSC_REGISTER_REPLY) {
m_session_work.store(true);
on_finish->complete(0);
} else {
on_finish->complete(-1);
}
delete req;
return 0;
}
} // namespace immutable_obj_cache
} // namespace ceph
| 13,486 | 29.933486 | 90 |
cc
|
null |
ceph-main/src/tools/immutable_object_cache/CacheClient.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_CACHE_CLIENT_H
#define CEPH_CACHE_CACHE_CLIENT_H
#include <atomic>
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include <boost/algorithm/string.hpp>
#include "include/ceph_assert.h"
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "Types.h"
#include "SocketCommon.h"
using boost::asio::local::stream_protocol;
namespace ceph {
namespace immutable_obj_cache {
// Client side of the immutable-object-cache protocol: connects to the
// daemon over a unix domain socket, registers, and performs async object
// lookups.  Outgoing requests are queued in m_outcoming_bl and matched to
// replies by sequence id via m_seq_to_req.
class CacheClient {
 public:
  CacheClient(const std::string& file, CephContext* ceph_ctx);
  ~CacheClient();
  // Start the I/O thread driving the asio loop.
  void run();
  // True after a successful register handshake (and before any fault).
  bool is_session_work();
  // Close the domain socket.
  void close();
  // Stop the I/O thread and worker pool.
  int stop();
  // Connect to the daemon (sync wrapper / async with completion context).
  int connect();
  void connect(Context* on_finish);
  // Queue an async object lookup; on_finish fires when the reply arrives.
  void lookup_object(std::string pool_nspace, uint64_t pool_id,
                     uint64_t snap_id, uint64_t object_size, std::string oid,
                     CacheGenContextURef&& on_finish);
  // Synchronous register handshake with the daemon.
  int register_client(Context* on_finish);

 private:
  // Writer state machine (send queued bytes; try_send starts it if idle).
  void send_message();
  void try_send();
  // Error handling: shut down the session and redirect lookups to RADOS.
  void fault(const int err_type, const boost::system::error_code& err);
  void handle_connect(Context* on_finish, const boost::system::error_code& err);
  // Reader state machine (header then payload; try_receive starts it).
  void try_receive();
  void receive_message();
  void process(ObjectCacheRequest* reply, uint64_t seq_id);
  void read_reply_header();
  void handle_reply_header(bufferptr bp_head,
                           const boost::system::error_code& ec,
                           size_t bytes_transferred);
  void read_reply_data(bufferptr&& bp_head, bufferptr&& bp_data,
                      const uint64_t data_len);
  void handle_reply_data(bufferptr bp_head, bufferptr bp_data,
                        const uint64_t data_len,
                        const boost::system::error_code& ec,
                        size_t bytes_transferred);

 private:
  CephContext* m_cct;
  boost::asio::io_service m_io_service;
  // Work guard: keeps m_io_service.run() alive while idle.
  boost::asio::io_service::work m_io_service_work;
  stream_protocol::socket m_dm_socket;
  stream_protocol::endpoint m_ep;
  std::shared_ptr<std::thread> m_io_thread;
  // Session usable flag, set by register_client, cleared on fault/stop.
  std::atomic<bool> m_session_work;

  // Optional dedicated worker pool for running reply callbacks.
  uint64_t m_worker_thread_num;
  boost::asio::io_service* m_worker;
  std::vector<std::thread*> m_worker_threads;
  boost::asio::io_service::work* m_worker_io_service_work;

  // Single-writer / single-reader state machine flags.
  std::atomic<bool> m_writing;
  std::atomic<bool> m_reading;
  std::atomic<uint64_t> m_sequence_id;
  ceph::mutex m_lock =
    ceph::make_mutex("ceph::cache::cacheclient::m_lock");
  // Guarded by m_lock: in-flight requests by seq id, and queued wire bytes.
  std::map<uint64_t, ObjectCacheRequest*> m_seq_to_req;
  bufferlist m_outcoming_bl;
  bufferptr m_bp_header;
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_CACHE_CLIENT_H
| 2,677 | 30.505882 | 80 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/CacheController.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "CacheController.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_immutable_obj_cache
#undef dout_prefix
#define dout_prefix *_dout << "ceph::cache::CacheController: " << this << " " \
<< __func__ << ": "
namespace ceph {
namespace immutable_obj_cache {
// Construct the daemon controller; "args" are kept for later use and no
// resources are acquired until init()/run().
CacheController::CacheController(CephContext *cct,
                                 const std::vector<const char*> &args):
  m_args(args), m_cct(cct) {
  ldout(m_cct, 20) << dendl;
}
// Destructor: release the server and the backing object-cache store.
CacheController::~CacheController() {
  delete m_cache_server;
  delete m_object_cache_store;
}
// Initialize the backing ObjectCacheStore and its cache.  Returns 0 on
// success, a negative error code otherwise.
int CacheController::init() {
  ldout(m_cct, 20) << dendl;

  m_object_cache_store = new ObjectCacheStore(m_cct);
  // TODO(dehao): make this configurable
  int r = m_object_cache_store->init(true);
  if (r < 0) {
    lderr(m_cct) << "init error\n" << dendl;
    return r;
  }

  r = m_object_cache_store->init_cache();
  if (r < 0) {
    lderr(m_cct) << "init error\n" << dendl;
  }

  return r;
}
// Stop the cache server (if one was created) and shut down the store.
// Returns 0 on success, a negative error code on the first failure.
int CacheController::shutdown() {
  ldout(m_cct, 20) << dendl;

  int r;
  if (m_cache_server != nullptr) {
    r = m_cache_server->stop();
    if (r < 0) {
      lderr(m_cct) << "stop error\n" << dendl;
      return r;
    }
  }

  r = m_object_cache_store->shutdown();
  if (r < 0) {
    lderr(m_cct) << "stop error\n" << dendl;
    return r;
  }

  return r;
}
// Signal handler hook: any delivered signal triggers a full shutdown.
void CacheController::handle_signal(int signum) {
  shutdown();
}
// Main loop: bind a CacheServer to the configured domain-socket path (any
// stale socket file is removed first) and serve requests until stopped.
// Returns 0 on clean exit, a negative error code on failure.
int CacheController::run() {
  try {
    std::string controller_path =
      m_cct->_conf.get_val<std::string>("immutable_object_cache_sock");
    if (controller_path.empty()) {
      lderr(m_cct) << "'immutable_object_cache_sock' path not set" << dendl;
      return -EINVAL;
    }

    // Remove a leftover socket file so bind() doesn't fail with EADDRINUSE.
    std::remove(controller_path.c_str());

    m_cache_server = new CacheServer(m_cct, controller_path,
      std::bind(&CacheController::handle_request, this,
                std::placeholders::_1, std::placeholders::_2));

    int ret = m_cache_server->run();
    if (ret != 0) {
      return ret;
    }

    return 0;
  } catch (std::exception& e) {
    lderr(m_cct) << "Exception: " << e.what() << dendl;
    return -EFAULT;
  }
}
// Dispatch one decoded client message: answer REGISTER with a register
// reply (recording the client's version on the session), and answer READ by
// looking the object up in the local store - replying with either the cache
// path (hit / known-DNE) or a redirect to RADOS.  Unknown message types are
// fatal.
void CacheController::handle_request(CacheSession* session,
                                     ObjectCacheRequest* req) {
  ldout(m_cct, 20) << dendl;

  switch (req->get_request_type()) {
    case RBDSC_REGISTER: {
      // TODO(dehao): skip register and allow clients to lookup directly

      auto req_reg_data = reinterpret_cast <ObjectCacheRegData*> (req);
      session->set_client_version(req_reg_data->version);

      ObjectCacheRequest* reply = new ObjectCacheRegReplyData(
        RBDSC_REGISTER_REPLY, req->seq);
      session->send(reply);
      break;
    }
    case RBDSC_READ: {
      // lookup object in local cache store
      std::string cache_path;
      ObjectCacheReadData* req_read_data =
        reinterpret_cast <ObjectCacheReadData*> (req);
      // Old (version-less) clients cannot handle DNE paths.
      bool return_dne_path = session->client_version().empty();
      int ret = m_object_cache_store->lookup_object(
        req_read_data->pool_namespace, req_read_data->pool_id,
        req_read_data->snap_id, req_read_data->object_size,
        req_read_data->oid, return_dne_path, cache_path);
      ObjectCacheRequest* reply = nullptr;
      if (ret != OBJ_CACHE_PROMOTED && ret != OBJ_CACHE_DNE) {
        reply = new ObjectCacheReadRadosData(RBDSC_READ_RADOS, req->seq);
      } else {
        reply = new ObjectCacheReadReplyData(RBDSC_READ_REPLY,
                                             req->seq, cache_path);
      }
      session->send(reply);
      break;
    }
    default:
      // typo fix in log message: "recongize" -> "recognize"
      ldout(m_cct, 5) << "can't recognize request" << dendl;
      ceph_assert(0);
  }
}
} // namespace immutable_obj_cache
} // namespace ceph
| 3,854 | 26.535714 | 79 |
cc
|
null |
ceph-main/src/tools/immutable_object_cache/CacheController.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_CACHE_CONTROLLER_H
#define CEPH_CACHE_CACHE_CONTROLLER_H
#include "common/ceph_context.h"
#include "common/WorkQueue.h"
#include "CacheServer.h"
#include "ObjectCacheStore.h"
namespace ceph {
namespace immutable_obj_cache {
// Top-level daemon object: owns the domain-socket CacheServer and the
// backing ObjectCacheStore, and routes decoded client messages between them.
class CacheController {
 public:
  CacheController(CephContext *cct, const std::vector<const char*> &args);
  ~CacheController();

  // Initialize / tear down the backing store.
  int init();
  int shutdown();
  // Signal hook - any signal triggers shutdown().
  void handle_signal(int sinnum);
  // Bind the server socket and serve until stopped.
  int run();
  // Per-message dispatcher invoked by the server for each decoded request.
  void handle_request(CacheSession* session, ObjectCacheRequest* msg);

 private:
  CacheServer *m_cache_server = nullptr;
  std::vector<const char*> m_args;
  CephContext *m_cct;
  ObjectCacheStore *m_object_cache_store = nullptr;
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_CACHE_CONTROLLER_H
| 894 | 20.829268 | 74 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/CacheServer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/bind/bind.hpp>
#include "common/debug.h"
#include "common/ceph_context.h"
#include "CacheServer.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_immutable_obj_cache
#undef dout_prefix
#define dout_prefix *_dout << "ceph::cache::CacheServer: " << this << " " \
<< __func__ << ": "
namespace ceph {
namespace immutable_obj_cache {
// Construct the server; "file" is the domain-socket path to listen on and
// "processmsg" the callback invoked for every decoded client request.
CacheServer::CacheServer(CephContext* cct, const std::string& file,
                         ProcessMsg processmsg)
  : cct(cct), m_server_process_msg(processmsg),
    m_local_path(file), m_acceptor(m_io_service) {}
// Destructor: stop the asio event loop.
CacheServer::~CacheServer() {
  stop();
}
// Start accepting connections and block running the asio loop until the
// server is stopped.  Returns 0 on clean exit, non-zero on error.
int CacheServer::run() {
  ldout(cct, 20) << dendl;

  int ret = start_accept();
  if (ret != 0) {
    return ret;
  }

  boost::system::error_code ec;
  ret = m_io_service.run(ec);
  if (ec) {
    ldout(cct, 1) << "m_io_service run fails: " << ec.message() << dendl;
    return -1;
  }
  return 0;
}
// Stop the asio loop; run() then returns.  Always returns 0.
int CacheServer::stop() {
  m_io_service.stop();
  return 0;
}
// Open, bind, and listen on the domain socket, then queue the first async
// accept.  Returns 0 on success, the negated asio error value on failure.
int CacheServer::start_accept() {
  ldout(cct, 20) << dendl;

  boost::system::error_code ec;
  m_acceptor.open(m_local_path.protocol(), ec);
  if (ec) {
    lderr(cct) << "failed to open domain socket: " << ec.message() << dendl;
    return -ec.value();
  }

  m_acceptor.bind(m_local_path, ec);
  if (ec) {
    lderr(cct) << "failed to bind to domain socket '"
               << m_local_path << "': " << ec.message() << dendl;
    return -ec.value();
  }

  m_acceptor.listen(boost::asio::socket_base::max_connections, ec);
  if (ec) {
    lderr(cct) << "failed to listen on domain socket: " << ec.message()
               << dendl;
    return -ec.value();
  }

  accept();
  return 0;
}
// Queue one async accept with a freshly created session; the session is kept
// alive by the shared_ptr captured in the completion handler.
void CacheServer::accept() {
  CacheSessionPtr new_session = nullptr;

  new_session.reset(new CacheSession(m_io_service,
                    m_server_process_msg, cct));

  m_acceptor.async_accept(new_session->socket(),
      boost::bind(&CacheServer::handle_accept, this, new_session,
        boost::asio::placeholders::error));
}
// Accept completion: on success start the new session's read loop and queue
// the next accept; on error (e.g. operation aborted) just log and return.
void CacheServer::handle_accept(CacheSessionPtr new_session,
                                const boost::system::error_code& error) {
  ldout(cct, 20) << dendl;

  if (error) {
    // operation aborted (e.g. the acceptor was closed)
    lderr(cct) << "async accept fails : " << error.message() << dendl;
    return;
  }

  // TODO(dehao) : session setting
  new_session->start();

  // launch the next accept
  accept();
}
} // namespace immutable_obj_cache
} // namespace ceph
| 2,593 | 23.242991 | 76 |
cc
|
null |
ceph-main/src/tools/immutable_object_cache/CacheServer.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_CACHE_SERVER_H
#define CEPH_CACHE_CACHE_SERVER_H
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include "Types.h"
#include "SocketCommon.h"
#include "CacheSession.h"
using boost::asio::local::stream_protocol;
namespace ceph {
namespace immutable_obj_cache {
// Accepts client connections on a unix domain socket and spawns a
// CacheSession per connection; decoded requests are forwarded to the
// ProcessMsg callback supplied by the controller.
class CacheServer {
 public:
  CacheServer(CephContext* cct, const std::string& file, ProcessMsg processmsg);
  ~CacheServer();

  // Block running the accept/serve loop until stop() is called.
  int run();
  // Open/bind/listen on the socket and queue the first accept.
  int start_accept();
  // Stop the asio loop.
  int stop();

 private:
  // Queue one async accept and its completion handler.
  void accept();
  void handle_accept(CacheSessionPtr new_session,
                     const boost::system::error_code& error);

 private:
  CephContext* cct;
  boost::asio::io_service m_io_service;
  // Controller callback invoked for each decoded request.
  ProcessMsg m_server_process_msg;
  stream_protocol::endpoint m_local_path;
  stream_protocol::acceptor m_acceptor;
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif
| 969 | 20.086957 | 80 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/CacheSession.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/bind/bind.hpp>
#include "common/debug.h"
#include "common/ceph_context.h"
#include "CacheSession.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_immutable_obj_cache
#undef dout_prefix
#define dout_prefix *_dout << "ceph::cache::CacheSession: " << this << " " \
<< __func__ << ": "
namespace ceph {
namespace immutable_obj_cache {
// Construct a per-connection session; allocates the reusable fixed-size
// header buffer used by the read loop.
CacheSession::CacheSession(io_service& io_service,
                           ProcessMsg processmsg,
                           CephContext* cct)
    : m_dm_socket(io_service),
      m_server_process_msg(processmsg), m_cct(cct) {
    m_bp_header = buffer::create(get_header_size());
}
// Destructor: ensure the socket is closed.
CacheSession::~CacheSession() {
  close();
}
// Accessor for the underlying socket (used by the acceptor).
stream_protocol::socket& CacheSession::socket() {
  return m_dm_socket;
}
// Record the client's ceph version string (from its register message).
void CacheSession::set_client_version(const std::string &version) {
  m_client_version = version;
}
// The client's recorded version string; empty for old, version-less clients.
const std::string &CacheSession::client_version() const {
  return m_client_version;
}
// Close the socket if open; close() errors are only logged (the descriptor
// is closed regardless, per asio).
void CacheSession::close() {
  if (m_dm_socket.is_open()) {
    boost::system::error_code close_ec;
    m_dm_socket.close(close_ec);
    if (close_ec) {
       ldout(m_cct, 20) << "close: " << close_ec.message() << dendl;
    }
  }
}
// Kick off the per-session read loop (header -> data -> process -> repeat).
void CacheSession::start() {
  read_request_header();
}
// Asynchronously read exactly one fixed-size request header into the
// session's reusable header buffer.  shared_from_this() keeps the session
// alive while the read is in flight.
void CacheSession::read_request_header() {
  ldout(m_cct, 20) << dendl;
  boost::asio::async_read(m_dm_socket,
    boost::asio::buffer(m_bp_header.c_str(), get_header_size()),
    boost::asio::transfer_exactly(get_header_size()),
    boost::bind(&CacheSession::handle_request_header,
                shared_from_this(), boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred));
}
// Header-read completion: on success, extract the payload length and chain
// into the data read; on error, record the fault (the read loop stops).
void CacheSession::handle_request_header(const boost::system::error_code& err,
                                         size_t bytes_transferred) {
  ldout(m_cct, 20) << dendl;
  if (err || bytes_transferred != get_header_size()) {
    fault(err);
    return;
  }

  read_request_data(get_data_len(m_bp_header.c_str()));
}
// Asynchronously read exactly data_len payload bytes into a fresh buffer.
void CacheSession::read_request_data(uint64_t data_len) {
  ldout(m_cct, 20) << dendl;
  bufferptr bp_data(buffer::create(data_len));
  boost::asio::async_read(m_dm_socket,
    boost::asio::buffer(bp_data.c_str(), bp_data.length()),
    boost::asio::transfer_exactly(data_len),
    boost::bind(&CacheSession::handle_request_data,
                shared_from_this(), bp_data, data_len,
                boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred));
}
// Data-read completion: assemble header + payload, decode the request,
// dispatch it, then loop back to wait for the next request header.
void CacheSession::handle_request_data(bufferptr bp, uint64_t data_len,
                                       const boost::system::error_code& err,
                                       size_t bytes_transferred) {
  ldout(m_cct, 20) << dendl;
  if (err || bytes_transferred != data_len) {
    fault(err);
    return;
  }

  bufferlist bl_data;

  bl_data.append(m_bp_header);
  bl_data.append(std::move(bp));

  ObjectCacheRequest* req = decode_object_cache_request(bl_data);

  process(req);
  delete req;
  read_request_header();
}
// Hand the decoded request to the controller's dispatcher callback.
void CacheSession::process(ObjectCacheRequest* req) {
  ldout(m_cct, 20) << dendl;
  m_server_process_msg(this, req);
}
// Encode and asynchronously write one reply.  The lambda captures bl by
// copy to keep the bytes alive during the write, and takes ownership of
// "reply", deleting it when the write completes (success or failure).
void CacheSession::send(ObjectCacheRequest* reply) {
  ldout(m_cct, 20) << dendl;
  bufferlist bl;
  reply->encode();
  bl.append(reply->get_payload_bufferlist());

  boost::asio::async_write(m_dm_socket,
    boost::asio::buffer(bl.c_str(), bl.length()),
    boost::asio::transfer_exactly(bl.length()),
    [this, bl, reply](const boost::system::error_code& err,
                      size_t bytes_transferred) {
      delete reply;
      if (err || bytes_transferred != bl.length()) {
        fault(err);
        return;
      }
    });
}
// Error sink for the session's async operations: currently log-only; the
// session's read loop simply stops re-arming after a fault.
void CacheSession::fault(const boost::system::error_code& ec) {
  ldout(m_cct, 20) << "session fault : " << ec.message() << dendl;
}
} // namespace immutable_obj_cache
} // namespace ceph
| 3,992 | 27.319149 | 78 |
cc
|
null |
ceph-main/src/tools/immutable_object_cache/CacheSession.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_SESSION_H
#define CEPH_CACHE_SESSION_H
#include <boost/asio.hpp>
#include <boost/asio/error.hpp>
#include "Types.h"
#include "SocketCommon.h"
using boost::asio::local::stream_protocol;
using boost::asio::io_service;
namespace ceph {
namespace immutable_obj_cache {
// One per-client connection on the cache daemon's unix-domain socket.
// Runs an async read loop (header -> payload -> dispatch -> header ...);
// enable_shared_from_this keeps the session alive across async callbacks.
class CacheSession : public std::enable_shared_from_this<CacheSession> {
 public:
  CacheSession(io_service& io_service, ProcessMsg process_msg,
               CephContext* ctx);
  ~CacheSession();
  stream_protocol::socket& socket();
  void close();
  // Kick off the read loop for this session.
  void start();
  void read_request_header();
  void handle_request_header(const boost::system::error_code& err,
                             size_t bytes_transferred);
  void read_request_data(uint64_t data_len);
  void handle_request_data(bufferptr bp, uint64_t data_len,
                           const boost::system::error_code& err,
                           size_t bytes_transferred);
  // Forward a decoded request to the server callback.
  void process(ObjectCacheRequest* req);
  void fault(const boost::system::error_code& ec);
  // Asynchronously send a reply; takes ownership of msg.
  void send(ObjectCacheRequest* msg);

  void set_client_version(const std::string &version);
  const std::string &client_version() const;

 private:
  stream_protocol::socket m_dm_socket;   // unix-domain socket to the client
  ProcessMsg m_server_process_msg;       // server-side request dispatcher
  CephContext* m_cct;
  std::string m_client_version;          // reported during registration
  bufferptr m_bp_header;                 // scratch buffer for the fixed header
};
typedef std::shared_ptr<CacheSession> CacheSessionPtr;
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_SESSION_H
| 1,578 | 26.701754 | 72 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/ObjectCacheStore.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ObjectCacheStore.h"
#include "Utils.h"
#include <filesystem>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_immutable_obj_cache
#undef dout_prefix
#define dout_prefix *_dout << "ceph::cache::ObjectCacheStore: " << this << " " \
<< __func__ << ": "
namespace fs = std::filesystem;
namespace ceph {
namespace immutable_obj_cache {
namespace {
// Process-wide timer used by the QoS throttles; owns its lock and
// initializes/shuts the timer down via RAII so lookup_or_create_singleton
// callers need no explicit lifecycle management.
class SafeTimerSingleton : public CommonSafeTimer<ceph::mutex> {
 public:
  ceph::mutex lock = ceph::make_mutex
    ("ceph::immutable_object_cache::SafeTimerSingleton::lock");

  explicit SafeTimerSingleton(CephContext *cct)
      : CommonSafeTimer(cct, lock, true) {
    init();
  }
  ~SafeTimerSingleton() {
    std::lock_guard locker{lock};
    shutdown();
  }
};
} // anonymous namespace
// Bit flags identifying which QoS throttle a token grant or config value
// applies to (also used as keys into m_throttles).
enum ThrottleTargetCode {
  ROC_QOS_IOPS_THROTTLE = 1,
  ROC_QOS_BPS_THROTTLE = 2
};
// Reads cache-store configuration (root path, size limit, watermark,
// inflight cap) and optional IOPS/BPS QoS limits, wiring up one throttle per
// enabled limit, then builds the eviction policy.
ObjectCacheStore::ObjectCacheStore(CephContext *cct)
      : m_cct(cct), m_rados(new librados::Rados()) {
  m_cache_root_dir =
    m_cct->_conf.get_val<std::string>("immutable_object_cache_path");

  // Normalize so path concatenation below can blindly append names.
  if (m_cache_root_dir.back() != '/') {
    m_cache_root_dir += "/";
  }

  uint64_t cache_max_size =
    m_cct->_conf.get_val<Option::size_t>("immutable_object_cache_max_size");

  double cache_watermark =
    m_cct->_conf.get_val<double>("immutable_object_cache_watermark");

  uint64_t max_inflight_ops =
    m_cct->_conf.get_val<uint64_t>("immutable_object_cache_max_inflight_ops");

  uint64_t limit = 0;
  // A limit of 0 means the corresponding throttle is disabled.
  if ((limit = m_cct->_conf.get_val<uint64_t>
               ("immutable_object_cache_qos_iops_limit")) != 0) {
    apply_qos_tick_and_limit(ROC_QOS_IOPS_THROTTLE,
                             m_cct->_conf.get_val<std::chrono::milliseconds>
                               ("immutable_object_cache_qos_schedule_tick_min"),
                             limit,
                             m_cct->_conf.get_val<uint64_t>
                               ("immutable_object_cache_qos_iops_burst"),
                             m_cct->_conf.get_val<std::chrono::seconds>
                               ("immutable_object_cache_qos_iops_burst_seconds"));
  }
  if ((limit = m_cct->_conf.get_val<uint64_t>
               ("immutable_object_cache_qos_bps_limit")) != 0) {
    apply_qos_tick_and_limit(ROC_QOS_BPS_THROTTLE,
                             m_cct->_conf.get_val<std::chrono::milliseconds>
                               ("immutable_object_cache_qos_schedule_tick_min"),
                             limit,
                             m_cct->_conf.get_val<uint64_t>
                               ("immutable_object_cache_qos_bps_burst"),
                             m_cct->_conf.get_val<std::chrono::seconds>
                               ("immutable_object_cache_qos_bps_burst_seconds"));
  }

  // Guard against nonsensical configuration; fall back to the 0.9 default.
  if ((cache_watermark <= 0) || (cache_watermark > 1)) {
    lderr(m_cct) << "Invalid water mark provided, set it to default." << dendl;
    cache_watermark = 0.9;
  }
  m_policy = new SimplePolicy(m_cct, cache_max_size, max_inflight_ops,
                              cache_watermark);
}
// Releases the policy and any throttles that were created for enabled QoS
// flags; m_qos_enabled_flag records exactly which entries exist.
ObjectCacheStore::~ObjectCacheStore() {
  delete m_policy;
  if (m_qos_enabled_flag & ROC_QOS_IOPS_THROTTLE) {
    ceph_assert(m_throttles[ROC_QOS_IOPS_THROTTLE] != nullptr);
    delete m_throttles[ROC_QOS_IOPS_THROTTLE];
  }
  if (m_qos_enabled_flag & ROC_QOS_BPS_THROTTLE) {
    ceph_assert(m_throttles[ROC_QOS_BPS_THROTTLE] != nullptr);
    delete m_throttles[ROC_QOS_BPS_THROTTLE];
  }
}
// Connect to the cluster and (when `reset` is set) wipe or create the cache
// directory tree on disk. Returns 0 on success, a negative errno otherwise.
int ObjectCacheStore::init(bool reset) {
  ldout(m_cct, 20) << dendl;

  int ret = m_rados->init_with_context(m_cct);
  if (ret < 0) {
    lderr(m_cct) << "fail to init Ceph context" << dendl;
    return ret;
  }

  ret = m_rados->connect();
  if (ret < 0) {
    lderr(m_cct) << "fail to connect to cluster" << dendl;
    return ret;
  }

  // TODO(dehao): fsck and reuse existing cache objects
  if (reset) {
    try {
      if (fs::exists(m_cache_root_dir)) {
        // remove all sub folders
        for (auto& p : fs::directory_iterator(m_cache_root_dir)) {
          fs::remove_all(p.path());
        }
      } else {
        fs::create_directories(m_cache_root_dir);
      }
    } catch (const fs::filesystem_error& e) {
      lderr(m_cct) << "failed to initialize cache store directory: "
                   << e.what() << dendl;
      // map the std::error_code back to a negative errno-style return
      return -e.code().value();
    }
  }
  return 0;
}
// Tear down the RADOS connection. Always succeeds.
int ObjectCacheStore::shutdown() {
  ldout(m_cct, 20) << dendl;

  m_rados->shutdown();
  return 0;
}
// Placeholder for loading pre-existing cache entries from disk; currently a
// no-op that always succeeds. (Dropped the unused local `cache_dir` copy of
// m_cache_root_dir, which served no purpose.)
int ObjectCacheStore::init_cache() {
  ldout(m_cct, 20) << dendl;
  return 0;
}
// Start an async promotion of one RADOS object into the local cache.
// Creates (and memoizes) an IoCtx for the pool, then issues an async read;
// handle_promote_callback() finishes the promotion. Returns 0 if the read
// was issued, a negative errno otherwise.
int ObjectCacheStore::do_promote(std::string pool_nspace, uint64_t pool_id,
                                 uint64_t snap_id, std::string object_name) {
  ldout(m_cct, 20) << "to promote object: " << object_name
                   << " from pool id: " << pool_id
                   << " namespace: " << pool_nspace
                   << " snapshot: " << snap_id << dendl;

  int ret = 0;
  std::string cache_file_name =
    get_cache_file_name(pool_nspace, pool_id, snap_id, object_name);
  librados::IoCtx ioctx;
  {
    // IoCtx creation is expensive; cache one per pool id.
    std::lock_guard _locker{m_ioctx_map_lock};
    if (m_ioctx_map.find(pool_id) == m_ioctx_map.end()) {
      ret = m_rados->ioctx_create2(pool_id, ioctx);
      if (ret < 0) {
        lderr(m_cct) << "fail to create ioctx" << dendl;
        return ret;
      }
      m_ioctx_map.emplace(pool_id, ioctx);
    } else {
      ioctx = m_ioctx_map[pool_id];
    }
  }
  // NOTE(review): namespace/snap are set on the local IoCtx copy per request;
  // presumably IoCtx copies share the connection but not this state — confirm.
  ioctx.set_namespace(pool_nspace);
  ioctx.snap_set_read(snap_id);

  // read_buf is owned by the completion context and freed in the callback.
  librados::bufferlist* read_buf = new librados::bufferlist();

  auto ctx = new LambdaContext([this, read_buf, cache_file_name](int ret) {
    handle_promote_callback(ret, read_buf, cache_file_name);
  });

  return promote_object(&ioctx, object_name, read_buf, ctx);
}
// Completion of an async promotion read. On success, writes the object data
// to its cache file and marks the entry PROMOTED (or DNE for a nonexistent
// object); on any failure the entry is rolled back to NONE so a later lookup
// can retry. Always frees read_buf and triggers an eviction pass at the end.
int ObjectCacheStore::handle_promote_callback(int ret, bufferlist* read_buf,
                                              std::string cache_file_name) {
  ldout(m_cct, 20) << " cache_file_name: " << cache_file_name << dendl;

  // rados read error
  if (ret != -ENOENT && ret < 0) {
    lderr(m_cct) << "fail to read from rados" << dendl;

    m_policy->update_status(cache_file_name, OBJ_CACHE_NONE);
    delete read_buf;
    return ret;
  }

  auto state = OBJ_CACHE_PROMOTED;
  if (ret == -ENOENT) {
    // object is empty
    state = OBJ_CACHE_DNE;
    ret = 0;
  } else {
    // mkdir=true: create the hashed subdirectory on demand
    std::string cache_file_path = get_cache_file_path(cache_file_name, true);
    if (cache_file_path == "") {
      lderr(m_cct) << "fail to write cache file" << dendl;
      m_policy->update_status(cache_file_name, OBJ_CACHE_NONE);
      delete read_buf;
      return -ENOSPC;
    }

    ret = read_buf->write_file(cache_file_path.c_str());
    if (ret < 0) {
      lderr(m_cct) << "fail to write cache file" << dendl;

      m_policy->update_status(cache_file_name, OBJ_CACHE_NONE);
      delete read_buf;
      return ret;
    }
  }

  // update metadata
  ceph_assert(OBJ_CACHE_SKIP == m_policy->get_status(cache_file_name));
  m_policy->update_status(cache_file_name, state, read_buf->length());
  ceph_assert(state == m_policy->get_status(cache_file_name));

  delete read_buf;

  // a new object may have pushed us past the watermark
  evict_objects();

  return ret;
}
// Look up an object in the cache and, on a miss, kick off promotion if the
// QoS throttles allow it. Returns the entry's cache_status_t (as int); on
// PROMOTED (and optionally DNE) also fills in the local cache file path.
int ObjectCacheStore::lookup_object(std::string pool_nspace, uint64_t pool_id,
                                    uint64_t snap_id, uint64_t object_size,
                                    std::string object_name,
                                    bool return_dne_path,
                                    std::string& target_cache_file_path) {
  ldout(m_cct, 20) << "object name = " << object_name
                   << " in pool ID : " << pool_id << dendl;

  int pret = -1;
  std::string cache_file_name =
    get_cache_file_name(pool_nspace, pool_id, snap_id, object_name);

  cache_status_t ret = m_policy->lookup_object(cache_file_name);

  switch (ret) {
    case OBJ_CACHE_NONE: {
      // miss: promote only if the iops/bps token buckets have room,
      // otherwise roll the entry back so the client reads from rados.
      if (take_token_from_throttle(object_size, 1)) {
        pret = do_promote(pool_nspace, pool_id, snap_id, object_name);
        if (pret < 0) {
          lderr(m_cct) << "fail to start promote" << dendl;
        }
      } else {
        m_policy->update_status(cache_file_name, OBJ_CACHE_NONE);
      }
      return ret;
    }
    case OBJ_CACHE_PROMOTED:
      target_cache_file_path = get_cache_file_path(cache_file_name);
      return ret;
    case OBJ_CACHE_DNE:
      // object does not exist in rados; path returned only on request
      if (return_dne_path) {
        target_cache_file_path = get_cache_file_path(cache_file_name);
      }
      return ret;
    case OBJ_CACHE_SKIP:
      return ret;
    default:
      lderr(m_cct) << "unrecognized object cache status" << dendl;
      ceph_assert(0);
  }
}
// Issue the async RADOS read that backs a promotion; on_finish fires with
// the read result. Returns the aio_read submission status.
int ObjectCacheStore::promote_object(librados::IoCtx* ioctx,
                                     std::string object_name,
                                     librados::bufferlist* read_buf,
                                     Context* on_finish) {
  ldout(m_cct, 20) << "object name = " << object_name << dendl;

  librados::AioCompletion* read_completion = create_rados_callback(on_finish);
  // issue a zero-sized read req to get the entire obj
  int ret = ioctx->aio_read(object_name, read_completion, read_buf, 0, 0);
  if (ret < 0) {
    lderr(m_cct) << "failed to read from rados" << dendl;
  }
  // the completion holds its own reference while the op is in flight
  read_completion->release();

  return ret;
}
// Run one eviction pass: ask the policy for its current eviction candidates
// and remove each one from disk and from the policy's metadata.
int ObjectCacheStore::evict_objects() {
  ldout(m_cct, 20) << dendl;

  std::list<std::string> candidates;
  m_policy->get_evict_list(&candidates);
  for (const auto& file_name : candidates) {
    do_evict(file_name);
  }
  return 0;
}
// Evict a single cache file: delete it from disk, and only if the unlink
// succeeded, retire its metadata in the policy. Empty names are a no-op.
int ObjectCacheStore::do_evict(std::string cache_file) {
  ldout(m_cct, 20) << "file = " << cache_file << dendl;
  if (cache_file.empty()) {
    return 0;
  }

  std::string cache_file_path = get_cache_file_path(cache_file);
  ldout(m_cct, 20) << "evict cache: " << cache_file_path << dendl;

  // TODO(dehao): possible race on read?
  int ret = std::remove(cache_file_path.c_str());
  if (ret != 0) {
    return ret;
  }

  // unlink succeeded: drop the policy metadata as well
  m_policy->update_status(cache_file, OBJ_CACHE_SKIP);
  m_policy->evict_entry(cache_file);
  return 0;
}
// Build the canonical cache key: "<namespace>:<pool id>:<snap id>:<object>".
std::string ObjectCacheStore::get_cache_file_name(std::string pool_nspace,
                                                  uint64_t pool_id,
                                                  uint64_t snap_id,
                                                  std::string oid) {
  std::string file_name = pool_nspace;
  file_name += ":";
  file_name += std::to_string(pool_id);
  file_name += ":";
  file_name += std::to_string(snap_id);
  file_name += ":";
  file_name += oid;
  return file_name;
}
// Map a cache key to its on-disk path. Files are spread over 100
// subdirectories chosen by crc32c(key) % 100. With mkdir=true the
// subdirectory is created on demand; returns "" if creation fails.
std::string ObjectCacheStore::get_cache_file_path(std::string cache_file_name,
                                                  bool mkdir) {
  ldout(m_cct, 20) << cache_file_name <<dendl;

  uint32_t crc = 0;
  crc = ceph_crc32c(0, (unsigned char *)cache_file_name.c_str(),
                    cache_file_name.length());

  std::string cache_file_dir = std::to_string(crc % 100) + "/";

  if (mkdir) {
    ldout(m_cct, 20) << "creating cache dir: " << cache_file_dir <<dendl;
    std::error_code ec;
    std::string new_dir = m_cache_root_dir + cache_file_dir;
    if (fs::exists(new_dir, ec)) {
      ldout(m_cct, 20) << "cache dir exists: " << cache_file_dir <<dendl;
      return new_dir + cache_file_name;
    }

    if (!fs::create_directories(new_dir, ec)) {
      ldout(m_cct, 5) << "fail to create cache dir: " << new_dir
                      << "error: " << ec.message() << dendl;
      return "";
    }
  }

  return m_cache_root_dir + cache_file_dir + cache_file_name;
}
// Called by a TokenBucketThrottle when previously-requested tokens become
// available: banks them into the matching counter so the next
// take_token_from_throttle() call can proceed.
// Fixed typo in the error message ("unknow" -> "unknown").
void ObjectCacheStore::handle_throttle_ready(uint64_t tokens, uint64_t type) {
  // NOTE(review): m_io_throttled is cleared before m_throttle_lock is taken
  // (plain bool, matching the original ordering) — confirm this unsynchronized
  // store is intentional.
  m_io_throttled = false;
  std::lock_guard lock(m_throttle_lock);
  if (type & ROC_QOS_IOPS_THROTTLE) {
    m_iops_tokens += tokens;
  } else if (type & ROC_QOS_BPS_THROTTLE) {
    m_bps_tokens += tokens;
  } else {
    lderr(m_cct) << "unknown throttle type." << dendl;
  }
}
// Try to take one object's worth of tokens from each enabled throttle.
// Returns true if the promotion may proceed now; false if either bucket is
// empty, in which case the throttle schedules handle_throttle_ready() and
// m_io_throttled suppresses further attempts until tokens arrive.
bool ObjectCacheStore::take_token_from_throttle(uint64_t object_size,
                                                uint64_t object_num) {
  if (m_io_throttled == true) {
    return false;
  }

  int flag = 0;  // set to 1 once iops tokens were consumed (for rollback)
  bool wait = false;
  if (!wait && (m_qos_enabled_flag & ROC_QOS_IOPS_THROTTLE)) {
    std::lock_guard lock(m_throttle_lock);
    if (object_num > m_iops_tokens) {
      // not enough banked tokens: ask the throttle to call us back
      wait = m_throttles[ROC_QOS_IOPS_THROTTLE]->get(object_num, this,
        &ObjectCacheStore::handle_throttle_ready, object_num,
        ROC_QOS_IOPS_THROTTLE);
    } else {
      m_iops_tokens -= object_num;
      flag = 1;
    }
  }
  if (!wait && (m_qos_enabled_flag & ROC_QOS_BPS_THROTTLE)) {
    std::lock_guard lock(m_throttle_lock);
    if (object_size > m_bps_tokens) {
      wait = m_throttles[ROC_QOS_BPS_THROTTLE]->get(object_size, this,
        &ObjectCacheStore::handle_throttle_ready, object_size,
        ROC_QOS_BPS_THROTTLE);
    } else {
      m_bps_tokens -= object_size;
    }
  }

  if (wait) {
    m_io_throttled = true;
    // when passing iops throttle, but limit in bps throttle, recovery
    if (flag == 1) {
      std::lock_guard lock(m_throttle_lock);
      m_iops_tokens += object_num;
    }
  }

  return !wait;
}
// Human-readable names for each throttle flag, used when constructing the
// TokenBucketThrottle instances (shows up in logs/perf counters).
static const std::map<uint64_t, std::string> THROTTLE_FLAGS = {
  { ROC_QOS_IOPS_THROTTLE, "roc_qos_iops_throttle" },
  { ROC_QOS_BPS_THROTTLE, "roc_qos_bps_throttle" }
};
// Create and register the TokenBucketThrottle for one QoS flag, backed by
// the process-wide SafeTimerSingleton. If the burst/limit combination is
// rejected (burst < limit), falls back to a plain limit with no burst.
void ObjectCacheStore::apply_qos_tick_and_limit(
    const uint64_t flag,
    std::chrono::milliseconds min_tick,
    uint64_t limit,
    uint64_t burst,
    std::chrono::seconds burst_seconds) {
  SafeTimerSingleton* safe_timer_singleton = nullptr;
  TokenBucketThrottle* throttle = nullptr;
  // shared singleton: created on first use, reused for every throttle
  safe_timer_singleton =
    &m_cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
      "tools::immutable_object_cache", false, m_cct);
  SafeTimer* timer = safe_timer_singleton;
  ceph::mutex* timer_lock = &safe_timer_singleton->lock;
  m_qos_enabled_flag |= flag;
  auto throttle_flags_it = THROTTLE_FLAGS.find(flag);
  ceph_assert(throttle_flags_it != THROTTLE_FLAGS.end());
  throttle = new TokenBucketThrottle(m_cct, throttle_flags_it->second,
                                     0, 0, timer, timer_lock);
  throttle->set_schedule_tick_min(min_tick.count());
  int ret = throttle->set_limit(limit, burst, burst_seconds.count());
  if (ret < 0) {
    lderr(m_cct) << throttle->get_name() << ": invalid qos parameter: "
                 << "burst(" << burst << ") is less than "
                 << "limit(" << limit << ")" << dendl;
    // if apply failed, we should at least make sure the limit works
    throttle->set_limit(limit, 0, 1);
  }
  ceph_assert(m_throttles.find(flag) == m_throttles.end());
  m_throttles.insert({flag, throttle});
}
} // namespace immutable_obj_cache
} // namespace ceph
| 14,441 | 30.25974 | 80 |
cc
|
null |
ceph-main/src/tools/immutable_object_cache/ObjectCacheStore.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_OBJECT_CACHE_STORE_H
#define CEPH_CACHE_OBJECT_CACHE_STORE_H
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "common/Throttle.h"
#include "common/Cond.h"
#include "include/rados/librados.hpp"
#include "SimplePolicy.h"
using librados::Rados;
using librados::IoCtx;
class Context;
namespace ceph {
namespace immutable_obj_cache {
typedef std::shared_ptr<librados::Rados> RadosRef;
typedef std::shared_ptr<librados::IoCtx> IoCtxRef;
// Daemon-side store for the immutable object cache: maps (namespace, pool,
// snap, object) to files under m_cache_root_dir, promotes objects from RADOS
// on miss, evicts via a pluggable Policy, and rate-limits promotions with
// optional IOPS/BPS token-bucket throttles.
// Fixed: removed stray double semicolon after the m_throttle_lock initializer.
class ObjectCacheStore {
 public:
  ObjectCacheStore(CephContext *cct);
  ~ObjectCacheStore();
  // Connect to the cluster; with reset=true, (re)create the cache dir tree.
  int init(bool reset);
  int shutdown();
  int init_cache();
  // Returns the entry's cache status; fills target_cache_file_path on hit
  // (and on DNE when return_dne_path is set).
  int lookup_object(std::string pool_nspace,
                    uint64_t pool_id, uint64_t snap_id,
                    uint64_t object_size,
                    std::string object_name,
                    bool return_dne_path,
                    std::string& target_cache_file_path);

 private:
  enum ThrottleTypeCode {
    THROTTLE_CODE_BYTE,
    THROTTLE_CODE_OBJECT
  };

  std::string get_cache_file_name(std::string pool_nspace, uint64_t pool_id,
                                  uint64_t snap_id, std::string oid);
  std::string get_cache_file_path(std::string cache_file_name,
                                  bool mkdir = false);
  int evict_objects();
  int do_promote(std::string pool_nspace, uint64_t pool_id,
                 uint64_t snap_id, std::string object_name);
  int promote_object(librados::IoCtx*, std::string object_name,
                     librados::bufferlist* read_buf,
                     Context* on_finish);
  int handle_promote_callback(int, bufferlist*, std::string);
  int do_evict(std::string cache_file);

  bool take_token_from_throttle(uint64_t object_size, uint64_t object_num);
  void handle_throttle_ready(uint64_t tokens, uint64_t type);
  void apply_qos_tick_and_limit(const uint64_t flag,
                                std::chrono::milliseconds min_tick,
                                uint64_t limit, uint64_t burst,
                                std::chrono::seconds burst_seconds);

  CephContext *m_cct;
  RadosRef m_rados;
  std::map<uint64_t, librados::IoCtx> m_ioctx_map;  // one IoCtx per pool id
  ceph::mutex m_ioctx_map_lock =
    ceph::make_mutex("ceph::cache::ObjectCacheStore::m_ioctx_map_lock");
  Policy* m_policy;            // eviction policy (owned)
  std::string m_cache_root_dir;

  // throttle mechanism
  uint64_t m_qos_enabled_flag{0};  // OR of enabled ThrottleTargetCode flags
  std::map<uint64_t, TokenBucketThrottle*> m_throttles;
  bool m_io_throttled{false};      // true while waiting for tokens
  ceph::mutex m_throttle_lock =
    ceph::make_mutex("ceph::cache::ObjectCacheStore::m_throttle_lock");
  uint64_t m_iops_tokens{0};       // banked tokens from throttle callbacks
  uint64_t m_bps_tokens{0};
};
} // namespace immutable_obj_cache
} // ceph
#endif // CEPH_CACHE_OBJECT_CACHE_STORE_H
| 2,857 | 32.232558 | 76 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/Policy.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_POLICY_H
#define CEPH_CACHE_POLICY_H
#include <list>
#include <string>
namespace ceph {
namespace immutable_obj_cache {
// Lifecycle states of a cached object entry:
//   NONE     - not tracked / free
//   PROMOTED - data is present in the local cache file
//   SKIP     - promotion in flight; callers should read from rados
//   DNE      - object does not exist in rados (negative cache)
typedef enum {
  OBJ_CACHE_NONE = 0,
  OBJ_CACHE_PROMOTED,
  OBJ_CACHE_SKIP,
  OBJ_CACHE_DNE,
} cache_status_t;
// Abstract eviction/admission policy for the object cache store.
// Implementations track per-file status transitions (see cache_status_t)
// and propose eviction candidates when capacity runs low.
class Policy {
 public:
  Policy() {}
  virtual ~Policy() {}
  // Query (and possibly start tracking) an entry; may trigger promotion.
  virtual cache_status_t lookup_object(std::string) = 0;
  // Retire an entry's metadata after its file has been removed.
  virtual int evict_entry(std::string) = 0;
  // Drive a status transition; `size` is the object size on promotion.
  virtual void update_status(std::string, cache_status_t,
                             uint64_t size = 0) = 0;
  virtual cache_status_t get_status(std::string) = 0;
  // Fill obj_list with eviction candidates (may be empty).
  virtual void get_evict_list(std::list<std::string>* obj_list) = 0;
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif
| 818 | 22.4 | 70 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/SimplePolicy.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/debug.h"
#include "SimplePolicy.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_immutable_obj_cache
#undef dout_prefix
#define dout_prefix *_dout << "ceph::cache::SimplePolicy: " << this << " " \
<< __func__ << ": "
namespace ceph {
namespace immutable_obj_cache {
// LRU-based policy: caps total cached bytes at `cache_size`, concurrent
// promotions at `max_inflight`, and starts evicting once usage exceeds
// cache_size * watermark.
SimplePolicy::SimplePolicy(CephContext *cct, uint64_t cache_size,
                           uint64_t max_inflight, double watermark)
  : cct(cct), m_watermark(watermark), m_max_inflight_ops(max_inflight),
    m_max_cache_size(cache_size) {

  ldout(cct, 20) << "max cache size= " << m_max_cache_size
                 << " ,watermark= " << m_watermark
                 << " ,max inflight ops= " << m_max_inflight_ops << dendl;

  m_cache_size = 0;
}
// Free every remaining Entry; the map owns its Entry pointers.
SimplePolicy::~SimplePolicy() {
  ldout(cct, 20) << dendl;

  for (auto& [file_name, entry] : m_cache_map) {
    delete entry;
  }
}
// Admit a new entry on first lookup. Returns OBJ_CACHE_NONE when a promotion
// should be started (entry reserved in SKIP state), or OBJ_CACHE_SKIP when
// the object is already being promoted or capacity/inflight limits are hit
// (caller should read from rados directly).
cache_status_t SimplePolicy::alloc_entry(std::string file_name) {
  ldout(cct, 20) << "alloc entry for: " << file_name << dendl;

  std::unique_lock wlocker{m_cache_map_lock};

  // cache hit when promoting
  if (m_cache_map.find(file_name) != m_cache_map.end()) {
    ldout(cct, 20) << "object is under promoting: " << file_name << dendl;
    return OBJ_CACHE_SKIP;
  }

  if ((m_cache_size < m_max_cache_size) &&
      (inflight_ops < m_max_inflight_ops)) {
    Entry* entry = new Entry();
    ceph_assert(entry != nullptr);
    m_cache_map[file_name] = entry;
    // drop the write lock before re-acquiring it inside update_status()
    wlocker.unlock();
    update_status(file_name, OBJ_CACHE_SKIP);
    return OBJ_CACHE_NONE;  // start promotion request
  }

  // if there's no free entry, return skip to read from rados
  return OBJ_CACHE_SKIP;
}
// Look up an entry; unknown names are admitted via alloc_entry(), and hits
// on settled entries are bumped to the front of the promoted LRU.
cache_status_t SimplePolicy::lookup_object(std::string file_name) {
  ldout(cct, 20) << "lookup: " << file_name << dendl;

  std::shared_lock rlocker{m_cache_map_lock};

  auto entry_it = m_cache_map.find(file_name);
  // simply promote on first lookup
  if (entry_it == m_cache_map.end()) {
    rlocker.unlock();
    return alloc_entry(file_name);
  }

  Entry* entry = entry_it->second;

  if (entry->status == OBJ_CACHE_PROMOTED || entry->status == OBJ_CACHE_DNE) {
    // bump pos in lru on hit
    // NOTE(review): lru_touch mutates the LRU under a *shared* lock; looks
    // like concurrent lookups could race here -- confirm LRU is internally
    // synchronized or intended.
    m_promoted_lru.lru_touch(entry);
  }

  return entry->status;
}
// State machine for an entry's lifecycle. Valid transitions:
//   NONE -> SKIP            : promotion started (inflight_ops++)
//   SKIP -> PROMOTED | DNE  : promotion finished; enter LRU, account size
//   SKIP -> NONE            : promotion failed; entry freed
//   PROMOTED/DNE -> NONE    : eviction; leave LRU, un-account size, free
// Unknown names and any other transition are silently ignored.
// Fixed: removed a dead ceph_assert that repeated the early-return condition,
// and renamed the local that shadowed the `size` parameter.
void SimplePolicy::update_status(std::string file_name,
                                 cache_status_t new_status, uint64_t size) {
  ldout(cct, 20) << "update status for: " << file_name
                 << " new status = " << new_status << dendl;

  std::unique_lock locker{m_cache_map_lock};

  auto entry_it = m_cache_map.find(file_name);
  if (entry_it == m_cache_map.end()) {
    return;
  }

  Entry* entry = entry_it->second;

  // to promote
  if (entry->status == OBJ_CACHE_NONE && new_status== OBJ_CACHE_SKIP) {
    entry->status = new_status;
    entry->file_name = file_name;
    inflight_ops++;
    return;
  }

  // promoting done
  if (entry->status == OBJ_CACHE_SKIP && (new_status== OBJ_CACHE_PROMOTED ||
                                          new_status== OBJ_CACHE_DNE)) {
    m_promoted_lru.lru_insert_top(entry);
    entry->status = new_status;
    entry->size = size;
    m_cache_size += entry->size;
    inflight_ops--;
    return;
  }

  // promoting failed
  if (entry->status == OBJ_CACHE_SKIP && new_status== OBJ_CACHE_NONE) {
    // mark this entry as free
    entry->file_name = "";
    entry->status = new_status;

    m_cache_map.erase(entry_it);
    inflight_ops--;
    delete entry;
    return;
  }

  // to evict
  if ((entry->status == OBJ_CACHE_PROMOTED || entry->status == OBJ_CACHE_DNE) &&
      new_status== OBJ_CACHE_NONE) {
    // mark this entry as free
    uint64_t entry_size = entry->size;
    entry->file_name = "";
    entry->size = 0;
    entry->status = new_status;

    m_promoted_lru.lru_remove(entry);
    m_cache_map.erase(entry_it);
    m_cache_size -= entry_size;
    delete entry;
    return;
  }
}
// Retire an entry after its file was deleted: eviction is expressed as a
// transition to OBJ_CACHE_NONE, which frees the entry. Always returns 0.
int SimplePolicy::evict_entry(std::string file_name) {
  ldout(cct, 20) << "to evict: " << file_name << dendl;

  update_status(file_name, OBJ_CACHE_NONE);

  return 0;
}
// Current status of an entry; untracked names report OBJ_CACHE_NONE.
cache_status_t SimplePolicy::get_status(std::string file_name) {
  ldout(cct, 20) << file_name << dendl;

  std::shared_lock locker{m_cache_map_lock};
  auto it = m_cache_map.find(file_name);
  return (it == m_cache_map.end()) ? OBJ_CACHE_NONE : it->second->status;
}
// When usage exceeds the watermark, pop ~10% of the entries from the cold
// end of the promoted LRU and hand their names back as eviction candidates.
// Entries are only expired from the LRU here; the caller removes files and
// then calls evict_entry() to retire the metadata.
void SimplePolicy::get_evict_list(std::list<std::string>* obj_list) {
  ldout(cct, 20) << dendl;

  std::unique_lock locker{m_cache_map_lock};
  // check free ratio, pop entries from LRU
  if ((double)m_cache_size > m_max_cache_size * m_watermark) {
    // TODO(dehao): make this configurable
    int evict_num = m_cache_map.size() * 0.1;
    for (int i = 0; i < evict_num; i++) {
      Entry* entry = reinterpret_cast<Entry*>(m_promoted_lru.lru_expire());
      if (entry == nullptr) {
        continue;
      }
      std::string file_name = entry->file_name;
      obj_list->push_back(file_name);
    }
  }
}
// for unit test
// Remaining capacity in bytes (max size minus accounted usage).
uint64_t SimplePolicy::get_free_size() {
  return m_max_cache_size - m_cache_size;
}
// Count entries whose promotion is still in flight (status SKIP).
uint64_t SimplePolicy::get_promoting_entry_num() {
  uint64_t count = 0;
  std::shared_lock rlocker{m_cache_map_lock};
  for (const auto& [file_name, entry] : m_cache_map) {
    if (entry->status == OBJ_CACHE_SKIP) {
      ++count;
    }
  }
  return count;
}
// Number of settled entries currently tracked by the promoted LRU.
uint64_t SimplePolicy::get_promoted_entry_num() {
  return m_promoted_lru.lru_get_size();
}
// Peek at the next eviction candidate (coldest LRU entry) without removing
// it; "" when the LRU is empty.
std::string SimplePolicy::get_evict_entry() {
  auto* candidate =
    reinterpret_cast<Entry*>(m_promoted_lru.lru_get_next_expire());
  return candidate ? candidate->file_name : std::string();
}
} // namespace immutable_obj_cache
} // namespace ceph
| 5,870 | 26.0553 | 80 |
cc
|
null |
ceph-main/src/tools/immutable_object_cache/SimplePolicy.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_SIMPLE_POLICY_H
#define CEPH_CACHE_SIMPLE_POLICY_H
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "include/lru.h"
#include "Policy.h"
#include <unordered_map>
#include <string>
namespace ceph {
namespace immutable_obj_cache {
// LRU-based implementation of Policy: a hash map of entries plus an LRU of
// settled (PROMOTED/DNE) entries; evicts ~10% of entries once usage passes
// m_max_cache_size * m_watermark.
// Fixed: Entry::size was left uninitialized by the default constructor
// (read in update_status accounting); also brace-initialize m_cache_size.
class SimplePolicy : public Policy {
 public:
  SimplePolicy(CephContext *cct, uint64_t block_num, uint64_t max_inflight,
               double watermark);
  ~SimplePolicy();

  cache_status_t lookup_object(std::string file_name);
  cache_status_t get_status(std::string file_name);

  void update_status(std::string file_name,
                     cache_status_t new_status,
                     uint64_t size = 0);

  int evict_entry(std::string file_name);

  void get_evict_list(std::list<std::string>* obj_list);

  // introspection helpers, used by unit tests
  uint64_t get_free_size();
  uint64_t get_promoting_entry_num();
  uint64_t get_promoted_entry_num();
  std::string get_evict_entry();

 private:
  cache_status_t alloc_entry(std::string file_name);

  // Per-file record; lives in m_cache_map and (once settled) in the LRU.
  class Entry : public LRUObject {
   public:
    cache_status_t status;
    Entry() : status(OBJ_CACHE_NONE) {}
    std::string file_name;
    uint64_t size = 0;  // object size in bytes, set on promotion
  };

  CephContext* cct;
  double m_watermark;              // usage fraction that triggers eviction
  uint64_t m_max_inflight_ops;     // cap on concurrent promotions
  uint64_t m_max_cache_size;       // total capacity in bytes

  std::atomic<uint64_t> inflight_ops = 0;

  std::unordered_map<std::string, Entry*> m_cache_map;
  ceph::shared_mutex m_cache_map_lock =
    ceph::make_shared_mutex("rbd::cache::SimplePolicy::m_cache_map_lock");

  std::atomic<uint64_t> m_cache_size{0};  // accounted bytes in cache

  LRU m_promoted_lru;  // settled entries, coldest at the expire end
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_SIMPLE_POLICY_H
| 1,735 | 24.15942 | 75 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/SocketCommon.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_SOCKET_COMMON_H
#define CEPH_CACHE_SOCKET_COMMON_H
namespace ceph {
namespace immutable_obj_cache {
// Wire message type codes shared by client and daemon (RBDSC_* values go
// into ObjectCacheRequest::type) and internal asio error-site identifiers.
static const int RBDSC_REGISTER = 0X11;
static const int RBDSC_READ = 0X12;
static const int RBDSC_REGISTER_REPLY = 0X13;
static const int RBDSC_READ_REPLY = 0X14;
static const int RBDSC_READ_RADOS = 0X15;

// markers identifying where an asio operation failed
static const int ASIO_ERROR_READ = 0X01;
static const int ASIO_ERROR_WRITE = 0X02;
static const int ASIO_ERROR_CONNECT = 0X03;
static const int ASIO_ERROR_ACCEPT = 0X04;
static const int ASIO_ERROR_MSG_INCOMPLETE = 0X05;

class ObjectCacheRequest;
class CacheSession;

typedef GenContextURef<ObjectCacheRequest*> CacheGenContextURef;

// server-side callback invoked for each decoded request on a session
typedef std::function<void(CacheSession*, ObjectCacheRequest*)> ProcessMsg;
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_SOCKET_COMMON_H
| 968 | 29.28125 | 75 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/Types.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Types.h"
#include "SocketCommon.h"
#define dout_subsys ceph_subsys_immutable_obj_cache
#undef dout_prefix
#define dout_prefix *_dout << "ceph::cache::Types: " << __func__ << ": "
namespace ceph {
namespace immutable_obj_cache {
ObjectCacheRequest::ObjectCacheRequest() {}
ObjectCacheRequest::ObjectCacheRequest(uint16_t t, uint64_t s)
  : type(t), seq(s) {}
ObjectCacheRequest::~ObjectCacheRequest() {}

// Serialize the common header (type, seq) at encoding version 2, then let
// the subclass append its payload unless it declares the payload empty.
void ObjectCacheRequest::encode() {
  ENCODE_START(2, 1, payload);
  ceph::encode(type, payload);
  ceph::encode(seq, payload);
  if (!payload_empty()) {
    encode_payload();
  }
  ENCODE_FINISH(payload);
}

// Inverse of encode(): read the common header, then hand the iterator and
// the sender's struct version to the subclass payload decoder.
void ObjectCacheRequest::decode(bufferlist& bl) {
  auto i = bl.cbegin();
  DECODE_START(2, i);
  ceph::decode(type, i);
  ceph::decode(seq, i);
  if (!payload_empty()) {
    decode_payload(i, struct_v);
  }
  DECODE_FINISH(i);
}

ObjectCacheRegData::ObjectCacheRegData() {}
ObjectCacheRegData::ObjectCacheRegData(uint16_t t, uint64_t s)
  : ObjectCacheRequest(t, s) {}
ObjectCacheRegData::ObjectCacheRegData(uint16_t t, uint64_t s,
                                       const std::string &version)
  : ObjectCacheRequest(t, s),
    version(version) {
}

ObjectCacheRegData::~ObjectCacheRegData() {}

// Registration payload carries only the client version string.
void ObjectCacheRegData::encode_payload() {
  ceph::encode(version, payload);
}

void ObjectCacheRegData::decode_payload(bufferlist::const_iterator i,
                                        __u8 encode_version) {
  // older clients sent no version; tolerate a truncated payload
  if (i.end()) {
    return;
  }
  ceph::decode(version, i);
}

ObjectCacheRegReplyData::ObjectCacheRegReplyData() {}
ObjectCacheRegReplyData::ObjectCacheRegReplyData(uint16_t t, uint64_t s)
  : ObjectCacheRequest(t, s) {}

ObjectCacheRegReplyData::~ObjectCacheRegReplyData() {}

// Registration reply has no payload (payload_empty() returns true).
void ObjectCacheRegReplyData::encode_payload() {}

void ObjectCacheRegReplyData::decode_payload(bufferlist::const_iterator bl,
                                             __u8 encode_version) {}
ObjectCacheReadData::ObjectCacheReadData(uint16_t t, uint64_t s,
                                         uint64_t read_offset,
                                         uint64_t read_len,
                                         uint64_t pool_id, uint64_t snap_id,
                                         uint64_t object_size,
                                         std::string oid,
                                         std::string pool_namespace)
  : ObjectCacheRequest(t, s), read_offset(read_offset),
    read_len(read_len), pool_id(pool_id), snap_id(snap_id),
    object_size(object_size), oid(oid), pool_namespace(pool_namespace)
{}

ObjectCacheReadData::ObjectCacheReadData(uint16_t t, uint64_t s)
  : ObjectCacheRequest(t, s) {}

ObjectCacheReadData::~ObjectCacheReadData() {}

// Field order here defines the wire format and must stay in sync with
// decode_payload(); object_size was appended last for v2 compatibility.
void ObjectCacheReadData::encode_payload() {
  ceph::encode(read_offset, payload);
  ceph::encode(read_len, payload);
  ceph::encode(pool_id, payload);
  ceph::encode(snap_id, payload);
  ceph::encode(oid, payload);
  ceph::encode(pool_namespace, payload);
  ceph::encode(object_size, payload);
}

void ObjectCacheReadData::decode_payload(bufferlist::const_iterator i,
                                         __u8 encode_version) {
  ceph::decode(read_offset, i);
  ceph::decode(read_len, i);
  ceph::decode(pool_id, i);
  ceph::decode(snap_id, i);
  ceph::decode(oid, i);
  ceph::decode(pool_namespace, i);
  // v1 senders did not include object_size; leave it at its default (0)
  if (encode_version >= 2) {
    ceph::decode(object_size, i);
  }
}

ObjectCacheReadReplyData::ObjectCacheReadReplyData(uint16_t t, uint64_t s,
                                                   std::string cache_path)
  : ObjectCacheRequest(t, s), cache_path(cache_path) {}
ObjectCacheReadReplyData::ObjectCacheReadReplyData(uint16_t t, uint64_t s)
  : ObjectCacheRequest(t, s) {}

ObjectCacheReadReplyData::~ObjectCacheReadReplyData() {}

// Read reply carries the local cache file path for the client to open.
void ObjectCacheReadReplyData::encode_payload() {
  ceph::encode(cache_path, payload);
}

void ObjectCacheReadReplyData::decode_payload(bufferlist::const_iterator i,
                                              __u8 encode_version) {
  ceph::decode(cache_path, i);
}

ObjectCacheReadRadosData::ObjectCacheReadRadosData() {}
ObjectCacheReadRadosData::ObjectCacheReadRadosData(uint16_t t, uint64_t s)
  : ObjectCacheRequest(t, s) {}

ObjectCacheReadRadosData::~ObjectCacheReadRadosData() {}

// "Read from rados instead" reply has no payload.
void ObjectCacheReadRadosData::encode_payload() {}

void ObjectCacheReadRadosData::decode_payload(bufferlist::const_iterator i,
                                              __u8 encode_version) {}
// Factory: peek at the common header to learn the message type, construct
// the matching request subclass, then run the full decode over the same
// buffer. Caller owns (and must delete) the returned object. Aborts on an
// unknown type code.
ObjectCacheRequest* decode_object_cache_request(bufferlist payload_buffer) {
  ObjectCacheRequest* req = nullptr;

  uint16_t type;
  uint64_t seq;
  auto i = payload_buffer.cbegin();
  DECODE_START(1, i);
  ceph::decode(type, i);
  ceph::decode(seq, i);
  DECODE_FINISH(i);

  switch (type) {
    case RBDSC_REGISTER: {
      req = new ObjectCacheRegData(type, seq);
      break;
    }
    case RBDSC_READ: {
      req = new ObjectCacheReadData(type, seq);
      break;
    }
    case RBDSC_REGISTER_REPLY: {
      req = new ObjectCacheRegReplyData(type, seq);
      break;
    }
    case RBDSC_READ_REPLY: {
      req = new ObjectCacheReadReplyData(type, seq);
      break;
    }
    case RBDSC_READ_RADOS: {
      req = new ObjectCacheReadRadosData(type, seq);
      break;
    }
    default:
      ceph_assert(0);
  }

  // re-decode from the start so the subclass also reads its payload
  req->decode(payload_buffer);

  return req;
}
} // namespace immutable_obj_cache
} // namespace ceph
| 5,478 | 28.616216 | 76 |
cc
|
null |
ceph-main/src/tools/immutable_object_cache/Types.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_TYPES_H
#define CEPH_CACHE_TYPES_H
#include "include/encoding.h"
#include "include/Context.h"
#include "SocketCommon.h"
namespace ceph {
namespace immutable_obj_cache {
namespace {
// Mirror of the fixed wire-header layout produced by the encoding macros
// (struct version, compat version, payload length), used only to size the
// header read and to extract the payload length from raw bytes.
struct HeaderHelper {
  uint8_t v;        // struct version
  uint8_t c_v;      // compat version
  ceph_le32 len;    // payload length, little-endian
}__attribute__((packed));

inline uint8_t get_header_size() {
  return sizeof(HeaderHelper);
}

// Read the payload length out of a raw header buffer (buf must hold at
// least get_header_size() bytes).
inline uint32_t get_data_len(char* buf) {
  HeaderHelper* header = reinterpret_cast<HeaderHelper*>(buf);
  return header->len;
}
}  // namespace
// Base class for all cache protocol messages. Subclasses define the payload
// via encode_payload()/decode_payload() and declare whether one exists.
class ObjectCacheRequest {
 public:
  uint16_t type;   // one of the RBDSC_* codes
  uint64_t seq;    // request sequence number, echoed in replies

  bufferlist payload;

  CacheGenContextURef process_msg;

  ObjectCacheRequest();
  ObjectCacheRequest(uint16_t type, uint64_t seq);
  virtual ~ObjectCacheRequest();

  // encode consists of two steps
  // step 1 : directly encode common bits using encode method of base classs.
  // step 2 : according to payload_empty, determine whether addtional bits
  //          need to be encoded which be implements by child class.
  void encode();

  void decode(bufferlist& bl);
  bufferlist get_payload_bufferlist() { return payload; }

  virtual void encode_payload() = 0;
  virtual void decode_payload(bufferlist::const_iterator bl_it,
                              __u8 encode_version) = 0;
  // the RBDSC_* code this subclass represents
  virtual uint16_t get_request_type() = 0;
  // true when the message has no payload beyond the common header
  virtual bool payload_empty() = 0;
};
class ObjectCacheRegData : public ObjectCacheRequest {
public:
std::string version;
ObjectCacheRegData();
ObjectCacheRegData(uint16_t t, uint64_t s, const std::string &version);
ObjectCacheRegData(uint16_t t, uint64_t s);
~ObjectCacheRegData() override;
void encode_payload() override;
void decode_payload(bufferlist::const_iterator bl,
__u8 encode_version) override;
uint16_t get_request_type() override { return RBDSC_REGISTER; }
bool payload_empty() override { return false; }
};
class ObjectCacheRegReplyData : public ObjectCacheRequest {
public:
ObjectCacheRegReplyData();
ObjectCacheRegReplyData(uint16_t t, uint64_t s);
~ObjectCacheRegReplyData() override;
void encode_payload() override;
void decode_payload(bufferlist::const_iterator iter,
__u8 encode_version) override;
uint16_t get_request_type() override { return RBDSC_REGISTER_REPLY; }
bool payload_empty() override { return true; }
};
// Object read request (RBDSC_READ): identifies the target object
// (pool/namespace/oid/snap) and the byte range requested.
class ObjectCacheReadData : public ObjectCacheRequest {
 public:
  uint64_t read_offset;   // byte offset within the object
  uint64_t read_len;      // number of bytes requested
  uint64_t pool_id;
  uint64_t snap_id;
  uint64_t object_size = 0;  // 0 when not supplied by the caller
  std::string oid;
  std::string pool_namespace;
  ObjectCacheReadData(uint16_t t, uint64_t s, uint64_t read_offset,
                      uint64_t read_len, uint64_t pool_id,
                      uint64_t snap_id, uint64_t object_size,
                      std::string oid, std::string pool_namespace);
  ObjectCacheReadData(uint16_t t, uint64_t s);
  ~ObjectCacheReadData() override;
  void encode_payload() override;
  void decode_payload(bufferlist::const_iterator bl,
                      __u8 encode_version) override;
  uint16_t get_request_type() override { return RBDSC_READ; }
  bool payload_empty() override { return false; }
};
// Reply to a read request (RBDSC_READ_REPLY); carries the path of the
// locally cached file the client should read from.
class ObjectCacheReadReplyData : public ObjectCacheRequest {
 public:
  std::string cache_path;
  ObjectCacheReadReplyData(uint16_t t, uint64_t s, std::string cache_path);
  ObjectCacheReadReplyData(uint16_t t, uint64_t s);
  ~ObjectCacheReadReplyData() override;
  void encode_payload() override;
  void decode_payload(bufferlist::const_iterator bl,
                      __u8 encode_version) override;
  uint16_t get_request_type() override { return RBDSC_READ_REPLY; }
  bool payload_empty() override { return false; }
};
// RBDSC_READ_RADOS message; header only, no payload.  Presumably tells
// the client to fall back to reading directly from RADOS — confirm
// against the daemon's dispatch code.
class ObjectCacheReadRadosData : public ObjectCacheRequest {
 public:
  ObjectCacheReadRadosData();
  ObjectCacheReadRadosData(uint16_t t, uint64_t s);
  ~ObjectCacheReadRadosData() override;
  void encode_payload() override;
  void decode_payload(bufferlist::const_iterator bl,
                      __u8 encode_version) override;
  uint16_t get_request_type() override { return RBDSC_READ_RADOS; }
  bool payload_empty() override { return true; }
};
// Decode a complete on-wire message into a newly allocated request
// object of the appropriate subclass; the caller owns the result.
ObjectCacheRequest* decode_object_cache_request(bufferlist payload_buffer);
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_TYPES_H
| 4,398 | 31.109489 | 77 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/Utils.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CACHE_UTILS_H
#define CEPH_CACHE_UTILS_H
#include "include/rados/librados.hpp"
#include "include/Context.h"
namespace ceph {
namespace immutable_obj_cache {
namespace detail {
/**
 * C-style trampoline bridging a librados AIO completion to a C++
 * member-function call: invokes (obj->*MF)(return_value) on the object
 * that was registered as the completion's callback argument.
 */
template <typename T, void(T::*MF)(int)>
void rados_callback(rados_completion_t c, void *arg) {
  // arg was passed as void* via create_rados_callback(); converting a
  // void* back to its original type is a static_cast, not a
  // reinterpret_cast.
  T *obj = static_cast<T*>(arg);
  int r = rados_aio_get_return_value(c);
  (obj->*MF)(r);
}
} // namespace detail
/**
 * Build a librados AioCompletion whose completion callback forwards the
 * AIO return value to obj->*MF (T::complete unless otherwise specified).
 */
template <typename T, void(T::*MF)(int)=&T::complete>
librados::AioCompletion *create_rados_callback(T *obj) {
  auto trampoline = &detail::rados_callback<T, MF>;
  return librados::Rados::aio_create_completion(obj, trampoline);
}
} // namespace immutable_obj_cache
} // namespace ceph
#endif // CEPH_CACHE_UTILS_H
| 802 | 24.09375 | 70 |
h
|
null |
ceph-main/src/tools/immutable_object_cache/main.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "CacheController.h"
#include <vector>
ceph::immutable_obj_cache::CacheController *cachectl = nullptr;
// Print command-line help for ceph-immutable-object-cache, then the
// generic daemon option summary.
void usage() {
  std::cout << "usage: ceph-immutable-object-cache [options...]" << std::endl
            << "options:\n"
            << "  -m monaddress[:port]      connect to specified monitor\n"
            << "  --keyring=<path>          path to keyring for local "
            << "cluster\n"
            << "  --log-file=<logfile>      file to log debug output\n"
            << "  --debug-immutable-obj-cache=<log-level>/<memory-level> "
            << "set debug level\n";
  generic_server_usage();
}
// Async signal handler: forward SIGINT/SIGTERM to the controller once
// it exists; signals arriving before construction are ignored.
static void handle_signal(int signum) {
  if (cachectl != nullptr) {
    cachectl->handle_signal(signum);
  }
}
// Daemon entry point: parse arguments, initialize the Ceph runtime
// (optionally daemonizing), install signal handlers, then run the cache
// controller until it exits or a fatal signal arrives.
int main(int argc, const char **argv) {
  auto args = argv_to_vec(argc, argv);
  env_to_vec(args);
  if (ceph_argparse_need_usage(args)) {
    usage();
    exit(0);
  }
  auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_DAEMON,
                         CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS);
  if (g_conf()->daemonize) {
    global_init_daemonize(g_ceph_context);
  }
  common_init_finish(g_ceph_context);
  global_init_chdir(g_ceph_context);
  init_async_signal_handler();
  register_async_signal_handler(SIGHUP, sighup_handler);
  register_async_signal_handler_oneshot(SIGINT, handle_signal);
  register_async_signal_handler_oneshot(SIGTERM, handle_signal);
  // NOTE(review): argv is converted a second time here rather than
  // reusing `args` — presumably because global_init consumed entries
  // from `args`; confirm before unifying.
  auto cmd_args = argv_to_vec(argc, argv);
  cachectl = new ceph::immutable_obj_cache::CacheController(g_ceph_context,
                                                            cmd_args);
  int r = cachectl->init();
  if (r < 0) {
    std::cerr << "failed to initialize: " << cpp_strerror(r) << std::endl;
    goto cleanup;
  }
  r = cachectl->run();
  if (r < 0) {
    // Redundant (control falls into `cleanup:` anyway) but kept for
    // symmetry with the init() error path.
    goto cleanup;
  }
 cleanup:
  // Tear down signal handling before destroying the controller so a
  // late signal cannot reach a deleted object.
  unregister_async_signal_handler(SIGHUP, sighup_handler);
  unregister_async_signal_handler(SIGINT, handle_signal);
  unregister_async_signal_handler(SIGTERM, handle_signal);
  shutdown_async_signal_handler();
  delete cachectl;
  return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
| 2,413 | 27.738095 | 78 |
cc
|
null |
ceph-main/src/tools/rados/PoolDump.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/rados/librados.hpp"
#include "common/errno.h"
#include "PoolDump.h"
using namespace librados;
using std::cerr;
using std::less;
using std::map;
using std::string;
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rados
/**
 * Export RADOS objects from a live cluster
 * to a serialized format via a file descriptor.
 *
 * Emits TYPE_POOL_BEGIN, then for each object an OBJECT_BEGIN section
 * followed by its data chunks, xattrs and omap, and finally
 * TYPE_POOL_END (the format RadosImport consumes).
 *
 * @param io_ctx open I/O context for the pool to export (must not be NULL)
 * @returns 0 on success, else error code
 */
int PoolDump::dump(IoCtx *io_ctx)
{
  ceph_assert(io_ctx != NULL);
  int r = 0;
  write_super();
  r = write_simple(TYPE_POOL_BEGIN, file_fd);
  if (r != 0) {
    return r;
  }
  // Enumerate objects across every namespace in the pool.
  io_ctx->set_namespace(all_nspaces);
  librados::NObjectIterator i = io_ctx->nobjects_begin();
  librados::NObjectIterator i_end = io_ctx->nobjects_end();
  for (; i != i_end; ++i) {
    const std::string oid = i->get_oid();
    dout(10) << "OID '" << oid << "'" << dendl;
    // Compose OBJECT_BEGIN
    // ====================
    object_begin obj_begin;
    obj_begin.hoid.hobj.oid = i->get_oid();
    obj_begin.hoid.hobj.nspace = i->get_nspace();
    obj_begin.hoid.hobj.set_key(i->get_locator());
    // Only output head, RadosImport only wants that
    obj_begin.hoid.hobj.snap = CEPH_NOSNAP;
    // Skip setting object_begin.oi, RadosImport doesn't care
    r = write_section(TYPE_OBJECT_BEGIN, obj_begin, file_fd);
    if (r != 0) {
      return r;
    }
    // Compose TYPE_DATA chunks
    // ========================
    const uint32_t op_size = 4096 * 1024;
    uint64_t offset = 0;
    // Point subsequent reads at this object's namespace/locator.
    // NOTE(review): this mutates the ioctx while the object iterator is
    // live — confirm against librados iterator semantics.
    io_ctx->set_namespace(i->get_nspace());
    io_ctx->locator_set_key(i->get_locator());
    while (true) {
      bufferlist outdata;
      r = io_ctx->read(oid, outdata, op_size, offset);
      if (r <= 0) {
        // Error or no data
        // NOTE(review): a negative r (read error) also ends the loop
        // silently, same as EOF — confirm this best-effort behavior.
        break;
      }
      r = write_section(TYPE_DATA,
          data_section(offset, outdata.length(), outdata), file_fd);
      if (r != 0) {
        // Output stream error
        return r;
      }
      if (outdata.length() < op_size) {
        // No more data
        break;
      }
      offset += outdata.length();
    }
    // Compose TYPE_ATTRS chunk
    // ========================
    std::map<std::string, bufferlist> raw_xattrs;
    std::map<std::string, bufferlist,less<>> xattrs;
    r = io_ctx->getxattrs(oid, raw_xattrs);
    if (r < 0) {
      cerr << "error getting xattr set " << oid << ": " << cpp_strerror(r)
           << std::endl;
      return r;
    }
    // Prepend "_" to mimic how user keys are represented in a pg export
    for (std::map<std::string, bufferlist>::iterator i = raw_xattrs.begin();
         i != raw_xattrs.end(); ++i) {
      std::pair< std::string, bufferlist> item(std::string("_") + std::string(i->first.c_str()), i->second);
      xattrs.insert(item);
    }
    r = write_section(TYPE_ATTRS, attr_section(xattrs), file_fd);
    if (r != 0) {
      return r;
    }
    // Compose TYPE_OMAP_HDR section
    // =============================
    bufferlist omap_header;
    r = io_ctx->omap_get_header(oid, &omap_header);
    if (r < 0) {
      cerr << "error getting omap header " << oid
           << ": " << cpp_strerror(r) << std::endl;
      return r;
    }
    r = write_section(TYPE_OMAP_HDR, omap_hdr_section(omap_header), file_fd);
    if (r != 0) {
      return r;
    }
    // Compose TYPE_OMAP
    // Paginate omap keys MAX_READ at a time, resuming after the last
    // key seen; a short page means we have drained the omap.
    int MAX_READ = 512;
    string last_read = "";
    do {
      map<string, bufferlist> values;
      r = io_ctx->omap_get_vals(oid, last_read, MAX_READ, &values);
      if (r < 0) {
        cerr << "error getting omap keys " << oid << ": "
             << cpp_strerror(r) << std::endl;
        return r;
      }
      if (values.size()) {
        last_read = values.rbegin()->first;
      } else {
        break;
      }
      r = write_section(TYPE_OMAP, omap_section(values), file_fd);
      if (r != 0) {
        return r;
      }
      r = values.size();
    } while (r == MAX_READ);
    // Close object
    // =============
    r = write_simple(TYPE_OBJECT_END, file_fd);
    if (r != 0) {
      return r;
    }
  }
  r = write_simple(TYPE_POOL_END, file_fd);
#if defined(__linux__)
  // Hint the kernel to drop our page-cache footprint for the output file.
  if (file_fd != STDOUT_FILENO)
    posix_fadvise(file_fd, 0, 0, POSIX_FADV_DONTNEED);
#endif
  return r;
}
| 4,559 | 25.206897 | 108 |
cc
|
null |
ceph-main/src/tools/rados/PoolDump.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef POOL_DUMP_H_
#define POOL_DUMP_H_
#include "include/rados/librados_fwd.hpp"
#include "tools/RadosDump.h"
// Streams the contents of a live pool (object data, xattrs, omap) to a
// file descriptor in the RadosDump export format; see PoolDump::dump().
class PoolDump : public RadosDump
{
  public:
    // file_fd_: destination fd for the serialized export.  The second
    // base-class argument is hard-coded false — presumably a dry-run
    // flag; confirm in RadosDump.
    explicit PoolDump(int file_fd_) : RadosDump(file_fd_, false) {}
    int dump(librados::IoCtx *io_ctx);
};
#endif // POOL_DUMP_H_
| 695 | 22.2 | 70 |
h
|
null |
ceph-main/src/tools/rados/RadosImport.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/errno.h"
#include "osd/PGLog.h"
#include "RadosImport.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rados
using std::cerr;
using std::cout;
using std::map;
using std::string;
int RadosImport::import(std::string pool, bool no_overwrite)
{
librados::IoCtx ioctx;
librados::Rados cluster;
char *id = getenv("CEPH_CLIENT_ID");
if (id) cerr << "Client id is: " << id << std::endl;
int ret = cluster.init(id);
if (ret) {
cerr << "Error " << ret << " in cluster.init" << std::endl;
return ret;
}
ret = cluster.conf_read_file(NULL);
if (ret) {
cerr << "Error " << ret << " in cluster.conf_read_file" << std::endl;
return ret;
}
ret = cluster.conf_parse_env(NULL);
if (ret) {
cerr << "Error " << ret << " in cluster.conf_read_env" << std::endl;
return ret;
}
ret = cluster.connect();
if (ret) {
cerr << "Error " << ret << " in cluster.connect" << std::endl;
return ret;
}
ret = cluster.ioctx_create(pool.c_str(), ioctx);
if (ret < 0) {
cerr << "ioctx_create " << pool << " failed with " << ret << std::endl;
return ret;
}
return import(ioctx, no_overwrite);
}
/**
 * Import a serialized export (whole pool, or a single replicated PG)
 * read from file_fd into the supplied IoCtx.
 *
 * @param io_ctx destination pool I/O context
 * @param no_overwrite if true, objects already present in the pool are
 *        skipped instead of being overwritten
 * @returns 0 on success, negative error code on failure
 *
 * Cleanup vs. original: removed unused locals (pg_info_t info,
 * PGLog::IndexedLog log) and a stray double semicolon; logic unchanged.
 */
int RadosImport::import(librados::IoCtx &io_ctx, bool no_overwrite)
{
  bufferlist ebl;
  int ret = read_super();
  if (ret)
    return ret;
  // Validate the export header before touching any data.
  if (sh.magic != super_header::super_magic) {
    cerr << "Invalid magic number: 0x"
         << std::hex << sh.magic << " vs. 0x" << super_header::super_magic
         << std::dec << std::endl;
    return -EFAULT;
  }
  if (sh.version > super_header::super_ver) {
    cerr << "Can't handle export format version=" << sh.version << std::endl;
    return -EINVAL;
  }
  // First section must be TYPE_POOL_BEGIN or TYPE_PG_BEGIN.
  sectiontype_t type;
  ret = read_section(&type, &ebl);
  if (ret)
    return ret;
  bool pool_mode = false;
  if (type == TYPE_POOL_BEGIN) {
    pool_mode = true;
    cout << "Importing pool" << std::endl;
  } else if (type == TYPE_PG_BEGIN) {
    auto ebliter = ebl.cbegin();
    pg_begin pgb;
    pgb.decode(ebliter);
    spg_t pgid = pgb.pgid;
    if (!pgid.is_no_shard()) {
      cerr << "Importing Erasure Coded shard is not supported" << std::endl;
      return -EOPNOTSUPP;
    }
    dout(10) << "Exported features: " << pgb.superblock.compat_features << dendl;
    cout << "Importing from pgid " << pgid << std::endl;
  } else {
    cerr << "Invalid initial section code " << type << std::endl;
    return -EFAULT;
  }
  // XXX: How to check export features?
#if 0
  if (sb.compat_features.compare(pgb.superblock.compat_features) == -1) {
    cerr << "Export has incompatible features set "
      << pgb.superblock.compat_features << std::endl;
    return -EINVAL;
  }
#endif
#if defined(__linux__)
  // Sequential read hint for the input stream.
  if (file_fd != STDIN_FILENO)
    posix_fadvise(file_fd, 0, 0, POSIX_FADV_SEQUENTIAL);
#endif
  bool done = false;
  bool found_metadata = false;
  while (!done) {
    ret = read_section(&type, &ebl);
    if (ret)
      return ret;
    //cout << "do_import: Section type " << hex << type << dec << std::endl;
    if (type >= END_OF_TYPES) {
      cout << "Skipping unknown section type" << std::endl;
      continue;
    }
    switch (type) {
    case TYPE_OBJECT_BEGIN:
      ret = get_object_rados(io_ctx, ebl, no_overwrite);
      if (ret) {
        cerr << "Error inserting object: " << ret << std::endl;
        return ret;
      }
      break;
    case TYPE_PG_METADATA:
      // PG metadata is only meaningful for an in-place PG import.
      dout(10) << "Don't care about the old metadata" << dendl;
      found_metadata = true;
      break;
    case TYPE_PG_END:
      done = true;
      break;
    case TYPE_POOL_END:
      done = true;
      break;
    default:
      return -EFAULT;
    }
  }
  if (!(pool_mode || found_metadata)) {
    // Warning only: the objects were still imported.
    cerr << "Missing metadata section!" << std::endl;
  }
#if defined(__linux__)
  if (file_fd != STDIN_FILENO)
    posix_fadvise(file_fd, 0, 0, POSIX_FADV_DONTNEED);
#endif
  return 0;
}
/**
 * Consume one OBJECT_BEGIN .. OBJECT_END run of sections from the input
 * stream and recreate the object (data, xattrs, omap) in the pool.
 *
 * @param ioctx destination pool I/O context
 * @param bl encoded object_begin section for this object
 * @param no_overwrite skip the object if it already exists
 * @returns 0 on success (including skipped objects), negative error
 *          code otherwise
 *
 * Cleanup vs. original: removed unused locals
 * (map<string,bufferlist>::iterator i; bufferlist abl); logic unchanged.
 */
int RadosImport::get_object_rados(librados::IoCtx &ioctx, bufferlist &bl, bool no_overwrite)
{
  auto ebliter = bl.cbegin();
  object_begin ob;
  ob.decode(ebliter);
  bool skipping;
  data_section ds;
  attr_section as;
  omap_hdr_section oh;
  omap_section os;
  ceph_assert(g_ceph_context);
  // Internal hit-set objects are an OSD implementation detail; never import.
  if (ob.hoid.hobj.nspace == g_ceph_context->_conf->osd_hit_set_namespace) {
    cout << "Skipping internal object " << ob.hoid << std::endl;
    skip_object(bl);
    return 0;
  }
  // Only head objects are imported; snapshot clones are dropped.
  if (!ob.hoid.hobj.is_head()) {
    cout << "Skipping non-head for " << ob.hoid << std::endl;
    skip_object(bl);
    return 0;
  }
  ioctx.set_namespace(ob.hoid.hobj.get_namespace());
  ioctx.locator_set_key(ob.hoid.hobj.get_key());
  // Decide up front whether we write, overwrite, or skip this object.
  string msg("Write");
  skipping = false;
  if (dry_run) {
    uint64_t psize;
    time_t pmtime;
    int ret = ioctx.stat(ob.hoid.hobj.oid.name, &psize, &pmtime);
    if (ret == 0) {
      if (no_overwrite)
        // Could set skipping, but dry-run doesn't change anything either
        msg = "Skipping existing";
      else
        msg = "***Overwrite***";
    }
  } else {
    int ret = ioctx.create(ob.hoid.hobj.oid.name, true);
    if (ret && ret != -EEXIST) {
      cerr << "create failed: " << cpp_strerror(ret) << std::endl;
      return ret;
    }
    if (ret == -EEXIST) {
      if (no_overwrite) {
        msg = "Skipping existing";
        skipping = true;
      } else {
        msg = "***Overwrite***";
        ret = ioctx.remove(ob.hoid.hobj.oid.name);
        if (ret < 0) {
          cerr << "remove failed: " << cpp_strerror(ret) << std::endl;
          return ret;
        }
        ret = ioctx.create(ob.hoid.hobj.oid.name, true);
        // If object re-appeared after removal, let's just skip it
        if (ret == -EEXIST) {
          skipping = true;
          msg = "Skipping in-use object";
          ret = 0;
        }
        if (ret < 0) {
          cerr << "create failed: " << cpp_strerror(ret) << std::endl;
          return ret;
        }
      }
    }
  }
  cout << msg << " " << ob.hoid << std::endl;
  // Determine whether writes must be aligned (e.g. for EC pools):
  // either forced via the `align` member or queried from the pool.
  bool need_align = false;
  uint64_t alignment = 0;
  if (align) {
    need_align = true;
    alignment = align;
  } else {
    int ret = ioctx.pool_requires_alignment2(&need_align);
    if (ret < 0) {
      cerr << "pool_requires_alignment2 failed: " << cpp_strerror(ret)
           << std::endl;
      return ret;
    }
    if (need_align) {
      ret = ioctx.pool_required_alignment2(&alignment);
      if (ret < 0) {
        cerr << "pool_required_alignment2 failed: " << cpp_strerror(ret)
             << std::endl;
        return ret;
      }
      ceph_assert(alignment != 0);
    }
  }
  if (need_align) {
    dout(10) << "alignment = " << alignment << dendl;
  }
  // Replay each section of the object until OBJECT_END.  In aligned
  // mode, data is accumulated in `databl` and flushed in
  // alignment-multiple chunks.
  bufferlist ebl, databl;
  uint64_t in_offset = 0, out_offset = 0;
  bool done = false;
  while (!done) {
    sectiontype_t type;
    int ret = read_section(&type, &ebl);
    if (ret) {
      cerr << "Error reading section: " << ret << std::endl;
      return ret;
    }
    ebliter = ebl.cbegin();
    //cout << "\tdo_object: Section type " << hex << type << dec << std::endl;
    //cout << "\t\tsection size " << ebl.length() << std::endl;
    if (type >= END_OF_TYPES) {
      cout << "Skipping unknown object section type" << std::endl;
      continue;
    }
    switch (type) {
    case TYPE_DATA:
      ds.decode(ebliter);
      dout(10) << "\tdata: offset " << ds.offset << " len " << ds.len << dendl;
      if (need_align) {
        if (ds.offset != in_offset) {
          cerr << "Discontiguous object data in export" << std::endl;
          return -EFAULT;
        }
        ceph_assert(ds.databl.length() == ds.len);
        databl.claim_append(ds.databl);
        in_offset += ds.len;
        if (databl.length() >= alignment) {
          // Flush the largest alignment-multiple prefix; keep the tail.
          uint64_t rndlen = uint64_t(databl.length() / alignment) * alignment;
          dout(10) << "write offset=" << out_offset << " len=" << rndlen << dendl;
          if (!dry_run && !skipping) {
            ret = ioctx.write(ob.hoid.hobj.oid.name, databl, rndlen, out_offset);
            if (ret) {
              cerr << "write failed: " << cpp_strerror(ret) << std::endl;
              return ret;
            }
          }
          out_offset += rndlen;
          bufferlist n;
          if (databl.length() > rndlen) {
            ceph_assert(databl.length() - rndlen < alignment);
            n.substr_of(databl, rndlen, databl.length() - rndlen);
          }
          databl = n;
        }
        break;
      }
      if (!dry_run && !skipping) {
        ret = ioctx.write(ob.hoid.hobj.oid.name, ds.databl, ds.len, ds.offset);
        if (ret) {
          cerr << "write failed: " << cpp_strerror(ret) << std::endl;
          return ret;
        }
      }
      break;
    case TYPE_ATTRS:
      as.decode(ebliter);
      dout(10) << "\tattrs: len " << as.data.size() << dendl;
      if (dry_run || skipping)
        break;
      for (std::map<string,bufferlist>::iterator i = as.data.begin();
           i != as.data.end(); ++i) {
        // The user xattrs that we want all begin with "_" with length > 1.
        // Drop key "_" and all attributes that do not start with '_'
        if (i->first == "_" || i->first[0] != '_')
          continue;
        ret = ioctx.setxattr(ob.hoid.hobj.oid.name, i->first.substr(1).c_str(), i->second);
        if (ret) {
          cerr << "setxattr failed: " << cpp_strerror(ret) << std::endl;
          if (ret != -EOPNOTSUPP)
            return ret;
        }
      }
      break;
    case TYPE_OMAP_HDR:
      oh.decode(ebliter);
      dout(10) << "\tomap header: " << string(oh.hdr.c_str(), oh.hdr.length())
               << dendl;
      if (dry_run || skipping)
        break;
      ret = ioctx.omap_set_header(ob.hoid.hobj.oid.name, oh.hdr);
      if (ret) {
        cerr << "omap_set_header failed: " << cpp_strerror(ret) << std::endl;
        if (ret != -EOPNOTSUPP)
          return ret;
      }
      break;
    case TYPE_OMAP:
      os.decode(ebliter);
      dout(10) << "\tomap: size " << os.omap.size() << dendl;
      if (dry_run || skipping)
        break;
      ret = ioctx.omap_set(ob.hoid.hobj.oid.name, os.omap);
      if (ret) {
        cerr << "omap_set failed: " << cpp_strerror(ret) << std::endl;
        if (ret != -EOPNOTSUPP)
          return ret;
      }
      break;
    case TYPE_OBJECT_END:
      done = true;
      // Flush any remaining sub-alignment tail of buffered data.
      if (need_align && databl.length() > 0) {
        ceph_assert(databl.length() < alignment);
        dout(10) << "END write offset=" << out_offset << " len=" << databl.length() << dendl;
        if (dry_run || skipping)
          break;
        ret = ioctx.write(ob.hoid.hobj.oid.name, databl, databl.length(), out_offset);
        if (ret) {
          cerr << "write failed: " << cpp_strerror(ret) << std::endl;
          return ret;
        }
      }
      break;
    default:
      cerr << "Unexpected section type " << type << std::endl;
      return -EFAULT;
    }
  }
  return 0;
}
| 11,332 | 26.982716 | 93 |
cc
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.