repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null |
ceph-main/src/test/erasure-code/TestErasureCodeShec_all.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014,2015 FUJITSU LIMITED
*
* Author: Shotaro Kawaguchi <[email protected]>
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// SUMMARY: TestErasureCodeShec combination of k,m,c by 301 patterns
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "include/stringify.h"
#include "global/global_init.h"
#include "erasure-code/shec/ErasureCodeShec.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "common/ceph_argparse.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
using namespace std;
// One value-parameterized test case: the (k, m, c) strings fed to the
// shec profile plus the chunk size expected for a 192-byte object.
struct Param_d {
  char* k;       // points at sk once main() fills it in
  char* m;       // points at sm once main() fills it in
  char* c;       // points at sc once main() fills it in
  int ch_size;   // expected chunk length in bytes for a 192-byte object
  char sk[16];   // decimal string form of k, written by main()
  char sm[16];   // decimal string form of m, written by main()
  char sc[16];   // decimal string form of c, written by main()
};
// All valid (k, m, c) combinations; 301 matches the count produced by
// the nested loops in main() (k<=12, m<=k, k+m<=20, c<=m).
struct Param_d param[301];

// Counters accumulated across all test cases and reported by main().
unsigned int g_recover = 0;        // want/avail splits that were recoverable
unsigned int g_cannot_recover = 0; // splits _minimum_to_decode rejected

// Record of one unrecoverable split, kept for the final report.
struct Recover_d {
  int k;
  int m;
  int c;
  set<int> want;   // chunks that were requested
  set<int> avail;  // chunks that were available
};

// Every unrecoverable combination encountered, printed by main().
struct std::vector<Recover_d> cannot_recover;

// Fixture carrying one Param_d per instantiated case.
class ParameterTest : public ::testing::TestWithParam<struct Param_d> {
};
// Exercises one (k, m, c) combination end to end: profile checks,
// _minimum_to_decode over every split losing 1..c chunks, an
// encode/decode round trip of a 192-byte buffer, and CRUSH rule
// creation on a small synthetic map.
TEST_P(ParameterTest, parameter_all)
{
  int result;

  //get parameters
  char* k = GetParam().k;
  char* m = GetParam().m;
  char* c = GetParam().c;
  unsigned c_size = GetParam().ch_size;
  int i_k = atoi(k);
  int i_m = atoi(m);
  int i_c = atoi(c);

  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = k;
  (*profile)["m"] = m;
  (*profile)["c"] = c;
  result = shec->init(*profile, &cerr);

  //check profile
  // The profile must be reflected verbatim; w is expected to default
  // to 8 since the profile did not set it.
  EXPECT_EQ(i_k, shec->k);
  EXPECT_EQ(i_m, shec->m);
  EXPECT_EQ(i_c, shec->c);
  EXPECT_EQ(8, shec->w);
  EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
  EXPECT_STREQ("default", shec->rule_root.c_str());
  EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
  EXPECT_TRUE(shec->matrix != NULL);
  EXPECT_EQ(0, result);

  //minimum_to_decode
  //want_to_decode will be a combination that chooses 1~c from k+m
  set<int> want_to_decode, available_chunks, minimum_chunks;
  // VLA (GCC extension): one flag per chunk; prev_permutation over a
  // descending 1..1 0..0 pattern enumerates all C(k+m, r) combinations.
  int array_want_to_decode[shec->get_chunk_count()];
  struct Recover_d comb;

  for (int w = 1; w <= i_c; w++) {
    const unsigned int r = w; // combination(k+m,r)
    for (unsigned int i = 0; i < r; ++i) {
      array_want_to_decode[i] = 1;
    }
    for (unsigned int i = r; i < shec->get_chunk_count(); ++i) {
      array_want_to_decode[i] = 0;
    }
    do {
      // Wanted chunks are exactly the ones NOT available.
      for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
        available_chunks.insert(i);
      }
      for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
        if (array_want_to_decode[i]) {
          want_to_decode.insert(i);
          available_chunks.erase(i);
        }
      }
      result = shec->_minimum_to_decode(want_to_decode, available_chunks,
                                        &minimum_chunks);
      if (result == 0){
        EXPECT_EQ(0, result);
        EXPECT_TRUE(minimum_chunks.size());
        g_recover++;
      } else {
        // Unrecoverable split: record it for the report in main().
        EXPECT_EQ(-EIO, result);
        EXPECT_EQ(0u, minimum_chunks.size());
        g_cannot_recover++;
        comb.k = shec->k;
        comb.m = shec->m;
        comb.c = shec->c;
        comb.want = want_to_decode;
        comb.avail = available_chunks;
        cannot_recover.push_back(comb);
      }
      want_to_decode.clear();
      available_chunks.clear();
      minimum_chunks.clear();
    } while (std::prev_permutation(
        array_want_to_decode,
        array_want_to_decode + shec->get_chunk_count()));
  }

  //minimum_to_decode_with_cost
  // Single wanted chunk (0) with all chunks available at cost == index.
  set<int> want_to_decode_with_cost, minimum_chunks_with_cost;
  map<int, int> available_chunks_with_cost;
  for (unsigned int i = 0; i < 1; i++) {
    want_to_decode_with_cost.insert(i);
  }
  for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
    available_chunks_with_cost[i] = i;
  }
  result = shec->minimum_to_decode_with_cost(
      want_to_decode_with_cost,
      available_chunks_with_cost,
      &minimum_chunks_with_cost);
  EXPECT_EQ(0, result);
  EXPECT_TRUE(minimum_chunks_with_cost.size());

  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
            "012345"//192
  );
  for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
    want_to_encode.insert(i);
  }
  result = shec->encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, result);
  EXPECT_EQ(i_k+i_m, (int)encoded.size());
  EXPECT_EQ(c_size, encoded[0].length());

  //decode
  // Only chunks 0 and 1 are requested (want_to_decode2 + 2 below).
  int want_to_decode2[i_k + i_m];
  map<int, bufferlist> decoded;
  for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
    want_to_decode2[i] = i;
  }
  result = shec->_decode(set<int>(want_to_decode2, want_to_decode2 + 2),
                         encoded, &decoded);
  EXPECT_EQ(0, result);
  EXPECT_EQ(2u, decoded.size());
  EXPECT_EQ(c_size, decoded[0].length());

  //check encoded,decoded
  bufferlist out1, out2, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); i++) {
    out1.append(encoded[i]);
  }
  //out2 is "decoded"
  shec->decode_concat(encoded, &out2);
  // decode_concat output is padded; only the first in.length() bytes
  // must match the original buffer.
  usable.substr_of(out2, 0, in.length());
  EXPECT_FALSE(out1 == in);
  EXPECT_TRUE(usable == in);

  //create_rule
  // Small synthetic CRUSH map: 2 hosts x 5 osds under root "default".
  stringstream ss;
  CrushWrapper *crush = new CrushWrapper;
  crush->create();
  crush->set_type_name(2, "root");
  crush->set_type_name(1, "host");
  crush->set_type_name(0, "osd");
  int rootno;
  crush->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1, 2, 0, NULL,
                    NULL, &rootno);
  crush->set_item_name(rootno, "default");
  map < string, string > loc;
  loc["root"] = "default";
  int num_host = 2;
  int num_osd = 5;
  int osd = 0;
  for (int h = 0; h < num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o = 0; o < num_osd; ++o, ++osd) {
      crush->insert_item(g_ceph_context, osd, 1.0,
                         string("osd.") + stringify(osd), loc);
    }
  }
  result = shec->create_rule("myrule", *crush, &ss);
  EXPECT_EQ(0, result);
  EXPECT_STREQ("myrule", crush->rule_name_map[0].c_str());

  //get_chunk_count
  EXPECT_EQ(i_k+i_m, (int)shec->get_chunk_count());

  //get_data_chunk_count
  EXPECT_EQ(i_k, (int)shec->get_data_chunk_count());

  //get_chunk_size
  // 192 matches kObjectSize used to precompute ch_size in main().
  EXPECT_EQ(c_size, shec->get_chunk_size(192));

  delete shec;
  delete profile;
  delete crush;
}
// Instantiate ParameterTest once for each of the 301 entries that
// main() writes into the global param[] table before RUN_ALL_TESTS().
INSTANTIATE_TEST_SUITE_P(Test, ParameterTest, ::testing::ValuesIn(param));
// Builds the (k, m, c) parameter table, runs the parameterized tests,
// and prints a recovery-rate summary plus every unrecoverable pattern.
// Returns the gtest exit status.
int main(int argc, char **argv)
{
  int i = 0;
  int r;
  const int kObjectSize = 192;
  unsigned alignment, tail, padded_length;
  float recovery_percentage;

  // Enumerate every (k, m, c) with 1 <= k <= 12, 1 <= m <= k,
  // k + m <= 20 and 1 <= c <= m: exactly 301 combinations, matching
  // the size of the global param[] array.
  for (unsigned int k = 1; k <= 12; k++) {
    for (unsigned int m = 1; (m <= k) && (k + m <= 20); m++) {
      for (unsigned int c = 1; c <= m; c++) {
        // snprintf instead of sprintf: the write is bounded by the
        // 16-byte buffers even if the loop limits are ever widened.
        snprintf(param[i].sk, sizeof(param[i].sk), "%u", k);
        snprintf(param[i].sm, sizeof(param[i].sm), "%u", m);
        snprintf(param[i].sc, sizeof(param[i].sc), "%u", c);
        param[i].k = param[i].sk;
        param[i].m = param[i].sm;
        param[i].c = param[i].sc;

        // Expected chunk size: the 192-byte object is padded up to a
        // multiple of k * 8 * sizeof(int) and split across k chunks.
        alignment = k * 8 * sizeof(int);
        tail = kObjectSize % alignment;
        padded_length = kObjectSize + (tail ? (alignment - tail) : 0);
        param[i].ch_size = padded_length / k;
        i++;
      }
    }
  }

  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_MON_CONFIG);
  common_init_finish(g_ceph_context);

  ::testing::InitGoogleTest(&argc, argv);
  r = RUN_ALL_TESTS();

  // Summary of how many want/avail splits were recoverable overall.
  std::cout << "minimum_to_decode:recover_num = " << g_recover << std::endl;
  std::cout << "minimum_to_decode:cannot_recover_num = " << g_cannot_recover
            << std::endl;
  recovery_percentage = 100.0
      - (float) (100.0 * g_cannot_recover / (g_recover + g_cannot_recover));
  printf("recovery_percentage:%f\n",recovery_percentage);
  if (recovery_percentage > 99.0) {
    std::cout << "[ OK ] Recovery percentage is more than 99.0%"
              << std::endl;
  } else {
    std::cout << "[ NG ] Recovery percentage is less than 99.0%"
              << std::endl;
  }

  // Dump every combination that could not be recovered.
  std::cout << "cannot recovery patterns:" << std::endl;
  for (std::vector<Recover_d>::const_iterator i = cannot_recover.begin();
       i != cannot_recover.end(); ++i) {
    std::cout << "---" << std::endl;
    std::cout << "k = " << i->k << ", m = " << i->m << ", c = " << i->c
              << std::endl;
    std::cout << "want_to_decode :" << i->want << std::endl;
    std::cout << "available_chunks:" << i->avail << std::endl;
  }
  std::cout << "---" << std::endl;

  return r;
}
| 9,076 | 26.258258 | 89 |
cc
|
null |
ceph-main/src/test/erasure-code/TestErasureCodeShec_arguments.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 FUJITSU LIMITED
*
* Author: Shotaro Kawaguchi <[email protected]>
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// SUMMARY: shec's gtest for each argument of minimum_to_decode()/decode()
#include <algorithm>
#include <bit>
#include <cerrno>
#include <cstdlib>
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "include/stringify.h"
#include "global/global_init.h"
#include "erasure-code/shec/ErasureCodeShec.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "common/ceph_argparse.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
using namespace std;
// Counters shared between the test body and main()'s final report.
unsigned int count_num = 0;        // total (want, avail) pairs examined
unsigned int unexpected_count = 0; // pairs whose results contradicted expectations
unsigned int value_count = 0;      // size of the table entry last examined by search_table_shec432()
// Recovery table for shec k=4 m=3 c=2: maps each want_to_read set to
// the minimal available-chunk sets that allow recovery.  Populated by
// create_table_shec432() before the tests run.
map<set<int>,set<set<int> > > shec_table;
// Folds a list of bit positions into a single bitmask, e.g.
// getint({0,1,5}) == 0b100011 == 35.  Usable at compile time.
constexpr int getint(std::initializer_list<int> is) {
  int mask = 0;
  for (auto it = is.begin(); it != is.end(); ++it) {
    mask |= 1 << *it;
  }
  return mask;
}
// Fills the global shec_table for the shec k=4 m=3 c=2 layout (7
// chunks): for every want_to_read set (key) it stores the family of
// minimal available-chunk sets (value) from which recovery is
// possible.  The recoverable patterns are hard-coded as bitmask
// subsets below.
void create_table_shec432() {
  set<int> table_key,vec_avails;
  set<set<int> > table_value;

  for (int want_count = 0; want_count < 7; ++want_count) {
    // Enumerate want sets as non-empty 7-bit masks, grouped by popcount.
    for (unsigned want = 1; want < (1<<7); ++want) {
      table_key.clear();
      table_value.clear();
      if (std::popcount(want) != want_count) {
        continue;
      }
      {
        // Expand the mask into the set<int> used as the table key.
        for (int i = 0; i < 7; ++i) {
          if (want & (1 << i)) {
            table_key.insert(i);
          }
        }
      }
      vector<int> vec;

      // Two-chunk recovery candidates: only when a single chunk is
      // wanted and want|avails equals one of the hard-coded groups
      // {0,1,5} or {2,3,6} (presumably shec(4,3,2)'s local parity
      // groups -- confirm against the shec layout).
      for (unsigned avails = 0; avails < (1<<7); ++avails) {
        if (want & avails) {
          continue;
        }
        if (std::popcount(avails) == 2 &&
            std::popcount(want) == 1) {
          if (std::cmp_equal(want | avails, getint({0,1,5})) ||
              std::cmp_equal(want | avails, getint({2,3,6}))) {
            vec.push_back(avails);
          }
        }
      }

      // Four-chunk recovery candidates: any disjoint avail mask that
      // matches one of the 27 hard-coded recoverable 4-subsets.
      for (unsigned avails = 0; avails < (1<<7); ++avails) {
        if (want & avails) {
          continue;
        }
        if (std::popcount(avails) == 4) {
          auto a = to_array<std::initializer_list<int>>({
              {0,1,2,3}, {0,1,2,4}, {0,1,2,6}, {0,1,3,4}, {0,1,3,6}, {0,1,4,6},
              {0,2,3,4}, {0,2,3,5}, {0,2,4,5}, {0,2,4,6}, {0,2,5,6}, {0,3,4,5},
              {0,3,4,6}, {0,3,5,6}, {0,4,5,6}, {1,2,3,4}, {1,2,3,5}, {1,2,4,5},
              {1,2,4,6}, {1,2,5,6}, {1,3,4,5}, {1,3,4,6}, {1,3,5,6}, {1,4,5,6},
              {2,3,4,5}, {2,4,5,6}, {3,4,5,6}});
          if (ranges::any_of(a, std::bind_front(cmp_equal<uint, int>, avails),
                             getint)) {
            vec.push_back(avails);
          }
        }
      }

      // Drop supersets: keep only minimal recovery sets.  vec[i] is a
      // subset of vec[j] when (vec[i] & vec[j]) == vec[i].
      for (int i = 0; i < (int)vec.size(); ++i) {
        for (int j = i + 1; j < (int)vec.size(); ++j) {
          if ((vec[i] & vec[j]) == vec[i]) {
            vec.erase(vec.begin() + j);
            --j;
          }
        }
      }

      // Convert the surviving masks back into set<int> and record them.
      for (int i = 0; i < (int)vec.size(); ++i) {
        vec_avails.clear();
        for (int j = 0; j < 7; ++j) {
          if (vec[i] & (1 << j)) {
            vec_avails.insert(j);
          }
        }
        table_value.insert(vec_avails);
      }
      shec_table.insert(std::make_pair(table_key,table_value));
    }
  }
}
// Returns true when available_chunks covers at least one of the
// precomputed minimal recovery sets stored in shec_table for
// want_to_read.  Side effect: the global value_count ends up as the
// size of the last candidate set examined (the matching one on
// success); the caller uses it to bound minimum_chunks.size().
//
// Fix over the original: shec_table.find() is checked before
// dereferencing -- an unknown key used to be undefined behavior -- and
// the candidate sets are iterated in place instead of being deep-copied.
bool search_table_shec432(set<int> want_to_read, set<int> available_chunks) {
  const auto entry = shec_table.find(want_to_read);
  if (entry == shec_table.end()) {
    // No table entry: treat an unknown want set as unrecoverable.
    return false;
  }
  for (const set<int>& candidate : entry->second) {
    bool covered = true;
    value_count = 0;
    for (int chunk : candidate) {
      if (!available_chunks.count(chunk)) {
        covered = false;
      }
      // Keep counting past a miss so value_count always ends up as the
      // full size of this candidate set, matching the original logic.
      ++value_count;
    }
    if (covered) {
      return true;
    }
  }
  return false;
}
// For shec k=4 m=3 c=2: enumerates every (want_to_read,
// available_chunks) pair over the 7 chunks and checks
// minimum_to_decode()/decode() against the precomputed recovery table
// built by create_table_shec432().
TEST(ParameterTest, combination_all)
{
  const unsigned int kObjectSize = 128;

  //get profile
  char* k = (char*)"4";
  char* m = (char*)"3";
  char* c = (char*)"2";
  int i_k = atoi(k);
  int i_m = atoi(m);
  int i_c = atoi(c);
  // Expected chunk size: object padded to a multiple of k*8*sizeof(int)
  // bytes and split across the k data chunks.
  const unsigned alignment = i_k * 8 * sizeof(int);
  const unsigned tail = kObjectSize % alignment;
  const unsigned padded_length = kObjectSize + (tail ? (alignment - tail) : 0);
  const unsigned c_size = padded_length / i_k;

  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  map < std::string, std::string > *profile = new map<std::string,
                                                      std::string>();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = k;
  (*profile)["m"] = m;
  (*profile)["c"] = c;
  int result = shec->init(*profile, &cerr);

  //check profile
  EXPECT_EQ(i_k, shec->k);
  EXPECT_EQ(i_m, shec->m);
  EXPECT_EQ(i_c, shec->c);
  EXPECT_EQ(8, shec->w);
  EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
  EXPECT_STREQ("default", shec->rule_root.c_str());
  EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
  EXPECT_TRUE(shec->matrix != NULL);
  EXPECT_EQ(0, result);

  //encode
  bufferlist in;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
            "0123"//128
  );
  set<int> want_to_encode;
  for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  map<int, bufferlist> encoded;
  result = shec->encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, result);
  EXPECT_EQ(i_k+i_m, (int)encoded.size());
  EXPECT_EQ(c_size, encoded[0].length());
  bufferlist out1;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); ++i) {
    out1.append(encoded[i]);
  }
  EXPECT_FALSE(out1 == in);

  // Enumerate all want_to_read subsets (size w1) x all available_chunks
  // subsets (size w2) via prev_permutation over 1..1 0..0 flag arrays.
  for (unsigned int w1 = 0; w1 <= shec->get_chunk_count(); ++w1) {
    // combination(k+m,w1)
    int array_want_to_read[shec->get_chunk_count()];
    for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
      array_want_to_read[i] = i < w1 ? 1 : 0;
    }
    for (unsigned w2 = 0; w2 <= shec->get_chunk_count(); ++w2) {
      // combination(k+m,w2)
      int array_available_chunks[shec->get_chunk_count()];
      for (unsigned int i = 0; i < shec->get_chunk_count(); ++i ) {
        array_available_chunks[i] = i < w2 ? 1 : 0;
      }
      do {
        do {
          set<int> want_to_read, available_chunks;
          map<int, bufferlist> inchunks;
          for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
            if (array_want_to_read[i]) {
              want_to_read.insert(i);
            }
            if (array_available_chunks[i]) {
              available_chunks.insert(i);
              inchunks.insert(make_pair(i,encoded[i]));
            }
          }
          map<int, vector<pair<int,int>>> minimum_chunks;
          map<int, bufferlist> decoded;
          result = shec->minimum_to_decode(want_to_read, available_chunks,
                                           &minimum_chunks);
          int dresult = shec->decode(want_to_read, inchunks, &decoded,
                                     shec->get_chunk_size(kObjectSize));
          ++count_num;
          unsigned int minimum_count = 0;

          if (want_to_read.size() == 0) {
            // Nothing wanted: both calls succeed and return nothing.
            EXPECT_EQ(0, result);
            EXPECT_EQ(0u, minimum_chunks.size());
            EXPECT_EQ(0, dresult);
            EXPECT_EQ(0u, decoded.size());
            // Note: operator[] default-inserts an empty bufferlist here,
            // after the size check above has already passed.
            EXPECT_EQ(0u, decoded[0].length());
            if (result != 0 || dresult != 0) {
              ++unexpected_count;
            }
          } else {
            // want - avail
            // minimum_count = wanted chunks already available.
            set<int> want_to_read_without_avails;
            for (auto chunk : want_to_read) {
              if (!available_chunks.count(chunk)) {
                want_to_read_without_avails.insert(chunk);
              } else {
                ++minimum_count;
              }
            }

            if (want_to_read_without_avails.size() == 0) {
              // Everything wanted is already available: both calls
              // succeed and every wanted data chunk decodes verbatim.
              EXPECT_EQ(0, result);
              EXPECT_LT(0u, minimum_chunks.size());
              EXPECT_GE(minimum_count, minimum_chunks.size());
              EXPECT_EQ(0, dresult);
              EXPECT_NE(0u, decoded.size());
              for (unsigned int i = 0; i < shec->get_data_chunk_count(); ++i) {
                if (array_want_to_read[i]) {
                  bufferlist usable;
                  usable.substr_of(in, c_size * i, c_size);
                  int cmp = memcmp(decoded[i].c_str(), usable.c_str(), c_size);
                  EXPECT_EQ(c_size, decoded[i].length());
                  EXPECT_EQ(0, cmp);
                  if (cmp != 0) {
                    ++unexpected_count;
                  }
                }
              }
              if (result != 0 || dresult != 0) {
                ++unexpected_count;
              }
            } else if (want_to_read_without_avails.size() > 3) {
              // More chunks missing than m(=3): never recoverable.
              EXPECT_EQ(-EIO, result);
              EXPECT_EQ(0u, minimum_chunks.size());
              EXPECT_EQ(-1, dresult);
              if (result != -EIO || dresult != -1) {
                ++unexpected_count;
              }
            } else {
              // search
              // 1..3 chunks missing: consult the precomputed table.
              if (search_table_shec432(want_to_read_without_avails,available_chunks)) {
                EXPECT_EQ(0, result);
                EXPECT_LT(0u, minimum_chunks.size());
                EXPECT_GE(value_count + minimum_count, minimum_chunks.size());
                EXPECT_EQ(0, dresult);
                EXPECT_NE(0u, decoded.size());
                for (unsigned int i = 0; i < shec->get_data_chunk_count(); ++i) {
                  if (array_want_to_read[i]) {
                    bufferlist usable;
                    usable.substr_of(in, c_size * i, c_size);
                    int cmp = memcmp(decoded[i].c_str(), usable.c_str(), c_size);
                    EXPECT_EQ(c_size, decoded[i].length());
                    EXPECT_EQ(0, cmp);
                    if (cmp != 0) {
                      // Dump the mismatching combination for debugging.
                      ++unexpected_count;
                      std::cout << "decoded[" << i << "] = " << decoded[i].c_str() << std::endl;
                      std::cout << "usable = " << usable.c_str() << std::endl;
                      std::cout << "want_to_read :" << want_to_read << std::endl;
                      std::cout << "available_chunks:" << available_chunks << std::endl;
                      std::cout << "minimum_chunks :" << minimum_chunks << std::endl;
                    }
                  }
                }
                if (result != 0 || dresult != 0) {
                  ++unexpected_count;
                }
              } else {
                // Table says this pattern is unrecoverable.
                EXPECT_EQ(-EIO, result);
                EXPECT_EQ(0u, minimum_chunks.size());
                EXPECT_EQ(-1, dresult);
                if (result != -EIO || dresult != -1) {
                  ++unexpected_count;
                }
              }
            }
          }
        } while (std::prev_permutation(
            array_want_to_read,
            array_want_to_read + shec->get_chunk_count()));
      } while (std::prev_permutation(
          array_available_chunks,
          array_available_chunks + shec->get_chunk_count()));
    }
  }
  delete shec;
  delete profile;
}
// Bootstraps a ceph client context, builds the shec(4,3,2) recovery
// table, runs the tests, and prints the combination counters.
int main(int argc, char **argv)
{
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_MON_CONFIG);
  common_init_finish(g_ceph_context);
  ::testing::InitGoogleTest(&argc, argv);

  // The recovery table must exist before the test body consults it.
  create_table_shec432();
  const int rc = RUN_ALL_TESTS();

  // Report how many combinations ran and how many misbehaved.
  std::cout << "minimum_to_decode:total_num = " << count_num << std::endl;
  std::cout << "minimum_to_decode:unexpected_num = " << unexpected_count << std::endl;
  return rc;
}
| 11,876 | 31.013477 | 96 |
cc
|
null |
ceph-main/src/test/erasure-code/TestErasureCodeShec_thread.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014,2015 FUJITSU LIMITED
*
* Author: Shotaro Kawaguchi <[email protected]>
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// SUMMARY: TestErasureCodeShec executes some threads at the same time
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "include/stringify.h"
#include "erasure-code/shec/ErasureCodeShec.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
using namespace std;
// Worker entry point, defined below; takes a TestParam*.
void* thread1(void* pParam);

// One shec profile: k/m/c/w kept as strings so they can be dropped
// straight into an ErasureCodeProfile.
class TestParam {
public:
  string k, m, c, w;
};
// Runs thread1() concurrently with five different (k, m, c, w)
// profiles to exercise the plugin under parallel use.
TEST(ErasureCodeShec, thread)
{
  // The five profiles, in the order the threads are started.
  const char* settings[5][4] = {
    {"6",  "4", "3", "8"},
    {"4",  "3", "2", "16"},
    {"10", "8", "4", "32"},
    {"5",  "5", "5", "8"},
    {"9",  "9", "6", "16"},
  };
  TestParam params[5];
  pthread_t tids[5];

  // Launch all workers; each params[t] outlives its thread since the
  // joins below happen before this scope ends.
  for (int t = 0; t < 5; ++t) {
    params[t].k = settings[t][0];
    params[t].m = settings[t][1];
    params[t].c = settings[t][2];
    params[t].w = settings[t][3];
    pthread_create(&tids[t], NULL, thread1, (void*) &params[t]);
    std::cout << "thread" << (t + 1) << " start " << std::endl;
  }

  // Wait for every worker to finish.
  for (int t = 0; t < 5; ++t) {
    pthread_join(tids[t], NULL);
  }
}
// Worker body shared by all five threads: for about 60 seconds it
// repeatedly builds a shec instance from the TestParam profile,
// encodes a 192-byte buffer, decodes chunks 0-1, and verifies the
// round trip.  Always returns NULL.
void* thread1(void* pParam)
{
  TestParam* param = static_cast<TestParam*>(pParam);
  time_t start, end;

  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  instance.disable_dlclose = true;
  {
    // Plugin registration is serialized across the threads.
    std::lock_guard l{instance.lock};
    __erasure_code_init((char*) "shec", (char*) "");
  }
  std::cout << "__erasure_code_init finish " << std::endl;

  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;

  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" //length = 62
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
            "012345"//192
  );

  //decode
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  map<int, bufferlist> decoded;
  bufferlist out1, out2, usable;

  time(&start);
  time(&end);
  const int kTestSec = 60;
  ErasureCodeShecTableCache tcache;

  // Loop until kTestSec seconds have elapsed or an error breaks out.
  // NOTE(review): the error 'break's below skip the delete of shec and
  // profile, leaking them -- tolerated here since the test is failing
  // at that point anyway.
  while (kTestSec >= (end - start)) {
    //init
    int r;
    ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
        tcache,
        ErasureCodeShec::MULTIPLE);
    ErasureCodeProfile *profile = new ErasureCodeProfile();
    (*profile)["plugin"] = "shec";
    (*profile)["technique"] = "multiple";
    (*profile)["crush-failure-domain"] = "osd";
    (*profile)["k"] = param->k;
    (*profile)["m"] = param->m;
    (*profile)["c"] = param->c;
    (*profile)["w"] = param->w;
    r = shec->init(*profile, &cerr);

    int i_k = std::atoi(param->k.c_str());
    int i_m = std::atoi(param->m.c_str());
    int i_c = std::atoi(param->c.c_str());
    int i_w = std::atoi(param->w.c_str());

    // The initialized object must reflect the profile verbatim.
    EXPECT_EQ(0, r);
    EXPECT_EQ(i_k, shec->k);
    EXPECT_EQ(i_m, shec->m);
    EXPECT_EQ(i_c, shec->c);
    EXPECT_EQ(i_w, shec->w);
    EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
    EXPECT_STREQ("default", shec->rule_root.c_str());
    EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
    EXPECT_TRUE(shec->matrix != NULL);
    if ((shec->matrix == NULL)) {
      std::cout << "matrix is null" << std::endl;
      // error
      break;
    }

    //encode
    for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
      want_to_encode.insert(i);
    }
    r = shec->encode(want_to_encode, in, &encoded);
    EXPECT_EQ(0, r);
    EXPECT_EQ(shec->get_chunk_count(), encoded.size());
    EXPECT_EQ(shec->get_chunk_size(in.length()), encoded[0].length());
    if (r != 0) {
      std::cout << "error in encode" << std::endl;
      //error
      break;
    }

    //decode
    // Only chunks 0 and 1 are requested (want_to_decode + 2).
    r = shec->_decode(set<int>(want_to_decode, want_to_decode + 2),
                      encoded,
                      &decoded);
    EXPECT_EQ(0, r);
    EXPECT_EQ(2u, decoded.size());
    EXPECT_EQ(shec->get_chunk_size(in.length()), decoded[0].length());
    if (r != 0) {
      std::cout << "error in decode" << std::endl;
      //error
      break;
    }

    //out1 is "encoded"
    for (unsigned int i = 0; i < encoded.size(); i++) {
      out1.append(encoded[i]);
    }
    //out2 is "decoded"
    shec->decode_concat(encoded, &out2);
    // Only the first in.length() bytes of the padded output must match.
    usable.substr_of(out2, 0, in.length());
    EXPECT_FALSE(out1 == in);
    EXPECT_TRUE(usable == in);
    if (out1 == in || !(usable == in)) {
      std::cout << "encode(decode) result is not correct" << std::endl;
      break;
    }

    delete shec;
    delete profile;

    // Reset the per-iteration containers and refresh the clock.
    want_to_encode.clear();
    encoded.clear();
    decoded.clear();
    out1.clear();
    out2.clear();
    usable.clear();
    time(&end);
  }

  return NULL;
}
| 5,809 | 25.409091 | 90 |
cc
|
null |
ceph-main/src/test/erasure-code/ceph_erasure_code_benchmark.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <boost/scoped_ptr.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/program_options/option.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/cmdline.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/algorithm/string.hpp>
#include "global/global_context.h"
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/Clock.h"
#include "include/utime.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "erasure-code/ErasureCode.h"
#include "ceph_erasure_code_benchmark.h"
using std::endl;
using std::cerr;
using std::cout;
using std::map;
using std::set;
using std::string;
using std::stringstream;
using std::vector;
namespace po = boost::program_options;
// Parses the benchmark's command-line options.  Recognized flags set
// the buffer size, iteration count, plugin, workload and erasure
// generation mode; unrecognized flags are forwarded to ceph's
// global_init().  Returns 0 on success, 1 after printing --help,
// -EINVAL for an invalid k or m.
int ErasureCodeBench::setup(int argc, char** argv) {
  po::options_description desc("Allowed options");
  desc.add_options()
    ("help,h", "produce help message")
    ("verbose,v", "explain what happens")
    ("size,s", po::value<int>()->default_value(1024 * 1024),
     "size of the buffer to be encoded")
    ("iterations,i", po::value<int>()->default_value(1),
     "number of encode/decode runs")
    ("plugin,p", po::value<string>()->default_value("jerasure"),
     "erasure code plugin name")
    ("workload,w", po::value<string>()->default_value("encode"),
     "run either encode or decode")
    ("erasures,e", po::value<int>()->default_value(1),
     "number of erasures when decoding")
    ("erased", po::value<vector<int> >(),
     "erased chunk (repeat if more than one chunk is erased)")
    ("erasures-generation,E", po::value<string>()->default_value("random"),
     "If set to 'random', pick the number of chunks to recover (as specified by "
     " --erasures) at random. If set to 'exhaustive' try all combinations of erasures "
     " (i.e. k=4,m=3 with one erasure will try to recover from the erasure of "
     " the first chunk, then the second etc.)")
    ("parameter,P", po::value<vector<string> >(),
     "add a parameter to the erasure code profile")
    ;

  // allow_unregistered(): unknown flags are not rejected; they are
  // collected below and handed to global_init().
  po::variables_map vm;
  po::parsed_options parsed =
    po::command_line_parser(argc, argv).options(desc).allow_unregistered().run();
  po::store(
    parsed,
    vm);
  po::notify(vm);

  vector<const char *> ceph_options;
  vector<string> ceph_option_strings = po::collect_unrecognized(
    parsed.options, po::include_positional);
  ceph_options.reserve(ceph_option_strings.size());
  for (vector<string>::iterator i = ceph_option_strings.begin();
       i != ceph_option_strings.end();
       ++i) {
    ceph_options.push_back(i->c_str());
  }

  cct = global_init(
    NULL, ceph_options, CEPH_ENTITY_TYPE_CLIENT,
    CODE_ENVIRONMENT_UTILITY,
    CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);
  g_ceph_context->_conf.apply_changes(nullptr);

  if (vm.count("help")) {
    cout << desc << std::endl;
    return 1;
  }

  // Each --parameter key=value populates the erasure-code profile.
  if (vm.count("parameter")) {
    const vector<string> &p = vm["parameter"].as< vector<string> >();
    for (vector<string>::const_iterator i = p.begin();
         i != p.end();
         ++i) {
      std::vector<std::string> strs;
      boost::split(strs, *i, boost::is_any_of("="));
      if (strs.size() != 2) {
        cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << endl;
      } else {
        profile[strs[0]] = strs[1];
      }
    }
  }

  in_size = vm["size"].as<int>();
  max_iterations = vm["iterations"].as<int>();
  plugin = vm["plugin"].as<string>();
  workload = vm["workload"].as<string>();
  erasures = vm["erasures"].as<int>();
  if (vm.count("erasures-generation") > 0 &&
      vm["erasures-generation"].as<string>() == "exhaustive")
    exhaustive_erasures = true;
  else
    exhaustive_erasures = false;
  if (vm.count("erased") > 0)
    erased = vm["erased"].as<vector<int> >();

  // k and m come from the profile (--parameter k=... --parameter m=...);
  // stoi throws std::invalid_argument/out_of_range, both logic_errors.
  try {
    k = stoi(profile["k"]);
    m = stoi(profile["m"]);
  } catch (const std::logic_error& e) {
    cout << "Invalid k and/or m: k=" << profile["k"] << ", m=" << profile["m"]
         << " (" << e.what() << ")" << endl;
    return -EINVAL;
  }
  if (k <= 0) {
    cout << "parameter k is " << k << ". But k needs to be > 0." << endl;
    return -EINVAL;
  } else if ( m < 0 ) {
    cout << "parameter m is " << m << ". But m needs to be >= 0." << endl;
    return -EINVAL;
  }
  verbose = vm.count("verbose") > 0 ? true : false;
  return 0;
}
int ErasureCodeBench::run() {
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
instance.disable_dlclose = true;
if (workload == "encode")
return encode();
else
return decode();
}
int ErasureCodeBench::encode()
{
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
ErasureCodeInterfaceRef erasure_code;
stringstream messages;
int code = instance.factory(plugin,
g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, &messages);
if (code) {
cerr << messages.str() << endl;
return code;
}
bufferlist in;
in.append(string(in_size, 'X'));
in.rebuild_aligned(ErasureCode::SIMD_ALIGN);
set<int> want_to_encode;
for (int i = 0; i < k + m; i++) {
want_to_encode.insert(i);
}
utime_t begin_time = ceph_clock_now();
for (int i = 0; i < max_iterations; i++) {
std::map<int,bufferlist> encoded;
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
}
utime_t end_time = ceph_clock_now();
cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl;
return 0;
}
static void display_chunks(const map<int,bufferlist> &chunks,
unsigned int chunk_count) {
cout << "chunks ";
for (unsigned int chunk = 0; chunk < chunk_count; chunk++) {
if (chunks.count(chunk) == 0) {
cout << "(" << chunk << ")";
} else {
cout << " " << chunk << " ";
}
cout << " ";
}
cout << "(X) is an erased chunk" << endl;
}
// Depth-first enumeration of erasure patterns: starting from `chunks`,
// erases every combination of `want_erasures` further chunks (chosen
// from index `i` upward), then decodes the missing set and compares
// each recovered chunk byte-for-byte against `all_chunks`.
// Returns 0 when every combination round-trips, non-zero otherwise.
int ErasureCodeBench::decode_erasures(const map<int,bufferlist> &all_chunks,
                                      const map<int,bufferlist> &chunks,
                                      unsigned i,
                                      unsigned want_erasures,
                                      ErasureCodeInterfaceRef erasure_code)
{
  int code = 0;

  if (want_erasures == 0) {
    // Base case: decode whatever is missing from `chunks` and verify
    // it against the pristine copies in `all_chunks`.
    if (verbose)
      display_chunks(chunks, erasure_code->get_chunk_count());
    set<int> want_to_read;
    for (unsigned int chunk = 0; chunk < erasure_code->get_chunk_count(); chunk++)
      if (chunks.count(chunk) == 0)
        want_to_read.insert(chunk);
    map<int,bufferlist> decoded;
    code = erasure_code->decode(want_to_read, chunks, &decoded, 0);
    if (code)
      return code;
    for (set<int>::iterator chunk = want_to_read.begin();
         chunk != want_to_read.end();
         ++chunk) {
      if (all_chunks.find(*chunk)->second.length() != decoded[*chunk].length()) {
        cerr << "chunk " << *chunk << " length=" << all_chunks.find(*chunk)->second.length()
             << " decoded with length=" << decoded[*chunk].length() << endl;
        return -1;
      }
      bufferlist tmp = all_chunks.find(*chunk)->second;
      if (!tmp.contents_equal(decoded[*chunk])) {
        cerr << "chunk " << *chunk
             << " content and recovered content are different" << endl;
        return -1;
      }
    }
    return 0;
  }

  // Recursive case: erase chunk i, i+1, ... in turn and recurse with
  // one fewer erasure left to place (indices stay strictly increasing,
  // so each combination is visited once).
  for (; i < erasure_code->get_chunk_count(); i++) {
    map<int,bufferlist> one_less = chunks;
    one_less.erase(i);
    code = decode_erasures(all_chunks, one_less, i + 1, want_erasures - 1, erasure_code);
    if (code)
      return code;
  }
  return 0;
}
// Benchmarks decoding.  Encodes an in_size buffer once, then for each
// iteration decodes after erasing chunks according to the selected
// mode: exhaustive (--erasures-generation=exhaustive), a fixed
// --erased list, or `erasures` randomly-chosen chunks.  Prints
// "<seconds>\t<KiB processed>".  Returns 0 or an error code.
int ErasureCodeBench::decode()
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  ErasureCodeInterfaceRef erasure_code;
  stringstream messages;
  int code = instance.factory(plugin,
                              g_conf().get_val<std::string>("erasure_code_dir"),
                              profile, &erasure_code, &messages);
  if (code) {
    cerr << messages.str() << endl;
    return code;
  }

  bufferlist in;
  in.append(string(in_size, 'X'));
  in.rebuild_aligned(ErasureCode::SIMD_ALIGN);

  set<int> want_to_encode;
  for (int i = 0; i < k + m; i++) {
    want_to_encode.insert(i);
  }

  map<int,bufferlist> encoded;
  code = erasure_code->encode(want_to_encode, in, &encoded);
  if (code)
    return code;

  set<int> want_to_read = want_to_encode;

  // A fixed --erased list is removed once, outside the timed loop.
  if (erased.size() > 0) {
    for (vector<int>::const_iterator i = erased.begin();
         i != erased.end();
         ++i)
      encoded.erase(*i);
    display_chunks(encoded, erasure_code->get_chunk_count());
  }

  utime_t begin_time = ceph_clock_now();
  for (int i = 0; i < max_iterations; i++) {
    if (exhaustive_erasures) {
      code = decode_erasures(encoded, encoded, 0, erasures, erasure_code);
      if (code)
        return code;
    } else if (erased.size() > 0) {
      map<int,bufferlist> decoded;
      code = erasure_code->decode(want_to_read, encoded, &decoded, 0);
      if (code)
        return code;
    } else {
      map<int,bufferlist> chunks = encoded;
      // Erase `erasures` distinct random chunks for this iteration;
      // the do/while re-rolls indices that are already erased.
      for (int j = 0; j < erasures; j++) {
        int erasure;
        do {
          erasure = rand() % ( k + m );
        } while(chunks.count(erasure) == 0);
        chunks.erase(erasure);
      }
      map<int,bufferlist> decoded;
      code = erasure_code->decode(want_to_read, chunks, &decoded, 0);
      if (code)
        return code;
    }
  }
  utime_t end_time = ceph_clock_now();
  cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl;
  return 0;
}
int main(int argc, char** argv) {
ErasureCodeBench ecbench;
try {
int err = ecbench.setup(argc, argv);
if (err)
return err;
return ecbench.run();
} catch(po::error &e) {
cerr << e.what() << endl;
return 1;
}
}
/*
* Local Variables:
* compile-command: "cd ../../../build ; make -j4 ceph_erasure_code_benchmark &&
* valgrind --tool=memcheck --leak-check=full \
* ./bin/ceph_erasure_code_benchmark \
* --plugin jerasure \
* --parameter directory=lib \
* --parameter technique=reed_sol_van \
* --parameter k=2 \
* --parameter m=2 \
* --iterations 1
* "
* End:
*/
| 10,659 | 29.028169 | 94 |
cc
|
null |
ceph-main/src/test/erasure-code/ceph_erasure_code_benchmark.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_BENCHMARK_H
#define CEPH_ERASURE_CODE_BENCHMARK_H
#include <string>
#include <map>
#include <vector>
#include <boost/intrusive_ptr.hpp>
#include "include/buffer.h"
#include "common/ceph_context.h"
#include "erasure-code/ErasureCodeInterface.h"
// Command-line benchmark driver for erasure code plugins: repeatedly
// encodes and/or decodes an in-memory buffer and prints the elapsed time
// followed by the amount of data processed (KiB).
class ErasureCodeBench {
  int in_size;        // size in bytes of the buffer to encode
  int max_iterations; // number of timed encode/decode repetitions
  int erasures;       // number of chunks to erase before decoding
  int k;              // number of data chunks
  int m;              // number of coding chunks
  std::string plugin; // erasure code plugin name (e.g. "jerasure")
  bool exhaustive_erasures; // decode every combination of `erasures` losses
  std::vector<int> erased;  // explicit chunk ids to erase instead of random ones
  std::string workload;     // presumably selects encode vs decode in run() -- confirm
  ceph::ErasureCodeProfile profile; // parameters handed to the plugin factory
  bool verbose;
  boost::intrusive_ptr<CephContext> cct;
public:
  // Parse command-line options; non-zero on error or --help.
  int setup(int argc, char** argv);
  // Execute the configured workload; non-zero on failure.
  int run();
  // Recursively decode every combination of `want_erasures` missing chunks
  // starting at chunk index `i`, comparing results against `all_chunks`.
  int decode_erasures(const std::map<int, ceph::buffer::list> &all_chunks,
		      const std::map<int, ceph::buffer::list> &chunks,
		      unsigned i,
		      unsigned want_erasures,
		      ErasureCodeInterfaceRef erasure_code);
  int decode();
  int encode();
};
#endif
| 1,482 | 22.539683 | 74 |
h
|
null |
ceph-main/src/test/erasure-code/ceph_erasure_code_non_regression.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Red Hat (C) 2014, 2015 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include <boost/scoped_ptr.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/program_options/option.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/cmdline.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/algorithm/string.hpp>
#include "global/global_context.h"
#include "global/global_init.h"
#include "common/errno.h"
#include "common/ceph_context.h"
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "erasure-code/ErasureCodePlugin.h"
namespace po = boost::program_options;
using namespace std;
// Non-regression guard for erasure code plugins: --create writes a
// pseudo-random payload and its encoded chunks to disk; --check re-encodes
// the payload, verifies the chunks are byte-identical to the stored ones,
// and verifies that erased chunks can be recovered.
class ErasureCodeNonRegression {
  unsigned stripe_width; // size in bytes of the buffer to encode
  string plugin;         // erasure code plugin name
  bool create;           // --create was given
  bool check;            // --check was given
  string base;           // prefix for all generated paths
  string directory;      // base + plugin/stripe-width/parameter-derived subdir
  ErasureCodeProfile profile; // parameters handed to the plugin factory
  boost::intrusive_ptr<CephContext> cct;
public:
  int setup(int argc, char** argv);
  int run();
  int run_create();
  int run_check();
  // Decode `chunks` with `erasures` removed and compare every recovered
  // chunk with the original; non-zero on mismatch or decode failure.
  int decode_erasures(ErasureCodeInterfaceRef erasure_code,
		      set<int> erasures,
		      map<int,bufferlist> chunks);
  string content_path();
  string chunk_path(unsigned int chunk);
};
// Parse command-line options, initialize the ceph context and derive the
// working directory name from the plugin, stripe width and parameters.
// Returns non-zero on error or when --help was requested.
int ErasureCodeNonRegression::setup(int argc, char** argv) {
  po::options_description desc("Allowed options");
  desc.add_options()
    ("help,h", "produce help message")
    ("stripe-width,s", po::value<int>()->default_value(4 * 1024),
     "stripe_width, i.e. the size of the buffer to be encoded")
    ("plugin,p", po::value<string>()->default_value("jerasure"),
     "erasure code plugin name")
    ("base", po::value<string>()->default_value("."),
     "prefix all paths with base")
    ("parameter,P", po::value<vector<string> >(),
     "add a parameter to the erasure code profile")
    ("create", "create the erasure coded content in the directory")
    ("check", "check the content in the directory matches the chunks and vice versa")
    ;
  po::variables_map vm;
  // allow_unregistered(): unknown options are forwarded to global_init below.
  po::parsed_options parsed =
    po::command_line_parser(argc, argv).options(desc).allow_unregistered().run();
  po::store(
    parsed,
    vm);
  po::notify(vm);
  vector<const char *> ceph_options;
  vector<string> ceph_option_strings = po::collect_unrecognized(
    parsed.options, po::include_positional);
  ceph_options.reserve(ceph_option_strings.size());
  for (vector<string>::iterator i = ceph_option_strings.begin();
       i != ceph_option_strings.end();
       ++i) {
    ceph_options.push_back(i->c_str());
  }
  cct = global_init(NULL, ceph_options, CEPH_ENTITY_TYPE_CLIENT,
		    CODE_ENVIRONMENT_UTILITY,
		    CINIT_FLAG_NO_MON_CONFIG);
  common_init_finish(g_ceph_context);
  g_ceph_context->_conf.apply_changes(nullptr);
  if (vm.count("help")) {
    cout << desc << std::endl;
    return 1;
  }
  stripe_width = vm["stripe-width"].as<int>();
  plugin = vm["plugin"].as<string>();
  base = vm["base"].as<string>();
  check = vm.count("check") > 0;
  create = vm.count("create") > 0;
  if (!check && !create) {
    // Fixed typo in the error message ("specifify" -> "specify").
    cerr << "must specify either --check, or --create" << endl;
    return 1;
  }
  {
    // Directory name encodes the configuration so different runs don't clash.
    stringstream path;
    path << base << "/" << "plugin=" << plugin << " stripe-width=" << stripe_width;
    directory = path.str();
  }
  if (vm.count("parameter")) {
    const vector<string> &p = vm["parameter"].as< vector<string> >();
    for (vector<string>::const_iterator i = p.begin();
	 i != p.end();
	 ++i) {
      std::vector<std::string> strs;
      boost::split(strs, *i, boost::is_any_of("="));
      if (strs.size() != 2) {
	cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << endl;
      } else {
	profile[strs[0]] = strs[1];
      }
      // Every parameter also becomes part of the directory name.
      directory += " " + *i;
    }
  }
  return 0;
}
// Execute the requested phases in order: --create first, then --check.
// Stops at the first failure and returns its error code.
int ErasureCodeNonRegression::run()
{
  int err = 0;
  if (create) {
    err = run_create();
    if (err)
      return err;
  }
  if (check)
    err = run_check();
  return err;
}
int ErasureCodeNonRegression::run_create()
{
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
ErasureCodeInterfaceRef erasure_code;
stringstream messages;
int code = instance.factory(plugin,
g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, &messages);
if (code) {
cerr << messages.str() << endl;
return code;
}
if (::mkdir(directory.c_str(), 0755)) {
cerr << "mkdir(" << directory << "): " << cpp_strerror(errno) << endl;
return 1;
}
unsigned payload_chunk_size = 37;
string payload;
for (unsigned j = 0; j < payload_chunk_size; ++j)
payload.push_back('a' + (rand() % 26));
bufferlist in;
for (unsigned j = 0; j < stripe_width; j += payload_chunk_size)
in.append(payload);
if (stripe_width < in.length())
in.splice(stripe_width, in.length() - stripe_width);
if (in.write_file(content_path().c_str()))
return 1;
set<int> want_to_encode;
for (unsigned int i = 0; i < erasure_code->get_chunk_count(); i++) {
want_to_encode.insert(i);
}
map<int,bufferlist> encoded;
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
for (map<int,bufferlist>::iterator chunk = encoded.begin();
chunk != encoded.end();
++chunk) {
if (chunk->second.write_file(chunk_path(chunk->first).c_str()))
return 1;
}
return 0;
}
// Decode `chunks` pretending the chunks listed in `erasures` were lost,
// then verify every recovered chunk matches the original byte for byte.
int ErasureCodeNonRegression::decode_erasures(ErasureCodeInterfaceRef erasure_code,
					      set<int> erasures,
					      map<int,bufferlist> chunks)
{
  // Keep only the chunks that survive the erasures.
  map<int,bufferlist> available;
  for (auto &entry : chunks) {
    if (!erasures.count(entry.first))
      available[entry.first] = entry.second;
  }
  map<int,bufferlist> decoded;
  int ret = erasure_code->decode(erasures, available, &decoded,
				 available.begin()->second.length());
  if (ret)
    return ret;
  for (const int lost : erasures) {
    if (!chunks[lost].contents_equal(decoded[lost])) {
      cerr << "chunk " << lost << " incorrectly recovered" << endl;
      return 1;
    }
  }
  return 0;
}
int ErasureCodeNonRegression::run_check()
{
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
ErasureCodeInterfaceRef erasure_code;
stringstream messages;
int code = instance.factory(plugin,
g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, &messages);
if (code) {
cerr << messages.str() << endl;
return code;
}
string errors;
bufferlist in;
if (in.read_file(content_path().c_str(), &errors)) {
cerr << errors << endl;
return 1;
}
set<int> want_to_encode;
for (unsigned int i = 0; i < erasure_code->get_chunk_count(); i++) {
want_to_encode.insert(i);
}
map<int,bufferlist> encoded;
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
for (map<int,bufferlist>::iterator chunk = encoded.begin();
chunk != encoded.end();
++chunk) {
bufferlist existing;
if (existing.read_file(chunk_path(chunk->first).c_str(), &errors)) {
cerr << errors << endl;
return 1;
}
bufferlist &old = chunk->second;
if (existing.length() != old.length() ||
memcmp(existing.c_str(), old.c_str(), old.length())) {
cerr << "chunk " << chunk->first << " encodes differently" << endl;
return 1;
}
}
// erasing a single chunk is likely to use a specific code path in every plugin
set<int> erasures;
erasures.clear();
erasures.insert(0);
code = decode_erasures(erasure_code, erasures, encoded);
if (code)
return code;
if (erasure_code->get_chunk_count() - erasure_code->get_data_chunk_count() > 1) {
// erasing two chunks is likely to be the general case
erasures.clear();
erasures.insert(0);
erasures.insert(erasure_code->get_chunk_count() - 1);
code = decode_erasures(erasure_code, erasures, encoded);
if (code)
return code;
}
return 0;
}
// Path of the file holding the original (unencoded) payload.
string ErasureCodeNonRegression::content_path()
{
  return directory + "/content";
}
// Path of the file holding one encoded chunk, named after its id.
string ErasureCodeNonRegression::chunk_path(unsigned int chunk)
{
  return directory + "/" + to_string(chunk);
}
int main(int argc, char** argv) {
  // Parse options, then run the requested --create / --check phases.
  ErasureCodeNonRegression non_regression;
  const int err = non_regression.setup(argc, argv);
  return err ? err : non_regression.run();
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 &&
* make ceph_erasure_code_non_regression &&
* libtool --mode=execute valgrind --tool=memcheck --leak-check=full \
* ./ceph_erasure_code_non_regression \
* --plugin jerasure \
* --parameter technique=reed_sol_van \
* --parameter k=2 \
* --parameter m=2 \
* --directory /tmp/ceph_erasure_code_non_regression \
* --stripe-width 3181 \
* --create \
* --check
* "
* End:
*/
| 9,521 | 28.030488 | 101 |
cc
|
null |
ceph-main/src/test/exporter/test_exporter.cc
|
#include "gtest/gtest.h"
#include "exporter/util.h"
#include <string>
#include <vector>
#include <utility>
// 17.2.6's memento mori:
// This data was gathered from the python implementation of the promethize method
// where we transform the path of a counter to a valid prometheus name.
static std::vector<std::pair<std::string, std::string>> promethize_data = {
{"bluefs.alloc_slow_fallback", "ceph_bluefs_alloc_slow_fallback"},
{"bluefs.alloc_slow_size_fallback", "ceph_bluefs_alloc_slow_size_fallback"},
{"bluefs.alloc_unit_db", "ceph_bluefs_alloc_unit_db"},
{"bluefs.alloc_unit_main", "ceph_bluefs_alloc_unit_main"},
{"bluefs.alloc_unit_wal", "ceph_bluefs_alloc_unit_wal"},
{"bluefs.bytes_written_slow", "ceph_bluefs_bytes_written_slow"},
{"bluefs.bytes_written_sst", "ceph_bluefs_bytes_written_sst"},
{"bluefs.bytes_written_wal", "ceph_bluefs_bytes_written_wal"},
{"bluefs.compact_lat_count", "ceph_bluefs_compact_lat_count"},
{"bluefs.compact_lat_sum", "ceph_bluefs_compact_lat_sum"},
{"bluefs.compact_lock_lat_count", "ceph_bluefs_compact_lock_lat_count"},
{"bluefs.compact_lock_lat_sum", "ceph_bluefs_compact_lock_lat_sum"},
{"bluefs.db_total_bytes", "ceph_bluefs_db_total_bytes"},
{"bluefs.db_used_bytes", "ceph_bluefs_db_used_bytes"},
{"bluefs.log_bytes", "ceph_bluefs_log_bytes"},
{"bluefs.logged_bytes", "ceph_bluefs_logged_bytes"},
{"bluefs.max_bytes_db", "ceph_bluefs_max_bytes_db"},
{"bluefs.max_bytes_slow", "ceph_bluefs_max_bytes_slow"},
{"bluefs.max_bytes_wal", "ceph_bluefs_max_bytes_wal"},
{"bluefs.num_files", "ceph_bluefs_num_files"},
{"bluefs.read_bytes", "ceph_bluefs_read_bytes"},
{"bluefs.read_count", "ceph_bluefs_read_count"},
{"bluefs.read_disk_bytes", "ceph_bluefs_read_disk_bytes"},
{"bluefs.read_disk_bytes_db", "ceph_bluefs_read_disk_bytes_db"},
{"bluefs.read_disk_bytes_slow", "ceph_bluefs_read_disk_bytes_slow"},
{"bluefs.read_disk_bytes_wal", "ceph_bluefs_read_disk_bytes_wal"},
{"bluefs.read_disk_count", "ceph_bluefs_read_disk_count"},
{"bluefs.read_prefetch_bytes", "ceph_bluefs_read_prefetch_bytes"},
{"bluefs.read_prefetch_count", "ceph_bluefs_read_prefetch_count"},
{"bluefs.read_random_buffer_bytes", "ceph_bluefs_read_random_buffer_bytes"},
{"bluefs.read_random_buffer_count", "ceph_bluefs_read_random_buffer_count"},
{"bluefs.read_random_bytes", "ceph_bluefs_read_random_bytes"},
{"bluefs.read_random_count", "ceph_bluefs_read_random_count"},
{"bluefs.read_random_disk_bytes", "ceph_bluefs_read_random_disk_bytes"},
{"bluefs.read_random_disk_bytes_db", "ceph_bluefs_read_random_disk_bytes_db"},
{"bluefs.read_random_disk_bytes_slow", "ceph_bluefs_read_random_disk_bytes_slow"},
{"bluefs.read_random_disk_bytes_wal", "ceph_bluefs_read_random_disk_bytes_wal"},
{"bluefs.read_random_disk_count", "ceph_bluefs_read_random_disk_count"},
{"bluefs.slow_total_bytes", "ceph_bluefs_slow_total_bytes"},
{"bluefs.slow_used_bytes", "ceph_bluefs_slow_used_bytes"},
{"bluefs.wal_total_bytes", "ceph_bluefs_wal_total_bytes"},
{"bluefs.wal_used_bytes", "ceph_bluefs_wal_used_bytes"},
{"bluestore-pricache.cache_bytes", "ceph_bluestore_pricache_cache_bytes"},
{"bluestore-pricache.heap_bytes", "ceph_bluestore_pricache_heap_bytes"},
{"bluestore-pricache.mapped_bytes", "ceph_bluestore_pricache_mapped_bytes"},
{"bluestore-pricache.target_bytes", "ceph_bluestore_pricache_target_bytes"},
{"bluestore-pricache.unmapped_bytes", "ceph_bluestore_pricache_unmapped_bytes"},
{"bluestore-pricache:data.committed_bytes", "ceph_bluestore_pricache:data_committed_bytes"},
{"bluestore-pricache:data.pri0_bytes", "ceph_bluestore_pricache:data_pri0_bytes"},
{"bluestore-pricache:data.pri10_bytes", "ceph_bluestore_pricache:data_pri10_bytes"},
{"bluestore-pricache:data.pri11_bytes", "ceph_bluestore_pricache:data_pri11_bytes"},
{"bluestore-pricache:data.pri1_bytes", "ceph_bluestore_pricache:data_pri1_bytes"},
{"bluestore-pricache:data.pri2_bytes", "ceph_bluestore_pricache:data_pri2_bytes"},
{"bluestore-pricache:data.pri3_bytes", "ceph_bluestore_pricache:data_pri3_bytes"},
{"bluestore-pricache:data.pri4_bytes", "ceph_bluestore_pricache:data_pri4_bytes"},
{"bluestore-pricache:data.pri5_bytes", "ceph_bluestore_pricache:data_pri5_bytes"},
{"bluestore-pricache:data.pri6_bytes", "ceph_bluestore_pricache:data_pri6_bytes"},
{"bluestore-pricache:data.pri7_bytes", "ceph_bluestore_pricache:data_pri7_bytes"},
{"bluestore-pricache:data.pri8_bytes", "ceph_bluestore_pricache:data_pri8_bytes"},
{"bluestore-pricache:data.pri9_bytes", "ceph_bluestore_pricache:data_pri9_bytes"},
{"bluestore-pricache:data.reserved_bytes", "ceph_bluestore_pricache:data_reserved_bytes"},
{"bluestore-pricache:kv.committed_bytes", "ceph_bluestore_pricache:kv_committed_bytes"},
{"bluestore-pricache:kv.pri0_bytes", "ceph_bluestore_pricache:kv_pri0_bytes"},
{"bluestore-pricache:kv.pri10_bytes", "ceph_bluestore_pricache:kv_pri10_bytes"},
{"bluestore-pricache:kv.pri11_bytes", "ceph_bluestore_pricache:kv_pri11_bytes"},
{"bluestore-pricache:kv.pri1_bytes", "ceph_bluestore_pricache:kv_pri1_bytes"},
{"bluestore-pricache:kv.pri2_bytes", "ceph_bluestore_pricache:kv_pri2_bytes"},
{"bluestore-pricache:kv.pri3_bytes", "ceph_bluestore_pricache:kv_pri3_bytes"},
{"bluestore-pricache:kv.pri4_bytes", "ceph_bluestore_pricache:kv_pri4_bytes"},
{"bluestore-pricache:kv.pri5_bytes", "ceph_bluestore_pricache:kv_pri5_bytes"},
{"bluestore-pricache:kv.pri6_bytes", "ceph_bluestore_pricache:kv_pri6_bytes"},
{"bluestore-pricache:kv.pri7_bytes", "ceph_bluestore_pricache:kv_pri7_bytes"},
{"bluestore-pricache:kv.pri8_bytes", "ceph_bluestore_pricache:kv_pri8_bytes"},
{"bluestore-pricache:kv.pri9_bytes", "ceph_bluestore_pricache:kv_pri9_bytes"},
{"bluestore-pricache:kv.reserved_bytes", "ceph_bluestore_pricache:kv_reserved_bytes"},
{"bluestore-pricache:kv_onode.committed_bytes", "ceph_bluestore_pricache:kv_onode_committed_bytes"},
{"bluestore-pricache:kv_onode.pri0_bytes", "ceph_bluestore_pricache:kv_onode_pri0_bytes"},
{"bluestore-pricache:kv_onode.pri10_bytes", "ceph_bluestore_pricache:kv_onode_pri10_bytes"},
{"bluestore-pricache:kv_onode.pri11_bytes", "ceph_bluestore_pricache:kv_onode_pri11_bytes"},
{"bluestore-pricache:kv_onode.pri1_bytes", "ceph_bluestore_pricache:kv_onode_pri1_bytes"},
{"bluestore-pricache:kv_onode.pri2_bytes", "ceph_bluestore_pricache:kv_onode_pri2_bytes"},
{"bluestore-pricache:kv_onode.pri3_bytes", "ceph_bluestore_pricache:kv_onode_pri3_bytes"},
{"bluestore-pricache:kv_onode.pri4_bytes", "ceph_bluestore_pricache:kv_onode_pri4_bytes"},
{"bluestore-pricache:kv_onode.pri5_bytes", "ceph_bluestore_pricache:kv_onode_pri5_bytes"},
{"bluestore-pricache:kv_onode.pri6_bytes", "ceph_bluestore_pricache:kv_onode_pri6_bytes"},
{"bluestore-pricache:kv_onode.pri7_bytes", "ceph_bluestore_pricache:kv_onode_pri7_bytes"},
{"bluestore-pricache:kv_onode.pri8_bytes", "ceph_bluestore_pricache:kv_onode_pri8_bytes"},
{"bluestore-pricache:kv_onode.pri9_bytes", "ceph_bluestore_pricache:kv_onode_pri9_bytes"},
{"bluestore-pricache:kv_onode.reserved_bytes", "ceph_bluestore_pricache:kv_onode_reserved_bytes"},
{"bluestore-pricache:meta.committed_bytes", "ceph_bluestore_pricache:meta_committed_bytes"},
{"bluestore-pricache:meta.pri0_bytes", "ceph_bluestore_pricache:meta_pri0_bytes"},
{"bluestore-pricache:meta.pri10_bytes", "ceph_bluestore_pricache:meta_pri10_bytes"},
{"bluestore-pricache:meta.pri11_bytes", "ceph_bluestore_pricache:meta_pri11_bytes"},
{"bluestore-pricache:meta.pri1_bytes", "ceph_bluestore_pricache:meta_pri1_bytes"},
{"bluestore-pricache:meta.pri2_bytes", "ceph_bluestore_pricache:meta_pri2_bytes"},
{"bluestore-pricache:meta.pri3_bytes", "ceph_bluestore_pricache:meta_pri3_bytes"},
{"bluestore-pricache:meta.pri4_bytes", "ceph_bluestore_pricache:meta_pri4_bytes"},
{"bluestore-pricache:meta.pri5_bytes", "ceph_bluestore_pricache:meta_pri5_bytes"},
{"bluestore-pricache:meta.pri6_bytes", "ceph_bluestore_pricache:meta_pri6_bytes"},
{"bluestore-pricache:meta.pri7_bytes", "ceph_bluestore_pricache:meta_pri7_bytes"},
{"bluestore-pricache:meta.pri8_bytes", "ceph_bluestore_pricache:meta_pri8_bytes"},
{"bluestore-pricache:meta.pri9_bytes", "ceph_bluestore_pricache:meta_pri9_bytes"},
{"bluestore-pricache:meta.reserved_bytes", "ceph_bluestore_pricache:meta_reserved_bytes"},
{"bluestore.alloc_unit", "ceph_bluestore_alloc_unit"},
{"bluestore.allocated", "ceph_bluestore_allocated"},
{"bluestore.clist_lat_count", "ceph_bluestore_clist_lat_count"},
{"bluestore.clist_lat_sum", "ceph_bluestore_clist_lat_sum"},
{"bluestore.compress_lat_count", "ceph_bluestore_compress_lat_count"},
{"bluestore.compress_lat_sum", "ceph_bluestore_compress_lat_sum"},
{"bluestore.compressed", "ceph_bluestore_compressed"},
{"bluestore.compressed_allocated", "ceph_bluestore_compressed_allocated"},
{"bluestore.compressed_original", "ceph_bluestore_compressed_original"},
{"bluestore.csum_lat_count", "ceph_bluestore_csum_lat_count"},
{"bluestore.csum_lat_sum", "ceph_bluestore_csum_lat_sum"},
{"bluestore.decompress_lat_count", "ceph_bluestore_decompress_lat_count"},
{"bluestore.decompress_lat_sum", "ceph_bluestore_decompress_lat_sum"},
{"bluestore.kv_commit_lat_count", "ceph_bluestore_kv_commit_lat_count"},
{"bluestore.kv_commit_lat_sum", "ceph_bluestore_kv_commit_lat_sum"},
{"bluestore.kv_final_lat_count", "ceph_bluestore_kv_final_lat_count"},
{"bluestore.kv_final_lat_sum", "ceph_bluestore_kv_final_lat_sum"},
{"bluestore.kv_flush_lat_count", "ceph_bluestore_kv_flush_lat_count"},
{"bluestore.kv_flush_lat_sum", "ceph_bluestore_kv_flush_lat_sum"},
{"bluestore.kv_sync_lat_count", "ceph_bluestore_kv_sync_lat_count"},
{"bluestore.kv_sync_lat_sum", "ceph_bluestore_kv_sync_lat_sum"},
{"bluestore.omap_get_keys_lat_count", "ceph_bluestore_omap_get_keys_lat_count"},
{"bluestore.omap_get_keys_lat_sum", "ceph_bluestore_omap_get_keys_lat_sum"},
{"bluestore.omap_get_values_lat_count", "ceph_bluestore_omap_get_values_lat_count"},
{"bluestore.omap_get_values_lat_sum", "ceph_bluestore_omap_get_values_lat_sum"},
{"bluestore.omap_lower_bound_lat_count", "ceph_bluestore_omap_lower_bound_lat_count"},
{"bluestore.omap_lower_bound_lat_sum", "ceph_bluestore_omap_lower_bound_lat_sum"},
{"bluestore.omap_next_lat_count", "ceph_bluestore_omap_next_lat_count"},
{"bluestore.omap_next_lat_sum", "ceph_bluestore_omap_next_lat_sum"},
{"bluestore.omap_seek_to_first_lat_count", "ceph_bluestore_omap_seek_to_first_lat_count"},
{"bluestore.omap_seek_to_first_lat_sum", "ceph_bluestore_omap_seek_to_first_lat_sum"},
{"bluestore.omap_upper_bound_lat_count", "ceph_bluestore_omap_upper_bound_lat_count"},
{"bluestore.omap_upper_bound_lat_sum", "ceph_bluestore_omap_upper_bound_lat_sum"},
{"bluestore.onode_hits", "ceph_bluestore_onode_hits"},
{"bluestore.onode_misses", "ceph_bluestore_onode_misses"},
{"bluestore.read_lat_count", "ceph_bluestore_read_lat_count"},
{"bluestore.read_lat_sum", "ceph_bluestore_read_lat_sum"},
{"bluestore.read_onode_meta_lat_count", "ceph_bluestore_read_onode_meta_lat_count"},
{"bluestore.read_onode_meta_lat_sum", "ceph_bluestore_read_onode_meta_lat_sum"},
{"bluestore.read_wait_aio_lat_count", "ceph_bluestore_read_wait_aio_lat_count"},
{"bluestore.read_wait_aio_lat_sum", "ceph_bluestore_read_wait_aio_lat_sum"},
{"bluestore.reads_with_retries", "ceph_bluestore_reads_with_retries"},
{"bluestore.remove_lat_count", "ceph_bluestore_remove_lat_count"},
{"bluestore.remove_lat_sum", "ceph_bluestore_remove_lat_sum"},
{"bluestore.state_aio_wait_lat_count", "ceph_bluestore_state_aio_wait_lat_count"},
{"bluestore.state_aio_wait_lat_sum", "ceph_bluestore_state_aio_wait_lat_sum"},
{"bluestore.state_deferred_aio_wait_lat_count", "ceph_bluestore_state_deferred_aio_wait_lat_count"},
{"bluestore.state_deferred_aio_wait_lat_sum", "ceph_bluestore_state_deferred_aio_wait_lat_sum"},
{"bluestore.state_deferred_cleanup_lat_count", "ceph_bluestore_state_deferred_cleanup_lat_count"},
{"bluestore.state_deferred_cleanup_lat_sum", "ceph_bluestore_state_deferred_cleanup_lat_sum"},
{"bluestore.state_deferred_queued_lat_count", "ceph_bluestore_state_deferred_queued_lat_count"},
{"bluestore.state_deferred_queued_lat_sum", "ceph_bluestore_state_deferred_queued_lat_sum"},
{"bluestore.state_done_lat_count", "ceph_bluestore_state_done_lat_count"},
{"bluestore.state_done_lat_sum", "ceph_bluestore_state_done_lat_sum"},
{"bluestore.state_finishing_lat_count", "ceph_bluestore_state_finishing_lat_count"},
{"bluestore.state_finishing_lat_sum", "ceph_bluestore_state_finishing_lat_sum"},
{"bluestore.state_io_done_lat_count", "ceph_bluestore_state_io_done_lat_count"},
{"bluestore.state_io_done_lat_sum", "ceph_bluestore_state_io_done_lat_sum"},
{"bluestore.state_kv_commiting_lat_count", "ceph_bluestore_state_kv_commiting_lat_count"},
{"bluestore.state_kv_commiting_lat_sum", "ceph_bluestore_state_kv_commiting_lat_sum"},
{"bluestore.state_kv_done_lat_count", "ceph_bluestore_state_kv_done_lat_count"},
{"bluestore.state_kv_done_lat_sum", "ceph_bluestore_state_kv_done_lat_sum"},
{"bluestore.state_kv_queued_lat_count", "ceph_bluestore_state_kv_queued_lat_count"},
{"bluestore.state_kv_queued_lat_sum", "ceph_bluestore_state_kv_queued_lat_sum"},
{"bluestore.state_prepare_lat_count", "ceph_bluestore_state_prepare_lat_count"},
{"bluestore.state_prepare_lat_sum", "ceph_bluestore_state_prepare_lat_sum"},
{"bluestore.stored", "ceph_bluestore_stored"},
{"bluestore.truncate_lat_count", "ceph_bluestore_truncate_lat_count"},
{"bluestore.truncate_lat_sum", "ceph_bluestore_truncate_lat_sum"},
{"bluestore.txc_commit_lat_count", "ceph_bluestore_txc_commit_lat_count"},
{"bluestore.txc_commit_lat_sum", "ceph_bluestore_txc_commit_lat_sum"},
{"bluestore.txc_submit_lat_count", "ceph_bluestore_txc_submit_lat_count"},
{"bluestore.txc_submit_lat_sum", "ceph_bluestore_txc_submit_lat_sum"},
{"bluestore.txc_throttle_lat_count", "ceph_bluestore_txc_throttle_lat_count"},
{"bluestore.txc_throttle_lat_sum", "ceph_bluestore_txc_throttle_lat_sum"},
{"cluster_by_class_total_bytes", "ceph_cluster_by_class_total_bytes"},
{"cluster_by_class_total_used_bytes", "ceph_cluster_by_class_total_used_bytes"},
{"cluster_by_class_total_used_raw_bytes", "ceph_cluster_by_class_total_used_raw_bytes"},
{"cluster_osd_blocklist_count", "ceph_cluster_osd_blocklist_count"},
{"cluster_total_bytes", "ceph_cluster_total_bytes"},
{"cluster_total_used_bytes", "ceph_cluster_total_used_bytes"},
{"cluster_total_used_raw_bytes", "ceph_cluster_total_used_raw_bytes"},
{"daemon_health_metrics", "ceph_daemon_health_metrics"},
{"disk_occupation", "ceph_disk_occupation"},
{"disk_occupation_human", "ceph_disk_occupation_human"},
{"fs_metadata", "ceph_fs_metadata"},
{"health_detail", "ceph_health_detail"},
{"health_status", "ceph_health_status"},
{"healthcheck_slow_ops", "ceph_healthcheck_slow_ops"},
{"mds.caps", "ceph_mds_caps"},
{"mds.ceph_cap_op_flush_ack", "ceph_mds_ceph_cap_op_flush_ack"},
{"mds.ceph_cap_op_flushsnap_ack", "ceph_mds_ceph_cap_op_flushsnap_ack"},
{"mds.ceph_cap_op_grant", "ceph_mds_ceph_cap_op_grant"},
{"mds.ceph_cap_op_revoke", "ceph_mds_ceph_cap_op_revoke"},
{"mds.ceph_cap_op_trunc", "ceph_mds_ceph_cap_op_trunc"},
{"mds.dir_commit", "ceph_mds_dir_commit"},
{"mds.dir_fetch_complete", "ceph_mds_dir_fetch_complete"},
{"mds.dir_fetch_keys", "ceph_mds_dir_fetch_keys"},
{"mds.dir_merge", "ceph_mds_dir_merge"},
{"mds.dir_split", "ceph_mds_dir_split"},
{"mds.exported_inodes", "ceph_mds_exported_inodes"},
{"mds.forward", "ceph_mds_forward"},
{"mds.handle_client_cap_release", "ceph_mds_handle_client_cap_release"},
{"mds.handle_client_caps", "ceph_mds_handle_client_caps"},
{"mds.handle_client_caps_dirty", "ceph_mds_handle_client_caps_dirty"},
{"mds.handle_inode_file_caps", "ceph_mds_handle_inode_file_caps"},
{"mds.imported_inodes", "ceph_mds_imported_inodes"},
{"mds.inodes", "ceph_mds_inodes"},
{"mds.inodes_expired", "ceph_mds_inodes_expired"},
{"mds.inodes_pinned", "ceph_mds_inodes_pinned"},
{"mds.inodes_with_caps", "ceph_mds_inodes_with_caps"},
{"mds.load_cent", "ceph_mds_load_cent"},
{"mds.openino_dir_fetch", "ceph_mds_openino_dir_fetch"},
{"mds.process_request_cap_release", "ceph_mds_process_request_cap_release"},
{"mds.reply_latency_count", "ceph_mds_reply_latency_count"},
{"mds.reply_latency_sum", "ceph_mds_reply_latency_sum"},
{"mds.request", "ceph_mds_request"},
{"mds.root_rbytes", "ceph_mds_root_rbytes"},
{"mds.root_rfiles", "ceph_mds_root_rfiles"},
{"mds.root_rsnaps", "ceph_mds_root_rsnaps"},
{"mds.slow_reply", "ceph_mds_slow_reply"},
{"mds.subtrees", "ceph_mds_subtrees"},
{"mds_cache.ireq_enqueue_scrub", "ceph_mds_cache_ireq_enqueue_scrub"},
{"mds_cache.ireq_exportdir", "ceph_mds_cache_ireq_exportdir"},
{"mds_cache.ireq_flush", "ceph_mds_cache_ireq_flush"},
{"mds_cache.ireq_fragmentdir", "ceph_mds_cache_ireq_fragmentdir"},
{"mds_cache.ireq_fragstats", "ceph_mds_cache_ireq_fragstats"},
{"mds_cache.ireq_inodestats", "ceph_mds_cache_ireq_inodestats"},
{"mds_cache.num_recovering_enqueued", "ceph_mds_cache_num_recovering_enqueued"},
{"mds_cache.num_recovering_prioritized", "ceph_mds_cache_num_recovering_prioritized"},
{"mds_cache.num_recovering_processing", "ceph_mds_cache_num_recovering_processing"},
{"mds_cache.num_strays", "ceph_mds_cache_num_strays"},
{"mds_cache.num_strays_delayed", "ceph_mds_cache_num_strays_delayed"},
{"mds_cache.num_strays_enqueuing", "ceph_mds_cache_num_strays_enqueuing"},
{"mds_cache.recovery_completed", "ceph_mds_cache_recovery_completed"},
{"mds_cache.recovery_started", "ceph_mds_cache_recovery_started"},
{"mds_cache.strays_created", "ceph_mds_cache_strays_created"},
{"mds_cache.strays_enqueued", "ceph_mds_cache_strays_enqueued"},
{"mds_cache.strays_migrated", "ceph_mds_cache_strays_migrated"},
{"mds_cache.strays_reintegrated", "ceph_mds_cache_strays_reintegrated"},
{"mds_log.ev", "ceph_mds_log_ev"},
{"mds_log.evadd", "ceph_mds_log_evadd"},
{"mds_log.evex", "ceph_mds_log_evex"},
{"mds_log.evexd", "ceph_mds_log_evexd"},
{"mds_log.evexg", "ceph_mds_log_evexg"},
{"mds_log.evtrm", "ceph_mds_log_evtrm"},
{"mds_log.jlat_count", "ceph_mds_log_jlat_count"},
{"mds_log.jlat_sum", "ceph_mds_log_jlat_sum"},
{"mds_log.replayed", "ceph_mds_log_replayed"},
{"mds_log.seg", "ceph_mds_log_seg"},
{"mds_log.segadd", "ceph_mds_log_segadd"},
{"mds_log.segex", "ceph_mds_log_segex"},
{"mds_log.segexd", "ceph_mds_log_segexd"},
{"mds_log.segexg", "ceph_mds_log_segexg"},
{"mds_log.segtrm", "ceph_mds_log_segtrm"},
{"mds_mem.cap", "ceph_mds_mem_cap"},
{"mds_mem.cap+", "ceph_mds_mem_cap_plus"},
{"mds_mem.cap-", "ceph_mds_mem_cap_minus"},
{"mds_mem.dir", "ceph_mds_mem_dir"},
{"mds_mem.dir+", "ceph_mds_mem_dir_plus"},
{"mds_mem.dir-", "ceph_mds_mem_dir_minus"},
{"mds_mem.dn", "ceph_mds_mem_dn"},
{"mds_mem.dn+", "ceph_mds_mem_dn_plus"},
{"mds_mem.dn-", "ceph_mds_mem_dn_minus"},
{"mds_mem.heap", "ceph_mds_mem_heap"},
{"mds_mem.ino", "ceph_mds_mem_ino"},
{"mds_mem.ino+", "ceph_mds_mem_ino_plus"},
{"mds_mem.ino-", "ceph_mds_mem_ino_minus"},
{"mds_metadata", "ceph_mds_metadata"},
{"mds_server.cap_acquisition_throttle", "ceph_mds_server_cap_acquisition_throttle"},
{"mds_server.cap_revoke_eviction", "ceph_mds_server_cap_revoke_eviction"},
{"mds_server.handle_client_request", "ceph_mds_server_handle_client_request"},
{"mds_server.handle_client_session", "ceph_mds_server_handle_client_session"},
{"mds_server.handle_peer_request", "ceph_mds_server_handle_peer_request"},
{"mds_server.req_create_latency_count", "ceph_mds_server_req_create_latency_count"},
{"mds_server.req_create_latency_sum", "ceph_mds_server_req_create_latency_sum"},
{"mds_server.req_getattr_latency_count", "ceph_mds_server_req_getattr_latency_count"},
{"mds_server.req_getattr_latency_sum", "ceph_mds_server_req_getattr_latency_sum"},
{"mds_server.req_getfilelock_latency_count", "ceph_mds_server_req_getfilelock_latency_count"},
{"mds_server.req_getfilelock_latency_sum", "ceph_mds_server_req_getfilelock_latency_sum"},
{"mds_server.req_getvxattr_latency_count", "ceph_mds_server_req_getvxattr_latency_count"},
{"mds_server.req_getvxattr_latency_sum", "ceph_mds_server_req_getvxattr_latency_sum"},
{"mds_server.req_link_latency_count", "ceph_mds_server_req_link_latency_count"},
{"mds_server.req_link_latency_sum", "ceph_mds_server_req_link_latency_sum"},
{"mds_server.req_lookup_latency_count", "ceph_mds_server_req_lookup_latency_count"},
{"mds_server.req_lookup_latency_sum", "ceph_mds_server_req_lookup_latency_sum"},
{"mds_server.req_lookuphash_latency_count", "ceph_mds_server_req_lookuphash_latency_count"},
{"mds_server.req_lookuphash_latency_sum", "ceph_mds_server_req_lookuphash_latency_sum"},
{"mds_server.req_lookupino_latency_count", "ceph_mds_server_req_lookupino_latency_count"},
{"mds_server.req_lookupino_latency_sum", "ceph_mds_server_req_lookupino_latency_sum"},
{"mds_server.req_lookupname_latency_count", "ceph_mds_server_req_lookupname_latency_count"},
{"mds_server.req_lookupname_latency_sum", "ceph_mds_server_req_lookupname_latency_sum"},
{"mds_server.req_lookupparent_latency_count", "ceph_mds_server_req_lookupparent_latency_count"},
{"mds_server.req_lookupparent_latency_sum", "ceph_mds_server_req_lookupparent_latency_sum"},
{"mds_server.req_lookupsnap_latency_count", "ceph_mds_server_req_lookupsnap_latency_count"},
{"mds_server.req_lookupsnap_latency_sum", "ceph_mds_server_req_lookupsnap_latency_sum"},
{"mds_server.req_lssnap_latency_count", "ceph_mds_server_req_lssnap_latency_count"},
{"mds_server.req_lssnap_latency_sum", "ceph_mds_server_req_lssnap_latency_sum"},
{"mds_server.req_mkdir_latency_count", "ceph_mds_server_req_mkdir_latency_count"},
{"mds_server.req_mkdir_latency_sum", "ceph_mds_server_req_mkdir_latency_sum"},
{"mds_server.req_mknod_latency_count", "ceph_mds_server_req_mknod_latency_count"},
{"mds_server.req_mknod_latency_sum", "ceph_mds_server_req_mknod_latency_sum"},
{"mds_server.req_mksnap_latency_count", "ceph_mds_server_req_mksnap_latency_count"},
{"mds_server.req_mksnap_latency_sum", "ceph_mds_server_req_mksnap_latency_sum"},
{"mds_server.req_open_latency_count", "ceph_mds_server_req_open_latency_count"},
{"mds_server.req_open_latency_sum", "ceph_mds_server_req_open_latency_sum"},
{"mds_server.req_readdir_latency_count", "ceph_mds_server_req_readdir_latency_count"},
{"mds_server.req_readdir_latency_sum", "ceph_mds_server_req_readdir_latency_sum"},
{"mds_server.req_rename_latency_count", "ceph_mds_server_req_rename_latency_count"},
{"mds_server.req_rename_latency_sum", "ceph_mds_server_req_rename_latency_sum"},
{"mds_server.req_renamesnap_latency_count", "ceph_mds_server_req_renamesnap_latency_count"},
{"mds_server.req_renamesnap_latency_sum", "ceph_mds_server_req_renamesnap_latency_sum"},
{"mds_server.req_rmdir_latency_count", "ceph_mds_server_req_rmdir_latency_count"},
{"mds_server.req_rmdir_latency_sum", "ceph_mds_server_req_rmdir_latency_sum"},
{"mds_server.req_rmsnap_latency_count", "ceph_mds_server_req_rmsnap_latency_count"},
{"mds_server.req_rmsnap_latency_sum", "ceph_mds_server_req_rmsnap_latency_sum"},
{"mds_server.req_rmxattr_latency_count", "ceph_mds_server_req_rmxattr_latency_count"},
{"mds_server.req_rmxattr_latency_sum", "ceph_mds_server_req_rmxattr_latency_sum"},
{"mds_server.req_setattr_latency_count", "ceph_mds_server_req_setattr_latency_count"},
{"mds_server.req_setattr_latency_sum", "ceph_mds_server_req_setattr_latency_sum"},
{"mds_server.req_setdirlayout_latency_count", "ceph_mds_server_req_setdirlayout_latency_count"},
{"mds_server.req_setdirlayout_latency_sum", "ceph_mds_server_req_setdirlayout_latency_sum"},
{"mds_server.req_setfilelock_latency_count", "ceph_mds_server_req_setfilelock_latency_count"},
{"mds_server.req_setfilelock_latency_sum", "ceph_mds_server_req_setfilelock_latency_sum"},
{"mds_server.req_setlayout_latency_count", "ceph_mds_server_req_setlayout_latency_count"},
{"mds_server.req_setlayout_latency_sum", "ceph_mds_server_req_setlayout_latency_sum"},
{"mds_server.req_setxattr_latency_count", "ceph_mds_server_req_setxattr_latency_count"},
{"mds_server.req_setxattr_latency_sum", "ceph_mds_server_req_setxattr_latency_sum"},
{"mds_server.req_symlink_latency_count", "ceph_mds_server_req_symlink_latency_count"},
{"mds_server.req_symlink_latency_sum", "ceph_mds_server_req_symlink_latency_sum"},
{"mds_server.req_unlink_latency_count", "ceph_mds_server_req_unlink_latency_count"},
{"mds_server.req_unlink_latency_sum", "ceph_mds_server_req_unlink_latency_sum"},
{"mds_sessions.average_load", "ceph_mds_sessions_average_load"},
{"mds_sessions.avg_session_uptime", "ceph_mds_sessions_avg_session_uptime"},
{"mds_sessions.session_add", "ceph_mds_sessions_session_add"},
{"mds_sessions.session_count", "ceph_mds_sessions_session_count"},
{"mds_sessions.session_remove", "ceph_mds_sessions_session_remove"},
{"mds_sessions.sessions_open", "ceph_mds_sessions_sessions_open"},
{"mds_sessions.sessions_stale", "ceph_mds_sessions_sessions_stale"},
{"mds_sessions.total_load", "ceph_mds_sessions_total_load"},
{"mgr_metadata", "ceph_mgr_metadata"},
{"mgr_module_can_run", "ceph_mgr_module_can_run"},
{"mgr_module_status", "ceph_mgr_module_status"},
{"mgr_status", "ceph_mgr_status"},
{"mon.election_call", "ceph_mon_election_call"},
{"mon.election_lose", "ceph_mon_election_lose"},
{"mon.election_win", "ceph_mon_election_win"},
{"mon.num_elections", "ceph_mon_num_elections"},
{"mon.num_sessions", "ceph_mon_num_sessions"},
{"mon.session_add", "ceph_mon_session_add"},
{"mon.session_rm", "ceph_mon_session_rm"},
{"mon.session_trim", "ceph_mon_session_trim"},
{"mon_metadata", "ceph_mon_metadata"},
{"mon_quorum_status", "ceph_mon_quorum_status"},
{"num_objects_degraded", "ceph_num_objects_degraded"},
{"num_objects_misplaced", "ceph_num_objects_misplaced"},
{"num_objects_unfound", "ceph_num_objects_unfound"},
{"objecter-0x5591781656c0.op_active", "ceph_objecter_0x5591781656c0_op_active"},
{"objecter-0x5591781656c0.op_r", "ceph_objecter_0x5591781656c0_op_r"},
{"objecter-0x5591781656c0.op_rmw", "ceph_objecter_0x5591781656c0_op_rmw"},
{"objecter-0x5591781656c0.op_w", "ceph_objecter_0x5591781656c0_op_w"},
{"objecter-0x559178165930.op_active", "ceph_objecter_0x559178165930_op_active"},
{"objecter-0x559178165930.op_r", "ceph_objecter_0x559178165930_op_r"},
{"objecter-0x559178165930.op_rmw", "ceph_objecter_0x559178165930_op_rmw"},
{"objecter-0x559178165930.op_w", "ceph_objecter_0x559178165930_op_w"},
{"objecter.op_active", "ceph_objecter_op_active"},
{"objecter.op_r", "ceph_objecter_op_r"},
{"objecter.op_rmw", "ceph_objecter_op_rmw"},
{"objecter.op_w", "ceph_objecter_op_w"},
{"osd.numpg", "ceph_osd_numpg"},
{"osd.numpg_removing", "ceph_osd_numpg_removing"},
{"osd.op", "ceph_osd_op"},
{"osd.op_in_bytes", "ceph_osd_op_in_bytes"},
{"osd.op_latency_count", "ceph_osd_op_latency_count"},
{"osd.op_latency_sum", "ceph_osd_op_latency_sum"},
{"osd.op_out_bytes", "ceph_osd_op_out_bytes"},
{"osd.op_prepare_latency_count", "ceph_osd_op_prepare_latency_count"},
{"osd.op_prepare_latency_sum", "ceph_osd_op_prepare_latency_sum"},
{"osd.op_process_latency_count", "ceph_osd_op_process_latency_count"},
{"osd.op_process_latency_sum", "ceph_osd_op_process_latency_sum"},
{"osd.op_r", "ceph_osd_op_r"},
{"osd.op_r_latency_count", "ceph_osd_op_r_latency_count"},
{"osd.op_r_latency_sum", "ceph_osd_op_r_latency_sum"},
{"osd.op_r_out_bytes", "ceph_osd_op_r_out_bytes"},
{"osd.op_r_prepare_latency_count", "ceph_osd_op_r_prepare_latency_count"},
{"osd.op_r_prepare_latency_sum", "ceph_osd_op_r_prepare_latency_sum"},
{"osd.op_r_process_latency_count", "ceph_osd_op_r_process_latency_count"},
{"osd.op_r_process_latency_sum", "ceph_osd_op_r_process_latency_sum"},
{"osd.op_rw", "ceph_osd_op_rw"},
{"osd.op_rw_in_bytes", "ceph_osd_op_rw_in_bytes"},
{"osd.op_rw_latency_count", "ceph_osd_op_rw_latency_count"},
{"osd.op_rw_latency_sum", "ceph_osd_op_rw_latency_sum"},
{"osd.op_rw_out_bytes", "ceph_osd_op_rw_out_bytes"},
{"osd.op_rw_prepare_latency_count", "ceph_osd_op_rw_prepare_latency_count"},
{"osd.op_rw_prepare_latency_sum", "ceph_osd_op_rw_prepare_latency_sum"},
{"osd.op_rw_process_latency_count", "ceph_osd_op_rw_process_latency_count"},
{"osd.op_rw_process_latency_sum", "ceph_osd_op_rw_process_latency_sum"},
{"osd.op_w", "ceph_osd_op_w"},
{"osd.op_w_in_bytes", "ceph_osd_op_w_in_bytes"},
{"osd.op_w_latency_count", "ceph_osd_op_w_latency_count"},
{"osd.op_w_latency_sum", "ceph_osd_op_w_latency_sum"},
{"osd.op_w_prepare_latency_count", "ceph_osd_op_w_prepare_latency_count"},
{"osd.op_w_prepare_latency_sum", "ceph_osd_op_w_prepare_latency_sum"},
{"osd.op_w_process_latency_count", "ceph_osd_op_w_process_latency_count"},
{"osd.op_w_process_latency_sum", "ceph_osd_op_w_process_latency_sum"},
{"osd.op_wip", "ceph_osd_op_wip"},
{"osd.recovery_bytes", "ceph_osd_recovery_bytes"},
{"osd.recovery_ops", "ceph_osd_recovery_ops"},
{"osd.stat_bytes", "ceph_osd_stat_bytes"},
{"osd.stat_bytes_used", "ceph_osd_stat_bytes_used"},
{"osd_apply_latency_ms", "ceph_osd_apply_latency_ms"},
{"osd_commit_latency_ms", "ceph_osd_commit_latency_ms"},
{"osd_flag_nobackfill", "ceph_osd_flag_nobackfill"},
{"osd_flag_nodeep-scrub", "ceph_osd_flag_nodeep_scrub"},
{"osd_flag_nodown", "ceph_osd_flag_nodown"},
{"osd_flag_noin", "ceph_osd_flag_noin"},
{"osd_flag_noout", "ceph_osd_flag_noout"},
{"osd_flag_norebalance", "ceph_osd_flag_norebalance"},
{"osd_flag_norecover", "ceph_osd_flag_norecover"},
{"osd_flag_noscrub", "ceph_osd_flag_noscrub"},
{"osd_flag_noup", "ceph_osd_flag_noup"},
{"osd_in", "ceph_osd_in"},
{"osd_metadata", "ceph_osd_metadata"},
{"osd_up", "ceph_osd_up"},
{"osd_weight", "ceph_osd_weight"},
{"paxos.accept_timeout", "ceph_paxos_accept_timeout"},
{"paxos.begin", "ceph_paxos_begin"},
{"paxos.begin_bytes_count", "ceph_paxos_begin_bytes_count"},
{"paxos.begin_bytes_sum", "ceph_paxos_begin_bytes_sum"},
{"paxos.begin_keys_count", "ceph_paxos_begin_keys_count"},
{"paxos.begin_keys_sum", "ceph_paxos_begin_keys_sum"},
{"paxos.begin_latency_count", "ceph_paxos_begin_latency_count"},
{"paxos.begin_latency_sum", "ceph_paxos_begin_latency_sum"},
{"paxos.collect", "ceph_paxos_collect"},
{"paxos.collect_bytes_count", "ceph_paxos_collect_bytes_count"},
{"paxos.collect_bytes_sum", "ceph_paxos_collect_bytes_sum"},
{"paxos.collect_keys_count", "ceph_paxos_collect_keys_count"},
{"paxos.collect_keys_sum", "ceph_paxos_collect_keys_sum"},
{"paxos.collect_latency_count", "ceph_paxos_collect_latency_count"},
{"paxos.collect_latency_sum", "ceph_paxos_collect_latency_sum"},
{"paxos.collect_timeout", "ceph_paxos_collect_timeout"},
{"paxos.collect_uncommitted", "ceph_paxos_collect_uncommitted"},
{"paxos.commit", "ceph_paxos_commit"},
{"paxos.commit_bytes_count", "ceph_paxos_commit_bytes_count"},
{"paxos.commit_bytes_sum", "ceph_paxos_commit_bytes_sum"},
{"paxos.commit_keys_count", "ceph_paxos_commit_keys_count"},
{"paxos.commit_keys_sum", "ceph_paxos_commit_keys_sum"},
{"paxos.commit_latency_count", "ceph_paxos_commit_latency_count"},
{"paxos.commit_latency_sum", "ceph_paxos_commit_latency_sum"},
{"paxos.lease_ack_timeout", "ceph_paxos_lease_ack_timeout"},
{"paxos.lease_timeout", "ceph_paxos_lease_timeout"},
{"paxos.new_pn", "ceph_paxos_new_pn"},
{"paxos.new_pn_latency_count", "ceph_paxos_new_pn_latency_count"},
{"paxos.new_pn_latency_sum", "ceph_paxos_new_pn_latency_sum"},
{"paxos.refresh", "ceph_paxos_refresh"},
{"paxos.refresh_latency_count", "ceph_paxos_refresh_latency_count"},
{"paxos.refresh_latency_sum", "ceph_paxos_refresh_latency_sum"},
{"paxos.restart", "ceph_paxos_restart"},
{"paxos.share_state", "ceph_paxos_share_state"},
{"paxos.share_state_bytes_count", "ceph_paxos_share_state_bytes_count"},
{"paxos.share_state_bytes_sum", "ceph_paxos_share_state_bytes_sum"},
{"paxos.share_state_keys_count", "ceph_paxos_share_state_keys_count"},
{"paxos.share_state_keys_sum", "ceph_paxos_share_state_keys_sum"},
{"paxos.start_leader", "ceph_paxos_start_leader"},
{"paxos.start_peon", "ceph_paxos_start_peon"},
{"paxos.store_state", "ceph_paxos_store_state"},
{"paxos.store_state_bytes_count", "ceph_paxos_store_state_bytes_count"},
{"paxos.store_state_bytes_sum", "ceph_paxos_store_state_bytes_sum"},
{"paxos.store_state_keys_count", "ceph_paxos_store_state_keys_count"},
{"paxos.store_state_keys_sum", "ceph_paxos_store_state_keys_sum"},
{"paxos.store_state_latency_count", "ceph_paxos_store_state_latency_count"},
{"paxos.store_state_latency_sum", "ceph_paxos_store_state_latency_sum"},
{"pg_activating", "ceph_pg_activating"},
{"pg_active", "ceph_pg_active"},
{"pg_backfill_toofull", "ceph_pg_backfill_toofull"},
{"pg_backfill_unfound", "ceph_pg_backfill_unfound"},
{"pg_backfill_wait", "ceph_pg_backfill_wait"},
{"pg_backfilling", "ceph_pg_backfilling"},
{"pg_clean", "ceph_pg_clean"},
{"pg_creating", "ceph_pg_creating"},
{"pg_deep", "ceph_pg_deep"},
{"pg_degraded", "ceph_pg_degraded"},
{"pg_down", "ceph_pg_down"},
{"pg_failed_repair", "ceph_pg_failed_repair"},
{"pg_forced_backfill", "ceph_pg_forced_backfill"},
{"pg_forced_recovery", "ceph_pg_forced_recovery"},
{"pg_incomplete", "ceph_pg_incomplete"},
{"pg_inconsistent", "ceph_pg_inconsistent"},
{"pg_laggy", "ceph_pg_laggy"},
{"pg_peered", "ceph_pg_peered"},
{"pg_peering", "ceph_pg_peering"},
{"pg_premerge", "ceph_pg_premerge"},
{"pg_recovering", "ceph_pg_recovering"},
{"pg_recovery_toofull", "ceph_pg_recovery_toofull"},
{"pg_recovery_unfound", "ceph_pg_recovery_unfound"},
{"pg_recovery_wait", "ceph_pg_recovery_wait"},
{"pg_remapped", "ceph_pg_remapped"},
{"pg_repair", "ceph_pg_repair"},
{"pg_scrubbing", "ceph_pg_scrubbing"},
{"pg_snaptrim", "ceph_pg_snaptrim"},
{"pg_snaptrim_error", "ceph_pg_snaptrim_error"},
{"pg_snaptrim_wait", "ceph_pg_snaptrim_wait"},
{"pg_stale", "ceph_pg_stale"},
{"pg_total", "ceph_pg_total"},
{"pg_undersized", "ceph_pg_undersized"},
{"pg_unknown", "ceph_pg_unknown"},
{"pg_wait", "ceph_pg_wait"},
{"pool_avail_raw", "ceph_pool_avail_raw"},
{"pool_bytes_used", "ceph_pool_bytes_used"},
{"pool_compress_bytes_used", "ceph_pool_compress_bytes_used"},
{"pool_compress_under_bytes", "ceph_pool_compress_under_bytes"},
{"pool_dirty", "ceph_pool_dirty"},
{"pool_max_avail", "ceph_pool_max_avail"},
{"pool_metadata", "ceph_pool_metadata"},
{"pool_num_bytes_recovered", "ceph_pool_num_bytes_recovered"},
{"pool_num_objects_recovered", "ceph_pool_num_objects_recovered"},
{"pool_objects", "ceph_pool_objects"},
{"pool_objects_repaired", "ceph_pool_objects_repaired"},
{"pool_percent_used", "ceph_pool_percent_used"},
{"pool_quota_bytes", "ceph_pool_quota_bytes"},
{"pool_quota_objects", "ceph_pool_quota_objects"},
{"pool_rd", "ceph_pool_rd"},
{"pool_rd_bytes", "ceph_pool_rd_bytes"},
{"pool_recovering_bytes_per_sec", "ceph_pool_recovering_bytes_per_sec"},
{"pool_recovering_keys_per_sec", "ceph_pool_recovering_keys_per_sec"},
{"pool_recovering_objects_per_sec", "ceph_pool_recovering_objects_per_sec"},
{"pool_stored", "ceph_pool_stored"},
{"pool_stored_raw", "ceph_pool_stored_raw"},
{"pool_wr", "ceph_pool_wr"},
{"pool_wr_bytes", "ceph_pool_wr_bytes"},
{"prioritycache.cache_bytes", "ceph_prioritycache_cache_bytes"},
{"prioritycache.heap_bytes", "ceph_prioritycache_heap_bytes"},
{"prioritycache.mapped_bytes", "ceph_prioritycache_mapped_bytes"},
{"prioritycache.target_bytes", "ceph_prioritycache_target_bytes"},
{"prioritycache.unmapped_bytes", "ceph_prioritycache_unmapped_bytes"},
{"prioritycache:full.committed_bytes", "ceph_prioritycache:full_committed_bytes"},
{"prioritycache:full.pri0_bytes", "ceph_prioritycache:full_pri0_bytes"},
{"prioritycache:full.pri10_bytes", "ceph_prioritycache:full_pri10_bytes"},
{"prioritycache:full.pri11_bytes", "ceph_prioritycache:full_pri11_bytes"},
{"prioritycache:full.pri1_bytes", "ceph_prioritycache:full_pri1_bytes"},
{"prioritycache:full.pri2_bytes", "ceph_prioritycache:full_pri2_bytes"},
{"prioritycache:full.pri3_bytes", "ceph_prioritycache:full_pri3_bytes"},
{"prioritycache:full.pri4_bytes", "ceph_prioritycache:full_pri4_bytes"},
{"prioritycache:full.pri5_bytes", "ceph_prioritycache:full_pri5_bytes"},
{"prioritycache:full.pri6_bytes", "ceph_prioritycache:full_pri6_bytes"},
{"prioritycache:full.pri7_bytes", "ceph_prioritycache:full_pri7_bytes"},
{"prioritycache:full.pri8_bytes", "ceph_prioritycache:full_pri8_bytes"},
{"prioritycache:full.pri9_bytes", "ceph_prioritycache:full_pri9_bytes"},
{"prioritycache:full.reserved_bytes", "ceph_prioritycache:full_reserved_bytes"},
{"prioritycache:inc.committed_bytes", "ceph_prioritycache:inc_committed_bytes"},
{"prioritycache:inc.pri0_bytes", "ceph_prioritycache:inc_pri0_bytes"},
{"prioritycache:inc.pri10_bytes", "ceph_prioritycache:inc_pri10_bytes"},
{"prioritycache:inc.pri11_bytes", "ceph_prioritycache:inc_pri11_bytes"},
{"prioritycache:inc.pri1_bytes", "ceph_prioritycache:inc_pri1_bytes"},
{"prioritycache:inc.pri2_bytes", "ceph_prioritycache:inc_pri2_bytes"},
{"prioritycache:inc.pri3_bytes", "ceph_prioritycache:inc_pri3_bytes"},
{"prioritycache:inc.pri4_bytes", "ceph_prioritycache:inc_pri4_bytes"},
{"prioritycache:inc.pri5_bytes", "ceph_prioritycache:inc_pri5_bytes"},
{"prioritycache:inc.pri6_bytes", "ceph_prioritycache:inc_pri6_bytes"},
{"prioritycache:inc.pri7_bytes", "ceph_prioritycache:inc_pri7_bytes"},
{"prioritycache:inc.pri8_bytes", "ceph_prioritycache:inc_pri8_bytes"},
{"prioritycache:inc.pri9_bytes", "ceph_prioritycache:inc_pri9_bytes"},
{"prioritycache:inc.reserved_bytes", "ceph_prioritycache:inc_reserved_bytes"},
{"prioritycache:kv.committed_bytes", "ceph_prioritycache:kv_committed_bytes"},
{"prioritycache:kv.pri0_bytes", "ceph_prioritycache:kv_pri0_bytes"},
{"prioritycache:kv.pri10_bytes", "ceph_prioritycache:kv_pri10_bytes"},
{"prioritycache:kv.pri11_bytes", "ceph_prioritycache:kv_pri11_bytes"},
{"prioritycache:kv.pri1_bytes", "ceph_prioritycache:kv_pri1_bytes"},
{"prioritycache:kv.pri2_bytes", "ceph_prioritycache:kv_pri2_bytes"},
{"prioritycache:kv.pri3_bytes", "ceph_prioritycache:kv_pri3_bytes"},
{"prioritycache:kv.pri4_bytes", "ceph_prioritycache:kv_pri4_bytes"},
{"prioritycache:kv.pri5_bytes", "ceph_prioritycache:kv_pri5_bytes"},
{"prioritycache:kv.pri6_bytes", "ceph_prioritycache:kv_pri6_bytes"},
{"prioritycache:kv.pri7_bytes", "ceph_prioritycache:kv_pri7_bytes"},
{"prioritycache:kv.pri8_bytes", "ceph_prioritycache:kv_pri8_bytes"},
{"prioritycache:kv.pri9_bytes", "ceph_prioritycache:kv_pri9_bytes"},
{"prioritycache:kv.reserved_bytes", "ceph_prioritycache:kv_reserved_bytes"},
{"prometheus_collect_duration_seconds_count", "ceph_prometheus_collect_duration_seconds_count"},
{"prometheus_collect_duration_seconds_sum", "ceph_prometheus_collect_duration_seconds_sum"},
{"purge_queue.pq_executed", "ceph_purge_queue_pq_executed"},
{"purge_queue.pq_executing", "ceph_purge_queue_pq_executing"},
{"purge_queue.pq_executing_high_water", "ceph_purge_queue_pq_executing_high_water"},
{"purge_queue.pq_executing_ops", "ceph_purge_queue_pq_executing_ops"},
{"purge_queue.pq_executing_ops_high_water", "ceph_purge_queue_pq_executing_ops_high_water"},
{"purge_queue.pq_item_in_journal", "ceph_purge_queue_pq_item_in_journal"},
{"rbd_mirror_metadata", "ceph_rbd_mirror_metadata"},
{"rgw.cache_hit", "ceph_rgw_cache_hit"},
{"rgw.cache_miss", "ceph_rgw_cache_miss"},
{"rgw.failed_req", "ceph_rgw_failed_req"},
{"rgw.gc_retire_object", "ceph_rgw_gc_retire_object"},
{"rgw.get", "ceph_rgw_get"},
{"rgw.get_b", "ceph_rgw_get_b"},
{"rgw.get_initial_lat_count", "ceph_rgw_get_initial_lat_count"},
{"rgw.get_initial_lat_sum", "ceph_rgw_get_initial_lat_sum"},
{"rgw.keystone_token_cache_hit", "ceph_rgw_keystone_token_cache_hit"},
{"rgw.keystone_token_cache_miss", "ceph_rgw_keystone_token_cache_miss"},
{"rgw.lc_abort_mpu", "ceph_rgw_lc_abort_mpu"},
{"rgw.lc_expire_current", "ceph_rgw_lc_expire_current"},
{"rgw.lc_expire_dm", "ceph_rgw_lc_expire_dm"},
{"rgw.lc_expire_noncurrent", "ceph_rgw_lc_expire_noncurrent"},
{"rgw.lc_transition_current", "ceph_rgw_lc_transition_current"},
{"rgw.lc_transition_noncurrent", "ceph_rgw_lc_transition_noncurrent"},
{"rgw.lua_current_vms", "ceph_rgw_lua_current_vms"},
{"rgw.lua_script_fail", "ceph_rgw_lua_script_fail"},
{"rgw.lua_script_ok", "ceph_rgw_lua_script_ok"},
{"rgw.pubsub_event_lost", "ceph_rgw_pubsub_event_lost"},
{"rgw.pubsub_event_triggered", "ceph_rgw_pubsub_event_triggered"},
{"rgw.pubsub_events", "ceph_rgw_pubsub_events"},
{"rgw.pubsub_missing_conf", "ceph_rgw_pubsub_missing_conf"},
{"rgw.pubsub_push_failed", "ceph_rgw_pubsub_push_failed"},
{"rgw.pubsub_push_ok", "ceph_rgw_pubsub_push_ok"},
{"rgw.pubsub_push_pending", "ceph_rgw_pubsub_push_pending"},
{"rgw.pubsub_store_fail", "ceph_rgw_pubsub_store_fail"},
{"rgw.pubsub_store_ok", "ceph_rgw_pubsub_store_ok"},
{"rgw.put", "ceph_rgw_put"},
{"rgw.put_b", "ceph_rgw_put_b"},
{"rgw.put_initial_lat_count", "ceph_rgw_put_initial_lat_count"},
{"rgw.put_initial_lat_sum", "ceph_rgw_put_initial_lat_sum"},
{"rgw.qactive", "ceph_rgw_qactive"},
{"rgw.qlen", "ceph_rgw_qlen"},
{"rgw.req", "ceph_rgw_req"},
{"rgw_metadata", "ceph_rgw_metadata"},
{"rocksdb.compact", "ceph_rocksdb_compact"},
{"rocksdb.compact_queue_len", "ceph_rocksdb_compact_queue_len"},
{"rocksdb.compact_queue_merge", "ceph_rocksdb_compact_queue_merge"},
{"rocksdb.compact_range", "ceph_rocksdb_compact_range"},
{"rocksdb.get_latency_count", "ceph_rocksdb_get_latency_count"},
{"rocksdb.get_latency_sum", "ceph_rocksdb_get_latency_sum"},
{"rocksdb.rocksdb_write_delay_time_count", "ceph_rocksdb_rocksdb_write_delay_time_count"},
{"rocksdb.rocksdb_write_delay_time_sum", "ceph_rocksdb_rocksdb_write_delay_time_sum"},
{"rocksdb.rocksdb_write_memtable_time_count", "ceph_rocksdb_rocksdb_write_memtable_time_count"},
{"rocksdb.rocksdb_write_memtable_time_sum", "ceph_rocksdb_rocksdb_write_memtable_time_sum"},
{"rocksdb.rocksdb_write_pre_and_post_time_count", "ceph_rocksdb_rocksdb_write_pre_and_post_time_count"},
{"rocksdb.rocksdb_write_pre_and_post_time_sum", "ceph_rocksdb_rocksdb_write_pre_and_post_time_sum"},
{"rocksdb.rocksdb_write_wal_time_count", "ceph_rocksdb_rocksdb_write_wal_time_count"},
{"rocksdb.rocksdb_write_wal_time_sum", "ceph_rocksdb_rocksdb_write_wal_time_sum"},
{"rocksdb.submit_latency_count", "ceph_rocksdb_submit_latency_count"},
{"rocksdb.submit_latency_sum", "ceph_rocksdb_submit_latency_sum"},
{"rocksdb.submit_sync_latency_count", "ceph_rocksdb_submit_sync_latency_count"},
{"rocksdb.submit_sync_latency_sum", "ceph_rocksdb_submit_sync_latency_sum"}
};
// Verify that promethize() rewrites every raw perf-counter path into the
// expected Prometheus-safe metric name from the table above.
TEST(Exporter, promethize) {
  for (const auto &[raw_path, expected_name] : promethize_data) {
    std::string metric = raw_path;
    promethize(metric);
    ASSERT_EQ(metric, expected_name);
  }
}
| 43,530 | 64.46015 | 106 |
cc
|
null |
ceph-main/src/test/fedora-33/install-deps.sh
|
../../../install-deps.sh
| 24 | 24 | 24 |
sh
|
null |
ceph-main/src/test/fio/fio_ceph_messenger.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* CEPH messenger engine
*
* FIO engine which uses ceph messenger as a transport. See corresponding
* FIO client and server jobs for details.
*/
#include "global/global_init.h"
#include "msg/Messenger.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "common/perf_counters.h"
#include "auth/DummyAuth.h"
#include "ring_buffer.h"
#include <fio.h>
#include <flist.h>
#include <optgroup.h>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_
using namespace std;
/*
 * Messenger transport selected via the engine's "ms_type" option;
 * values index into ceph_msgr_types[] below.
 */
enum ceph_msgr_type {
	CEPH_MSGR_TYPE_UNDEF,
	CEPH_MSGR_TYPE_POSIX,
	CEPH_MSGR_TYPE_DPDK,
	CEPH_MSGR_TYPE_RDMA,
};
/* Transport names handed to Messenger::create(), indexed by ceph_msgr_type. */
const char *ceph_msgr_types[] = { "undef", "async+posix",
				  "async+dpdk", "async+rdma" };
/* Per-job engine options filled in by FIO's option parser. */
struct ceph_msgr_options {
	struct thread_data *td__;    /* back-pointer to the owning FIO thread */
	unsigned int is_receiver;    /* non-zero: act as server (OSD side) */
	unsigned int is_single;      /* non-zero: share one Messenger per process */
	unsigned int port;           /* TCP port for bind/connect */
	const char *hostname;        /* address to bind (receiver) or dial (sender) */
	const char *conffile;        /* optional ceph.conf path for global_init() */
	enum ceph_msgr_type ms_type; /* transport; UNDEF falls back to conf "ms_type" */
};
class FioDispatcher;
/*
 * Per-FIO-thread engine state: the messenger, its dispatcher and the
 * queues that track io_u's through their lifetime.
 */
struct ceph_msgr_data {
	ceph_msgr_data(struct ceph_msgr_options *o_, unsigned iodepth) :
		o(o_) {
		INIT_FLIST_HEAD(&io_inflight_list);
		INIT_FLIST_HEAD(&io_pending_list);
		/* Sized to iodepth: can never hold more than the queued IOs. */
		ring_buffer_init(&io_completed_q, iodepth);
		pthread_spin_init(&spin, PTHREAD_PROCESS_PRIVATE);
	}
	struct ceph_msgr_options *o;
	Messenger *msgr = NULL;
	FioDispatcher *disp = NULL;
	/* Protects the two lists and their counters below. */
	pthread_spinlock_t spin;
	/* Completed IOs, drained by fio's getevents()/event() callbacks. */
	struct ring_buffer io_completed_q;
	/* Receiver side: io_u's waiting for an incoming request -- TODO confirm
	 * against the (not shown here) receiver queue path. */
	struct flist_head io_inflight_list;
	/* Receiver side: replies created while no io_u was available. */
	struct flist_head io_pending_list;
	unsigned int io_inflight_nr = 0;
	unsigned int io_pending_nr = 0;
};
/* Engine-private companion allocated for every FIO io_u. */
struct ceph_msgr_io {
	struct flist_head list;      /* linkage on the inflight list (receiver) */
	struct ceph_msgr_data *data; /* owning per-thread state */
	struct io_u *io_u;           /* the FIO IO unit this wraps */
	MOSDOp *req_msg; /** Cached request, valid only for sender */
};
/* A reply parked on io_pending_list until an io_u becomes available. */
struct ceph_msgr_reply_io {
	struct flist_head list;
	MOSDOpReply *rep;
};
/*
 * Decode a hex string produced by ptr_to_str() back into a raw pointer.
 * The string is trusted to be a valid base-16 pointer value.
 */
static void *str_to_ptr(const std::string &str)
{
	const auto parsed = ceph::parse<uintptr_t>(str, 16);
	return reinterpret_cast<void *>(parsed.value());
}
/*
 * Encode a raw pointer as a lowercase hex string (no "0x" prefix),
 * the inverse of str_to_ptr().
 */
static std::string ptr_to_str(void *ptr)
{
	char hex[32];
	const int len = snprintf(hex, sizeof(hex), "%llx",
				 (unsigned long long)ptr);
	return std::string(hex, len);
}
/*
* Used for refcounters print on the last context put, almost duplicates
* global context refcounter, sigh.
*/
/* Mirrors the global context refcount; last put dumps perf counters. */
static std::atomic<int> ctx_ref(1);
/* One no-op auth client/server shared by every messenger in this engine. */
static DummyAuthClientServer *g_dummy_auth;
/*
 * Create the global CephContext on first call, or take an extra
 * reference on it for every subsequent FIO thread.  Our private
 * ctx_ref shadows the global refcount so put_ceph_context() can tell
 * when the last user is gone.
 */
static void create_or_get_ceph_context(struct ceph_msgr_options *o)
{
	if (g_ceph_context) {
		/* Already initialized by another thread: just add a ref. */
		g_ceph_context->get();
		ctx_ref++;
		return;
	}
	boost::intrusive_ptr<CephContext> cct;
	vector<const char*> args;
	if (o->conffile)
		args = { "--conf", o->conffile };
	cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
			  CODE_ENVIRONMENT_UTILITY,
			  CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
	/* Will use g_ceph_context instead */
	cct.detach();
	common_init_finish(g_ceph_context);
	g_ceph_context->_conf.apply_changes(NULL);
	g_dummy_auth = new DummyAuthClientServer(g_ceph_context);
	g_dummy_auth->auth_registry.refresh_config();
}
/*
 * Drop one reference on the global CephContext.  On the last put, dump
 * all perf counters as pretty JSON to the debug log and free the shared
 * dummy auth before releasing the context itself.
 */
static void put_ceph_context(void)
{
	if (--ctx_ref == 0) {
		ostringstream ostr;
		Formatter* f;
		f = Formatter::create("json-pretty");
		g_ceph_context->get_perfcounters_collection()->dump_formatted(f, false, false);
		ostr << ">>>>>>>>>>>>> PERFCOUNTERS BEGIN <<<<<<<<<<<<" << std::endl;
		f->flush(ostr);
		ostr << ">>>>>>>>>>>>> PERFCOUNTERS END <<<<<<<<<<<<" << std::endl;
		delete f;
		delete g_dummy_auth;
		dout(0) << ostr.str() << dendl;
	}
	g_ceph_context->put();
}
/*
 * Sender-side reply handler.  The object name is abused as a transport
 * for a raw ceph_msgr_io pointer (set in fio_ceph_msgr_io_u_init), so
 * no lookup structure is needed to match a reply to its IO: decode the
 * pointer and push the IO onto the completed queue.  This is purely a
 * benchmarking shortcut -- performance over safety.
 */
static void ceph_msgr_sender_on_reply(const object_t &oid)
{
	auto *io = static_cast<struct ceph_msgr_io *>(str_to_ptr(oid.name));
	ring_buffer_enqueue(&io->data->io_completed_q, (void *)io);
}
/*
 * Completion hook attached to a receiver reply: once the messenger has
 * finished with the MOSDOpReply, mark the associated IO as completed so
 * FIO can reap it via getevents().
 */
class ReplyCompletion : public Message::CompletionHook {
	struct ceph_msgr_io *m_io;
public:
	ReplyCompletion(MOSDOpReply *rep, struct ceph_msgr_io *io) :
		Message::CompletionHook(rep),
		m_io(io) {
	}
	void finish(int err) override {
		struct ceph_msgr_data *data = m_io->data;
		/* Hand the IO back to FIO regardless of err. */
		ring_buffer_enqueue(&data->io_completed_q, (void *)m_io);
	}
};
/*
 * Receiver-side request handler.  Build a reply for the incoming op;
 * if an io_u is already inflight (queued by FIO), pair it with the
 * reply and send immediately, completing the IO from the reply's
 * completion hook.  Otherwise park the reply on io_pending_list until
 * an io_u shows up.  The spinlock guards both lists/counters; note it
 * is released before send_message() to keep the critical section short.
 */
static void ceph_msgr_receiver_on_request(struct ceph_msgr_data *data,
					  MOSDOp *req)
{
	MOSDOpReply *rep;
	rep = new MOSDOpReply(req, 0, 0, 0, false);
	rep->set_connection(req->get_connection());
	pthread_spin_lock(&data->spin);
	if (data->io_inflight_nr) {
		struct ceph_msgr_io *io;
		data->io_inflight_nr--;
		io = flist_first_entry(&data->io_inflight_list,
				       struct ceph_msgr_io, list);
		flist_del(&io->list);
		pthread_spin_unlock(&data->spin);
		/* Complete the io_u once the reply is actually sent. */
		rep->set_completion_hook(new ReplyCompletion(rep, io));
		rep->get_connection()->send_message(rep);
	} else {
		struct ceph_msgr_reply_io *rep_io;
		rep_io = (decltype(rep_io))malloc(sizeof(*rep_io));
		rep_io->rep = rep;
		data->io_pending_nr++;
		flist_add_tail(&rep_io->list, &data->io_pending_list);
		pthread_spin_unlock(&data->spin);
	}
}
/*
 * Messenger dispatcher used by both sides of the benchmark.  All
 * traffic goes through the fast-dispatch path: the receiver accepts
 * CEPH_MSG_OSD_OP, the sender accepts CEPH_MSG_OSD_OPREPLY.
 */
class FioDispatcher : public Dispatcher {
	struct ceph_msgr_data *m_data;
public:
	FioDispatcher(struct ceph_msgr_data *data):
		Dispatcher(g_ceph_context),
		m_data(data) {
	}
	bool ms_can_fast_dispatch_any() const override {
		return true;
	}
	/* Only the message type matching our role is fast-dispatchable. */
	bool ms_can_fast_dispatch(const Message *m) const override {
		switch (m->get_type()) {
		case CEPH_MSG_OSD_OP:
			return m_data->o->is_receiver;
		case CEPH_MSG_OSD_OPREPLY:
			return !m_data->o->is_receiver;
		default:
			return false;
		}
	}
	void ms_handle_fast_connect(Connection *con) override {
	}
	void ms_handle_fast_accept(Connection *con) override {
	}
	/* Slow path unused: everything arrives via ms_fast_dispatch(). */
	bool ms_dispatch(Message *m) override {
		return true;
	}
	void ms_fast_dispatch(Message *m) override {
		if (m_data->o->is_receiver) {
			MOSDOp *req;
			/*
			 * Server side, handle request.
			 */
			req = static_cast<MOSDOp*>(m);
			req->finish_decode();
			ceph_msgr_receiver_on_request(m_data, req);
		} else {
			MOSDOpReply *rep;
			/*
			 * Client side, get reply, extract objid and mark
			 * IO as completed.
			 */
			rep = static_cast<MOSDOpReply*>(m);
			ceph_msgr_sender_on_reply(rep->get_oid());
		}
		m->put();
	}
	bool ms_handle_reset(Connection *con) override {
		return true;
	}
	void ms_handle_remote_reset(Connection *con) override {
	}
	bool ms_handle_refused(Connection *con) override {
		return false;
	}
	/* Auth is a no-op (DummyAuth); accept everyone. */
	int ms_handle_authentication(Connection *con) override {
		return 1;
	}
};
/*
 * Build the messenger endpoint address from the job's hostname/port
 * options.  Nonce is zeroed: the receiver's address must be stable.
 */
static entity_addr_t hostname_to_addr(struct ceph_msgr_options *o)
{
	entity_addr_t ep;
	ep.parse(o->hostname);
	ep.set_port(o->port);
	ep.set_nonce(0);
	return ep;
}
/*
 * Create and start a Messenger for one side of the benchmark.
 * The receiver presents itself as OSD.0 and binds to the configured
 * address with a stateless-server policy; the sender is a lossless
 * client with a per-thread unique nonce.  Transport comes from the
 * job option, falling back to the "ms_type" conf value.
 */
static Messenger *create_messenger(struct ceph_msgr_options *o)
{
	entity_name_t ename = o->is_receiver ?
		entity_name_t::OSD(0) : entity_name_t::CLIENT(0);
	std::string lname = o->is_receiver ?
		"receiver" : "sender";
	std::string ms_type = o->ms_type != CEPH_MSGR_TYPE_UNDEF ?
		ceph_msgr_types[o->ms_type] :
		g_ceph_context->_conf.get_val<std::string>("ms_type");
	/* o->td__>pid doesn't set value, so use getpid() instead*/
	auto nonce = o->is_receiver ? 0 : (getpid() + o->td__->thread_number);
	Messenger *msgr = Messenger::create(g_ceph_context, ms_type.c_str(),
					    ename, lname, nonce);
	if (o->is_receiver) {
		msgr->set_default_policy(Messenger::Policy::stateless_server(0));
		msgr->bind(hostname_to_addr(o));
	} else {
		msgr->set_default_policy(Messenger::Policy::lossless_client(0));
	}
	/* Authentication is stubbed out; no authorizer required either. */
	msgr->set_auth_client(g_dummy_auth);
	msgr->set_auth_server(g_dummy_auth);
	msgr->set_require_authorizer(false);
	msgr->start();
	return msgr;
}
/*
 * State for the 'is_single' mode: one Messenger shared by all FIO
 * threads, its user count, and the dispatchers that must outlive the
 * messenger (freed only after the last user in put_messenger()).
 */
static Messenger *single_msgr;
static std::atomic<int> single_msgr_ref;
static vector<FioDispatcher *> single_msgr_disps;
/*
 * Attach a messenger + dispatcher to this thread's engine state.
 * With 'is_single' the first thread creates the shared messenger and
 * later threads only add their dispatcher and bump the refcount;
 * otherwise each thread owns a private messenger.
 */
static void init_messenger(struct ceph_msgr_data *data)
{
	struct ceph_msgr_options *o = data->o;
	FioDispatcher *disp;
	Messenger *msgr;
	disp = new FioDispatcher(data);
	if (o->is_single) {
		/*
		 * Single messenger instance for the whole FIO
		 */
		if (!single_msgr) {
			msgr = create_messenger(o);
			single_msgr = msgr;
		} else {
			msgr = single_msgr;
		}
		single_msgr_disps.push_back(disp);
		single_msgr_ref++;
	} else {
		/*
		 * Messenger instance per FIO thread
		 */
		msgr = create_messenger(o);
	}
	msgr->add_dispatcher_head(disp);
	data->disp = disp;
	data->msgr = msgr;
}
/* Orderly messenger teardown: shutdown, wait for threads, then delete. */
static void free_messenger(struct ceph_msgr_data *data)
{
	data->msgr->shutdown();
	data->msgr->wait();
	delete data->msgr;
}
/*
 * Release this thread's messenger reference.  In 'is_single' mode the
 * shared messenger (and all dispatchers) are destroyed only when the
 * last thread drops its reference; otherwise the private messenger and
 * dispatcher are freed immediately.
 */
static void put_messenger(struct ceph_msgr_data *data)
{
	struct ceph_msgr_options *o = data->o;
	if (o->is_single) {
		if (--single_msgr_ref == 0) {
			free_messenger(data);
			/*
			 * In case of a single messenger instance we have to
			 * free dispatchers after actual messenger destruction.
			 */
			for (auto disp : single_msgr_disps)
				delete disp;
			single_msgr = NULL;
		}
	} else {
		free_messenger(data);
		delete data->disp;
	}
	data->disp = NULL;
	data->msgr = NULL;
}
/*
 * FIO 'setup' callback: grab (or create) the global ceph context and,
 * on first invocation for this thread, allocate the per-thread engine
 * state and its messenger.  Always returns 0.
 */
static int fio_ceph_msgr_setup(struct thread_data *td)
{
	struct ceph_msgr_options *o = (decltype(o))td->eo;
	o->td__ = td;
	ceph_msgr_data *data;
	/* We have to manage global resources so we use threads */
	td->o.use_thread = 1;
	create_or_get_ceph_context(o);
	if (!td->io_ops_data) {
		data = new ceph_msgr_data(o, td->o.iodepth);
		init_messenger(data);
		td->io_ops_data = (void *)data;
	}
	return 0;
}
/*
 * FIO 'cleanup' callback: tear down the messenger, sanity-check that
 * every queue and counter drained to zero (loudly reporting leaks to
 * stderr), then free the engine state and drop the ceph context ref.
 */
static void fio_ceph_msgr_cleanup(struct thread_data *td)
{
	struct ceph_msgr_data *data;
	unsigned nr;
	data = (decltype(data))td->io_ops_data;
	put_messenger(data);
	nr = ring_buffer_used_size(&data->io_completed_q);
	if (nr)
		fprintf(stderr, "fio: io_completed_nr==%d, but should be zero\n",
			nr);
	if (data->io_inflight_nr)
		fprintf(stderr, "fio: io_inflight_nr==%d, but should be zero\n",
			data->io_inflight_nr);
	if (data->io_pending_nr)
		fprintf(stderr, "fio: io_pending_nr==%d, but should be zero\n",
			data->io_pending_nr);
	if (!flist_empty(&data->io_inflight_list))
		fprintf(stderr, "fio: io_inflight_list is not empty\n");
	if (!flist_empty(&data->io_pending_list))
		fprintf(stderr, "fio: io_pending_list is not empty\n");
	ring_buffer_deinit(&data->io_completed_q);
	delete data;
	put_ceph_context();
}
/*
 * FIO 'io_u_init' callback: allocate the engine-private wrapper for an
 * io_u.  On the sender we also pre-create and cache an MOSDOp whose
 * object id encodes the raw wrapper pointer (see
 * ceph_msgr_sender_on_reply() for the decode side) and pin it to a
 * connection to the receiver.  Always returns 0.
 */
static int fio_ceph_msgr_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ceph_msgr_options *o = (decltype(o))td->eo;
	struct ceph_msgr_io *io;
	MOSDOp *req_msg = NULL;
	io = (decltype(io))malloc(sizeof(*io));
	io->io_u = io_u;
	io->data = (decltype(io->data))td->io_ops_data;
	if (!o->is_receiver) {
		/* Encode the wrapper pointer in the object name. */
		object_t oid(ptr_to_str(io));
		pg_t pgid;
		object_locator_t oloc;
		hobject_t hobj(oid, oloc.key, CEPH_NOSNAP, pgid.ps(),
			       pgid.pool(), oloc.nspace);
		spg_t spgid(pgid);
		entity_inst_t dest(entity_name_t::OSD(0), hostname_to_addr(o));
		Messenger *msgr = io->data->msgr;
		ConnectionRef con = msgr->connect_to(dest.name.type(),
						     entity_addrvec_t(dest.addr));
		req_msg = new MOSDOp(0, 0, hobj, spgid, 0, 0, 0);
		req_msg->set_connection(con);
	}
	io->req_msg = req_msg;
	io_u->engine_data = (void *)io;
	return 0;
}
/* Per-io_u teardown: release the cached request message (if any) and
 * free our bookkeeping struct.  Safe to call when init never ran. */
static void fio_ceph_msgr_io_u_free(struct thread_data *td, struct io_u *io_u)
{
	auto *io = static_cast<struct ceph_msgr_io *>(io_u->engine_data);
	if (!io)
		return;
	io_u->engine_data = NULL;
	if (io->req_msg)
		io->req_msg->put();
	free(io);
}
/* Sender-side queue(): wrap the io_u buffer (zero-copy) in a bufferlist
 * and ship it as a write MOSDOp over the pre-built connection.  Completion
 * is reported asynchronously by the messenger, hence FIO_Q_QUEUED. */
static enum fio_q_status ceph_msgr_sender_queue(struct thread_data *td,
						struct io_u *io_u)
{
	struct ceph_msgr_data *data;
	struct ceph_msgr_io *io;
	bufferlist buflist = bufferlist::static_from_mem(
		(char *)io_u->buf, io_u->buflen);
	io = (decltype(io))io_u->engine_data;
	data = (decltype(data))td->io_ops_data;
	/* No handy method to clear ops before reusage? Ok */
	io->req_msg->ops.clear();
	/* Here we do not care about direction, always send as write */
	io->req_msg->write(0, io_u->buflen, buflist);
	/* Keep message alive */
	io->req_msg->get();
	io->req_msg->get_connection()->send_message(io->req_msg);
	return FIO_Q_QUEUED;
}
/* Wait for at least 'min' completions (capped at 'max').  There is no
 * disk I/O here, so responses arrive quickly and a short usleep poll is
 * used instead of a blocking primitive. */
static int fio_ceph_msgr_getevents(struct thread_data *td, unsigned int min,
				   unsigned int max, const struct timespec *ts)
{
	struct ceph_msgr_data *data;
	unsigned int nr;
	data = (decltype(data))td->io_ops_data;
	/*
	 * Check io_u.c : if min == 0 -> ts is valid and equal to zero,
	 * if min != 0 -> ts is NULL.
	 */
	assert(!min ^ !ts);
	nr = ring_buffer_used_size(&data->io_completed_q);
	if (nr >= min)
		/* We got something */
		return min(nr, max);
	/* Here we are only if min != 0 and ts == NULL */
	assert(min && !ts);
	/* td->terminate lets fio abort the wait on job shutdown */
	while ((nr = ring_buffer_used_size(&data->io_completed_q)) < min &&
	       !td->terminate) {
		/* Poll, no disk IO, so we expect response immediately. */
		usleep(10);
	}
	return min(nr, max);
}
/* Pop the next completed request off the completion ring and hand its
 * io_u back to fio (called once per event after getevents()). */
static struct io_u *fio_ceph_msgr_event(struct thread_data *td, int event)
{
	auto *data = static_cast<struct ceph_msgr_data *>(td->io_ops_data);
	auto *io = static_cast<struct ceph_msgr_io *>(
		ring_buffer_dequeue(&data->io_completed_q));
	return io->io_u;
}
/* Receiver-side queue(): if a request reply is already pending, complete
 * it immediately by sending the reply back; otherwise park this io_u on
 * the inflight list until a request arrives.  The spinlock is released
 * before send_message() to keep the critical section short. */
static enum fio_q_status ceph_msgr_receiver_queue(struct thread_data *td,
						  struct io_u *io_u)
{
	struct ceph_msgr_data *data;
	struct ceph_msgr_io *io;
	io = (decltype(io))io_u->engine_data;
	data = io->data;
	pthread_spin_lock(&data->spin);
	if (data->io_pending_nr) {
		struct ceph_msgr_reply_io *rep_io;
		MOSDOpReply *rep;
		/* A request beat us here: consume the oldest pending reply */
		data->io_pending_nr--;
		rep_io = flist_first_entry(&data->io_pending_list,
					   struct ceph_msgr_reply_io,
					   list);
		flist_del(&rep_io->list);
		rep = rep_io->rep;
		pthread_spin_unlock(&data->spin);
		free(rep_io);
		/* ReplyCompletion marks this io as done once the reply is sent */
		rep->set_completion_hook(new ReplyCompletion(rep, io));
		rep->get_connection()->send_message(rep);
	} else {
		data->io_inflight_nr++;
		flist_add_tail(&io->list, &data->io_inflight_list);
		pthread_spin_unlock(&data->spin);
	}
	return FIO_Q_QUEUED;
}
/* Dispatch queue() to the sender or receiver implementation depending on
 * the role this job was configured with. */
static enum fio_q_status fio_ceph_msgr_queue(struct thread_data *td,
					     struct io_u *io_u)
{
	auto *o = static_cast<struct ceph_msgr_options *>(td->eo);
	return o->is_receiver ? ceph_msgr_receiver_queue(td, io_u)
			      : ceph_msgr_sender_queue(td, io_u);
}
/* Diskless engine: files are virtual, so open/close are noops. */
static int fio_ceph_msgr_open_file(struct thread_data *td, struct fio_file *f)
{
	return 0;
}
static int fio_ceph_msgr_close_file(struct thread_data *, struct fio_file *)
{
	return 0;
}
/* Build a zero-initialized fio_option preset with this engine's category;
 * per-option fields are filled in by the supplied callable. */
template <class Func>
fio_option make_option(Func&& func)
{
	auto o = fio_option{};
	o.category = FIO_OPT_C_ENGINE;
	func(std::ref(o));
	return o;
}
/* fio job-file option table for the messenger engine; the trailing empty
 * element terminates the list as fio requires. */
static std::vector<fio_option> options {
	make_option([] (fio_option& o) {
		o.name = "receiver";
		o.lname = "CEPH messenger is receiver";
		o.type = FIO_OPT_BOOL;
		o.off1 = offsetof(struct ceph_msgr_options, is_receiver);
		o.help = "CEPH messenger is sender or receiver";
		o.def = "0";
	}),
	make_option([] (fio_option& o) {
		o.name = "single_instance";
		o.lname = "Single instance of CEPH messenger ";
		o.type = FIO_OPT_BOOL;
		o.off1 = offsetof(struct ceph_msgr_options, is_single);
		o.help = "CEPH messenger is a created once for all threads";
		o.def = "0";
	}),
	make_option([] (fio_option& o) {
		o.name = "hostname";
		o.lname = "CEPH messenger hostname";
		o.type = FIO_OPT_STR_STORE;
		o.off1 = offsetof(struct ceph_msgr_options, hostname);
		o.help = "Hostname for CEPH messenger engine";
	}),
	make_option([] (fio_option& o) {
		o.name = "port";
		o.lname = "CEPH messenger engine port";
		o.type = FIO_OPT_INT;
		o.off1 = offsetof(struct ceph_msgr_options, port);
		o.maxval = 65535;
		o.minval = 1;
		o.help = "Port to use for CEPH messenger";
	}),
	make_option([] (fio_option& o) {
		o.name = "ms_type";
		o.lname = "CEPH messenger transport type: async+posix, async+dpdk, async+rdma";
		o.type = FIO_OPT_STR;
		o.off1 = offsetof(struct ceph_msgr_options, ms_type);
		o.help = "Transport type for CEPH messenger, see 'ms async transport type' corresponding CEPH documentation page";
		o.def = "undef";
		o.posval[0].ival = "undef";
		o.posval[0].oval = CEPH_MSGR_TYPE_UNDEF;
		o.posval[1].ival = "async+posix";
		o.posval[1].oval = CEPH_MSGR_TYPE_POSIX;
		o.posval[1].help = "POSIX API";
		o.posval[2].ival = "async+dpdk";
		o.posval[2].oval = CEPH_MSGR_TYPE_DPDK;
		o.posval[2].help = "DPDK";
		o.posval[3].ival = "async+rdma";
		o.posval[3].oval = CEPH_MSGR_TYPE_RDMA;
		o.posval[3].help = "RDMA";
	}),
	make_option([] (fio_option& o) {
		o.name = "ceph_conf_file";
		o.lname = "CEPH configuration file";
		o.type = FIO_OPT_STR_STORE;
		o.off1 = offsetof(struct ceph_msgr_options, conffile);
		o.help = "Path to CEPH configuration file";
	}),
	{} /* Last NULL */
};
/* Singleton ops table handed to fio; filled lazily in get_ioengine(). */
static struct ioengine_ops ioengine;
extern "C" {
/* Entry point fio dlopen()s from the external engine .so. */
void get_ioengine(struct ioengine_ops** ioengine_ptr)
{
	/*
	 * Main ioengine structure
	 */
	ioengine.name = "ceph-msgr";
	ioengine.version = FIO_IOOPS_VERSION;
	ioengine.flags = FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO;
	ioengine.setup = fio_ceph_msgr_setup;
	ioengine.queue = fio_ceph_msgr_queue;
	ioengine.getevents = fio_ceph_msgr_getevents;
	ioengine.event = fio_ceph_msgr_event;
	ioengine.cleanup = fio_ceph_msgr_cleanup;
	ioengine.open_file = fio_ceph_msgr_open_file;
	ioengine.close_file = fio_ceph_msgr_close_file;
	ioengine.io_u_init = fio_ceph_msgr_io_u_init;
	ioengine.io_u_free = fio_ceph_msgr_io_u_free;
	ioengine.option_struct_size = sizeof(struct ceph_msgr_options);
	ioengine.options = options.data();
	*ioengine_ptr = &ioengine;
}
} // extern "C"
| 18,120 | 24.850214 | 119 |
cc
|
null |
ceph-main/src/test/fio/fio_ceph_objectstore.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph ObjectStore engine
*
* IO engine using Ceph's ObjectStore class to test low-level performance of
* Ceph OSDs.
*
*/
#include <memory>
#include <system_error>
#include <vector>
#include <fstream>
#include "os/ObjectStore.h"
#include "global/global_init.h"
#include "common/errno.h"
#include "include/intarith.h"
#include "include/stringify.h"
#include "include/random.h"
#include "include/str_list.h"
#include "common/perf_counters.h"
#include "common/TracepointProvider.h"
#include <fio.h>
#include <optgroup.h>
#include "include/ceph_assert.h" // fio.h clobbers our assert.h
#include <algorithm>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_
using namespace std;
namespace {
/// fio configuration options read from the job file
/// fio configuration options read from the job file
struct Options {
  thread_data* td;
  char* conf;                     // path to ceph.conf (required)
  char* perf_output_file;         // optional json perf dump target
  char* throttle_values;          // comma-separated bluestore throttle list
  char* deferred_throttle_values; // same, for the deferred throttle
  // each low/high pair is the random size range for a generated attr/omap
  unsigned long long
    cycle_throttle_period,
    oi_attr_len_low,
    oi_attr_len_high,
    snapset_attr_len_low,
    snapset_attr_len_high,
    pglog_omap_len_low,
    pglog_omap_len_high,
    pglog_dup_omap_len_low,
    pglog_dup_omap_len_high,
    _fastinfo_omap_len_low,
    _fastinfo_omap_len_high;
  unsigned simulate_pglog;        // bool: emulate PG-log omap traffic
  unsigned single_pool_mode;      // bool: all jobs share one pool
  unsigned preallocate_files;     // bool: touch+truncate objects on init
  unsigned check_files;           // bool: verify objects exist on init
};
// Helper producing a zero-initialized fio_option preconfigured for this
// engine; per-option fields are filled in by the supplied callable.
template <class Func> // void Func(fio_option&)
fio_option make_option(Func&& func)
{
  fio_option opt{};              // zero-initialize and set common defaults
  opt.category = FIO_OPT_C_ENGINE;
  opt.group = FIO_OPT_G_RBD;
  func(opt);
  return opt;
}
// fio job-file option table for the objectstore engine; the trailing empty
// element terminates the list as fio requires.
static std::vector<fio_option> ceph_options{
  make_option([] (fio_option& o) {
    o.name = "conf";
    o.lname = "ceph configuration file";
    o.type = FIO_OPT_STR_STORE;
    o.help = "Path to a ceph configuration file";
    o.off1 = offsetof(Options, conf);
  }),
  make_option([] (fio_option& o) {
    o.name = "perf_output_file";
    o.lname = "perf output target";
    o.type = FIO_OPT_STR_STORE;
    o.help = "Path to which to write json formatted perf output";
    o.off1 = offsetof(Options, perf_output_file);
    o.def = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "oi_attr_len";
    o.lname = "OI Attr length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set OI(aka '_') attribute to specified length";
    o.off1 = offsetof(Options, oi_attr_len_low);
    o.off2 = offsetof(Options, oi_attr_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "snapset_attr_len";
    o.lname = "Attr 'snapset' length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set 'snapset' attribute to specified length";
    o.off1 = offsetof(Options, snapset_attr_len_low);
    o.off2 = offsetof(Options, snapset_attr_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "_fastinfo_omap_len";
    o.lname = "'_fastinfo' omap entry length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set '_fastinfo' OMAP attribute to specified length";
    o.off1 = offsetof(Options, _fastinfo_omap_len_low);
    o.off2 = offsetof(Options, _fastinfo_omap_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "pglog_simulation";
    o.lname = "pglog behavior simulation";
    o.type = FIO_OPT_BOOL;
    o.help = "Enables PG Log simulation behavior";
    o.off1 = offsetof(Options, simulate_pglog);
    o.def = "0";
  }),
  make_option([] (fio_option& o) {
    o.name = "pglog_omap_len";
    o.lname = "pglog omap entry length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set pglog omap entry to specified length";
    o.off1 = offsetof(Options, pglog_omap_len_low);
    o.off2 = offsetof(Options, pglog_omap_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "pglog_dup_omap_len";
    // fix: long name previously read "uplicate ..."
    o.lname = "duplicate pglog omap entry length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set duplicate pglog omap entry to specified length";
    o.off1 = offsetof(Options, pglog_dup_omap_len_low);
    o.off2 = offsetof(Options, pglog_dup_omap_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "single_pool_mode";
    o.lname = "single(shared among jobs) pool mode";
    o.type = FIO_OPT_BOOL;
    o.help = "Enables the mode when all jobs run against the same pool";
    o.off1 = offsetof(Options, single_pool_mode);
    o.def = "0";
  }),
  make_option([] (fio_option& o) {
    o.name = "preallocate_files";
    o.lname = "preallocate files on init";
    o.type = FIO_OPT_BOOL;
    o.help = "Enables/disables file preallocation (touch and resize) on init";
    o.off1 = offsetof(Options, preallocate_files);
    o.def = "1";
  }),
  make_option([] (fio_option& o) {
    o.name = "check_files";
    o.lname = "ensure files exist and are correct on init";
    o.type = FIO_OPT_BOOL;
    o.help = "Enables/disables checking of files on init";
    o.off1 = offsetof(Options, check_files);
    o.def = "0";
  }),
  make_option([] (fio_option& o) {
    o.name = "bluestore_throttle";
    o.lname = "set bluestore throttle";
    o.type = FIO_OPT_STR_STORE;
    // fix: statements were joined with the comma operator (','), use ';'
    o.help = "comma delimited list of throttle values";
    o.off1 = offsetof(Options, throttle_values);
    o.def = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "bluestore_deferred_throttle";
    o.lname = "set bluestore deferred throttle";
    o.type = FIO_OPT_STR_STORE;
    o.help = "comma delimited list of throttle values";
    o.off1 = offsetof(Options, deferred_throttle_values);
    o.def = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "vary_bluestore_throttle_period";
    o.lname = "period between different throttle values";
    o.type = FIO_OPT_STR_VAL;
    o.help = "set to non-zero value to periodically cycle through throttle options";
    o.off1 = offsetof(Options, cycle_throttle_period);
    o.def = "0";
    o.minval = 0;
  }),
  {} // fio expects a 'null'-terminated list
};
/// One PG-backed collection objects are spread over, plus the PG-log
/// version counters used when pglog simulation is enabled.
struct Collection {
  spg_t pg;
  coll_t cid;
  ObjectStore::CollectionHandle ch;
  // Can't use mutex directly in vectors hence dynamic allocation
  std::unique_ptr<std::mutex> lock;
  // head/tail version counters for the simulated pglog and its dup entries
  uint64_t pglog_ver_head = 1;
  uint64_t pglog_ver_tail = 1;
  uint64_t pglog_dup_ver_tail = 1;
  // use big pool ids to avoid clashing with existing collections
  static constexpr int64_t MIN_POOL_ID = 0x0000ffffffffffff;
  Collection(const spg_t& pg, ObjectStore::CollectionHandle _ch)
    : pg(pg), cid(pg), ch(_ch),
      lock(new std::mutex) {
  }
};
/// Best-effort removal of our collections and their pgmeta objects.
/// Errors are logged (once) but not propagated; always returns 0.
int destroy_collections(
  std::unique_ptr<ObjectStore>& os,
  std::vector<Collection>& collections)
{
  // NOTE: t is reused across iterations; queue_transaction(std::move(t))
  // leaves it in a valid empty state suitable for refilling.
  ObjectStore::Transaction t;
  bool failed = false;
  // remove our collections
  for (auto& coll : collections) {
    ghobject_t pgmeta_oid(coll.pg.make_pgmeta_oid());
    t.remove(coll.cid, pgmeta_oid);
    t.remove_collection(coll.cid);
    int r = os->queue_transaction(coll.ch, std::move(t));
    if (r && !failed) {
      derr << "Engine cleanup failed with " << cpp_strerror(-r) << dendl;
      failed = true;
    }
  }
  return 0;
}
/// Create (or open, if they already exist) 'count' PG collections in the
/// given pool, plus the meta collection with an OSDSuperblock so external
/// tools (e.g. ceph-objectstore-tool) can operate on the store.
/// Returns 0 on success or a negative errno; on failure any collections
/// created so far are destroyed.
int init_collections(std::unique_ptr<ObjectStore>& os,
		      uint64_t pool,
		      std::vector<Collection>& collections,
		      uint64_t count)
{
  ceph_assert(count > 0);
  collections.reserve(count);
  const int split_bits = cbits(count - 1);
  {
    // propagate Superblock object to ensure proper functioning of tools that
    // need it. E.g. ceph-objectstore-tool
    coll_t cid(coll_t::meta());
    bool exists = os->collection_exists(cid);
    if (!exists) {
      auto ch = os->create_new_collection(cid);
      OSDSuperblock superblock;
      bufferlist bl;
      encode(superblock, bl);
      ObjectStore::Transaction t;
      t.create_collection(cid, split_bits);
      t.write(cid, OSD_SUPERBLOCK_GOBJECT, 0, bl.length(), bl);
      int r = os->queue_transaction(ch, std::move(t));
      if (r < 0) {
	derr << "Failure to write OSD superblock: " << cpp_strerror(-r) << dendl;
	return r;
      }
    }
  }
  for (uint32_t i = 0; i < count; i++) {
    auto pg = spg_t{pg_t{i, pool}};
    coll_t cid(pg);
    bool exists = os->collection_exists(cid);
    auto ch = exists ?
      os->open_collection(cid) :
      os->create_new_collection(cid) ;
    collections.emplace_back(pg, ch);
    ObjectStore::Transaction t;
    auto& coll = collections.back();
    if (!exists) {
      t.create_collection(coll.cid, split_bits);
      ghobject_t pgmeta_oid(coll.pg.make_pgmeta_oid());
      t.touch(coll.cid, pgmeta_oid);
      int r = os->queue_transaction(coll.ch, std::move(t));
      if (r) {
	derr << "Engine init failed with " << cpp_strerror(-r) << dendl;
	destroy_collections(os, collections);
	return r;
      }
    }
  }
  return 0;
}
/// global engine state shared between all jobs within the process. this
/// includes g_ceph_context and the ObjectStore instance
struct Engine {
  /// the initial g_ceph_context reference to be dropped on destruction
  boost::intrusive_ptr<CephContext> cct;
  std::unique_ptr<ObjectStore> os;
  std::vector<Collection> collections; //< shared collections to spread objects over
  std::mutex lock;                     //< guards ref_count
  int ref_count;
  const bool unlink; //< unlink objects on destruction
  // file to which to output formatted perf information
  const std::optional<std::string> perf_output_file;
  explicit Engine(thread_data* td);
  ~Engine();
  // Meyers-singleton accessor; constructed with the first job's options.
  static Engine* get_instance(thread_data* td) {
    // note: creates an Engine with the options associated with the first job
    static Engine engine(td);
    return &engine;
  }
  void ref() {
    std::lock_guard<std::mutex> l(lock);
    ++ref_count;
  }
  // The last deref() dumps perf counters, optionally cleans up the
  // collections, and unmounts the store.
  void deref() {
    std::lock_guard<std::mutex> l(lock);
    --ref_count;
    if (!ref_count) {
      ostringstream ostr;
      Formatter* f = Formatter::create(
	"json-pretty", "json-pretty", "json-pretty");
      f->open_object_section("perf_output");
      cct->get_perfcounters_collection()->dump_formatted(f, false, false);
      if (g_conf()->rocksdb_perf) {
        f->open_object_section("rocksdb_perf");
        os->get_db_statistics(f);
        f->close_section();
      }
      mempool::dump(f);
      {
        f->open_object_section("db_histogram");
        os->generate_db_histogram(f);
        f->close_section();
      }
      f->close_section();
      f->flush(ostr);
      delete f;
      if (unlink) {
	destroy_collections(os, collections);
      }
      os->umount();
      dout(0) << "FIO plugin perf dump:" << dendl;
      dout(0) << ostr.str() << dendl;
      if (perf_output_file) {
	try {
	  std::ofstream foutput(*perf_output_file);
	  foutput << ostr.str() << std::endl;
	} catch (std::exception &e) {
	  std::cerr << "Unable to write formatted output to "
		    << *perf_output_file
		    << ", exception: " << e.what()
		    << std::endl;
	}
      }
    }
  }
};
// LTTng tracepoint provider hookup for bluestore tracing
TracepointProvider::Traits bluestore_tracepoint_traits("libbluestore_tp.so",
						       "bluestore_tracing");
/// Construct the process-wide engine: parse the ceph conf, init the global
/// CephContext, create/mkfs/mount the ObjectStore, and (in single-pool
/// mode) create the shared collections.  Throws on any failure.
Engine::Engine(thread_data* td)
  : ref_count(0),
    unlink(td->o.unlink),
    perf_output_file(
      static_cast<Options*>(td->eo)->perf_output_file ?
      std::make_optional(static_cast<Options*>(td->eo)->perf_output_file) :
      std::nullopt)
{
  // add the ceph command line arguments
  auto o = static_cast<Options*>(td->eo);
  if (!o->conf) {
    throw std::runtime_error("missing conf option for ceph configuration file");
  }
  std::vector<const char*> args{
    "-i", "0", // identify as osd.0 for osd_data and osd_journal
    "--conf", o->conf, // use the requested conf file
  };
  if (td->o.directory) { // allow conf files to use ${fio_dir} for data
    args.emplace_back("--fio_dir");
    args.emplace_back(td->o.directory);
  }
  // claim the g_ceph_context reference and release it on destruction
  cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_OSD,
		    CODE_ENVIRONMENT_UTILITY,
		    CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);
  TracepointProvider::initialize<bluestore_tracepoint_traits>(g_ceph_context);
  // create the ObjectStore
  os = ObjectStore::create(g_ceph_context,
			   g_conf().get_val<std::string>("osd objectstore"),
			   g_conf().get_val<std::string>("osd data"),
			   g_conf().get_val<std::string>("osd journal"));
  if (!os)
    throw std::runtime_error("bad objectstore type " + g_conf()->osd_objectstore);
  // pick the shard count matching the configured/detected media type
  unsigned num_shards;
  if(g_conf()->osd_op_num_shards)
    num_shards = g_conf()->osd_op_num_shards;
  else if(os->is_rotational())
    num_shards = g_conf()->osd_op_num_shards_hdd;
  else
    num_shards = g_conf()->osd_op_num_shards_ssd;
  os->set_cache_shards(num_shards);
  //normalize options: make sure each range's high end >= low end
  o->oi_attr_len_high = max(o->oi_attr_len_low, o->oi_attr_len_high);
  o->snapset_attr_len_high = max(o->snapset_attr_len_low,
				 o->snapset_attr_len_high);
  o->pglog_omap_len_high = max(o->pglog_omap_len_low,
			       o->pglog_omap_len_high);
  o->pglog_dup_omap_len_high = max(o->pglog_dup_omap_len_low,
				   o->pglog_dup_omap_len_high);
  o->_fastinfo_omap_len_high = max(o->_fastinfo_omap_len_low,
				   o->_fastinfo_omap_len_high);
  int r = os->mkfs();
  if (r < 0)
    throw std::system_error(-r, std::system_category(), "mkfs failed");
  r = os->mount();
  if (r < 0)
    throw std::system_error(-r, std::system_category(), "mount failed");
  // create shared collections up to osd_pool_default_pg_num
  if (o->single_pool_mode) {
    uint64_t count = g_conf().get_val<uint64_t>("osd_pool_default_pg_num");
    if (count > td->o.nr_files)
      count = td->o.nr_files;
    init_collections(os, Collection::MIN_POOL_ID, collections, count);
  }
}
// All cleanup happens in the final deref(); by destruction time every job
// must already have released its reference.
Engine::~Engine()
{
  ceph_assert(!ref_count);
}
/// Binds a fio file name to an object id within one of our collections.
struct Object {
  ghobject_t oid;
  Collection& coll;
  Object(const char* name, Collection& coll)
    : oid(hobject_t(name, "", CEPH_NOSNAP, coll.pg.ps(), coll.pg.pool(), "")),
      coll(coll) {}
};
/// treat each fio job either like a separate pool with its own collections and objects
/// or just a client using its own objects from the shared pool
struct Job {
  Engine* engine; //< shared ptr to the global Engine
  const unsigned subjob_number; //< subjob num
  std::vector<Collection> collections; //< job's private collections to spread objects over
  std::vector<Object> objects; //< associate an object with each fio_file
  std::vector<io_u*> events; //< completions for fio_ceph_os_event()
  const bool unlink; //< unlink objects on destruction
  bufferptr one_for_all_data; //< preallocated buffer long enough
                              //< to use for vairious operations
  // throttle-cycling state (see check_throttle())
  std::mutex throttle_lock;
  const vector<unsigned> throttle_values;
  const vector<unsigned> deferred_throttle_values;
  std::chrono::duration<double> cycle_throttle_period;
  mono_clock::time_point last = ceph::mono_clock::zero();
  unsigned index = 0;
  // parse a comma (or quote) separated list of unsigned throttle values;
  // a null pointer yields an empty vector
  static vector<unsigned> parse_throttle_str(const char *p) {
    vector<unsigned> ret;
    if (p == nullptr) {
      return ret;
    }
    ceph::for_each_substr(p, ",\"", [&ret] (auto &&s) mutable {
      if (s.size() > 0) {
	ret.push_back(std::stoul(std::string(s)));
      }
    });
    return ret;
  }
  void check_throttle();
  Job(Engine* engine, const thread_data* td);
  ~Job();
};
/// Per-job setup: take an Engine reference, size the scratch buffer for
/// the largest configured attr/omap payload, create or reuse the target
/// collections, and create one object per fio file (optionally
/// preallocating and/or verifying them).  Throws on any failure, after
/// dropping the Engine reference.
Job::Job(Engine* engine, const thread_data* td)
  : engine(engine),
    subjob_number(td->subjob_number),
    events(td->o.iodepth),
    unlink(td->o.unlink),
    throttle_values(
      parse_throttle_str(static_cast<Options*>(td->eo)->throttle_values)),
    deferred_throttle_values(
      parse_throttle_str(static_cast<Options*>(td->eo)->deferred_throttle_values)),
    cycle_throttle_period(
      static_cast<Options*>(td->eo)->cycle_throttle_period)
{
  engine->ref();
  auto o = static_cast<Options*>(td->eo);
  // one_for_all_data must fit the largest payload we may generate
  unsigned long long max_data = max(o->oi_attr_len_high,
				  o->snapset_attr_len_high);
  max_data = max(max_data, o->pglog_omap_len_high);
  max_data = max(max_data, o->pglog_dup_omap_len_high);
  max_data = max(max_data, o->_fastinfo_omap_len_high);
  one_for_all_data = buffer::create(max_data);
  std::vector<Collection>* colls;
  // create private collections up to osd_pool_default_pg_num
  if (!o->single_pool_mode) {
    uint64_t count = g_conf().get_val<uint64_t>("osd_pool_default_pg_num");
    if (count > td->o.nr_files)
      count = td->o.nr_files;
    // use the fio thread_number for our unique pool id
    const uint64_t pool = Collection::MIN_POOL_ID + td->thread_number + 1;
    init_collections(engine->os, pool, collections, count);
    colls = &collections;
  } else {
    colls = &engine->collections;
  }
  const uint64_t file_size = td->o.size / max(1u, td->o.nr_files);
  ObjectStore::Transaction t;
  // create an object for each file in the job
  objects.reserve(td->o.nr_files);
  unsigned checked_or_preallocated = 0;
  for (uint32_t i = 0; i < td->o.nr_files; i++) {
    auto f = td->files[i];
    f->real_file_size = file_size;
    f->engine_pos = i;
    // associate each object with a collection in a round-robin fashion.
    auto& coll = (*colls)[i % colls->size()];
    objects.emplace_back(f->file_name, coll);
    if (o->preallocate_files) {
      auto& oid = objects.back().oid;
      t.touch(coll.cid, oid);
      t.truncate(coll.cid, oid, file_size);
      int r = engine->os->queue_transaction(coll.ch, std::move(t));
      if (r) {
	engine->deref();
	throw std::system_error(r, std::system_category(), "job init");
      }
    }
    if (o->check_files) {
      auto& oid = objects.back().oid;
      struct stat st;
      int r = engine->os->stat(coll.ch, oid, &st);
      if (r || ((unsigned)st.st_size) != file_size) {
	derr << "Problem checking " << oid << ", r=" << r
	     << ", st.st_size=" << st.st_size
	     << ", file_size=" << file_size
	     << ", nr_files=" << td->o.nr_files << dendl;
	engine->deref();
	throw std::system_error(
	  r, std::system_category(), "job init -- cannot check file");
      }
    }
    if (o->check_files || o->preallocate_files) {
      ++checked_or_preallocated;
    }
  }
  if (o->check_files) {
    derr << "fio_ceph_objectstore checked " << checked_or_preallocated
	 << " files"<< dendl;
  }
  if (o->preallocate_files ){
    derr << "fio_ceph_objectstore preallocated " << checked_or_preallocated
	 << " files"<< dendl;
  }
}
/// Per-job teardown: optionally remove our objects and private
/// collections, then drop the Engine reference (the last drop unmounts).
Job::~Job()
{
  if (unlink) {
    // NOTE: t is reused after std::move; queue_transaction leaves it in a
    // valid empty state suitable for refilling.
    ObjectStore::Transaction t;
    bool failed = false;
    // remove our objects
    for (auto& obj : objects) {
      t.remove(obj.coll.cid, obj.oid);
      int r = engine->os->queue_transaction(obj.coll.ch, std::move(t));
      if (r && !failed) {
	derr << "job cleanup failed with " << cpp_strerror(-r) << dendl;
	failed = true;
      }
    }
    destroy_collections(engine->os, collections);
  }
  engine->deref();
}
/// Periodically cycle bluestore throttle settings through the configured
/// value lists.  Only subjob 0 applies changes, so the settings (which are
/// global) are not updated concurrently by sibling subjobs.  The index
/// walks the cross-product of the two lists.
void Job::check_throttle()
{
  if (subjob_number != 0)
    return;
  std::lock_guard<std::mutex> l(throttle_lock);
  if (throttle_values.empty() && deferred_throttle_values.empty())
    return;
  // apply on the first call, then again whenever the cycle period elapses
  if (ceph::mono_clock::is_zero(last) ||
      ((cycle_throttle_period != cycle_throttle_period.zero()) &&
       (ceph::mono_clock::now() - last) > cycle_throttle_period)) {
    unsigned tvals = throttle_values.size() ? throttle_values.size() : 1;
    unsigned dtvals = deferred_throttle_values.size() ? deferred_throttle_values.size() : 1;
    if (!throttle_values.empty()) {
      std::string val = std::to_string(throttle_values[index % tvals]);
      std::cerr << "Setting bluestore_throttle_bytes to " << val << std::endl;
      int r = engine->cct->_conf.set_val(
	"bluestore_throttle_bytes",
	val,
	nullptr);
      ceph_assert(r == 0);
    }
    if (!deferred_throttle_values.empty()) {
      std::string val = std::to_string(deferred_throttle_values[(index / tvals) % dtvals]);
      std::cerr << "Setting bluestore_deferred_throttle_bytes to " << val << std::endl;
      int r = engine->cct->_conf.set_val(
	"bluestore_throttle_deferred_bytes",
	val,
	nullptr);
      ceph_assert(r == 0);
    }
    engine->cct->_conf.apply_changes(nullptr);
    index++;
    index %= tvals * dtvals;
    last = ceph::mono_clock::now();
  }
}
/// fio setup hook: obtain the process-wide Engine and attach a per-thread
/// Job.  Returns -1 (and logs) if either construction throws.
int fio_ceph_os_setup(thread_data* td)
{
  // if there are multiple jobs, they must run in the same process against a
  // single instance of the ObjectStore. explicitly disable fio's default
  // job-per-process configuration
  td->o.use_thread = 1;
  try {
    // get or create the global Engine instance
    auto engine = Engine::get_instance(td);
    // create a Job for this thread
    td->io_ops_data = new Job(engine, td);
  } catch (std::exception& e) {
    std::cerr << "setup failed with " << e.what() << std::endl;
    return -1;
  }
  return 0;
}
// fio cleanup hook: tear down the per-thread Job (which in turn drops its
// Engine reference); clear io_ops_data so nothing can reuse it.
void fio_ceph_os_cleanup(thread_data* td)
{
  if (td->io_ops_data) {
    delete static_cast<Job*>(td->io_ops_data);
    td->io_ops_data = nullptr;
  }
}
// fio event hook: hand back the io_u recorded at slot 'event' by
// fio_ceph_os_getevents().
io_u* fio_ceph_os_event(thread_data* td, int event)
{
  return static_cast<Job*>(td->io_ops_data)->events[event];
}
/// Poll all in-flight io_us for completion flags (set by UnitComplete)
/// until at least 'min' are found; completed io_us are recorded in
/// job->events for fio_ceph_os_event().
/// NOTE(review): the returned count is not clamped to 'max'; it appears
/// bounded by iodepth (the events vector size) -- confirm against fio's
/// getevents contract.
int fio_ceph_os_getevents(thread_data* td, unsigned int min,
                          unsigned int max, const timespec* t)
{
  auto job = static_cast<Job*>(td->io_ops_data);
  unsigned int events = 0;
  io_u* u = NULL;
  unsigned int i = 0;
  // loop through inflight ios until we find 'min' completions
  do {
    io_u_qiter(&td->io_u_all, u, i) {
      if (!(u->flags & IO_U_F_FLIGHT))
        continue;
      // engine_data is used as a 'completed' flag; consume and reset it
      if (u->engine_data) {
        u->engine_data = nullptr;
        job->events[events] = u;
        events++;
      }
    }
    if (events >= min)
      break;
    usleep(100);
  } while (1);
  return events;
}
/// completion context for ObjectStore::queue_transaction()
class UnitComplete : public Context {
io_u* u;
public:
explicit UnitComplete(io_u* u) : u(u) {}
void finish(int r) {
// mark the pointer to indicate completion for fio_ceph_os_getevents()
u->engine_data = reinterpret_cast<void*>(1ull);
}
};
/// Main I/O hook.  Writes build a transaction (data write plus optional
/// synthetic xattrs, omap entries and simulated pglog churn) and are
/// completed asynchronously via UnitComplete; reads are synchronous and
/// return FIO_Q_COMPLETED immediately.  Other directions are rejected.
enum fio_q_status fio_ceph_os_queue(thread_data* td, io_u* u)
{
  fio_ro_check(td, u);
  auto o = static_cast<const Options*>(td->eo);
  auto job = static_cast<Job*>(td->io_ops_data);
  auto& object = job->objects[u->file->engine_pos];
  auto& coll = object.coll;
  auto& os = job->engine->os;
  job->check_throttle();
  if (u->ddir == DDIR_WRITE) {
    // provide a hint if we're likely to read this data back
    const int flags = td_rw(td) ? CEPH_OSD_OP_FLAG_FADVISE_WILLNEED : 0;
    bufferlist bl;
    bl.push_back(buffer::copy(reinterpret_cast<char*>(u->xfer_buf),
                              u->xfer_buflen ) );
    map<string,bufferptr,less<>> attrset;
    map<string, bufferlist> omaps;
    // enqueue a write transaction on the collection's handle
    ObjectStore::Transaction t;
    char ver_key[64];
    // fill attrs if any
    if (o->oi_attr_len_high) {
      ceph_assert(o->oi_attr_len_high >= o->oi_attr_len_low);
      // fill with the garbage as we do not care of the actual content...
      job->one_for_all_data.set_length(
        ceph::util::generate_random_number(
	  o->oi_attr_len_low, o->oi_attr_len_high));
      attrset["_"] = job->one_for_all_data;
    }
    if (o->snapset_attr_len_high) {
      ceph_assert(o->snapset_attr_len_high >= o->snapset_attr_len_low);
      job->one_for_all_data.set_length(
        ceph::util::generate_random_number
	  (o->snapset_attr_len_low, o->snapset_attr_len_high));
      attrset["snapset"] = job->one_for_all_data;
    }
    if (o->_fastinfo_omap_len_high) {
      ceph_assert(o->_fastinfo_omap_len_high >= o->_fastinfo_omap_len_low);
      // fill with the garbage as we do not care of the actual content...
      job->one_for_all_data.set_length(
	ceph::util::generate_random_number(
	  o->_fastinfo_omap_len_low, o->_fastinfo_omap_len_high));
      omaps["_fastinfo"].append(job->one_for_all_data);
    }
    uint64_t pglog_trim_head = 0, pglog_trim_tail = 0;
    uint64_t pglog_dup_trim_head = 0, pglog_dup_trim_tail = 0;
    if (o->simulate_pglog) {
      // advance the per-collection log head under its lock and compute
      // which ranges (if any) are due for trimming
      uint64_t pglog_ver_cnt = 0;
      {
	std::lock_guard<std::mutex> l(*coll.lock);
	pglog_ver_cnt = coll.pglog_ver_head++;
	if (o->pglog_omap_len_high &&
	    pglog_ver_cnt >=
	      coll.pglog_ver_tail +
	        g_conf()->osd_min_pg_log_entries + g_conf()->osd_pg_log_trim_min) {
	  pglog_trim_tail = coll.pglog_ver_tail;
	  coll.pglog_ver_tail = pglog_trim_head =
	    pglog_trim_tail + g_conf()->osd_pg_log_trim_min;
	  if (o->pglog_dup_omap_len_high &&
	      pglog_ver_cnt >=
		coll.pglog_dup_ver_tail + g_conf()->osd_pg_log_dups_tracked +
		  g_conf()->osd_pg_log_trim_min) {
	    pglog_dup_trim_tail = coll.pglog_dup_ver_tail;
	    coll.pglog_dup_ver_tail = pglog_dup_trim_head =
	      pglog_dup_trim_tail + g_conf()->osd_pg_log_trim_min;
	  }
	}
      }
      if (o->pglog_omap_len_high) {
	ceph_assert(o->pglog_omap_len_high >= o->pglog_omap_len_low);
	snprintf(ver_key, sizeof(ver_key),
	  "0000000011.%020llu", (unsigned long long)pglog_ver_cnt);
	// fill with the garbage as we do not care of the actual content...
	job->one_for_all_data.set_length(
	  ceph::util::generate_random_number(
	    o->pglog_omap_len_low, o->pglog_omap_len_high));
	omaps[ver_key].append(job->one_for_all_data);
      }
      if (o->pglog_dup_omap_len_high) {
	//insert dup
	ceph_assert(o->pglog_dup_omap_len_high >= o->pglog_dup_omap_len_low);
        for( auto i = pglog_trim_tail; i < pglog_trim_head; ++i) {
	  snprintf(ver_key, sizeof(ver_key),
	    "dup_0000000011.%020llu", (unsigned long long)i);
	  // fill with the garbage as we do not care of the actual content...
	  job->one_for_all_data.set_length(
	    ceph::util::generate_random_number(
	      o->pglog_dup_omap_len_low, o->pglog_dup_omap_len_high));
	  omaps[ver_key].append(job->one_for_all_data);
	}
      }
    }
    if (!attrset.empty()) {
      t.setattrs(coll.cid, object.oid, attrset);
    }
    t.write(coll.cid, object.oid, u->offset, u->xfer_buflen, bl, flags);
    // trimmed pglog (and dup) keys are removed from the pgmeta object
    set<string> rmkeys;
    for( auto i = pglog_trim_tail; i < pglog_trim_head; ++i) {
      snprintf(ver_key, sizeof(ver_key),
	"0000000011.%020llu", (unsigned long long)i);
      rmkeys.emplace(ver_key);
    }
    for( auto i = pglog_dup_trim_tail; i < pglog_dup_trim_head; ++i) {
      snprintf(ver_key, sizeof(ver_key),
	"dup_0000000011.%020llu", (unsigned long long)i);
      rmkeys.emplace(ver_key);
    }
    if (rmkeys.size()) {
      ghobject_t pgmeta_oid(coll.pg.make_pgmeta_oid());
      t.omap_rmkeys(coll.cid, pgmeta_oid, rmkeys);
    }
    if (omaps.size()) {
      ghobject_t pgmeta_oid(coll.pg.make_pgmeta_oid());
      t.omap_setkeys(coll.cid, pgmeta_oid, omaps);
    }
    t.register_on_commit(new UnitComplete(u));
    os->queue_transaction(coll.ch,
			  std::move(t));
    return FIO_Q_QUEUED;
  }
  if (u->ddir == DDIR_READ) {
    // ObjectStore reads are synchronous, so make the call and return COMPLETED
    bufferlist bl;
    int r = os->read(coll.ch, object.oid, u->offset, u->xfer_buflen, bl);
    if (r < 0) {
      u->error = r;
      td_verror(td, u->error, "xfer");
    } else {
      bl.begin().copy(bl.length(), static_cast<char*>(u->xfer_buf));
      u->resid = u->xfer_buflen - r;
    }
    return FIO_Q_COMPLETED;
  }
  derr << "WARNING: Only DDIR_READ and DDIR_WRITE are supported!" << dendl;
  u->error = -EINVAL;
  td_verror(td, u->error, "xfer");
  return FIO_Q_COMPLETED;
}
/// No-op commit hook; queue() submits each transaction directly.
int fio_ceph_os_commit(thread_data* td)
{
  // commit() allows the engine to batch up queued requests to be submitted all
  // at once. it would be natural for queue() to collect transactions in a list,
  // and use commit() to pass them all to ObjectStore::queue_transactions(). but
  // because we spread objects over multiple collections, we a) need to use a
  // different sequencer for each collection, and b) are less likely to see a
  // benefit from batching requests within a collection
  return 0;
}
// open/close are noops. we set the FIO_DISKLESSIO flag in ioengine_ops to
// prevent fio from creating the files
int fio_ceph_os_open(thread_data* td, fio_file* f) { return 0; }
int fio_ceph_os_close(thread_data* td, fio_file* f) { return 0; }
/// Reset the per-io_u completion flag before use.
int fio_ceph_os_io_u_init(thread_data* td, io_u* u)
{
  // no data is allocated, we just use the pointer as a boolean 'completed' flag
  u->engine_data = nullptr;
  return 0;
}
/// Nothing to free; just clear the flag pointer.
void fio_ceph_os_io_u_free(thread_data* td, io_u* u)
{
  u->engine_data = nullptr;
}
// ioengine_ops for get_ioengine(); the constructor wires every fio hook to
// the functions above.
struct ceph_ioengine : public ioengine_ops {
  ceph_ioengine() : ioengine_ops({}) {
    name        = "ceph-os";
    version     = FIO_IOOPS_VERSION;
    flags       = FIO_DISKLESSIO;
    setup       = fio_ceph_os_setup;
    queue       = fio_ceph_os_queue;
    commit      = fio_ceph_os_commit;
    getevents   = fio_ceph_os_getevents;
    event       = fio_ceph_os_event;
    cleanup     = fio_ceph_os_cleanup;
    open_file   = fio_ceph_os_open;
    close_file  = fio_ceph_os_close;
    io_u_init   = fio_ceph_os_io_u_init;
    io_u_free   = fio_ceph_os_io_u_free;
    options     = ceph_options.data();
    option_struct_size = sizeof(struct Options);
  }
};
} // anonymous namespace
extern "C" {
// the exported fio engine interface
// (entry point fio dlopen()s from the external engine .so; the static
// instance lives for the life of the process)
void get_ioengine(struct ioengine_ops** ioengine_ptr) {
  static ceph_ioengine ioengine;
  *ioengine_ptr = &ioengine;
}
} // extern "C"
| 29,440 | 30.220573 | 92 |
cc
|
null |
ceph-main/src/test/fio/fio_librgw.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <stdint.h>
#include <tuple>
#include <vector>
#include <functional>
#include <iostream>
#include <semaphore.h> // XXX kill this?
#include "fmt/include/fmt/format.h"
#include "include/rados/librgw.h"
#include "include/rados/rgw_file.h"
//#include "rgw/rgw_file.h"
//#include "rgw/rgw_lib_frontend.h" // direct requests
/* naughty fio.h leaks min and max as C macros--include it last */
#include <fio.h>
#include <optgroup.h>
#undef min
#undef max
namespace {
// pairs an in-flight io_u with a completion flag.
// NOTE(review): not referenced anywhere in the visible code of this engine --
// looks like a leftover from an async implementation; confirm before removing.
struct librgw_iou {
  struct io_u *io_u;
  int io_complete;
};
// Per-thread engine state: librgw/rgw_fs handles plus bookkeeping needed to
// release rgw file handles when the thread shuts down.
struct librgw_data {
  io_u** aio_events;                     // event slots sized by iodepth
  librgw_t rgw_h;
  rgw_fs* fs;
  rgw_file_handle* bucket_fh;
  std::vector<rgw_file_handle*> fh_vec;  // closed-but-still-referenced handles

  librgw_data(thread_data* td)
    : rgw_h(nullptr), fs(nullptr), bucket_fh(nullptr)
  {
    // calloc zeroes the array in one call (replaces malloc + memset)
    aio_events = static_cast<io_u**>(calloc(td->o.iodepth, sizeof(io_u*)));
  }

  // remember a handle so release_handles() can drop its reference later
  void save_handle(rgw_file_handle* fh) {
    fh_vec.push_back(fh);
  }

  // drop every saved rgw file handle reference
  void release_handles() {
    for (auto object_fh : fh_vec) {
      rgw_fh_rele(fs, object_fh, RGW_FH_RELE_FLAG_NONE);
    }
    fh_vec.clear();
  }

  ~librgw_data() {
    free(aio_events);
  }
};
// fio job options for this engine; filled in by fio via the offsetof()-based
// ::options table below
struct opt_struct {
  struct thread_data *td;
  const char* config; /* can these be std::strings? */
  const char* cluster;
  const char* name; // instance?
  const char* init_args;
  const char* access_key;
  const char* secret_key;
  const char* userid;
  const char* bucket_name;
  uint32_t owner_uid = 867;   // default uid/gid applied when creating the bucket
  uint32_t owner_gid = 5309;
};
// attribute mask for rgw_mkdir: take uid, gid and mode from the struct stat
uint32_t create_mask = RGW_SETATTR_UID | RGW_SETATTR_GID | RGW_SETATTR_MODE;
/* borrowed from fio_ceph_objectstore */
// build a zero-initialized fio_option with engine-wide defaults, then let the
// caller's lambda fill in the per-option fields
template <class F>
fio_option make_option(F&& func)
{
  // zero-initialize and set common defaults
  auto o = fio_option{};
  o.category = FIO_OPT_C_ENGINE;
  o.group = FIO_OPT_G_INVALID;
  func(std::ref(o));
  return o;
}
// option table handed to fio via ioengine_ops::options.
// NOTE(review): the explicit o.category/o.group assignments below are
// redundant -- make_option() already sets those defaults (the first entry
// relies on that).
static std::vector<fio_option> options = {
  make_option([] (fio_option& o) {
    o.name = "ceph_conf";
    o.lname = "ceph configuration file";
    o.type = FIO_OPT_STR_STORE;
    o.help = "Path to ceph.conf file";
    o.off1 = offsetof(opt_struct, config);
  }),
  make_option([] (fio_option& o) {
    o.name = "ceph_name";
    o.lname = "ceph instance name";
    o.type = FIO_OPT_STR_STORE;
    o.help = "Name of this program instance";
    o.off1 = offsetof(opt_struct, name);
    o.category = FIO_OPT_C_ENGINE;
    o.group = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name = "ceph_cluster";
    o.lname = "ceph cluster name";
    o.type = FIO_OPT_STR_STORE;
    o.help = "Name of ceph cluster (default=ceph)";
    o.off1 = offsetof(opt_struct, cluster);
    o.category = FIO_OPT_C_ENGINE;
    o.group = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name = "ceph_init_args";
    o.lname = "ceph init args";
    o.type = FIO_OPT_STR_STORE;
    o.help = "Extra ceph arguments (e.g., -d --debug-rgw=16)";
    o.off1 = offsetof(opt_struct, init_args);
    o.category = FIO_OPT_C_ENGINE;
    o.group = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name = "access_key";
    o.lname = "AWS access key";
    o.type = FIO_OPT_STR_STORE;
    o.help = "AWS access key";
    o.off1 = offsetof(opt_struct, access_key);
    o.category = FIO_OPT_C_ENGINE;
    o.group = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name = "secret_key";
    o.lname = "AWS secret key";
    o.type = FIO_OPT_STR_STORE;
    o.help = "AWS secret key";
    o.off1 = offsetof(opt_struct, secret_key);
    o.category = FIO_OPT_C_ENGINE;
    o.group = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name = "userid";
    o.lname = "userid";
    o.type = FIO_OPT_STR_STORE;
    o.help = "userid corresponding to access key";
    o.off1 = offsetof(opt_struct, userid);
    o.category = FIO_OPT_C_ENGINE;
    o.group = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name = "bucket_name";
    o.lname = "S3 bucket";
    o.type = FIO_OPT_STR_STORE;
    o.help = "S3 bucket to operate on";
    o.off1 = offsetof(opt_struct, bucket_name);
    o.category = FIO_OPT_C_ENGINE;
    o.group = FIO_OPT_G_INVALID;
  }),
  {} // fio expects a 'null'-terminated list
};
// Owns a small argv-style vector of strdup'd strings fed to librgw_create().
// Fixes: the old destructor only nulled the pointers, leaking every strdup'd
// string; push_arg() could also write past the fixed argv[8] table.
struct save_args {
  int argc;
  char *argv[8];
  save_args() : argc(1)
  {
    argv[0] = strdup("librgw");
    for (int ix = 1; ix < 8; ++ix) {
      argv[ix] = nullptr;
    }
  }
  // append one argument; silently ignored once the fixed table is full
  void push_arg(const std::string& sarg) {
    if (argc < 8) {
      argv[argc++] = strdup(sarg.c_str());
    }
  }
  ~save_args() {
    // free the duplicated strings (previously leaked)
    for (int ix = 0; ix < argc; ++ix) {
      free(argv[ix]);
      argv[ix] = nullptr;
    }
  }
} args;
/*
* It looks like the setup function is called once, on module load.
* It's not documented in the skeleton driver.
*/
// One-time engine setup: build librgw init args from the job options, create
// and mount librgw, and look up (creating if needed) the target bucket.
// Fixes: st.st_mode was the decimal literal 755 (== 01363 octal) instead of
// 0755; the struct stat was not zero-initialized; a stray std::cout of o.name
// wrote to stdout and dereferenced a possibly-null pointer.
static int fio_librgw_setup(struct thread_data* td)
{
  opt_struct& o = *(reinterpret_cast<opt_struct*>(td->eo));
  librgw_data* data = nullptr;
  int r = 0;

  dprint(FD_IO, "fio_librgw_setup\n");

  if (! td->io_ops_data) {
    data = new librgw_data(td);

    /* init args */
    std::string sopt;
    if (o.config) {
      sopt = fmt::format("--conf={}", o.config);
      args.push_arg(sopt);
    }
    if (o.name) {
      sopt = fmt::format("--name={}", o.name);
      args.push_arg(sopt);
    }
    if (o.cluster) {
      sopt = fmt::format("--cluster={}", o.cluster);
      args.push_arg(sopt);
    }
    if (o.init_args) {
      args.push_arg(std::string(o.init_args));
    }

    r = librgw_create(&data->rgw_h, args.argc, args.argv);
    if (!! r) {
      // NOTE(review): 'data' is not freed on the early-return error paths
      dprint(FD_IO, "librgw_create failed\n");
      return r;
    }

    r = rgw_mount2(data->rgw_h, o.userid, o.access_key, o.secret_key, "/",
                   &data->fs, RGW_MOUNT_FLAG_NONE);
    if (!! r) {
      dprint(FD_IO, "rgw_mount2 failed\n");
      return r;
    }

    /* go ahead and lookup the bucket as well */
    r = rgw_lookup(data->fs, data->fs->root_fh, o.bucket_name,
                   &data->bucket_fh, nullptr, 0, RGW_LOOKUP_FLAG_NONE);
    if (! data->bucket_fh) {
      dprint(FD_IO, "rgw_lookup on bucket %s failed, will create\n",
             o.bucket_name);

      struct stat st = {};  // zero the fields create_mask doesn't cover
      st.st_uid = o.owner_uid;
      st.st_gid = o.owner_gid;
      st.st_mode = 0755;    // octal mode; was decimal 755 (== 01363)

      r = rgw_mkdir(data->fs, data->fs->root_fh, o.bucket_name,
                    &st, create_mask, &data->bucket_fh, RGW_MKDIR_FLAG_NONE);
      if (! data->bucket_fh) {
        dprint(FD_IO, "rgw_mkdir for bucket %s failed\n", o.bucket_name);
        return EINVAL;
      }
    }
    td->io_ops_data = data;
  }

  td->o.use_thread = 1;

  if (r != 0) {
    abort();
  }

  return r;
}
/*
* The init function is called once per thread/process, and should set up
* any structures that this io engine requires to keep track of io. Not
* required.
*/
// per-thread init hook; all state was already built in fio_librgw_setup()
static int fio_librgw_init(struct thread_data *td)
{
  dprint(FD_IO, "fio_librgw_init\n");
  return 0;
}
/*
* This is paired with the ->init() function and is called when a thread is
* done doing io. Should tear down anything setup by the ->init() function.
* Not required.
*
* N.b., the cohort driver made this idempotent by allocating data in
* setup, clearing data here if present, and doing nothing in the
* subsequent per-thread invocations.
*/
// tear down in reverse order of setup: release saved handles, drop the
// bucket handle, unmount, then shut librgw down.
// NOTE(review): the return codes collected in 'r' are never checked.
static void fio_librgw_cleanup(struct thread_data *td)
{
  int r = 0;

  dprint(FD_IO, "fio_librgw_cleanup\n");

  /* cleanup specific data */
  librgw_data* data = static_cast<librgw_data*>(td->io_ops_data);
  if (data) {
    /* release active handles */
    data->release_handles();
    if (data->bucket_fh) {
      r = rgw_fh_rele(data->fs, data->bucket_fh, 0 /* flags */);
    }
    r = rgw_umount(data->fs, RGW_UMOUNT_FLAG_NONE);
    librgw_shutdown(data->rgw_h);
    td->io_ops_data = nullptr;
    delete data;
  }
}
/*
* The ->prep() function is called for each io_u prior to being submitted
* with ->queue(). This hook allows the io engine to perform any
* preparatory actions on the io_u, before being submitted. Not required.
*/
static int fio_librgw_prep(struct thread_data *td, struct io_u *io_u)
{
  // nothing to prepare: each request is handled fully inside queue()
  return 0;
}
/*
* The ->event() hook is called to match an event number with an io_u.
* After the core has called ->getevents() and it has returned eg 3,
* the ->event() hook must return the 3 events that have completed for
* subsequent calls to ->event() with [0-2]. Required.
*/
static struct io_u *fio_librgw_event(struct thread_data *td, int event)
{
  // synchronous engine: queue() never leaves anything pending, so there are
  // no completion events to hand back
  return NULL;
}
/*
* The ->getevents() hook is used to reap completion events from an async
* io engine. It returns the number of completed events since the last call,
* which may then be retrieved by calling the ->event() hook with the event
* numbers. Required.
*/
static int fio_librgw_getevents(struct thread_data *td, unsigned int min,
                                unsigned int max, const struct timespec *t)
{
  // synchronous engine: nothing ever completes asynchronously
  return 0;
}
/*
* The ->cancel() hook attempts to cancel the io_u. Only relevant for
* async io engines, and need not be supported.
*/
static int fio_librgw_cancel(struct thread_data *td, struct io_u *io_u)
{
  // nothing to cancel in a synchronous engine
  return 0;
}
/*
* The ->queue() hook is responsible for initiating io on the io_u
* being passed in. If the io engine is a synchronous one, io may complete
* before ->queue() returns. Required.
*
* The io engine must transfer in the direction noted by io_u->ddir
* to the buffer pointed to by io_u->xfer_buf for as many bytes as
* io_u->xfer_buflen. Residual data count may be set in io_u->resid
* for a short read/write.
*/
/*
 * Perform one synchronous read or write through librgw.
 *
 * Fix: the DDIR_WRITE branch used to call rgw_close() + save_handle() itself
 * and then fall through to the shared cleanup below, which closed the handle
 * a second time and saved it twice -- leading to a double rgw_fh_rele() in
 * release_handles().  The shared cleanup now closes/saves exactly once for
 * both directions.
 */
static enum fio_q_status fio_librgw_queue(struct thread_data *td,
                                          struct io_u *io_u)
{
  librgw_data* data = static_cast<librgw_data*>(td->io_ops_data);
  const char* object = io_u->file->file_name;
  struct rgw_file_handle* object_fh = nullptr;
  size_t nbytes;
  int r = 0;

  /*
   * Double sanity check to catch errant write on a readonly setup
   */
  fio_ro_check(td, io_u);

  if (io_u->ddir == DDIR_WRITE) {
    /* Do full write cycle */
    r = rgw_lookup(data->fs, data->bucket_fh, object, &object_fh, nullptr, 0,
                   RGW_LOOKUP_FLAG_CREATE);
    if (!! r) {
      dprint(FD_IO, "rgw_lookup failed to create filehandle for %s\n",
             object);
      goto out;
    }

    r = rgw_open(data->fs, object_fh, 0 /* posix flags */, 0 /* flags */);
    if (!! r) {
      dprint(FD_IO, "rgw_open failed to create filehandle for %s\n",
             object);
      rgw_fh_rele(data->fs, object_fh, RGW_FH_RELE_FLAG_NONE);
      goto out;
    }

    /* librgw can write at any offset, but only sequentially
     * starting at 0, in one open/write/close cycle */
    r = rgw_write(data->fs, object_fh, 0, io_u->xfer_buflen, &nbytes,
                  (void*) io_u->xfer_buf, RGW_WRITE_FLAG_NONE);
    if (!! r) {
      dprint(FD_IO, "rgw_write failed for %s\n",
             object);
    }
    /* close + save happens once, in the shared cleanup below */
  } else if (io_u->ddir == DDIR_READ) {

    r = rgw_lookup(data->fs, data->bucket_fh, object, &object_fh,
                   nullptr, 0, RGW_LOOKUP_FLAG_NONE);
    if (!! r) {
      dprint(FD_IO, "rgw_lookup failed to create filehandle for %s\n",
             object);
      goto out;
    }

    r = rgw_open(data->fs, object_fh, 0 /* posix flags */, 0 /* flags */);
    if (!! r) {
      dprint(FD_IO, "rgw_open failed to create filehandle for %s\n",
             object);
      rgw_fh_rele(data->fs, object_fh, RGW_FH_RELE_FLAG_NONE);
      goto out;
    }

    r = rgw_read(data->fs, object_fh, io_u->offset, io_u->xfer_buflen,
                 &nbytes, io_u->xfer_buf, RGW_READ_FLAG_NONE);
    if (!! r) {
      dprint(FD_IO, "rgw_read failed for %s\n",
             object);
    }
  } else {
    dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
           io_u->ddir);
  }

  if (object_fh) {
    r = rgw_close(data->fs, object_fh, 0 /* flags */);

    /* object_fh is closed but still reachable, save it */
    data->save_handle(object_fh);
  }

out:
  /*
   * Could return FIO_Q_QUEUED for a queued request,
   * FIO_Q_COMPLETED for a completed request, and FIO_Q_BUSY
   * if we could queue no more at this point (you'd have to
   * define ->commit() to handle that.
   */
  return FIO_Q_COMPLETED;
}
int fio_librgw_commit(thread_data* td)
{
  // commit() would let an engine batch queued requests and submit them in one
  // go.  This engine completes every request synchronously inside queue(), so
  // there is nothing to flush here.  (The previous comment about ObjectStore
  // collections was copy-pasted from the objectstore engine.)
  return 0;
}
/*
* Hook for opening the given file. Unless the engine has special
* needs, it usually just provides generic_open_file() as the handler.
*/
// fio file-open hook; rgw objects are opened per-request inside queue()
static int fio_librgw_open(struct thread_data *td, struct fio_file *f)
{
  /* for now, let's try to avoid doing open/close in these hooks */
  return 0;
}
/*
* Hook for closing a file. See fio_librgw_open().
*/
// fio file-close hook, paired with fio_librgw_open()
static int fio_librgw_close(struct thread_data *td, struct fio_file *f)
{
  /* for now, let's try to avoid doing open/close in these hooks */
  return 0;
}
/* XXX next two probably not needed */
// per-io_u init hook: clear the engine-private field
int fio_librgw_io_u_init(thread_data* td, io_u* u)
{
  // no data is allocated, we just use the pointer as a boolean 'completed' flag
  u->engine_data = nullptr;
  return 0;
}
// per-io_u teardown hook; nothing was allocated
void fio_librgw_io_u_free(thread_data* td, io_u* u)
{
  u->engine_data = nullptr;
}
// wires the fio callback table to the librgw engine functions above
struct librgw_ioengine : public ioengine_ops
{
  librgw_ioengine() : ioengine_ops({}) {
    name        = "librgw";
    version     = FIO_IOOPS_VERSION;
    flags       = FIO_DISKLESSIO;  // fio must not create/open real files
    setup       = fio_librgw_setup;
    init        = fio_librgw_init;
    queue       = fio_librgw_queue;
    commit      = fio_librgw_commit;
    getevents   = fio_librgw_getevents;
    event       = fio_librgw_event;
    cleanup     = fio_librgw_cleanup;
    open_file   = fio_librgw_open;
    close_file  = fio_librgw_close;
    io_u_init   = fio_librgw_io_u_init;
    io_u_free   = fio_librgw_io_u_free;
    options     = ::options.data();
    option_struct_size = sizeof(opt_struct);
  }
};
} // namespace
extern "C" {
// the exported fio engine interface
void get_ioengine(struct ioengine_ops** ioengine_ptr) {
  // function-local static: constructed on first call, lives until exit
  static librgw_ioengine ioengine;
  *ioengine_ptr = &ioengine;
}
} // extern "C"
| 15,229 | 27.151571 | 82 |
cc
|
null |
ceph-main/src/test/fio/ring_buffer.h
|
/*
* Very simple and fast lockless ring buffer implementatation for
* one producer and one consumer.
*/
#include <stdint.h>
#include <stddef.h>
/* Do not overcomplicate, choose generic x86 case */
#define L1_CACHE_BYTES 64
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
struct ring_buffer
{
  // consumer/producer indices count BYTES and run modulo 2*size; each sits on
  // its own cache line (see __cacheline_aligned) so the two sides don't share
  unsigned int read_idx __cacheline_aligned;
  unsigned int write_idx __cacheline_aligned;
  unsigned int size;        // capacity in bytes (power-of-two count * sizeof(void*))
  unsigned int low_mask;    // size - 1: index -> byte offset into data_ptr
  unsigned int high_mask;   // 2*size - 1: wraps indices in a double-length ring
  unsigned int bit_shift;   // log2(sizeof(void*)): converts bytes -> elements
  void *data_ptr;           // backing storage for the stored pointers
};
/*
 * Round v up to the next power of two (v itself if already one).
 * Fix: v == 0 used to wrap (0-1 smears to all-ones, +1 overflows back to 0)
 * and return 0, which is not a power of two; return 1 instead.
 */
static inline unsigned int upper_power_of_two(unsigned int v)
{
	if (v == 0)
		return 1;
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	v++;

	return v;
}
/*
 * Initialize a ring able to hold 'size' pointers (rounded up to a power of
 * two).  Returns 0 on success, -1 if the backing allocation fails.
 * Fixes: size == 0 passed the power-of-two test ((size-1) & size == 0) and
 * allocated nothing; the malloc() result was never checked.
 */
static inline int ring_buffer_init(struct ring_buffer* rbuf, unsigned int size)
{
	/* Must be pow2; treat 0 as a request for the minimum ring */
	if (size == 0)
		size = 1;
	else if (((size-1) & size))
		size = upper_power_of_two(size);

	size *= sizeof(void *);
	rbuf->data_ptr = malloc(size);
	if (!rbuf->data_ptr)
		return -1;
	rbuf->size = size;
	rbuf->read_idx = 0;
	rbuf->write_idx = 0;
	rbuf->bit_shift = __builtin_ffs(sizeof(void *))-1;
	rbuf->low_mask = rbuf->size - 1;
	rbuf->high_mask = rbuf->size * 2 - 1;

	return 0;
}
/* release the storage allocated by ring_buffer_init() */
static inline void ring_buffer_deinit(struct ring_buffer* rbuf)
{
	free(rbuf->data_ptr);
}
/*
 * Number of queued elements.  The full barrier makes the producer's latest
 * write_idx (and the pointer it published) visible to the consumer; the
 * byte difference is shifted by bit_shift to convert bytes -> elements.
 */
static inline unsigned int ring_buffer_used_size(const struct ring_buffer* rbuf)
{
	__sync_synchronize();
	return ((rbuf->write_idx - rbuf->read_idx) & rbuf->high_mask) >>
		rbuf->bit_shift;
}
/* producer side: store the pointer first, then publish it by advancing
 * write_idx (the barrier orders the two stores) */
static inline void ring_buffer_enqueue(struct ring_buffer* rbuf, void *ptr)
{
	unsigned int idx;

	/*
	 * Be aware: we do not check that buffer can be full,
	 * assume user of the ring buffer can't submit more.
	 */
	idx = rbuf->write_idx & rbuf->low_mask;
	*(void **)((uintptr_t)rbuf->data_ptr + idx) = ptr;
	/* Barrier to be sure stored pointer will be seen properly */
	__sync_synchronize();
	rbuf->write_idx = (rbuf->write_idx + sizeof(ptr)) & rbuf->high_mask;
}
/* consumer side: read the pointer at read_idx and advance.  Safe only after
 * ring_buffer_used_size() reported an element (it supplies the barrier). */
static inline void *ring_buffer_dequeue(struct ring_buffer* rbuf)
{
	unsigned idx;
	void *ptr;

	/*
	 * Be aware: we do not check that buffer can be empty,
	 * assume user of the ring buffer called ring_buffer_used_size(),
	 * which returns actual used size and introduces memory barrier
	 * explicitly.
	 */
	idx = rbuf->read_idx & rbuf->low_mask;
	ptr = *(void **)((uintptr_t)rbuf->data_ptr + idx);
	rbuf->read_idx = (rbuf->read_idx + sizeof(ptr)) & rbuf->high_mask;

	return ptr;
}
| 2,420 | 22.504854 | 80 |
h
|
null |
ceph-main/src/test/fs/mds_types.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* Author: Greg Farnum <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "mds/mdstypes.h"
#include "mds/inode_backtrace.h"
// two identical inodes (default-constructed, then identical copies) must
// compare equal in both directions with no divergence
TEST(inode_t, compare_equal)
{
  inode_t foo{};
  inode_t bar{};
  int compare_r;
  bool divergent;

  compare_r = foo.compare(bar, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_FALSE(divergent);
  compare_r = bar.compare(foo, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_FALSE(divergent);

  // populate non-default fields and re-check equality after copying
  foo.ino = 1234;
  foo.ctime.set_from_double(10.0);
  foo.mode = 0777;
  foo.uid = 42;
  foo.gid = 43;
  foo.nlink = 3;
  foo.version = 3;

  bar = foo;
  compare_r = foo.compare(bar, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_FALSE(divergent);
  compare_r = bar.compare(foo, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_FALSE(divergent);
}
// same inode at two versions: the higher version compares as newer (+1/-1)
// without being flagged divergent
TEST(inode_t, compare_aged)
{
  inode_t foo{};
  inode_t bar{};
  foo.ino = 1234;
  foo.ctime.set_from_double(10.0);
  foo.mode = 0777;
  foo.uid = 42;
  foo.gid = 43;
  foo.nlink = 3;
  foo.version = 3;
  foo.rstat.version = 1;

  bar = foo;
  bar.version = 2;  // bar is older

  int compare_r;
  bool divergent;

  compare_r = foo.compare(bar, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_FALSE(divergent);
  compare_r = bar.compare(foo, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_FALSE(divergent);
}
// inode version says bar is older, but bar's rstat version is newer: the
// comparison must flag the pair as divergent in both directions
TEST(inode_t, compare_divergent)
{
  inode_t foo{};
  inode_t bar{};
  foo.ino = 1234;
  foo.ctime.set_from_double(10.0);
  foo.mode = 0777;
  foo.uid = 42;
  foo.gid = 43;
  foo.nlink = 3;
  foo.version = 3;
  foo.rstat.version = 1;

  bar = foo;
  bar.version = 2;       // older overall...
  bar.rstat.version = 2; // ...but newer rstat => divergent

  int compare_r;
  bool divergent;

  compare_r = foo.compare(bar, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_TRUE(divergent);
  compare_r = bar.compare(foo, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_TRUE(divergent);
}
// identical backtraces (same ancestors, pools) compare equal and equivalent
TEST(inode_backtrace_t, compare_equal)
{
  inode_backtrace_t foo;
  inode_backtrace_t bar;
  foo.ino = 1234;
  foo.pool = 12;
  foo.old_pools.push_back(10);
  foo.old_pools.push_back(5);

  // three-level ancestry: l3 -> l2 -> l1
  inode_backpointer_t foop;
  foop.dirino = 3;
  foop.dname = "l3";
  foop.version = 15;
  foo.ancestors.push_back(foop);
  foop.dirino = 2;
  foop.dname = "l2";
  foop.version = 10;
  foo.ancestors.push_back(foop);
  foop.dirino = 1;
  foop.dname = "l1";
  foop.version = 25;
  foo.ancestors.push_back(foop);

  bar = foo;

  int compare_r;
  bool equivalent;
  bool divergent;

  compare_r = foo.compare(bar, &equivalent, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_TRUE(equivalent);
  EXPECT_FALSE(divergent);
}
// foo has strictly newer ancestor versions than bar: compare() reports
// newer/older (+1/-1) and equivalence while the dentries still match;
// once bar's last ancestor names a different dentry, equivalence is lost
TEST(inode_backtrace_t, compare_newer)
{
  inode_backtrace_t foo;
  inode_backtrace_t bar;
  foo.ino = 1234;
  foo.pool = 12;
  foo.old_pools.push_back(10);
  foo.old_pools.push_back(5);

  bar.ino = 1234;
  bar.pool = 12;
  bar.old_pools.push_back(10);

  inode_backpointer_t foop;
  foop.dirino = 3;
  foop.dname = "l3";
  foop.version = 15;
  foo.ancestors.push_back(foop);
  foop.version = 14;            // bar one version behind at each level
  bar.ancestors.push_back(foop);
  foop.dirino = 2;
  foop.dname = "l2";
  foop.version = 10;
  foo.ancestors.push_back(foop);
  foop.version = 9;
  bar.ancestors.push_back(foop);
  foop.dirino = 1;
  foop.dname = "l1";
  foop.version = 25;
  foo.ancestors.push_back(foop);
  bar.ancestors.push_back(foop);

  int compare_r;
  bool equivalent;
  bool divergent;

  compare_r = foo.compare(bar, &equivalent, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_TRUE(equivalent);
  EXPECT_FALSE(divergent);

  compare_r = bar.compare(foo, &equivalent, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_TRUE(equivalent);
  EXPECT_FALSE(divergent);

  // point bar's oldest ancestor at a different dentry: still newer/older,
  // but no longer equivalent
  bar.ancestors.back().dirino = 75;
  bar.ancestors.back().dname = "l1-old";
  bar.ancestors.back().version = 70;

  compare_r = foo.compare(bar, &equivalent, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_FALSE(equivalent);
  EXPECT_FALSE(divergent);

  compare_r = bar.compare(foo, &equivalent, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_FALSE(equivalent);
  EXPECT_FALSE(divergent);
}
// mixed ancestor versions (bar newer at one level, older at another) must be
// reported as divergent in both comparison directions
TEST(inode_backtrace_t, compare_divergent)
{
  inode_backtrace_t foo;
  inode_backtrace_t bar;
  foo.ino = 1234;
  foo.pool = 12;
  foo.old_pools.push_back(10);
  foo.old_pools.push_back(5);

  bar.ino = 1234;
  bar.pool = 12;
  bar.old_pools.push_back(10);

  inode_backpointer_t foop;
  foop.dirino = 3;
  foop.dname = "l3";
  foop.version = 15;
  foo.ancestors.push_back(foop);
  foop.version = 17;            // bar newer here...
  bar.ancestors.push_back(foop);
  foop.dirino = 2;
  foop.dname = "l2";
  foop.version = 10;
  foo.ancestors.push_back(foop);
  foop.version = 9;             // ...but older here => divergent
  bar.ancestors.push_back(foop);
  foop.dirino = 1;
  foop.dname = "l1";
  foop.version = 25;
  foo.ancestors.push_back(foop);
  bar.ancestors.push_back(foop);

  int compare_r;
  bool equivalent;
  bool divergent;

  compare_r = foo.compare(bar, &equivalent, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_TRUE(divergent);

  compare_r = bar.compare(foo, &equivalent, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_TRUE(divergent);
}
| 5,344 | 20.126482 | 70 |
cc
|
null |
ceph-main/src/test/fs/test_ino_release_cb.cc
|
#include <string>
#include <unistd.h>
#include <include/fs_types.h>
#include <mds/mdstypes.h>
#include <include/cephfs/libcephfs.h>
#define MAX_CEPH_FILES 1000
#define DIRNAME "ino_release_cb"
using namespace std;
static std::atomic<bool> cb_done = false;
// ino-release callback registered with libcephfs: flipping the atomic flag
// records that the client was asked to drop an inode reference
static void cb(void *hdl, vinodeno_t vino)
{
  cb_done = true;
}
// Creates many files, remounts with an ino_release callback registered, then
// re-pins the inodes via ceph_ll_lookup_inode until the MDS pressures the
// client into releasing one (which fires cb()).  The test passes if the
// callback fired within the sleep window.
int main(int argc, char *argv[])
{
  inodeno_t inos[MAX_CEPH_FILES];
  struct ceph_mount_info *cmount = NULL;

  ceph_create(&cmount, "admin");
  ceph_conf_read_file(cmount, NULL);
  ceph_init(cmount);

  [[maybe_unused]] int ret = ceph_mount(cmount, NULL);
  assert(ret >= 0);

  ret = ceph_mkdir(cmount, DIRNAME, 0755);
  assert(ret >= 0);

  ret = ceph_chdir(cmount, DIRNAME);
  assert(ret >= 0);

  /* Create a bunch of files, get their inode numbers and close them */
  int i;
  for (i = 0; i < MAX_CEPH_FILES; ++i) {
    int fd;
    struct ceph_statx stx;

    string name = std::to_string(i);

    fd = ceph_open(cmount, name.c_str(), O_RDWR|O_CREAT, 0644);
    assert(fd >= 0);

    ret = ceph_fstatx(cmount, fd, &stx, CEPH_STATX_INO, 0);
    assert(ret >= 0);
    inos[i] = stx.stx_ino;

    ceph_close(cmount, fd);
  }

  /* Remount */
  ceph_unmount(cmount);
  ceph_release(cmount);
  ceph_create(&cmount, "admin");
  ceph_conf_read_file(cmount, NULL);
  ceph_init(cmount);

  // callbacks must be registered before mounting
  struct ceph_client_callback_args args = { 0 };
  args.ino_release_cb = cb;
  ret = ceph_ll_register_callbacks2(cmount, &args);
  assert(ret == 0);

  ret = ceph_mount(cmount, NULL);
  assert(ret >= 0);

  Inode *inodes[MAX_CEPH_FILES];

  for (i = 0; i < MAX_CEPH_FILES; ++i) {
    /* We can stop if we got a callback */
    if (cb_done)
      break;

    ret = ceph_ll_lookup_inode(cmount, inos[i], &inodes[i]);
    assert(ret >= 0);
  }

  // give the MDS time to recall caps and trigger the callback
  sleep(45);

  assert(cb_done);
  ceph_unmount(cmount);
  ceph_release(cmount);
  return 0;
}
| 1,785 | 20.011765 | 70 |
cc
|
null |
ceph-main/src/test/fs/test_trim_caps.cc
|
#define _FILE_OFFSET_BITS 64
#if defined(__linux__)
#include <features.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <include/cephfs/libcephfs.h>
// Reproducer for a cap-trimming crash: parent and child hold separate mounts;
// the child renames directories so the parent's cached "1" inode loses its
// parent dentry, then the parent touches the root to reorder the session cap
// list and waits for the (buggy) MDS/client interaction to crash.
int main(int argc, char *argv[])
{
  char buf;
  int pipefd[2];
  int rc [[maybe_unused]] = pipe(pipefd);
  assert(rc >= 0);

  pid_t pid = fork();
  assert(pid >= 0);
  if (pid == 0)
    close(pipefd[1]);
  else
    close(pipefd[0]);

  struct ceph_mount_info *cmount = NULL;
  ceph_create(&cmount, "admin");
  ceph_conf_read_file(cmount, NULL);

  int ret [[maybe_unused]] = ceph_mount(cmount, NULL);
  assert(ret >= 0);

  if (pid == 0) {
    // child: wait for the parent's signal, then shuffle the directories
    ret = read(pipefd[0], &buf, 1);
    assert(ret == 1);

    ret = ceph_rename(cmount, "1", "3");
    assert(ret >= 0);

    ret = ceph_rename(cmount, "2", "1");
    assert(ret >= 0);

    ceph_unmount(cmount);
    printf("child exits\n");
  } else {
    ret = ceph_mkdirs(cmount, "1/2", 0755);
    assert(ret >= 0);

    struct ceph_statx stx;
    ret = ceph_statx(cmount, "1", &stx, 0, 0);
    assert(ret >= 0);

    uint64_t orig_ino [[maybe_unused]] = stx.stx_ino;

    ret = ceph_mkdir(cmount, "2", 0755);
    assert(ret >= 0);

    // unblock the child and wait for its renames to finish
    ret = write(pipefd[1], &buf, 1);
    assert(ret == 1);

    int wstatus;
    ret = waitpid(pid, &wstatus, 0);
    assert(ret >= 0);
    assert(wstatus == 0);

    // make origin '1' no parent dentry
    ret = ceph_statx(cmount, "1", &stx, 0, 0);
    assert(ret >= 0);
    assert(orig_ino != stx.stx_ino);

    // move root inode's cap_item to tail of session->caps
    ret = ceph_statx(cmount, ".", &stx, 0, 0);
    assert(ret >= 0);

    printf("waiting for crash\n");
    sleep(60);
  }
  return 0;
}
| 1,739 | 19.232558 | 56 |
cc
|
null |
ceph-main/src/test/immutable_object_cache/MockCacheDaemon.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
#define IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
#include <iostream>
#include <unistd.h>
#include "gmock/gmock.h"
#include "include/Context.h"
#include "tools/immutable_object_cache/CacheClient.h"
namespace ceph {
namespace immutable_obj_cache {
// GMock stand-in mirroring the public surface of
// ceph::immutable_obj_cache::CacheClient used by the tests
class MockCacheClient {
 public:
  MockCacheClient(const std::string& file, CephContext* ceph_ctx) {}
  MOCK_METHOD0(run, void());
  MOCK_METHOD0(is_session_work, bool());
  MOCK_METHOD0(close, void());
  MOCK_METHOD0(stop, void());
  MOCK_METHOD0(connect, int());
  MOCK_METHOD1(connect, void(Context*));
  MOCK_METHOD6(lookup_object, void(std::string, uint64_t, uint64_t, uint64_t,
                                   std::string, CacheGenContextURef));
  MOCK_METHOD1(register_client, int(Context*));
};
// GMock stand-in mirroring the public surface of
// ceph::immutable_obj_cache::CacheServer used by the tests
class MockCacheServer {
 public:
  MockCacheServer(CephContext* cct, const std::string& file,
                  ProcessMsg processmsg) {
  }
  MOCK_METHOD0(run, int());
  MOCK_METHOD0(start_accept, int());
  MOCK_METHOD0(stop, int());
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif // IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
| 1,237 | 25.913043 | 77 |
h
|
null |
ceph-main/src/test/immutable_object_cache/test_DomainSocket.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <unistd.h>
#include "gtest/gtest.h"
#include "include/Context.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "test/immutable_object_cache/test_common.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "tools/immutable_object_cache/CacheServer.h"
using namespace ceph::immutable_obj_cache;
// Fixture that spins up a real CacheServer on a unix domain socket, connects
// a CacheClient to it, and answers requests from a fake LRU (m_hit_entry_set)
// so the client/server protocol can be exercised end-to-end.
class TestCommunication :public ::testing::Test {
public:
  CacheServer* m_cache_server;
  std::thread* srv_thd;
  CacheClient* m_cache_client;
  std::string m_local_path;           // domain-socket path shared by both ends
  // NOTE(review): m_mutex/m_cond are never initialized or used -- m_wait_event
  // provides the synchronization; confirm before removing.
  pthread_mutex_t m_mutex;
  pthread_cond_t m_cond;
  std::atomic<uint64_t> m_send_request_index;  // requests issued
  std::atomic<uint64_t> m_recv_ack_index;      // acks received
  WaitEvent m_wait_event;
  unordered_set<std::string> m_hit_entry_set;  // oids the fake cache "has"

  TestCommunication()
    : m_cache_server(nullptr), m_cache_client(nullptr),
      m_local_path("/tmp/ceph_test_domain_socket"),
      m_send_request_index(0), m_recv_ack_index(0)
    {}

  ~TestCommunication() {}

  static void SetUpTestCase() {}
  static void TearDownTestCase() {}

  void SetUp() override {
    std::remove(m_local_path.c_str());
    m_cache_server = new CacheServer(g_ceph_context, m_local_path,
      [this](CacheSession* sid, ObjectCacheRequest* req){
        handle_request(sid, req);
    });
    ASSERT_TRUE(m_cache_server != nullptr);
    srv_thd = new std::thread([this]() {m_cache_server->run();});

    m_cache_client = new CacheClient(m_local_path, g_ceph_context);
    ASSERT_TRUE(m_cache_client != nullptr);
    m_cache_client->run();

    // retry until the server thread has the socket listening
    while (true) {
      if (0 == m_cache_client->connect()) {
        break;
      }
    }

    auto ctx = new LambdaContext([](int reg) {
      ASSERT_TRUE(reg == 0);
    });
    m_cache_client->register_client(ctx);
    ASSERT_TRUE(m_cache_client->is_session_work());
  }

  void TearDown() override {

    delete m_cache_client;
    m_cache_server->stop();
    if (srv_thd->joinable()) {
      srv_thd->join();
    }
    delete m_cache_server;
    std::remove(m_local_path.c_str());
    delete srv_thd;
  }

  // server-side dispatcher: replies from the fake LRU
  void handle_request(CacheSession* session_id, ObjectCacheRequest* req) {

    switch (req->get_request_type()) {
      case RBDSC_REGISTER: {
        ObjectCacheRequest* reply = new ObjectCacheRegReplyData(RBDSC_REGISTER_REPLY, req->seq);
        session_id->send(reply);
        break;
      }
      case RBDSC_READ: {
        ObjectCacheReadData* read_req = (ObjectCacheReadData*)req;
        ObjectCacheRequest* reply = nullptr;
        if (m_hit_entry_set.find(read_req->oid) == m_hit_entry_set.end()) {
          reply = new ObjectCacheReadRadosData(RBDSC_READ_RADOS, req->seq);
        } else {
          reply = new ObjectCacheReadReplyData(RBDSC_READ_REPLY, req->seq, "/fakepath");
        }
        session_id->send(reply);
        break;
      }
    }
  }

  // times: message number
  // queue_depth : imitate message queue depth
  // thinking : imitate handing message time
  void startup_pingpong_testing(uint64_t times, uint64_t queue_depth, int thinking) {
    m_send_request_index.store(0);
    m_recv_ack_index.store(0);
    for (uint64_t index = 0; index < times; index++) {
      auto ctx = make_gen_lambda_context<ObjectCacheRequest*, std::function<void(ObjectCacheRequest*)>>
        ([this, thinking, times](ObjectCacheRequest* ack){
          if (thinking != 0) {
            usleep(thinking); // handling message
          }
          m_recv_ack_index++;
          if (m_recv_ack_index == times) {
            m_wait_event.signal();
          }
      });

      // simple queue depth
      while (m_send_request_index - m_recv_ack_index > queue_depth) {
        usleep(1);
      }

      m_cache_client->lookup_object("pool_nspace", 1, 2, 3, "object_name", std::move(ctx));
      m_send_request_index++;
    }
    m_wait_event.wait();
  }

  // issue one lookup and report whether the fake cache answered with a hit
  bool startup_lookupobject_testing(std::string pool_nspace, std::string object_id) {
    bool hit;
    auto ctx = make_gen_lambda_context<ObjectCacheRequest*, std::function<void(ObjectCacheRequest*)>>
      ([this, &hit](ObjectCacheRequest* ack){
         hit = ack->type == RBDSC_READ_REPLY;
         m_wait_event.signal();
    });
    m_cache_client->lookup_object(pool_nspace, 1, 2, 3, object_id, std::move(ctx));
    m_wait_event.wait();
    return hit;
  }

  void set_hit_entry_in_fake_lru(std::string cache_file_name) {
    if (m_hit_entry_set.find(cache_file_name) == m_hit_entry_set.end()) {
      m_hit_entry_set.insert(cache_file_name);
    }
  }
};
// round-trip many requests at two queue depths; every request must be acked
TEST_F(TestCommunication, test_pingpong) {
  startup_pingpong_testing(64, 16, 0);
  ASSERT_TRUE(m_send_request_index == m_recv_ack_index);
  startup_pingpong_testing(200, 128, 0);
  ASSERT_TRUE(m_send_request_index == m_recv_ack_index);
}
// Populate the fake LRU with a random subset of oids, then verify each lookup
// hits exactly when the oid was inserted.
// Fix: the original seeded with srand(), which seeds rand() -- random() is
// seeded by srandom(), so the seed had no effect.
TEST_F(TestCommunication, test_lookup_object) {

  m_hit_entry_set.clear();

  srandom(time(0));
  uint64_t random_hit = random();

  for (uint64_t i = 50; i < 100; i++) {
    if ((random_hit % i) == 0) {
      set_hit_entry_in_fake_lru(std::to_string(i));
    }
  }
  for (uint64_t i = 50; i < 100; i++) {
    if ((random_hit % i) != 0) {
      ASSERT_FALSE(startup_lookupobject_testing("test_nspace", std::to_string(i)));
    } else {
      ASSERT_TRUE(startup_lookupobject_testing("test_nspace", std::to_string(i)));
    }
  }
}
| 5,323 | 28.910112 | 103 |
cc
|
null |
ceph-main/src/test/immutable_object_cache/test_SimplePolicy.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sstream>
#include <list>
#include <gtest/gtest.h>
#include "include/Context.h"
#include "tools/immutable_object_cache/SimplePolicy.h"
using namespace ceph::immutable_obj_cache;
// Build the synthetic cache-file name "object_cache_file_<index>".
// std::to_string replaces the heavier ostringstream round-trip with
// identical output for an unsigned integer.
std::string generate_file_name(uint64_t index) {
  return "object_cache_file_" + std::to_string(index);
}
// Fixture around SimplePolicy: keeps shadow copies of the promoted/promoting
// LRUs so every state transition can be cross-checked against the policy's
// own counters.
class TestSimplePolicy :public ::testing::Test {
public:
  SimplePolicy* m_simple_policy;
  const uint64_t m_cache_size;        // capacity in entries
  uint64_t m_entry_index;             // monotonically increasing name source
  std::vector<std::string> m_promoted_lru;   // shadow of promoted entries (LRU order)
  std::vector<std::string> m_promoting_lru;  // shadow of in-flight promotions

  TestSimplePolicy() : m_cache_size(100), m_entry_index(0) {}
  ~TestSimplePolicy() {}
  static void SetUpTestCase() {}
  static void TearDownTestCase() {}
  void SetUp() override {
    m_simple_policy = new SimplePolicy(g_ceph_context, m_cache_size, 128, 0.9);
    // populate 50 entries
    for (uint64_t i = 0; i < m_cache_size / 2; i++, m_entry_index++) {
      insert_entry_into_promoted_lru(generate_file_name(m_entry_index));
    }
  }
  void TearDown() override {
    // drain: eviction order must match the shadow LRU front-to-back
    while(m_promoted_lru.size()) {
      ASSERT_TRUE(m_simple_policy->get_evict_entry() == m_promoted_lru.front());
      m_simple_policy->evict_entry(m_simple_policy->get_evict_entry());
      m_promoted_lru.erase(m_promoted_lru.begin());
    }
    delete m_simple_policy;
  }

  // drive one entry through NONE -> SKIP (promoting) -> PROMOTED, checking
  // the policy's counters against the shadow LRUs at every step
  void insert_entry_into_promoted_lru(std::string cache_file_name) {
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size(), m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());
    ASSERT_EQ(OBJ_CACHE_NONE, m_simple_policy->get_status(cache_file_name));

    m_simple_policy->lookup_object(cache_file_name);
    ASSERT_EQ(OBJ_CACHE_SKIP, m_simple_policy->get_status(cache_file_name));
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size() + 1, m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());

    m_simple_policy->update_status(cache_file_name, OBJ_CACHE_PROMOTED, 1);
    m_promoted_lru.push_back(cache_file_name);
    ASSERT_EQ(OBJ_CACHE_PROMOTED, m_simple_policy->get_status(cache_file_name));
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size(), m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());
  }

  // drive one entry into the promoting state only (NONE -> SKIP) and verify
  void insert_entry_into_promoting_lru(std::string cache_file_name) {
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size(), m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());
    ASSERT_EQ(OBJ_CACHE_NONE, m_simple_policy->get_status(cache_file_name));

    m_simple_policy->lookup_object(cache_file_name);
    m_promoting_lru.push_back(cache_file_name);
    ASSERT_EQ(OBJ_CACHE_SKIP, m_simple_policy->get_status(cache_file_name));
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size(), m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());
  }
};
TEST_F(TestSimplePolicy, test_lookup_miss_and_no_free) {
  // Fill every remaining free slot so the cache is completely full.
  const uint64_t slots_left = m_cache_size - m_promoted_lru.size();
  for (uint64_t n = 0; n < slots_left; ++n, ++m_entry_index) {
    insert_entry_into_promoted_lru(generate_file_name(m_entry_index));
  }
  ASSERT_TRUE(0 == m_simple_policy->get_free_size());
  // With no free capacity a miss must be answered with OBJ_CACHE_SKIP.
  ASSERT_TRUE(m_simple_policy->lookup_object("no_this_cache_file_name") == OBJ_CACHE_SKIP);
}
TEST_F(TestSimplePolicy, test_lookup_miss_and_have_free) {
  const std::string missed("miss_but_have_free_space_file_name");
  // Half of the cache is still free after SetUp().
  ASSERT_TRUE(m_cache_size - m_promoted_lru.size() == m_simple_policy->get_free_size());
  // First lookup of an absent object with free capacity reports NONE ...
  ASSERT_TRUE(m_simple_policy->lookup_object(missed) == OBJ_CACHE_NONE);
  // ... and the entry is then tracked as in flight (SKIP).
  ASSERT_TRUE(m_simple_policy->get_status(missed) == OBJ_CACHE_SKIP);
}
TEST_F(TestSimplePolicy, test_lookup_hit_and_promoting) {
  ASSERT_TRUE(m_cache_size - m_promoted_lru.size() == m_simple_policy->get_free_size());
  // Interleave in-flight promotions with completed ones.
  insert_entry_into_promoting_lru("promoting_file_1");
  insert_entry_into_promoting_lru("promoting_file_2");
  insert_entry_into_promoted_lru(generate_file_name(++m_entry_index));
  insert_entry_into_promoted_lru(generate_file_name(++m_entry_index));
  insert_entry_into_promoting_lru("promoting_file_3");
  insert_entry_into_promoting_lru("promoting_file_4");
  ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 4);
  // Every in-flight entry reports SKIP via get_status() ...
  for (int idx = 1; idx <= 4; ++idx) {
    const std::string name = "promoting_file_" + std::to_string(idx);
    ASSERT_TRUE(m_simple_policy->get_status(name) == OBJ_CACHE_SKIP);
  }
  // ... and also on a repeated lookup_object() call.
  for (int idx = 1; idx <= 4; ++idx) {
    const std::string name = "promoting_file_" + std::to_string(idx);
    ASSERT_TRUE(m_simple_policy->lookup_object(name) == OBJ_CACHE_SKIP);
  }
}
TEST_F(TestSimplePolicy, test_lookup_hit_and_promoted) {
  // Every entry inserted so far must report the PROMOTED state.
  ASSERT_TRUE(m_promoted_lru.size() == m_simple_policy->get_promoted_entry_num());
  for (uint64_t idx = 0; idx < m_entry_index; ++idx) {
    ASSERT_TRUE(m_simple_policy->get_status(generate_file_name(idx)) == OBJ_CACHE_PROMOTED);
  }
}
TEST_F(TestSimplePolicy, test_update_state_from_promoting_to_none) {
  ASSERT_TRUE(m_cache_size - m_promoted_lru.size() == m_simple_policy->get_free_size());
  insert_entry_into_promoting_lru("promoting_to_none_file_1");
  insert_entry_into_promoting_lru("promoting_to_none_file_2");
  insert_entry_into_promoted_lru(generate_file_name(++m_entry_index));
  insert_entry_into_promoting_lru("promoting_to_none_file_3");
  insert_entry_into_promoting_lru("promoting_to_none_file_4");
  ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 4);
  auto nth_name = [](uint64_t n) {
    return "promoting_to_none_file_" + std::to_string(n);
  };
  // Before any updates all four in-flight entries report SKIP.
  for (uint64_t n = 1; n <= 4; ++n) {
    ASSERT_TRUE(m_simple_policy->get_status(nth_name(n)) == OBJ_CACHE_SKIP);
  }
  // Cancel the promotions one at a time; after each cancellation the
  // first <cancelled> entries report NONE and the rest still SKIP.
  for (uint64_t cancelled = 1; cancelled <= 4; ++cancelled) {
    m_simple_policy->update_status(nth_name(cancelled), OBJ_CACHE_NONE);
    ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 4 - cancelled);
    for (uint64_t n = 1; n <= 4; ++n) {
      ASSERT_TRUE(m_simple_policy->get_status(nth_name(n)) ==
                  (n <= cancelled ? OBJ_CACHE_NONE : OBJ_CACHE_SKIP));
    }
  }
}
TEST_F(TestSimplePolicy, test_update_state_from_promoted_to_none) {
  ASSERT_TRUE(m_promoted_lru.size() == m_simple_policy->get_promoted_entry_num());
  // Demote every promoted entry back to NONE and watch the promoted
  // count shrink by one at each step.
  for (uint64_t idx = 0; idx < m_entry_index; ++idx) {
    const std::string name = generate_file_name(idx);
    ASSERT_TRUE(m_simple_policy->get_status(name) == OBJ_CACHE_PROMOTED);
    m_simple_policy->update_status(name, OBJ_CACHE_NONE);
    ASSERT_TRUE(m_simple_policy->get_status(name) == OBJ_CACHE_NONE);
    ASSERT_TRUE(m_simple_policy->get_promoted_entry_num() == m_promoted_lru.size() - idx - 1);
  }
  // Nothing is left for TearDown() to evict.
  m_promoted_lru.clear();
}
TEST_F(TestSimplePolicy, test_update_state_from_promoting_to_promoted) {
  ASSERT_TRUE(m_cache_size - m_promoted_lru.size() == m_simple_policy->get_free_size());
  auto nth_name = [](uint64_t n) {
    return "promoting_to_promoted_file_" + std::to_string(n);
  };
  for (uint64_t n = 1; n <= 4; ++n) {
    insert_entry_into_promoting_lru(nth_name(n));
  }
  ASSERT_TRUE(4 == m_simple_policy->get_promoting_entry_num());
  // Complete the promotions one by one: the in-flight counter drops
  // and each completed entry reports PROMOTED.
  for (uint64_t done = 1; done <= 4; ++done) {
    m_simple_policy->update_status(nth_name(done), OBJ_CACHE_PROMOTED);
    ASSERT_TRUE(4 - done == m_simple_policy->get_promoting_entry_num());
    ASSERT_TRUE(m_simple_policy->get_status(nth_name(done)) == OBJ_CACHE_PROMOTED);
  }
  // Record the promotions so TearDown() can drain them in LRU order.
  for (uint64_t n = 1; n <= 4; ++n) {
    m_promoted_lru.push_back(nth_name(n));
  }
}
TEST_F(TestSimplePolicy, test_evict_list_0) {
  // Free space is still above the (default 0.9) watermark, so the
  // policy has nothing to evict.
  std::list<std::string> evicted;
  ASSERT_TRUE((float)m_simple_policy->get_free_size() > m_cache_size*0.1);
  m_simple_policy->get_evict_list(&evicted);
  ASSERT_TRUE(evicted.size() == 0);
}
TEST_F(TestSimplePolicy, test_evict_list_10) {
  // Exhaust the cache completely.
  const uint64_t slots_left = m_cache_size - m_promoted_lru.size();
  for (uint64_t n = 0; n < slots_left; ++n, ++m_entry_index) {
    insert_entry_into_promoted_lru(generate_file_name(m_entry_index));
  }
  ASSERT_TRUE(0 == m_simple_policy->get_free_size());
  std::list<std::string> evicted;
  m_simple_policy->get_evict_list(&evicted);
  // A full cache evicts the oldest 10% of entries.
  ASSERT_TRUE(m_cache_size*0.1 == evicted.size());
  ASSERT_TRUE(m_cache_size - m_cache_size*0.1 == m_simple_policy->get_promoted_entry_num());
  // The eviction order must match our shadow LRU, oldest first.
  for (const auto& victim : evicted) {
    ASSERT_TRUE(victim == m_promoted_lru.front());
    m_promoted_lru.erase(m_promoted_lru.begin());
  }
}
| 12,207 | 50.728814 | 102 |
cc
|
null |
ceph-main/src/test/immutable_object_cache/test_common.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CACHE_TEST_COMMON_H
#define CACHE_TEST_COMMON_H
#include <pthread.h>
// Minimal one-shot event built on a pthread mutex + condition
// variable: wait() blocks until signal() has been called, then
// consumes the signal so the event can be reused.
class WaitEvent {
 public:
  WaitEvent() : m_flag(false) {
    pthread_mutex_init(&m_mutex, NULL);
    pthread_cond_init(&m_cond_var, NULL);
  }

  ~WaitEvent() {
    pthread_mutex_destroy(&m_mutex);
    pthread_cond_destroy(&m_cond_var);
  }

  // Mark the event signaled and wake a waiter.
  void signal() {
    pthread_mutex_lock(&m_mutex);
    m_flag = true;
    pthread_cond_signal(&m_cond_var);
    pthread_mutex_unlock(&m_mutex);
  }

  // Block until a pending or future signal() arrives, consuming it.
  void wait() {
    pthread_mutex_lock(&m_mutex);
    // loop guards against spurious condition-variable wakeups
    while (!m_flag) {
      pthread_cond_wait(&m_cond_var, &m_mutex);
    }
    m_flag = false;
    pthread_mutex_unlock(&m_mutex);
  }

 private:
  pthread_mutex_t m_mutex;
  pthread_cond_t m_cond_var;
  bool m_flag;
};
#endif
| 846 | 19.166667 | 70 |
h
|
null |
ceph-main/src/test/immutable_object_cache/test_main.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "global/global_context.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
#include <iostream>
#include <string>
int main(int argc, char **argv)
{
  ::testing::InitGoogleTest(&argc, argv);

  // The immutable-object-cache tests need a live cluster connection.
  librados::Rados rados;
  const std::string err = connect_cluster_pp(rados);
  if (!err.empty()) {
    std::cerr << err << std::endl;
    return 1;
  }

  g_ceph_context = reinterpret_cast<CephContext*>(rados.cct());

  // Lockdep is best-effort; the suite still runs without it.
  if (rados.conf_set("lockdep", "true") < 0) {
    std::cerr << "warning: failed to enable lockdep" << std::endl;
  }
  return RUN_ALL_TESTS();
}
| 728 | 23.3 | 70 |
cc
|
null |
ceph-main/src/test/immutable_object_cache/test_message.cc
|
#include "gtest/gtest.h"
#include "tools/immutable_object_cache/Types.h"
#include "tools/immutable_object_cache/SocketCommon.h"
using namespace ceph::immutable_obj_cache;
TEST(test_for_message, test_1)
{
  const std::string pool_nspace("this is a pool namespace");
  const std::string oid_name("this is a oid name");
  const std::string cache_file_path("/temp/ceph_immutable_object_cache");

  const uint16_t type = RBDSC_READ;
  const uint64_t seq = 123456UL;
  const uint64_t read_offset = 222222UL;
  const uint64_t read_len = 333333UL;
  const uint64_t pool_id = 444444UL;
  const uint64_t snap_id = 555555UL;
  const uint64_t object_size = 666666UL;

  // Serialize: ObjectCacheRequest --> wire bufferlist.
  ObjectCacheRequest* req = new ObjectCacheReadData(type, seq, read_offset, read_len,
                                  pool_id, snap_id, object_size, oid_name, pool_nspace);
  req->encode();
  auto payload_bl = req->get_payload_bufferlist();
  uint32_t data_len = get_data_len(payload_bl.c_str());
  ASSERT_EQ(payload_bl.length(), data_len + get_header_size());
  ASSERT_TRUE(payload_bl.c_str() != nullptr);

  // Deserialize: bufferlist --> ObjectCacheRequest.
  ObjectCacheRequest* decoded = decode_object_cache_request(payload_bl);
  ASSERT_EQ(decoded->get_request_type(), RBDSC_READ);
  ASSERT_EQ(decoded->type, RBDSC_READ);
  ASSERT_EQ(decoded->seq, 123456UL);

  // Check every field of the concrete read payload survives the trip.
  auto* read_data = static_cast<ObjectCacheReadData*>(decoded);
  ASSERT_EQ(read_data->type, RBDSC_READ);
  ASSERT_EQ(read_data->seq, 123456UL);
  ASSERT_EQ(read_data->read_offset, 222222UL);
  ASSERT_EQ(read_data->read_len, 333333UL);
  ASSERT_EQ(read_data->pool_id, 444444UL);
  ASSERT_EQ(read_data->snap_id, 555555UL);
  ASSERT_EQ(read_data->oid, oid_name);
  ASSERT_EQ(read_data->pool_namespace, pool_nspace);
  ASSERT_EQ(read_data->object_size, 666666UL);

  delete req;
  delete decoded;
}
| 1,963 | 37.509804 | 90 |
cc
|
null |
ceph-main/src/test/immutable_object_cache/test_multi_session.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <unistd.h>
#include "gtest/gtest.h"
#include "include/Context.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "test/immutable_object_cache/test_common.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "tools/immutable_object_cache/CacheServer.h"
using namespace std;
using namespace ceph::immutable_obj_cache;
// Fixture that runs one CacheServer on a UNIX domain socket plus up
// to m_session_num CacheClients against it; request/ack counters are
// atomic so concurrent lookups can be validated.
class TestMultiSession : public ::testing::Test {
public:
  std::string m_local_path;                      // UNIX socket path
  CacheServer* m_cache_server;
  std::thread* m_cache_server_thread;
  std::vector<CacheClient*> m_cache_client_vec;  // one slot per potential session
  WaitEvent m_wait_event;
  std::atomic<uint64_t> m_send_request_index;    // lookup requests issued
  std::atomic<uint64_t> m_recv_ack_index;        // lookup acks received
  uint64_t m_session_num = 110;
  TestMultiSession() : m_local_path("/tmp/ceph_test_multisession_socket"),
                       m_cache_server_thread(nullptr), m_send_request_index(0),
                       m_recv_ack_index(0) {
    m_cache_client_vec.resize(m_session_num + 1, nullptr);
  }
  ~TestMultiSession() {}
  static void SetUpTestCase() {}
  static void TearDownTestCase() {}
  void SetUp() override {
    // remove any stale socket file left by a previous run
    std::remove(m_local_path.c_str());
    m_cache_server = new CacheServer(g_ceph_context, m_local_path,
      [this](CacheSession* session_id, ObjectCacheRequest* req){
        server_handle_request(session_id, req);
    });
    ASSERT_TRUE(m_cache_server != nullptr);
    // run the server on its own thread; it signals once started
    m_cache_server_thread = new std::thread(([this]() {
      m_wait_event.signal();
      m_cache_server->run();
    }));
    // waiting for thread running.
    m_wait_event.wait();
    // waiting for io_service run.
    usleep(2);
  }
  void TearDown() override {
    // close and free every client session that was created
    for (uint64_t i = 0; i < m_session_num; i++) {
      if (m_cache_client_vec[i] != nullptr) {
        m_cache_client_vec[i]->close();
        delete m_cache_client_vec[i];
      }
    }
    m_cache_server->stop();
    if (m_cache_server_thread->joinable()) {
      m_cache_server_thread->join();
    }
    delete m_cache_server;
    delete m_cache_server_thread;
    std::remove(m_local_path.c_str());
  }
  // Create a client and busy-retry connect() until the server accepts;
  // the client is stored in the slot at random_index.
  CacheClient* create_session(uint64_t random_index) {
    CacheClient* cache_client = new CacheClient(m_local_path, g_ceph_context);
    cache_client->run();
    while (true) {
      if (0 == cache_client->connect()) {
        break;
      }
    }
    m_cache_client_vec[random_index] = cache_client;
    return cache_client;
  }
  // Server-side dispatcher: answer register and read requests with the
  // matching reply type, echoing the request's sequence number.
  void server_handle_request(CacheSession* session_id, ObjectCacheRequest* req) {
    switch (req->get_request_type()) {
      case RBDSC_REGISTER: {
        ObjectCacheRequest* reply = new ObjectCacheRegReplyData(RBDSC_REGISTER_REPLY,
                                                                req->seq);
        session_id->send(reply);
        break;
      }
      case RBDSC_READ: {
        ObjectCacheRequest* reply = new ObjectCacheReadReplyData(RBDSC_READ_REPLY,
                                                                 req->seq);
        session_id->send(reply);
        break;
      }
    }
  }
  // Register a brand-new client in an empty slot and verify the
  // session becomes operational.
  void test_register_client(uint64_t random_index) {
    ASSERT_TRUE(m_cache_client_vec[random_index] == nullptr);
    auto ctx = new LambdaContext([](int ret){
       ASSERT_TRUE(ret == 0);
    });
    auto session = create_session(random_index);
    session->register_client(ctx);
    ASSERT_TRUE(m_cache_client_vec[random_index] != nullptr);
    ASSERT_TRUE(session->is_session_work());
  }
  // Issue request_num lookups on the session at `index`; when is_last
  // is set, spin until every outstanding ack arrived, then signal the
  // main thread's wait event.
  void test_lookup_object(std::string pool_nspace, uint64_t index,
                          uint64_t request_num, bool is_last) {
    for (uint64_t i = 0; i < request_num; i++) {
      auto ctx = make_gen_lambda_context<ObjectCacheRequest*,
                 std::function<void(ObjectCacheRequest*)>>([this](ObjectCacheRequest* ack) {
        m_recv_ack_index++;
      });
      m_send_request_index++;
      // here just for concurrently testing register + lookup, so fix object id.
      m_cache_client_vec[index]->lookup_object(pool_nspace, 1, 2, 3, "1234", std::move(ctx));
    }
    if (is_last) {
      while(m_send_request_index != m_recv_ack_index) {
        usleep(1);
      }
      m_wait_event.signal();
    }
  }
};
// test concurrent : multi-session + register_client + lookup_request
TEST_F(TestMultiSession, test_multi_session) {
  const uint64_t test_times = 1000;
  const uint64_t test_session_num = 100;

  for (uint64_t i = 0; i <= test_times; i++) {
    const uint64_t random_index = random() % test_session_num;
    // The first hit on a slot registers a client; subsequent hits
    // issue concurrent lookups through that session.
    if (m_cache_client_vec[random_index] == nullptr) {
      test_register_client(random_index);
    } else {
      test_lookup_object(string("test_nspace") + std::to_string(random_index),
                         random_index, 4, i == test_times);
    }
  }

  // make sure all ack will be received.
  m_wait_event.wait();
  ASSERT_TRUE(m_send_request_index == m_recv_ack_index);
}
| 4,948 | 29.361963 | 93 |
cc
|
null |
ceph-main/src/test/immutable_object_cache/test_object_store.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <filesystem>
#include <iostream>
#include <unistd.h>
#include "gtest/gtest.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "test/librados/test.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "test/librados/test_cxx.h"
#include "tools/immutable_object_cache/ObjectCacheStore.h"
namespace fs = std::filesystem;
using namespace ceph::immutable_obj_cache;
std::string test_cache_path("/tmp/test_ceph_immutable_shared_cache");
// Fixture wiring an ObjectCacheStore to a throw-away RADOS pool.
class TestObjectStore : public ::testing::Test {
public:
  ObjectCacheStore* m_object_cache_store;
  librados::Rados* m_test_rados;
  CephContext* m_ceph_context;
  librados::IoCtx m_local_io_ctx;
  std::string m_temp_pool_name;
  std::string m_temp_volume_name;

  TestObjectStore(): m_object_cache_store(nullptr), m_test_rados(nullptr), m_ceph_context(nullptr){}
  ~TestObjectStore(){}
  static void SetUpTestCase() {}
  static void TearDownTestCase() {}

  void SetUp() override {
    // Connect to the cluster and pin the cache settings the store reads.
    m_test_rados = new librados::Rados();
    ASSERT_EQ("", connect_cluster_pp(*m_test_rados));
    ASSERT_EQ(0, m_test_rados->conf_set("rbd_cache", "false"));
    ASSERT_EQ(0, m_test_rados->conf_set("immutable_object_cache_max_size", "1024"));
    ASSERT_EQ(0, m_test_rados->conf_set("immutable_object_cache_path", test_cache_path.c_str()));
  }

  // Create a scratch pool and the store under test. NOTE(review):
  // entry_num is currently unused by this helper.
  void create_object_cache_store(uint64_t entry_num) {
    m_temp_pool_name = get_temp_pool_name("test_pool_");
    ASSERT_EQ(0, m_test_rados->pool_create(m_temp_pool_name.c_str()));
    ASSERT_EQ(0, m_test_rados->ioctx_create(m_temp_pool_name.c_str(), m_local_io_ctx));
    m_temp_volume_name = "test_volume";
    m_ceph_context = reinterpret_cast<CephContext*>(m_test_rados->cct());
    m_object_cache_store = new ObjectCacheStore(m_ceph_context);
  }

  // Initialize the store. NOTE(review): only `reset` is consumed; the
  // pool/volume/size arguments are currently ignored.
  void init_object_cache_store(std::string pool_name, std::string vol_name,
                               uint64_t vol_size, bool reset) {
    ASSERT_EQ(0, m_object_cache_store->init(reset));
    ASSERT_EQ(0, m_object_cache_store->init_cache());
  }

  void shutdown_object_cache_store() {
    ASSERT_EQ(0, m_object_cache_store->shutdown());
  }

  // Look up obj_name through the store; result code is returned via `ret`.
  void lookup_object_cache_store(std::string pool_name, std::string vol_name,
                                 std::string obj_name, int& ret) {
    std::string cache_path;
    ret = m_object_cache_store->lookup_object(pool_name, 1, 2, 3,
                                              obj_name, true, cache_path);
  }

  void TearDown() override {
    // delete on a null pointer is a no-op, so no guards are needed
    delete m_test_rados;
    delete m_object_cache_store;
  }
};
TEST_F(TestObjectStore, test_1) {
  create_object_cache_store(1000);
  // Start from an empty cache directory so init() sees a clean slate.
  // (Removed the unused local copy of test_cache_path.)
  fs::remove_all(test_cache_path);
  init_object_cache_store(m_temp_pool_name, m_temp_volume_name, 1000, true);
  // TODO add lookup interface testing
  shutdown_object_cache_store();
}
| 3,068 | 29.69 | 100 |
cc
|
null |
ceph-main/src/test/journal/RadosTestFixture.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados/test_cxx.h"
#include "test/journal/RadosTestFixture.h"
#include "cls/journal/cls_journal_client.h"
#include "include/stringify.h"
#include "common/WorkQueue.h"
#include "journal/Settings.h"
using namespace std::chrono_literals;
// Initializes the timer lock and registers this fixture as the target
// of metadata update notifications (see Listener::handle_update).
RadosTestFixture::RadosTestFixture()
  : m_timer_lock(ceph::make_mutex("m_timer_lock")),
    m_listener(this) {
}
// One temporary pool and one single-thread worker pool are shared by
// every test in the suite.
void RadosTestFixture::SetUpTestCase() {
  _pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(_pool_name, _rados));

  auto cct = reinterpret_cast<CephContext*>(_rados.cct());
  _thread_pool = new ThreadPool(cct, "RadosTestFixture::_thread_pool",
                                "tp_test", 1);
  _thread_pool->start();
}
// Stops the shared thread pool and removes the temporary pool created
// in SetUpTestCase().
void RadosTestFixture::TearDownTestCase() {
  _thread_pool->stop();
  delete _thread_pool;
  ASSERT_EQ(0, destroy_one_pool_pp(_pool_name, _rados));
}
// Returns a process-unique object name ("oid<N>"); the monotonically
// increasing counter keeps names distinct across tests.
std::string RadosTestFixture::get_temp_oid() {
  return "oid" + stringify(++_oid_number);
}
// Per-test setup: open an ioctx on the shared pool and create a work
// queue and timer, both driven by the shared thread pool.
void RadosTestFixture::SetUp() {
  ASSERT_EQ(0, _rados.ioctx_create(_pool_name.c_str(), m_ioctx));

  auto cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
  m_work_queue = new ContextWQ("RadosTestFixture::m_work_queue",
                               ceph::make_timespan(60),
                               _thread_pool);

  m_timer = new SafeTimer(cct, m_timer_lock, true);
  m_timer->init();
}
void RadosTestFixture::TearDown() {
for (auto metadata : m_metadatas) {
C_SaferCond ctx;
metadata->shut_down(&ctx);
ASSERT_EQ(0, ctx.wait());
}
{
std::lock_guard locker{m_timer_lock};
m_timer->shutdown();
}
delete m_timer;
m_work_queue->drain();
delete m_work_queue;
}
// Creates a journal object via the cls_journal client API; the
// trailing -1 is the data pool id argument.
int RadosTestFixture::create(const std::string &oid, uint8_t order,
                             uint8_t splay_width) {
  return cls::journal::client::create(m_ioctx, oid, order, splay_width, -1);
}
// Builds a JournalMetadata wired to the fixture's work queue/timer and
// tracks it so TearDown() can shut it down.
ceph::ref_t<journal::JournalMetadata> RadosTestFixture::create_metadata(
    const std::string &oid, const std::string &client_id,
    double commit_interval, int max_concurrent_object_sets) {
  journal::Settings settings;
  settings.max_concurrent_object_sets = max_concurrent_object_sets;
  settings.commit_interval = commit_interval;

  auto metadata = ceph::make_ref<journal::JournalMetadata>(
    m_work_queue, m_timer, &m_timer_lock, m_ioctx, oid, client_id, settings);
  m_metadatas.push_back(metadata);
  return metadata;
}
// Appends bl to the object via a single write op; returns the librados
// result code.
int RadosTestFixture::append(const std::string &oid, const bufferlist &bl) {
  librados::ObjectWriteOperation op;
  op.append(bl);
  return m_ioctx.operate(oid, &op);
}
int RadosTestFixture::client_register(const std::string &oid,
const std::string &id,
const std::string &description) {
bufferlist data;
data.append(description);
return cls::journal::client::client_register(m_ioctx, oid, id, data);
}
// Records the given commit position for a registered journal client.
int RadosTestFixture::client_commit(const std::string &oid,
                                    const std::string &id,
                                    const cls::journal::ObjectSetPosition &commit_position) {
  librados::ObjectWriteOperation op;
  cls::journal::client::client_commit(&op, id, commit_position);
  return m_ioctx.operate(oid, &op);
}
// Wraps a string in a bufferlist for use as journal entry data.
bufferlist RadosTestFixture::create_payload(const std::string &payload) {
  bufferlist payload_bl;
  payload_bl.append(payload);
  return payload_bl;
}
// Synchronously initializes the journal metadata; returns init's result.
int RadosTestFixture::init_metadata(const ceph::ref_t<journal::JournalMetadata>& metadata) {
  C_SaferCond cond;
  metadata->init(&cond);
  return cond.wait();
}
// Waits (up to 10s) for the listener to record an update notification
// for the given metadata object; consumes one notification on success,
// returns false on timeout. The while loop guards against spurious
// condition-variable wakeups.
bool RadosTestFixture::wait_for_update(const ceph::ref_t<journal::JournalMetadata>& metadata) {
  std::unique_lock locker{m_listener.mutex};
  while (m_listener.updates[metadata.get()] == 0) {
    if (m_listener.cond.wait_for(locker, 10s) == std::cv_status::timeout) {
      return false;
    }
  }
  --m_listener.updates[metadata.get()];
  return true;
}
std::string RadosTestFixture::_pool_name;
librados::Rados RadosTestFixture::_rados;
uint64_t RadosTestFixture::_oid_number = 0;
ThreadPool *RadosTestFixture::_thread_pool = nullptr;
| 4,199 | 29.882353 | 95 |
cc
|
null |
ceph-main/src/test/journal/RadosTestFixture.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados/test.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "journal/JournalMetadata.h"
#include "cls/journal/cls_journal_types.h"
#include "gtest/gtest.h"
class ThreadPool;
// Shared gtest fixture for journal tests: one temporary pool and
// thread pool per suite, plus a per-test work queue and timer, and
// helpers for creating/committing journal objects and metadata.
class RadosTestFixture : public ::testing::Test {
public:
  static void SetUpTestCase();
  static void TearDownTestCase();

  // Returns a process-unique object name ("oid<N>").
  static std::string get_temp_oid();

  RadosTestFixture();
  void SetUp() override;
  void TearDown() override;

  int create(const std::string &oid, uint8_t order = 14,
             uint8_t splay_width = 2);
  // Parameter renamed from the previous "commit_internal" typo to
  // match the definition's "commit_interval".
  ceph::ref_t<journal::JournalMetadata> create_metadata(const std::string &oid,
                                       const std::string &client_id = "client",
                                       double commit_interval = 0.1,
                                       int max_concurrent_object_sets = 0);
  int append(const std::string &oid, const bufferlist &bl);

  int client_register(const std::string &oid, const std::string &id = "client",
                      const std::string &description = "");
  int client_commit(const std::string &oid, const std::string &id,
                    const cls::journal::ObjectSetPosition &commit_position);

  bufferlist create_payload(const std::string &payload);

  // Counts metadata update notifications per metadata object so tests
  // can block until an update arrives (see wait_for_update()).
  struct Listener : public journal::JournalMetadataListener {
    RadosTestFixture *test_fixture;
    ceph::mutex mutex = ceph::make_mutex("mutex");
    ceph::condition_variable cond;
    std::map<journal::JournalMetadata*, uint32_t> updates;

    Listener(RadosTestFixture *_test_fixture)
      : test_fixture(_test_fixture) {}

    void handle_update(journal::JournalMetadata *metadata) override {
      std::lock_guard locker{mutex};
      ++updates[metadata];
      cond.notify_all();
    }
  };

  int init_metadata(const ceph::ref_t<journal::JournalMetadata>& metadata);
  bool wait_for_update(const ceph::ref_t<journal::JournalMetadata>& metadata);

  static std::string _pool_name;
  static librados::Rados _rados;
  static uint64_t _oid_number;
  static ThreadPool *_thread_pool;

  librados::IoCtx m_ioctx;
  ContextWQ *m_work_queue = nullptr;

  ceph::mutex m_timer_lock;
  SafeTimer *m_timer = nullptr;

  Listener m_listener;

  std::list<ceph::ref_t<journal::JournalMetadata>> m_metadatas;
};
h
|
null |
ceph-main/src/test/journal/test_Entry.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Entry.h"
#include "gtest/gtest.h"
// Empty fixture: groups the journal::Entry unit tests.
class TestEntry : public ::testing::Test {
};
TEST_F(TestEntry, DefaultConstructor) {
  // A default-constructed entry has zeroed tids and empty data.
  journal::Entry entry;
  ASSERT_EQ(0U, entry.get_entry_tid());
  ASSERT_EQ(0U, entry.get_tag_tid());

  bufferlist empty_bl;
  bufferlist data(entry.get_data());
  ASSERT_TRUE(data.contents_equal(empty_bl));
}
TEST_F(TestEntry, Constructor) {
  bufferlist payload;
  payload.append("data");
  journal::Entry entry(234, 123, payload);

  // tids and stored data must echo the constructor arguments
  ASSERT_EQ(123U, entry.get_entry_tid());
  ASSERT_EQ(234U, entry.get_tag_tid());

  bufferlist expected;
  expected.append("data");
  bufferlist stored = entry.get_data();
  ASSERT_TRUE(stored.contents_equal(expected));
}
TEST_F(TestEntry, IsReadable) {
  bufferlist payload;
  payload.append("data");
  journal::Entry entry(234, 123, payload);

  bufferlist encoded;
  encode(entry, encoded);

  // Every strict prefix of the encoding is unreadable and reports a
  // non-zero number of additional bytes needed.
  uint32_t bytes_needed;
  for (size_t len = 0; len < encoded.length() - 1; ++len) {
    bufferlist prefix;
    if (len > 0) {
      prefix.substr_of(encoded, 0, len);
    }
    ASSERT_FALSE(journal::Entry::is_readable(prefix.begin(),
                                             &bytes_needed));
    ASSERT_GT(bytes_needed, 0U);
  }

  // The complete encoding is readable with nothing left to fetch.
  ASSERT_TRUE(journal::Entry::is_readable(encoded.begin(), &bytes_needed));
  ASSERT_EQ(0U, bytes_needed);
}
TEST_F(TestEntry, IsReadableBadPreamble) {
  bufferlist payload;
  payload.append("data");
  journal::Entry entry(234, 123, payload);

  // Prepend garbage so the preamble check fails at offset zero.
  uint64_t stray_bytes = 0x1122334455667788;
  bufferlist encoded;
  encode(stray_bytes, encoded);
  encode(entry, encoded);

  uint32_t bytes_needed;
  bufferlist::iterator it = encoded.begin();
  ASSERT_FALSE(journal::Entry::is_readable(it, &bytes_needed));
  ASSERT_EQ(0U, bytes_needed);

  // Skipping past the stray bytes lands on a valid entry again.
  it += sizeof(stray_bytes);
  ASSERT_TRUE(journal::Entry::is_readable(it, &bytes_needed));
  ASSERT_EQ(0U, bytes_needed);
}
TEST_F(TestEntry, IsReadableBadCRC) {
  bufferlist payload;
  payload.append("data");
  journal::Entry entry(234, 123, payload);

  bufferlist encoded;
  encode(entry, encoded);

  // Replace the trailing 4-byte CRC with a checksum over the wrong
  // bytes so the integrity check must fail.
  bufferlist corrupted;
  corrupted.substr_of(encoded, 0, encoded.length() - 4);
  encode(encoded.crc32c(1), corrupted);

  uint32_t bytes_needed;
  ASSERT_FALSE(journal::Entry::is_readable(corrupted.begin(), &bytes_needed));
  ASSERT_EQ(0U, bytes_needed);
}
| 2,392 | 23.670103 | 75 |
cc
|
null |
ceph-main/src/test/journal/test_FutureImpl.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/FutureImpl.h"
#include "common/Cond.h"
#include "gtest/gtest.h"
#include "test/journal/RadosTestFixture.h"
// Fixture for FutureImpl tests: provides a flush handler that counts
// flush callbacks and a helper for building (optionally chained)
// futures.
class TestFutureImpl : public RadosTestFixture {
public:
  // Counts flush requests delivered by FutureImpl; held via
  // shared_ptr so tests can also inspect the handler's use count.
  struct FlushHandler : public journal::FutureImpl::FlushHandler {
    uint64_t flushes = 0;
    void flush(const ceph::ref_t<journal::FutureImpl>& future) override {
      ++flushes;
    }
    FlushHandler() = default;
  };

  TestFutureImpl() {
    m_flush_handler = std::make_shared<FlushHandler>();
  }

  // Creates a future (chained to prev when supplied) and initializes it.
  auto create_future(uint64_t tag_tid, uint64_t entry_tid,
                     uint64_t commit_tid,
                     ceph::ref_t<journal::FutureImpl> prev = nullptr) {
    auto future = ceph::make_ref<journal::FutureImpl>(tag_tid, entry_tid, commit_tid);
    future->init(prev);
    return future;
  }

  // NOTE(review): intentionally empty -- appears to be an unused stub.
  void flush(const ceph::ref_t<journal::FutureImpl>& future) {
  }

  std::shared_ptr<FlushHandler> m_flush_handler;
};
TEST_F(TestFutureImpl, Getters) {
  const std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto journal_metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(journal_metadata));

  // The accessors must echo the constructor arguments.
  auto future = create_future(234, 123, 456);
  ASSERT_EQ(234U, future->get_tag_tid());
  ASSERT_EQ(123U, future->get_entry_tid());
  ASSERT_EQ(456U, future->get_commit_tid());
}
TEST_F(TestFutureImpl, Attach) {
  const std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto journal_metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(journal_metadata));

  auto future = create_future(234, 123, 456);
  // No flush pending, so attach() reports false; the future now holds
  // the only extra reference to the handler.
  ASSERT_FALSE(future->attach(m_flush_handler));
  ASSERT_EQ(2U, m_flush_handler.use_count());
}
TEST_F(TestFutureImpl, AttachWithPendingFlush) {
  const std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto journal_metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(journal_metadata));

  auto future = create_future(234, 123, 456);
  // A flush requested before any handler is attached is remembered:
  // the subsequent attach() reports the pending flush.
  future->flush(NULL);

  ASSERT_TRUE(future->attach(m_flush_handler));
  ASSERT_EQ(2U, m_flush_handler.use_count());
}
TEST_F(TestFutureImpl, Detach) {
  const std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto journal_metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(journal_metadata));

  auto future = create_future(234, 123, 456);
  ASSERT_FALSE(future->attach(m_flush_handler));

  // An explicit detach releases the future's handler reference.
  future->detach();
  ASSERT_EQ(1U, m_flush_handler.use_count());
}
TEST_F(TestFutureImpl, DetachImplicit) {
  const std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto journal_metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(journal_metadata));

  auto future = create_future(234, 123, 456);
  ASSERT_FALSE(future->attach(m_flush_handler));

  // Destroying the future must also release the handler reference.
  future.reset();
  ASSERT_EQ(1U, m_flush_handler.use_count());
}
// A flush request invokes the attached handler exactly once; completing
// the future via safe() propagates the result to the flush context.
TEST_F(TestFutureImpl, Flush) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future = create_future(234, 123, 456);
  ASSERT_FALSE(future->attach(m_flush_handler));
  C_SaferCond cond;
  future->flush(&cond);
  ASSERT_EQ(1U, m_flush_handler->flushes);
  future->safe(-EIO);
  ASSERT_EQ(-EIO, cond.wait());
}
// flush(NULL) still triggers the handler; the result is then observable
// directly on the future once safe() fires.
TEST_F(TestFutureImpl, FlushWithoutContext) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future = create_future(234, 123, 456);
  ASSERT_FALSE(future->attach(m_flush_handler));
  future->flush(NULL);
  ASSERT_EQ(1U, m_flush_handler->flushes);
  future->safe(-EIO);
  ASSERT_TRUE(future->is_complete());
  ASSERT_EQ(-EIO, future->get_return_value());
}
// Flushing the tail of a chained set of futures drives a flush through
// both attached handlers.  The tail only reports complete once every
// predecessor is complete, and a predecessor's error (-EIO on future2)
// becomes the tail's return value even though the tail itself succeeded.
TEST_F(TestFutureImpl, FlushChain) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future1 = create_future(234, 123, 456);
  auto future2 = create_future(234, 124, 457, future1);
  auto future3 = create_future(235, 1, 458, future2);
  auto flush_handler = std::make_shared<FlushHandler>();
  ASSERT_FALSE(future1->attach(m_flush_handler));
  ASSERT_FALSE(future2->attach(flush_handler));
  ASSERT_FALSE(future3->attach(m_flush_handler));
  C_SaferCond cond;
  future3->flush(&cond);
  ASSERT_EQ(1U, m_flush_handler->flushes);
  ASSERT_EQ(1U, flush_handler->flushes);
  future3->safe(0);
  ASSERT_FALSE(future3->is_complete());  // predecessors still outstanding
  future1->safe(0);
  ASSERT_FALSE(future3->is_complete());  // future2 still outstanding
  future2->safe(-EIO);
  ASSERT_TRUE(future3->is_complete());
  ASSERT_EQ(-EIO, future3->get_return_value());
  ASSERT_EQ(-EIO, cond.wait());
  ASSERT_EQ(0, future1->get_return_value());
}
// A flush request is a no-op (handler not invoked) while a flush is
// already marked in progress on the future.
TEST_F(TestFutureImpl, FlushInProgress) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future1 = create_future(234, 123, 456);
  auto future2 = create_future(234, 124, 457, future1);
  ASSERT_FALSE(future1->attach(m_flush_handler));
  ASSERT_FALSE(future2->attach(m_flush_handler));
  future1->set_flush_in_progress();
  ASSERT_TRUE(future1->is_flush_in_progress());
  future1->flush(NULL);
  ASSERT_EQ(0U, m_flush_handler->flushes);
  future1->safe(0);
}
// Flushing an already-completed future fires the context immediately with
// the stored result.
TEST_F(TestFutureImpl, FlushAlreadyComplete) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future = create_future(234, 123, 456);
  future->safe(-EIO);
  C_SaferCond cond;
  future->flush(&cond);
  ASSERT_EQ(-EIO, cond.wait());
}
// wait() contexts are fired with the completion result of safe().
TEST_F(TestFutureImpl, Wait) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future = create_future(234, 1, 456);
  C_SaferCond cond;
  future->wait(&cond);
  future->safe(-EEXIST);
  ASSERT_EQ(-EEXIST, cond.wait());
}
// wait() on an already-completed future fires immediately with the
// stored result.
TEST_F(TestFutureImpl, WaitAlreadyComplete) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future = create_future(234, 1, 456);
  future->safe(-EEXIST);
  C_SaferCond cond;
  future->wait(&cond);
  ASSERT_EQ(-EEXIST, cond.wait());
}
// When a predecessor fails before the successor completes, the successor
// reports the predecessor's error (-EIO), not its own (-EEXIST).
TEST_F(TestFutureImpl, SafePreservesError) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future1 = create_future(234, 123, 456);
  auto future2 = create_future(234, 124, 457, future1);
  future1->safe(-EIO);
  future2->safe(-EEXIST);
  ASSERT_TRUE(future2->is_complete());
  ASSERT_EQ(-EIO, future2->get_return_value());
}
// An error recorded on the successor before its predecessor completes is
// preserved: future2 keeps -EEXIST even though future1 later fails -EIO.
TEST_F(TestFutureImpl, ConsistentPreservesError) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  auto future1 = create_future(234, 123, 456);
  auto future2 = create_future(234, 124, 457, future1);
  future2->safe(-EEXIST);
  future1->safe(-EIO);
  ASSERT_TRUE(future2->is_complete());
  ASSERT_EQ(-EEXIST, future2->get_return_value());
}
| 7,631 | 27.371747 | 86 |
cc
|
null |
ceph-main/src/test/journal/test_JournalMetadata.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalMetadata.h"
#include "test/journal/RadosTestFixture.h"
#include "common/Cond.h"
#include <map>
class TestJournalMetadata : public RadosTestFixture {
public:
void TearDown() override {
for (MetadataList::iterator it = m_metadata_list.begin();
it != m_metadata_list.end(); ++it) {
(*it)->remove_listener(&m_listener);
}
m_metadata_list.clear();
RadosTestFixture::TearDown();
}
auto create_metadata(const std::string &oid,
const std::string &client_id,
double commit_interval = 0.1,
int max_concurrent_object_sets = 0) {
auto metadata = RadosTestFixture::create_metadata(
oid, client_id, commit_interval, max_concurrent_object_sets);
m_metadata_list.push_back(metadata);
metadata->add_listener(&m_listener);
return metadata;
}
typedef std::list<ceph::ref_t<journal::JournalMetadata>> MetadataList;
MetadataList m_metadata_list;
};
// Initializing metadata for a journal that was never created fails ENOENT.
TEST_F(TestJournalMetadata, JournalDNE) {
  std::string oid = get_temp_oid();
  auto metadata1 = create_metadata(oid, "client1");
  ASSERT_EQ(-ENOENT, init_metadata(metadata1));
}
// Initializing metadata with an unregistered client id fails ENOENT.
TEST_F(TestJournalMetadata, ClientDNE) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 14, 2));
  ASSERT_EQ(0, client_register(oid, "client1", ""));
  auto metadata1 = create_metadata(oid, "client1");
  ASSERT_EQ(0, init_metadata(metadata1));
  auto metadata2 = create_metadata(oid, "client2");   // never registered
  ASSERT_EQ(-ENOENT, init_metadata(metadata2));
}
// Commit tids must be acknowledged in allocation order before the commit
// position advances; a newer in-flight commit supersedes an older pending
// one (old context fires -ESTALE).
TEST_F(TestJournalMetadata, Committed) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 14, 2));
  ASSERT_EQ(0, client_register(oid, "client1", ""));
  // 600s commit interval so commits only flush when explicitly requested
  auto metadata1 = create_metadata(oid, "client1", 600);
  ASSERT_EQ(0, init_metadata(metadata1));
  auto metadata2 = create_metadata(oid, "client1");
  ASSERT_EQ(0, init_metadata(metadata2));
  ASSERT_TRUE(wait_for_update(metadata2));
  journal::JournalMetadata::ObjectSetPosition expect_commit_position;
  journal::JournalMetadata::ObjectSetPosition read_commit_position;
  metadata1->get_commit_position(&read_commit_position);
  ASSERT_EQ(expect_commit_position, read_commit_position);
  uint64_t commit_tid1 = metadata1->allocate_commit_tid(0, 0, 0);
  uint64_t commit_tid2 = metadata1->allocate_commit_tid(0, 1, 0);
  uint64_t commit_tid3 = metadata1->allocate_commit_tid(1, 0, 1);
  uint64_t commit_tid4 = metadata1->allocate_commit_tid(0, 0, 2);
  // cannot commit until tid1 + 2 committed
  metadata1->committed(commit_tid2, []() { return nullptr; });
  metadata1->committed(commit_tid3, []() { return nullptr; });
  C_SaferCond cond1;
  metadata1->committed(commit_tid1, [&cond1]() { return &cond1; });
  // given our 10 minute commit interval, this should override the
  // in-flight commit
  C_SaferCond cond2;
  metadata1->committed(commit_tid4, [&cond2]() { return &cond2; });
  ASSERT_EQ(-ESTALE, cond1.wait());
  metadata1->flush_commit_position();
  ASSERT_EQ(0, cond2.wait());
  ASSERT_TRUE(wait_for_update(metadata2));
  metadata2->get_commit_position(&read_commit_position);
  expect_commit_position = {{{0, 0, 2}, {1, 0, 1}}};
  ASSERT_EQ(expect_commit_position, read_commit_position);
}
// set_active_set() persists and is visible after the metadata update.
TEST_F(TestJournalMetadata, UpdateActiveObject) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 14, 2));
  ASSERT_EQ(0, client_register(oid, "client1", ""));
  auto metadata1 = create_metadata(oid, "client1");
  ASSERT_EQ(0, init_metadata(metadata1));
  ASSERT_TRUE(wait_for_update(metadata1));
  ASSERT_EQ(0U, metadata1->get_active_set());
  ASSERT_EQ(0, metadata1->set_active_set(123));
  ASSERT_TRUE(wait_for_update(metadata1));
  ASSERT_EQ(123U, metadata1->get_active_set());
}
// A registered client is force-disconnected once the active object set
// exceeds max_concurrent_object_sets beyond its commit position.
TEST_F(TestJournalMetadata, DisconnectLaggyClient) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid, "client1", ""));
  ASSERT_EQ(0, client_register(oid, "client2", "laggy"));
  int max_concurrent_object_sets = 100;
  auto metadata =
    create_metadata(oid, "client1", 0.1, max_concurrent_object_sets);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_TRUE(wait_for_update(metadata));
  ASSERT_EQ(0U, metadata->get_active_set());
  journal::JournalMetadata::RegisteredClients clients;

// helper: assert the states of the two registered clients
#define ASSERT_CLIENT_STATES(s1, s2) \
  ASSERT_EQ(2U, clients.size()); \
  for (auto &c : clients) { \
    if (c.id == "client1") { \
      ASSERT_EQ(c.state, s1); \
    } else if (c.id == "client2") { \
      ASSERT_EQ(c.state, s2); \
    } else { \
      ASSERT_TRUE(false); \
    } \
  }

  metadata->get_registered_clients(&clients);
  ASSERT_CLIENT_STATES(cls::journal::CLIENT_STATE_CONNECTED,
                       cls::journal::CLIENT_STATE_CONNECTED);

  // client2 is connected when active set <= max_concurrent_object_sets
  ASSERT_EQ(0, metadata->set_active_set(max_concurrent_object_sets));
  ASSERT_TRUE(wait_for_update(metadata));
  uint64_t commit_tid = metadata->allocate_commit_tid(0, 0, 0);
  C_SaferCond cond1;
  metadata->committed(commit_tid, [&cond1]() { return &cond1; });
  ASSERT_EQ(0, cond1.wait());
  metadata->flush_commit_position();
  ASSERT_TRUE(wait_for_update(metadata));
  ASSERT_EQ(100U, metadata->get_active_set());
  clients.clear();
  metadata->get_registered_clients(&clients);
  ASSERT_CLIENT_STATES(cls::journal::CLIENT_STATE_CONNECTED,
                       cls::journal::CLIENT_STATE_CONNECTED);

  // client2 is disconnected when active set > max_concurrent_object_sets
  ASSERT_EQ(0, metadata->set_active_set(max_concurrent_object_sets + 1));
  ASSERT_TRUE(wait_for_update(metadata));
  commit_tid = metadata->allocate_commit_tid(0, 0, 1);
  C_SaferCond cond2;
  metadata->committed(commit_tid, [&cond2]() { return &cond2; });
  ASSERT_EQ(0, cond2.wait());
  metadata->flush_commit_position();
  ASSERT_TRUE(wait_for_update(metadata));
  ASSERT_EQ(101U, metadata->get_active_set());
  clients.clear();
  metadata->get_registered_clients(&clients);
  ASSERT_CLIENT_STATES(cls::journal::CLIENT_STATE_CONNECTED,
                       cls::journal::CLIENT_STATE_DISCONNECTED);
}
// assert_active_tag() succeeds for the newest tag and fails -ESTALE once a
// newer tag of the same class has been allocated.
TEST_F(TestJournalMetadata, AssertActiveTag) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid, "client1", ""));
  auto metadata = create_metadata(oid, "client1");
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_TRUE(wait_for_update(metadata));
  C_SaferCond ctx1;
  cls::journal::Tag tag1;
  metadata->allocate_tag(cls::journal::Tag::TAG_CLASS_NEW, {}, &tag1, &ctx1);
  ASSERT_EQ(0, ctx1.wait());
  C_SaferCond ctx2;
  metadata->assert_active_tag(tag1.tid, &ctx2);
  ASSERT_EQ(0, ctx2.wait());
  C_SaferCond ctx3;
  cls::journal::Tag tag2;
  metadata->allocate_tag(tag1.tag_class, {}, &tag2, &ctx3);  // supersedes tag1
  ASSERT_EQ(0, ctx3.wait());
  C_SaferCond ctx4;
  metadata->assert_active_tag(tag1.tid, &ctx4);
  ASSERT_EQ(-ESTALE, ctx4.wait());
}
| 6,965 | 32.014218 | 77 |
cc
|
null |
ceph-main/src/test/journal/test_JournalPlayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalPlayer.h"
#include "journal/Entry.h"
#include "journal/JournalMetadata.h"
#include "journal/ReplayHandler.h"
#include "include/stringify.h"
#include "common/ceph_mutex.h"
#include "gtest/gtest.h"
#include "test/journal/RadosTestFixture.h"
#include <list>
#include <boost/scope_exit.hpp>
using namespace std::chrono_literals;

// entries popped from the player during replay, in pop order
typedef std::list<journal::Entry> Entries;
template <typename T>
class TestJournalPlayer : public RadosTestFixture {
public:
typedef std::list<journal::JournalPlayer *> JournalPlayers;
static const uint64_t max_fetch_bytes = T::max_fetch_bytes;
struct ReplayHandler : public journal::ReplayHandler {
ceph::mutex lock = ceph::make_mutex("lock");
ceph::condition_variable cond;
bool entries_available;
bool complete;
int complete_result;
ReplayHandler()
: entries_available(false), complete(false),
complete_result(0) {}
void handle_entries_available() override {
std::lock_guard locker{lock};
entries_available = true;
cond.notify_all();
}
void handle_complete(int r) override {
std::lock_guard locker{lock};
complete = true;
complete_result = r;
cond.notify_all();
}
};
void TearDown() override {
for (JournalPlayers::iterator it = m_players.begin();
it != m_players.end(); ++it) {
delete *it;
}
RadosTestFixture::TearDown();
}
auto create_metadata(const std::string &oid) {
return RadosTestFixture::create_metadata(oid, "client", 0.1,
max_fetch_bytes);
}
int client_commit(const std::string &oid,
journal::JournalPlayer::ObjectSetPosition position) {
return RadosTestFixture::client_commit(oid, "client", position);
}
journal::Entry create_entry(uint64_t tag_tid, uint64_t entry_tid) {
std::string payload(128, '0');
bufferlist payload_bl;
payload_bl.append(payload);
return journal::Entry(tag_tid, entry_tid, payload_bl);
}
journal::JournalPlayer *create_player(const std::string &oid,
const ceph::ref_t<journal::JournalMetadata>& metadata) {
journal::JournalPlayer *player(new journal::JournalPlayer(
m_ioctx, oid + ".", metadata, &m_replay_hander, nullptr));
m_players.push_back(player);
return player;
}
bool wait_for_entries(journal::JournalPlayer *player, uint32_t count,
Entries *entries) {
entries->clear();
while (entries->size() < count) {
journal::Entry entry;
uint64_t commit_tid;
while (entries->size() < count &&
player->try_pop_front(&entry, &commit_tid)) {
entries->push_back(entry);
}
if (entries->size() == count) {
break;
}
std::unique_lock locker{m_replay_hander.lock};
if (m_replay_hander.entries_available) {
m_replay_hander.entries_available = false;
} else if (m_replay_hander.cond.wait_for(locker, 10s) ==
std::cv_status::timeout) {
break;
}
}
return entries->size() == count;
}
bool wait_for_complete(journal::JournalPlayer *player) {
std::unique_lock locker{m_replay_hander.lock};
while (!m_replay_hander.complete) {
journal::Entry entry;
uint64_t commit_tid;
player->try_pop_front(&entry, &commit_tid);
if (m_replay_hander.cond.wait_for(locker, 10s) ==
std::cv_status::timeout) {
return false;
}
}
m_replay_hander.complete = false;
return true;
}
int write_entry(const std::string &oid, uint64_t object_num,
uint64_t tag_tid, uint64_t entry_tid) {
bufferlist bl;
encode(create_entry(tag_tid, entry_tid), bl);
return append(oid + "." + stringify(object_num), bl);
}
JournalPlayers m_players;
ReplayHandler m_replay_hander;
};
// Compile-time carrier for the max_fetch_bytes value consumed by the
// TestJournalPlayer fixture's type parameter.
template <uint64_t _max_fetch_bytes>
class TestJournalPlayerParams {
public:
  static const uint64_t max_fetch_bytes = _max_fetch_bytes;
};
// Run every TYPED_TEST twice: unlimited fetches (0) and tiny 16-byte fetches.
typedef ::testing::Types<TestJournalPlayerParams<0>,
                         TestJournalPlayerParams<16> > TestJournalPlayerTypes;
TYPED_TEST_SUITE(TestJournalPlayer, TestJournalPlayerTypes);
// Replay starts after the committed position (object 0, tag 234, entry 122)
// and yields the remaining interleaved entries in tid order.
TYPED_TEST(TestJournalPlayer, Prefetch) {
  std::string oid = this->get_temp_oid();
  journal::JournalPlayer::ObjectPositions positions;
  positions = {
    cls::journal::ObjectPosition(0, 234, 122) };
  cls::journal::ObjectSetPosition commit_position(positions);
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  // always shut the player down cleanly before leaving the test
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 125));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));
  Entries expected_entries;
  expected_entries = {
    this->create_entry(234, 123),
    this->create_entry(234, 124),
    this->create_entry(234, 125)};
  ASSERT_EQ(expected_entries, entries);
  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(234, &last_tid));
  ASSERT_EQ(125U, last_tid);
}
// When the commit position already covers every written entry, replay
// yields nothing but still records the last allocated entry tid.
TYPED_TEST(TestJournalPlayer, PrefetchSkip) {
  std::string oid = this->get_temp_oid();
  journal::JournalPlayer::ObjectPositions positions;
  positions = {
    cls::journal::ObjectPosition(0, 234, 125),
    cls::journal::ObjectPosition(1, 234, 124) };
  cls::journal::ObjectSetPosition commit_position(positions);
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 125));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 0, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));
  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(234, &last_tid));
  ASSERT_EQ(125U, last_tid);
}
// With an empty commit position, replay starts from the very first entry.
TYPED_TEST(TestJournalPlayer, PrefetchWithoutCommit) {
  std::string oid = this->get_temp_oid();
  cls::journal::ObjectSetPosition commit_position;
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 2, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));
  Entries expected_entries;
  expected_entries = {
    this->create_entry(234, 122),
    this->create_entry(234, 123)};
  ASSERT_EQ(expected_entries, entries);
}
// Replay across a tag change: committed entries for tag 234 are skipped,
// the remaining tag-234 entries and the new tag-236 entry are tracked.
TYPED_TEST(TestJournalPlayer, PrefetchMultipleTags) {
  std::string oid = this->get_temp_oid();
  journal::JournalPlayer::ObjectPositions positions;
  positions = {
    cls::journal::ObjectPosition(2, 234, 122),
    cls::journal::ObjectPosition(1, 234, 121),
    cls::journal::ObjectPosition(0, 234, 120)};
  cls::journal::ObjectSetPosition commit_position(positions);
  ASSERT_EQ(0, this->create(oid, 14, 3));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 120));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 121));
  ASSERT_EQ(0, this->write_entry(oid, 2, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 123));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 124));
  ASSERT_EQ(0, this->write_entry(oid, 0, 236, 0)); // new tag allocated
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));
  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(234, &last_tid));
  ASSERT_EQ(124U, last_tid);
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(236, &last_tid));
  ASSERT_EQ(0U, last_tid);
}
// A gap in the entry tid sequence (120, 121 then 124) halts replay with
// -ENOMSG after the valid prefix has been delivered.
TYPED_TEST(TestJournalPlayer, PrefetchCorruptSequence) {
  std::string oid = this->get_temp_oid();
  cls::journal::ObjectSetPosition commit_position;
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 120));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 121));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 2, &entries));
  journal::Entry entry;
  uint64_t commit_tid;
  ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));
  ASSERT_TRUE(this->wait_for_complete(player));
  ASSERT_EQ(-ENOMSG, this->m_replay_hander.complete_result);
}
// Entries missing from later objects of the active set are tolerated when
// a newer tag resumes the sequence: tag 2 replay stops at its last
// contiguous entry (854) and tag 3 entries follow cleanly.
TYPED_TEST(TestJournalPlayer, PrefetchMissingSequence) {
  std::string oid = this->get_temp_oid();
  cls::journal::ObjectSetPosition commit_position;
  ASSERT_EQ(0, this->create(oid, 14, 4));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, metadata->set_active_set(1));
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 852));
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 856));
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 860));
  ASSERT_EQ(0, this->write_entry(oid, 1, 2, 853));
  ASSERT_EQ(0, this->write_entry(oid, 1, 2, 857));
  ASSERT_EQ(0, this->write_entry(oid, 5, 2, 861));
  ASSERT_EQ(0, this->write_entry(oid, 2, 2, 854));
  ASSERT_EQ(0, this->write_entry(oid, 0, 3, 0));
  ASSERT_EQ(0, this->write_entry(oid, 5, 3, 1));
  ASSERT_EQ(0, this->write_entry(oid, 2, 3, 2));
  ASSERT_EQ(0, this->write_entry(oid, 3, 3, 3));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 7, &entries));
  Entries expected_entries = {
    this->create_entry(2, 852),
    this->create_entry(2, 853),
    this->create_entry(2, 854),
    this->create_entry(3, 0),
    this->create_entry(3, 1),
    this->create_entry(3, 2),
    this->create_entry(3, 3)};
  ASSERT_EQ(expected_entries, entries);
  ASSERT_TRUE(this->wait_for_complete(player));
  ASSERT_EQ(0, this->m_replay_hander.complete_result);
}
// A large hole in tag 0 (entry 2 missing, entry 3 stranded) is skipped
// once tag 1 begins in a later object.
TYPED_TEST(TestJournalPlayer, PrefetchLargeMissingSequence) {
  std::string oid = this->get_temp_oid();
  cls::journal::ObjectSetPosition commit_position;
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, metadata->set_active_set(2));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 3, 0, 3));
  ASSERT_EQ(0, this->write_entry(oid, 4, 1, 0));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
  Entries expected_entries = {
    this->create_entry(0, 0),
    this->create_entry(0, 1),
    this->create_entry(1, 0)};
  ASSERT_EQ(expected_entries, entries);
}
// A missing entry (0/3) followed by a new tag in the same object does not
// block replay: the contiguous tag-0 prefix and the tag-1 entry are
// delivered.
TYPED_TEST(TestJournalPlayer, PrefetchBlockedNewTag) {
  std::string oid = this->get_temp_oid();
  cls::journal::ObjectSetPosition commit_position;
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 2));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 4));
  ASSERT_EQ(0, this->write_entry(oid, 0, 1, 0));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 4, &entries));
  Entries expected_entries = {
    this->create_entry(0, 0),
    this->create_entry(0, 1),
    this->create_entry(0, 2),
    this->create_entry(1, 0)};
  ASSERT_EQ(expected_entries, entries);
}
// Entries from a tag older than the committed position's tag (tag 0) are
// ignored; only the uncommitted tag-1 entry is replayed.
TYPED_TEST(TestJournalPlayer, PrefetchStaleEntries) {
  std::string oid = this->get_temp_oid();
  journal::JournalPlayer::ObjectPositions positions = {
    cls::journal::ObjectPosition(0, 1, 0) };
  cls::journal::ObjectSetPosition commit_position(positions);
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 3));
  ASSERT_EQ(0, this->write_entry(oid, 0, 1, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 1, 1));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));
  Entries expected_entries = {
    this->create_entry(1, 1)};
  ASSERT_EQ(expected_entries, entries);
  ASSERT_TRUE(this->wait_for_complete(player));
  ASSERT_EQ(0, this->m_replay_hander.complete_result);
}
// An interleaved entry from an unexpected tag (235) ends replay cleanly
// (result 0) after the first tag-234 entry.
TYPED_TEST(TestJournalPlayer, PrefetchUnexpectedTag) {
  std::string oid = this->get_temp_oid();
  cls::journal::ObjectSetPosition commit_position;
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 120));
  ASSERT_EQ(0, this->write_entry(oid, 1, 235, 121));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));
  journal::Entry entry;
  uint64_t commit_tid;
  ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));
  ASSERT_TRUE(this->wait_for_complete(player));
  ASSERT_EQ(0, this->m_replay_hander.complete_result);
}
// Live-watch mode: entries appended after prefetch_and_watch() are picked
// up and delivered as they arrive.
TYPED_TEST(TestJournalPlayer, PrefetchAndWatch) {
  std::string oid = this->get_temp_oid();
  journal::JournalPlayer::ObjectPositions positions;
  positions = {
    cls::journal::ObjectPosition(0, 234, 122)};
  cls::journal::ObjectSetPosition commit_position(positions);
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  player->prefetch_and_watch(0.25);   // 0.25s poll interval
  Entries entries;
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));
  Entries expected_entries;
  expected_entries = {this->create_entry(234, 123)};
  ASSERT_EQ(expected_entries, entries);
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));
  expected_entries = {this->create_entry(234, 124)};
  ASSERT_EQ(expected_entries, entries);
}
// Replay spans object sets with entirely empty objects (2, 3, 4) between
// populated ones; all entries are still delivered in order.
TYPED_TEST(TestJournalPlayer, PrefetchSkippedObject) {
  std::string oid = this->get_temp_oid();
  cls::journal::ObjectSetPosition commit_position;
  ASSERT_EQ(0, this->create(oid, 14, 3));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  ASSERT_EQ(0, metadata->set_active_set(2));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  ASSERT_EQ(0, this->write_entry(oid, 5, 234, 124));
  ASSERT_EQ(0, this->write_entry(oid, 6, 234, 125));
  ASSERT_EQ(0, this->write_entry(oid, 7, 234, 126));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 5, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));
  Entries expected_entries;
  expected_entries = {
    this->create_entry(234, 122),
    this->create_entry(234, 123),
    this->create_entry(234, 124),
    this->create_entry(234, 125),
    this->create_entry(234, 126)};
  ASSERT_EQ(expected_entries, entries);
  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(234, &last_tid));
  ASSERT_EQ(126U, last_tid);
}
// Objects committed to different depths (tags 200/300 already consumed at
// varying positions): only the uncommitted tag-301 entries are replayed.
TYPED_TEST(TestJournalPlayer, ImbalancedJournal) {
  std::string oid = this->get_temp_oid();
  journal::JournalPlayer::ObjectPositions positions = {
    cls::journal::ObjectPosition(9, 300, 1),
    cls::journal::ObjectPosition(8, 300, 0),
    cls::journal::ObjectPosition(10, 200, 4334),
    cls::journal::ObjectPosition(11, 200, 4331) };
  cls::journal::ObjectSetPosition commit_position(positions);
  ASSERT_EQ(0, this->create(oid, 14, 4));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  ASSERT_EQ(0, metadata->set_active_set(2));
  metadata->set_minimum_set(2);   // earlier sets already trimmed
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 8, 300, 0));
  ASSERT_EQ(0, this->write_entry(oid, 8, 301, 0));
  ASSERT_EQ(0, this->write_entry(oid, 9, 300, 1));
  ASSERT_EQ(0, this->write_entry(oid, 9, 301, 1));
  ASSERT_EQ(0, this->write_entry(oid, 10, 200, 4334));
  ASSERT_EQ(0, this->write_entry(oid, 10, 301, 2));
  ASSERT_EQ(0, this->write_entry(oid, 11, 200, 4331));
  ASSERT_EQ(0, this->write_entry(oid, 11, 301, 3));
  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 4, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));
  Entries expected_entries;
  expected_entries = {
    this->create_entry(301, 0),
    this->create_entry(301, 1),
    this->create_entry(301, 2),
    this->create_entry(301, 3)};
  ASSERT_EQ(expected_entries, entries);
  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(301, &last_tid));
  ASSERT_EQ(3U, last_tid);
}
// Live replay stalls at a tid gap (0/3 not yet written) and resumes once
// the laggy append lands and the active set advances.
TYPED_TEST(TestJournalPlayer, LiveReplayLaggyAppend) {
  std::string oid = this->get_temp_oid();
  cls::journal::ObjectSetPosition commit_position;
  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));
  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 2));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 4));
  ASSERT_EQ(0, this->write_entry(oid, 3, 0, 5)); // laggy entry 0/3 in object 1
  player->prefetch_and_watch(0.25);
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
  Entries expected_entries = {
    this->create_entry(0, 0),
    this->create_entry(0, 1),
    this->create_entry(0, 2)};
  ASSERT_EQ(expected_entries, entries);
  journal::Entry entry;
  uint64_t commit_tid;
  ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));  // blocked on 0/3
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 3));
  ASSERT_EQ(0, metadata->set_active_set(1));
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
  expected_entries = {
    this->create_entry(0, 3),
    this->create_entry(0, 4),
    this->create_entry(0, 5)};
  ASSERT_EQ(expected_entries, entries);
}
// Live replay across a splay width of 4 with a gap in tag 2's entry tids:
// after delivering 852-854 the player must block on the missing 855, then
// continue with the complete tag-3 sequence written afterwards.
TYPED_TEST(TestJournalPlayer, LiveReplayMissingSequence) {
std::string oid = this->get_temp_oid();
cls::journal::ObjectSetPosition commit_position;
ASSERT_EQ(0, this->create(oid, 14, 4));
ASSERT_EQ(0, this->client_register(oid));
ASSERT_EQ(0, this->client_commit(oid, commit_position));
auto metadata = this->create_metadata(oid);
ASSERT_EQ(0, this->init_metadata(metadata));
journal::JournalPlayer *player = this->create_player(oid, metadata);
// Guarantee the player is shut down even if an assertion fails below.
BOOST_SCOPE_EXIT_ALL( (player) ) {
C_SaferCond unwatch_ctx;
player->shut_down(&unwatch_ctx);
ASSERT_EQ(0, unwatch_ctx.wait());
};
ASSERT_EQ(0, this->write_entry(oid, 0, 2, 852));
// NOTE(review): entry 2/856 is written twice (here and again below) —
// presumably exercising duplicate-entry handling; confirm intent.
ASSERT_EQ(0, this->write_entry(oid, 0, 2, 856));
ASSERT_EQ(0, this->write_entry(oid, 0, 2, 860));
ASSERT_EQ(0, this->write_entry(oid, 1, 2, 853));
ASSERT_EQ(0, this->write_entry(oid, 1, 2, 857));
ASSERT_EQ(0, this->write_entry(oid, 2, 2, 854));
ASSERT_EQ(0, this->write_entry(oid, 0, 2, 856));
player->prefetch_and_watch(0.25);
Entries entries;
ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
Entries expected_entries = {
this->create_entry(2, 852),
this->create_entry(2, 853),
this->create_entry(2, 854)};
ASSERT_EQ(expected_entries, entries);
// Entry 2/855 was never written, so replay stalls here.
journal::Entry entry;
uint64_t commit_tid;
ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));
ASSERT_EQ(0, this->write_entry(oid, 3, 3, 3));
ASSERT_EQ(0, this->write_entry(oid, 2, 3, 2));
ASSERT_EQ(0, this->write_entry(oid, 1, 3, 1));
ASSERT_EQ(0, this->write_entry(oid, 0, 3, 0));
ASSERT_TRUE(this->wait_for_entries(player, 4, &entries));
expected_entries = {
this->create_entry(3, 0),
this->create_entry(3, 1),
this->create_entry(3, 2),
this->create_entry(3, 3)};
ASSERT_EQ(expected_entries, entries);
}
// Live replay with a large tid gap (tag 0 skips entry 2, entry 0/3 lands
// in a later object, and tag 1 starts): the player delivers 0/0, 0/1 and
// then moves on to tag 1's entry 0 rather than waiting on the gap.
TYPED_TEST(TestJournalPlayer, LiveReplayLargeMissingSequence) {
std::string oid = this->get_temp_oid();
cls::journal::ObjectSetPosition commit_position;
ASSERT_EQ(0, this->create(oid));
ASSERT_EQ(0, this->client_register(oid));
ASSERT_EQ(0, this->client_commit(oid, commit_position));
auto metadata = this->create_metadata(oid);
ASSERT_EQ(0, this->init_metadata(metadata));
journal::JournalPlayer *player = this->create_player(oid, metadata);
// Guarantee the player is shut down even if an assertion fails below.
BOOST_SCOPE_EXIT_ALL( (player) ) {
C_SaferCond unwatch_ctx;
player->shut_down(&unwatch_ctx);
ASSERT_EQ(0, unwatch_ctx.wait());
};
ASSERT_EQ(0, metadata->set_active_set(2));
ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
ASSERT_EQ(0, this->write_entry(oid, 3, 0, 3));
ASSERT_EQ(0, this->write_entry(oid, 4, 1, 0));
player->prefetch_and_watch(0.25);
Entries entries;
ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
Entries expected_entries = {
this->create_entry(0, 0),
this->create_entry(0, 1),
this->create_entry(1, 0)};
ASSERT_EQ(expected_entries, entries);
}
// Live replay that blocks on a missing tid within a tag, then unblocks
// when a new tag (same tag class) begins: entry tag1/3 never arrives, but
// once tag2/0 is written the player must deliver it.
TYPED_TEST(TestJournalPlayer, LiveReplayBlockedNewTag) {
std::string oid = this->get_temp_oid();
cls::journal::ObjectSetPosition commit_position;
ASSERT_EQ(0, this->create(oid));
ASSERT_EQ(0, this->client_register(oid));
ASSERT_EQ(0, this->client_commit(oid, commit_position));
auto metadata = this->create_metadata(oid);
ASSERT_EQ(0, this->init_metadata(metadata));
journal::JournalPlayer *player = this->create_player(oid, metadata);
// Guarantee the player is shut down even if an assertion fails below.
BOOST_SCOPE_EXIT_ALL( (player) ) {
C_SaferCond unwatch_ctx;
player->shut_down(&unwatch_ctx);
ASSERT_EQ(0, unwatch_ctx.wait());
};
C_SaferCond ctx1;
cls::journal::Tag tag1;
metadata->allocate_tag(cls::journal::Tag::TAG_CLASS_NEW, {}, &tag1, &ctx1);
ASSERT_EQ(0, ctx1.wait());
ASSERT_EQ(0, metadata->set_active_set(0));
ASSERT_EQ(0, this->write_entry(oid, 0, tag1.tid, 0));
ASSERT_EQ(0, this->write_entry(oid, 1, tag1.tid, 1));
ASSERT_EQ(0, this->write_entry(oid, 0, tag1.tid, 2));
ASSERT_EQ(0, this->write_entry(oid, 0, tag1.tid, 4));
player->prefetch_and_watch(0.25);
Entries entries;
ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
Entries expected_entries = {
this->create_entry(tag1.tid, 0),
this->create_entry(tag1.tid, 1),
this->create_entry(tag1.tid, 2)};
ASSERT_EQ(expected_entries, entries);
// tag1 entry 3 is missing, so replay stalls on the current tag.
journal::Entry entry;
uint64_t commit_tid;
ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));
C_SaferCond ctx2;
cls::journal::Tag tag2;
metadata->allocate_tag(tag1.tag_class, {}, &tag2, &ctx2);
ASSERT_EQ(0, ctx2.wait());
ASSERT_EQ(0, this->write_entry(oid, 0, tag2.tid, 0));
ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));
expected_entries = {
this->create_entry(tag2.tid, 0)};
ASSERT_EQ(expected_entries, entries);
}
// Live replay starting from a commit position inside tag 1: entries from
// the older tag 0 that remain in the objects are stale and must be
// skipped; only the uncommitted tag-1 entry 1 is delivered.
TYPED_TEST(TestJournalPlayer, LiveReplayStaleEntries) {
std::string oid = this->get_temp_oid();
journal::JournalPlayer::ObjectPositions positions = {
cls::journal::ObjectPosition(0, 1, 0) };
cls::journal::ObjectSetPosition commit_position(positions);
ASSERT_EQ(0, this->create(oid));
ASSERT_EQ(0, this->client_register(oid));
ASSERT_EQ(0, this->client_commit(oid, commit_position));
auto metadata = this->create_metadata(oid);
ASSERT_EQ(0, this->init_metadata(metadata));
journal::JournalPlayer *player = this->create_player(oid, metadata);
// Guarantee the player is shut down even if an assertion fails below.
BOOST_SCOPE_EXIT_ALL( (player) ) {
C_SaferCond unwatch_ctx;
player->shut_down(&unwatch_ctx);
ASSERT_EQ(0, unwatch_ctx.wait());
};
ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
ASSERT_EQ(0, this->write_entry(oid, 1, 0, 3));
ASSERT_EQ(0, this->write_entry(oid, 0, 1, 0));
ASSERT_EQ(0, this->write_entry(oid, 1, 1, 1));
player->prefetch_and_watch(0.25);
Entries entries;
ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));
Entries expected_entries = {
this->create_entry(1, 1)};
ASSERT_EQ(expected_entries, entries);
}
// Live replay where advancing the active set forces a refetch: after the
// active set jumps to 3, the player must refetch, drop the now-empty
// object player, and still deliver the newly written entry 1/1.
TYPED_TEST(TestJournalPlayer, LiveReplayRefetchRemoveEmpty) {
std::string oid = this->get_temp_oid();
journal::JournalPlayer::ObjectPositions positions = {
cls::journal::ObjectPosition(1, 0, 1),
cls::journal::ObjectPosition(0, 0, 0)};
cls::journal::ObjectSetPosition commit_position(positions);
ASSERT_EQ(0, this->create(oid));
ASSERT_EQ(0, this->client_register(oid));
ASSERT_EQ(0, this->client_commit(oid, commit_position));
auto metadata = this->create_metadata(oid);
ASSERT_EQ(0, this->init_metadata(metadata));
journal::JournalPlayer *player = this->create_player(oid, metadata);
// Guarantee the player is shut down even if an assertion fails below.
BOOST_SCOPE_EXIT_ALL( (player) ) {
C_SaferCond unwatch_ctx;
player->shut_down(&unwatch_ctx);
ASSERT_EQ(0, unwatch_ctx.wait());
};
ASSERT_EQ(0, metadata->set_active_set(1));
ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
ASSERT_EQ(0, this->write_entry(oid, 3, 0, 3));
ASSERT_EQ(0, this->write_entry(oid, 2, 1, 0));
player->prefetch_and_watch(0.25);
Entries entries;
ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));
Entries expected_entries = {
this->create_entry(1, 0)};
ASSERT_EQ(expected_entries, entries);
// should remove player for offset 3 after refetching
ASSERT_EQ(0, metadata->set_active_set(3));
ASSERT_EQ(0, this->write_entry(oid, 7, 1, 1));
ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));
expected_entries = {
this->create_entry(1, 1)};
ASSERT_EQ(expected_entries, entries);
}
// Starting a prefetch and immediately shutting the player down (via the
// scope-exit handler) must complete without error.
TYPED_TEST(TestJournalPlayer, PrefetchShutDown) {
  std::string journal_oid = this->get_temp_oid();
  ASSERT_EQ(0, this->create(journal_oid));
  ASSERT_EQ(0, this->client_register(journal_oid));
  ASSERT_EQ(0, this->client_commit(journal_oid, {}));

  auto journal_metadata = this->create_metadata(journal_oid);
  ASSERT_EQ(0, this->init_metadata(journal_metadata));

  journal::JournalPlayer *journal_player =
    this->create_player(journal_oid, journal_metadata);
  // Shut the player down when the test scope unwinds.
  BOOST_SCOPE_EXIT_ALL( (journal_player) ) {
    C_SaferCond shut_down_ctx;
    journal_player->shut_down(&shut_down_ctx);
    ASSERT_EQ(0, shut_down_ctx.wait());
  };

  journal_player->prefetch();
}
// Starting a live replay (prefetch + watch) and immediately shutting the
// player down (via the scope-exit handler) must complete without error.
TYPED_TEST(TestJournalPlayer, LiveReplayShutDown) {
  std::string journal_oid = this->get_temp_oid();
  ASSERT_EQ(0, this->create(journal_oid));
  ASSERT_EQ(0, this->client_register(journal_oid));
  ASSERT_EQ(0, this->client_commit(journal_oid, {}));

  auto journal_metadata = this->create_metadata(journal_oid);
  ASSERT_EQ(0, this->init_metadata(journal_metadata));

  journal::JournalPlayer *journal_player =
    this->create_player(journal_oid, journal_metadata);
  // Shut the player down when the test scope unwinds.
  BOOST_SCOPE_EXIT_ALL( (journal_player) ) {
    C_SaferCond shut_down_ctx;
    journal_player->shut_down(&shut_down_ctx);
    ASSERT_EQ(0, shut_down_ctx.wait());
  };

  journal_player->prefetch_and_watch(0.25);
}
| 31,562 | 30.689759 | 96 |
cc
|
null |
ceph-main/src/test/journal/test_JournalRecorder.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalRecorder.h"
#include "journal/Entry.h"
#include "journal/JournalMetadata.h"
#include "test/journal/RadosTestFixture.h"
#include <limits>
#include <list>
#include <memory>
// Fixture for JournalRecorder tests. Provides a factory that wraps each
// recorder in a unique_ptr whose deleter performs a synchronous shut_down
// before destruction, so tests cannot leak an active recorder.
class TestJournalRecorder : public RadosTestFixture {
public:
using JournalRecorderPtr = std::unique_ptr<journal::JournalRecorder,
std::function<void(journal::JournalRecorder*)>>;
// Create a recorder for journal `oid` (object prefix "<oid>.") bound to
// the given metadata; batching is configured for immediate flushes
// (0 delay) with an effectively unlimited batch size.
JournalRecorderPtr create_recorder(
const std::string &oid, const ceph::ref_t<journal::JournalMetadata>& metadata) {
JournalRecorderPtr recorder{
new journal::JournalRecorder(m_ioctx, oid + ".", metadata, 0),
[](journal::JournalRecorder* recorder) {
// Synchronously drain and stop the recorder before deleting it.
C_SaferCond cond;
recorder->shut_down(&cond);
cond.wait();
delete recorder;
}
};
recorder->set_append_batch_options(0, std::numeric_limits<uint32_t>::max(), 0);
return recorder;
}
};
// A single append must produce a future that flushes successfully.
TEST_F(TestJournalRecorder, Append) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
JournalRecorderPtr recorder = create_recorder(oid, metadata);
journal::Future future1 = recorder->append(123, create_payload("payload"));
C_SaferCond cond;
future1.flush(&cond);
ASSERT_EQ(0, cond.wait());
}
// Appending a payload that exactly fills the object, then one more byte,
// must overflow into the next object set (active set advances 0 -> 1).
TEST_F(TestJournalRecorder, AppendKnownOverflow) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
ASSERT_EQ(0U, metadata->get_active_set());
JournalRecorderPtr recorder = create_recorder(oid, metadata);
// First append fills the object completely (payload + entry header).
recorder->append(123, create_payload(std::string(metadata->get_object_size() -
journal::Entry::get_fixed_size(), '1')));
journal::Future future2 = recorder->append(123, create_payload(std::string(1, '2')));
C_SaferCond cond;
future2.flush(&cond);
ASSERT_EQ(0, cond.wait());
ASSERT_EQ(1U, metadata->get_active_set());
}
// Overflow detected only at append time: a second recorder fills an object
// that another recorder already wrote to, so the overflow is discovered
// when the next append lands; the active set must still advance.
TEST_F(TestJournalRecorder, AppendDelayedOverflow) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
ASSERT_EQ(0U, metadata->get_active_set());
JournalRecorderPtr recorder1 = create_recorder(oid, metadata);
JournalRecorderPtr recorder2 = create_recorder(oid, metadata);
recorder1->append(234, create_payload(std::string(1, '1')));
recorder2->append(123, create_payload(std::string(metadata->get_object_size() -
journal::Entry::get_fixed_size(), '2')));
journal::Future future = recorder2->append(123, create_payload(std::string(1, '3')));
C_SaferCond cond;
future.flush(&cond);
ASSERT_EQ(0, cond.wait());
ASSERT_EQ(1U, metadata->get_active_set());
}
// Flushing a later future must also complete all earlier futures.
TEST_F(TestJournalRecorder, FutureFlush) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
JournalRecorderPtr recorder = create_recorder(oid, metadata);
journal::Future future1 = recorder->append(123, create_payload("payload1"));
journal::Future future2 = recorder->append(123, create_payload("payload2"));
C_SaferCond cond;
future2.flush(&cond);
ASSERT_EQ(0, cond.wait());
ASSERT_TRUE(future1.is_complete());
ASSERT_TRUE(future2.is_complete());
}
// Flushing the recorder itself must complete all outstanding futures.
TEST_F(TestJournalRecorder, Flush) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
JournalRecorderPtr recorder = create_recorder(oid, metadata);
journal::Future future1 = recorder->append(123, create_payload("payload1"));
journal::Future future2 = recorder->append(123, create_payload("payload2"));
C_SaferCond cond1;
recorder->flush(&cond1);
ASSERT_EQ(0, cond1.wait());
C_SaferCond cond2;
future2.wait(&cond2);
ASSERT_EQ(0, cond2.wait());
ASSERT_TRUE(future1.is_complete());
ASSERT_TRUE(future2.is_complete());
}
// After an overflow, commit entries must record the correct object number
// for each entry: the pre-overflow entry stays in object 0, the
// post-overflow entry lands in object 2 of the next set.
TEST_F(TestJournalRecorder, OverflowCommitObjectNumber) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
ASSERT_EQ(0U, metadata->get_active_set());
JournalRecorderPtr recorder = create_recorder(oid, metadata);
recorder->append(123, create_payload(std::string(metadata->get_object_size() -
journal::Entry::get_fixed_size(), '1')));
journal::Future future2 = recorder->append(124, create_payload(std::string(1, '2')));
C_SaferCond cond;
future2.flush(&cond);
ASSERT_EQ(0, cond.wait());
ASSERT_EQ(1U, metadata->get_active_set());
uint64_t object_num;
uint64_t tag_tid;
uint64_t entry_tid;
metadata->get_commit_entry(1, &object_num, &tag_tid, &entry_tid);
ASSERT_EQ(0U, object_num);
ASSERT_EQ(123U, tag_tid);
ASSERT_EQ(0U, entry_tid);
metadata->get_commit_entry(2, &object_num, &tag_tid, &entry_tid);
ASSERT_EQ(2U, object_num);
ASSERT_EQ(124U, tag_tid);
ASSERT_EQ(0U, entry_tid);
}
| 5,450 | 30.148571 | 93 |
cc
|
null |
ceph-main/src/test/journal/test_JournalTrimmer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalTrimmer.h"
#include "journal/JournalMetadata.h"
#include "include/stringify.h"
#include "test/journal/RadosTestFixture.h"
#include <limits>
#include <list>
class TestJournalTrimmer : public RadosTestFixture {
public:
void TearDown() override {
for (MetadataList::iterator it = m_metadata_list.begin();
it != m_metadata_list.end(); ++it) {
(*it)->remove_listener(&m_listener);
}
m_metadata_list.clear();
for (std::list<journal::JournalTrimmer*>::iterator it = m_trimmers.begin();
it != m_trimmers.end(); ++it) {
C_SaferCond ctx;
(*it)->shut_down(&ctx);
ASSERT_EQ(0, ctx.wait());
delete *it;
}
RadosTestFixture::TearDown();
}
int append_payload(const ceph::ref_t<journal::JournalMetadata>& metadata,
const std::string &oid, uint64_t object_num,
const std::string &payload, uint64_t *commit_tid) {
int r = append(oid + "." + stringify(object_num), create_payload(payload));
uint64_t tid = metadata->allocate_commit_tid(object_num, 234, 123);
if (commit_tid != NULL) {
*commit_tid = tid;
}
return r;
}
auto create_metadata(const std::string &oid) {
auto metadata = RadosTestFixture::create_metadata(oid);
m_metadata_list.push_back(metadata);
metadata->add_listener(&m_listener);
return metadata;
}
journal::JournalTrimmer *create_trimmer(const std::string &oid,
const ceph::ref_t<journal::JournalMetadata>& metadata) {
journal::JournalTrimmer *trimmer(new journal::JournalTrimmer(
m_ioctx, oid + ".", metadata));
m_trimmers.push_back(trimmer);
return trimmer;
}
int assert_exists(const std::string &oid) {
librados::ObjectWriteOperation op;
op.assert_exists();
return m_ioctx.operate(oid, &op);
}
typedef std::list<ceph::ref_t<journal::JournalMetadata>> MetadataList;
MetadataList m_metadata_list;
std::list<journal::JournalTrimmer*> m_trimmers;
};
// Committing every entry (in arbitrary order) must let the trimmer remove
// fully-committed object sets: minimum set advances to 2, objects 0 and 2
// are deleted, while object 5 (in the still-active range) survives.
TEST_F(TestJournalTrimmer, Committed) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
ASSERT_TRUE(wait_for_update(metadata));
ASSERT_EQ(0, metadata->set_active_set(10));
ASSERT_TRUE(wait_for_update(metadata));
uint64_t commit_tid1;
uint64_t commit_tid2;
uint64_t commit_tid3;
uint64_t commit_tid4;
uint64_t commit_tid5;
uint64_t commit_tid6;
ASSERT_EQ(0, append_payload(metadata, oid, 0, "payload", &commit_tid1));
ASSERT_EQ(0, append_payload(metadata, oid, 4, "payload", &commit_tid2));
ASSERT_EQ(0, append_payload(metadata, oid, 5, "payload", &commit_tid3));
ASSERT_EQ(0, append_payload(metadata, oid, 0, "payload", &commit_tid4));
ASSERT_EQ(0, append_payload(metadata, oid, 4, "payload", &commit_tid5));
ASSERT_EQ(0, append_payload(metadata, oid, 5, "payload", &commit_tid6));
journal::JournalTrimmer *trimmer = create_trimmer(oid, metadata);
// Commit tids out of order; the trimmer must cope.
trimmer->committed(commit_tid4);
trimmer->committed(commit_tid6);
trimmer->committed(commit_tid2);
trimmer->committed(commit_tid5);
trimmer->committed(commit_tid3);
trimmer->committed(commit_tid1);
while (metadata->get_minimum_set() != 2U) {
ASSERT_TRUE(wait_for_update(metadata));
}
ASSERT_EQ(-ENOENT, assert_exists(oid + ".0"));
ASSERT_EQ(-ENOENT, assert_exists(oid + ".2"));
ASSERT_EQ(0, assert_exists(oid + ".5"));
}
// With a second registered client that never commits, trimming must not
// occur: all data objects remain even after this client commits everything.
TEST_F(TestJournalTrimmer, CommittedWithOtherClient) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
ASSERT_EQ(0, client_register(oid, "client2", "slow client"));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
ASSERT_TRUE(wait_for_update(metadata));
ASSERT_EQ(0, metadata->set_active_set(10));
ASSERT_TRUE(wait_for_update(metadata));
uint64_t commit_tid1;
uint64_t commit_tid2;
uint64_t commit_tid3;
uint64_t commit_tid4;
ASSERT_EQ(0, append_payload(metadata, oid, 0, "payload", &commit_tid1));
ASSERT_EQ(0, append_payload(metadata, oid, 2, "payload", &commit_tid2));
ASSERT_EQ(0, append_payload(metadata, oid, 3, "payload", &commit_tid3));
ASSERT_EQ(0, append_payload(metadata, oid, 5, "payload", &commit_tid4));
journal::JournalTrimmer *trimmer = create_trimmer(oid, metadata);
trimmer->committed(commit_tid1);
trimmer->committed(commit_tid2);
trimmer->committed(commit_tid3);
trimmer->committed(commit_tid4);
ASSERT_TRUE(wait_for_update(metadata));
ASSERT_EQ(0, assert_exists(oid + ".0"));
ASSERT_EQ(0, assert_exists(oid + ".2"));
ASSERT_EQ(0, assert_exists(oid + ".3"));
ASSERT_EQ(0, assert_exists(oid + ".5"));
}
// remove_objects(force=false) with only this client registered must delete
// every data object of the journal.
TEST_F(TestJournalTrimmer, RemoveObjects) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
ASSERT_TRUE(wait_for_update(metadata));
ASSERT_EQ(0, metadata->set_active_set(10));
ASSERT_TRUE(wait_for_update(metadata));
ASSERT_EQ(0, append(oid + ".0", create_payload("payload")));
ASSERT_EQ(0, append(oid + ".2", create_payload("payload")));
ASSERT_EQ(0, append(oid + ".3", create_payload("payload")));
ASSERT_EQ(0, append(oid + ".5", create_payload("payload")));
journal::JournalTrimmer *trimmer = create_trimmer(oid, metadata);
C_SaferCond cond;
trimmer->remove_objects(false, &cond);
ASSERT_EQ(0, cond.wait());
ASSERT_TRUE(wait_for_update(metadata));
ASSERT_EQ(-ENOENT, assert_exists(oid + ".0"));
ASSERT_EQ(-ENOENT, assert_exists(oid + ".2"));
ASSERT_EQ(-ENOENT, assert_exists(oid + ".3"));
ASSERT_EQ(-ENOENT, assert_exists(oid + ".5"));
}
// With another client registered, remove_objects must fail with -EBUSY
// unless the force flag is set.
TEST_F(TestJournalTrimmer, RemoveObjectsWithOtherClient) {
std::string oid = get_temp_oid();
ASSERT_EQ(0, create(oid, 12, 2));
ASSERT_EQ(0, client_register(oid));
ASSERT_EQ(0, client_register(oid, "client2", "other client"));
auto metadata = create_metadata(oid);
ASSERT_EQ(0, init_metadata(metadata));
ASSERT_TRUE(wait_for_update(metadata));
journal::JournalTrimmer *trimmer = create_trimmer(oid, metadata);
C_SaferCond ctx1;
trimmer->remove_objects(false, &ctx1);
ASSERT_EQ(-EBUSY, ctx1.wait());
C_SaferCond ctx2;
trimmer->remove_objects(true, &ctx2);
ASSERT_EQ(0, ctx2.wait());
}
| 6,515 | 31.909091 | 98 |
cc
|
null |
ceph-main/src/test/journal/test_Journaler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/stringify.h"
#include "journal/Journaler.h"
#include "journal/Settings.h"
#include "test/librados/test.h"
#include "test/journal/RadosTestFixture.h"
#include "gtest/gtest.h"
// reinclude our assert to clobber the system one
#include "include/ceph_assert.h"
// Fixture for high-level Journaler API tests. Allocates a fresh journal id
// per test and owns one Journaler instance (m_journaler); the client
// registration helpers spin up short-lived Journaler instances under
// other client ids.
class TestJournaler : public RadosTestFixture {
public:
static const std::string CLIENT_ID;
// Monotonically increasing journal id, unique within the test binary.
static std::string get_temp_journal_id() {
return stringify(++_journal_id);
}
void SetUp() override {
RadosTestFixture::SetUp();
m_journal_id = get_temp_journal_id();
m_journaler = new journal::Journaler(m_work_queue, m_timer, &m_timer_lock,
m_ioctx, m_journal_id, CLIENT_ID, {},
nullptr);
}
void TearDown() override {
delete m_journaler;
RadosTestFixture::TearDown();
}
// Synchronously create the journal with the given object-size order and
// splay width (pool id -1 = same pool).
int create_journal(uint8_t order, uint8_t splay_width) {
C_SaferCond cond;
m_journaler->create(order, splay_width, -1, &cond);
return cond.wait();
}
int init_journaler() {
C_SaferCond cond;
m_journaler->init(&cond);
return cond.wait();
}
int shut_down_journaler() {
C_SaferCond ctx;
m_journaler->shut_down(&ctx);
return ctx.wait();
}
// Register `client_id` with `desc` as its opaque data blob, using a
// temporary Journaler bound to that client id.
int register_client(const std::string &client_id, const std::string &desc) {
journal::Journaler journaler(m_work_queue, m_timer, &m_timer_lock, m_ioctx,
m_journal_id, client_id, {}, nullptr);
bufferlist data;
data.append(desc);
C_SaferCond cond;
journaler.register_client(data, &cond);
return cond.wait();
}
// Replace the opaque data blob of an already registered client.
int update_client(const std::string &client_id, const std::string &desc) {
journal::Journaler journaler(m_work_queue, m_timer, &m_timer_lock, m_ioctx,
m_journal_id, client_id, {}, nullptr);
bufferlist data;
data.append(desc);
C_SaferCond cond;
journaler.update_client(data, &cond);
return cond.wait();
}
int unregister_client(const std::string &client_id) {
journal::Journaler journaler(m_work_queue, m_timer, &m_timer_lock, m_ioctx,
m_journal_id, client_id, {}, nullptr);
C_SaferCond cond;
journaler.unregister_client(&cond);
return cond.wait();
}
static uint64_t _journal_id;
std::string m_journal_id;
journal::Journaler *m_journaler;
};
// Out-of-class definitions for the fixture's static members.
const std::string TestJournaler::CLIENT_ID = "client1";
uint64_t TestJournaler::_journal_id = 0;
// Creating a journal with valid parameters succeeds.
TEST_F(TestJournaler, Create) {
ASSERT_EQ(0, create_journal(12, 8));
}
// Creating the same journal twice fails with -EEXIST.
TEST_F(TestJournaler, CreateDuplicate) {
ASSERT_EQ(0, create_journal(12, 8));
ASSERT_EQ(-EEXIST, create_journal(12, 8));
}
// Out-of-range order values fail with -EDOM; a zero splay width with
// -EINVAL.
TEST_F(TestJournaler, CreateInvalidParams) {
ASSERT_EQ(-EDOM, create_journal(1, 8));
ASSERT_EQ(-EDOM, create_journal(123, 8));
ASSERT_EQ(-EINVAL, create_journal(12, 0));
}
// init succeeds for an existing journal with a registered client.
TEST_F(TestJournaler, Init) {
ASSERT_EQ(0, create_journal(12, 8));
ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
ASSERT_EQ(0, init_journaler());
ASSERT_EQ(0, shut_down_journaler());
}
// init of a journal that was never created fails with -ENOENT; shutdown
// must still succeed afterwards.
TEST_F(TestJournaler, InitDNE) {
ASSERT_EQ(-ENOENT, init_journaler());
ASSERT_EQ(0, shut_down_journaler());
}
// Registering the same client id twice fails with -EEXIST.
TEST_F(TestJournaler, RegisterClientDuplicate) {
ASSERT_EQ(0, create_journal(12, 8));
ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
ASSERT_EQ(-EEXIST, register_client(CLIENT_ID, "foo2"));
}
// A registered client's data blob can be updated.
TEST_F(TestJournaler, UpdateClient) {
ASSERT_EQ(0, create_journal(12, 8));
ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
ASSERT_EQ(0, update_client(CLIENT_ID, "foo2"));
}
// Updating a client that was never registered fails with -ENOENT.
TEST_F(TestJournaler, UpdateClientDNE) {
ASSERT_EQ(0, create_journal(12, 8));
ASSERT_EQ(-ENOENT, update_client(CLIENT_ID, "foo"));
}
// Unregistering removes the client: updates then fail with -ENOENT and the
// same id can be registered again.
TEST_F(TestJournaler, UnregisterClient) {
ASSERT_EQ(0, create_journal(12, 8));
ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
ASSERT_EQ(0, unregister_client(CLIENT_ID));
// Test it does not exist and can be registered again
ASSERT_EQ(-ENOENT, update_client(CLIENT_ID, "foo"));
ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
}
// Unregistering a client that was never registered fails with -ENOENT.
TEST_F(TestJournaler, UnregisterClientDNE) {
ASSERT_EQ(0, create_journal(12, 8));
ASSERT_EQ(-ENOENT, unregister_client(CLIENT_ID));
}
// Tag allocation: a fresh tag class yields tag (tid 0, class 0) carrying
// the supplied data; reusing the class yields the next tid (1).
TEST_F(TestJournaler, AllocateTag) {
ASSERT_EQ(0, create_journal(12, 8));
cls::journal::Tag tag;
bufferlist data;
data.append(std::string(128, '1'));
// allocate a new tag class
C_SaferCond ctx1;
m_journaler->allocate_tag(data, &tag, &ctx1);
ASSERT_EQ(0, ctx1.wait());
ASSERT_EQ(cls::journal::Tag(0, 0, data), tag);
// re-use an existing tag class
C_SaferCond ctx2;
m_journaler->allocate_tag(tag.tag_class, bufferlist(), &tag, &ctx2);
ASSERT_EQ(0, ctx2.wait());
ASSERT_EQ(cls::journal::Tag(1, 0, bufferlist()), tag);
}
// Allocate 256 tags across two tag classes and verify that get_tags for
// class 0 returns exactly the tags allocated in that class, in order.
TEST_F(TestJournaler, GetTags) {
ASSERT_EQ(0, create_journal(12, 8));
ASSERT_EQ(0, register_client(CLIENT_ID, "foo"));
std::list<cls::journal::Tag> expected_tags;
for (size_t i = 0; i < 256; ++i) {
C_SaferCond ctx;
cls::journal::Tag tag;
// The first two allocations create classes 0 and 1; the rest alternate
// between the two existing classes.
if (i < 2) {
m_journaler->allocate_tag(bufferlist(), &tag, &ctx);
} else {
m_journaler->allocate_tag(i % 2, bufferlist(), &tag, &ctx);
}
ASSERT_EQ(0, ctx.wait());
if (i % 2 == 0) {
expected_tags.push_back(tag);
}
}
std::list<cls::journal::Tag> tags;
C_SaferCond ctx;
m_journaler->get_tags(0, &tags, &ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_EQ(expected_tags, tags);
}
| 5,506 | 26.673367 | 79 |
cc
|
null |
ceph-main/src/test/journal/test_ObjectPlayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/ObjectPlayer.h"
#include "journal/Entry.h"
#include "include/stringify.h"
#include "common/Timer.h"
#include "gtest/gtest.h"
#include "test/librados/test.h"
#include "test/journal/RadosTestFixture.h"
// Typed fixture for ObjectPlayer tests; the type parameter supplies the
// max_fetch_bytes limit under test (see TestObjectPlayerParams).
template <typename T>
class TestObjectPlayer : public RadosTestFixture, public T {
public:
  // Construct an ObjectPlayer for splay offset 0 of journal `oid` with the
  // given object-size order and the parameterized fetch limit.
  auto create_object(const std::string &oid, uint8_t order) {
    auto object = ceph::make_ref<journal::ObjectPlayer>(
      m_ioctx, oid + ".", 0, *m_timer, m_timer_lock, order,
      T::max_fetch_bytes);
    return object;
  }

  // Fetch the object's contents, repeating while the player reports that a
  // refetch is required. Returns the first negative fetch result, or the
  // final (non-negative) result once no refetch is pending.
  int fetch(const ceph::ref_t<journal::ObjectPlayer>& object_player) {
    while (true) {
      C_SaferCond ctx;
      object_player->set_refetch_state(
        journal::ObjectPlayer::REFETCH_STATE_NONE);
      object_player->fetch(&ctx);
      int r = ctx.wait();
      if (r < 0 || !object_player->refetch_required()) {
        return r;
      }
    }
    // NOTE: the loop above always returns; the previous trailing
    // `return 0;` was unreachable and has been removed.
  }

  // Poll (up to 50 rounds of 0.1s watches) until `count` entries are
  // visible. Returns a watch error, or 0 — callers must still assert the
  // resulting entry count, since reaching `count` is not guaranteed here.
  int watch_and_wait_for_entries(const ceph::ref_t<journal::ObjectPlayer>& object_player,
                                 journal::ObjectPlayer::Entries *entries,
                                 size_t count) {
    for (size_t i = 0; i < 50; ++i) {
      object_player->get_entries(entries);
      if (entries->size() == count) {
        break;
      }
      C_SaferCond ctx;
      object_player->watch(&ctx, 0.1);
      int r = ctx.wait();
      if (r < 0) {
        return r;
      }
    }
    return 0;
  }

  // Data object name for splay offset 0 of journal `oid`.
  std::string get_object_name(const std::string &oid) {
    return oid + ".0";
  }
};
// Parameter pack for the typed suite: fixes the ObjectPlayer's
// max_fetch_bytes. The suite instantiates 0 and 10 — presumably
// "no limit" vs. a tiny per-fetch limit; confirm against ObjectPlayer.
template <uint32_t _max_fetch_bytes>
struct TestObjectPlayerParams {
static inline const uint32_t max_fetch_bytes = _max_fetch_bytes;
};
// Instantiate the typed suite for both fetch-limit configurations.
typedef ::testing::Types<TestObjectPlayerParams<0>,
TestObjectPlayerParams<10> > TestObjectPlayerTypes;
TYPED_TEST_SUITE(TestObjectPlayer, TestObjectPlayerTypes);
// Two encoded entries written to the object are both returned, in order.
TYPED_TEST(TestObjectPlayer, Fetch) {
std::string oid = this->get_temp_oid();
journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
journal::Entry entry2(234, 124, this->create_payload(std::string(24, '1')));
bufferlist bl;
encode(entry1, bl);
encode(entry2, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
auto object = this->create_object(oid, 14);
ASSERT_LE(0, this->fetch(object));
journal::ObjectPlayer::Entries entries;
object->get_entries(&entries);
ASSERT_EQ(2U, entries.size());
journal::ObjectPlayer::Entries expected_entries = {entry1, entry2};
ASSERT_EQ(expected_entries, entries);
}
// An entry whose payload nearly fills the 4K object, followed by an empty
// entry, must both be fetched intact.
TYPED_TEST(TestObjectPlayer, FetchLarge) {
std::string oid = this->get_temp_oid();
journal::Entry entry1(234, 123,
this->create_payload(std::string(8192 - 32, '1')));
journal::Entry entry2(234, 124, this->create_payload(""));
bufferlist bl;
encode(entry1, bl);
encode(entry2, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
auto object = this->create_object(oid, 12);
ASSERT_LE(0, this->fetch(object));
journal::ObjectPlayer::Entries entries;
object->get_entries(&entries);
ASSERT_EQ(2U, entries.size());
journal::ObjectPlayer::Entries expected_entries = {entry1, entry2};
ASSERT_EQ(expected_entries, entries);
}
// Two entries with the same tag/entry tid are de-duplicated: only the
// later one survives the fetch.
TYPED_TEST(TestObjectPlayer, FetchDeDup) {
std::string oid = this->get_temp_oid();
journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
journal::Entry entry2(234, 123, this->create_payload(std::string(24, '2')));
bufferlist bl;
encode(entry1, bl);
encode(entry2, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
auto object = this->create_object(oid, 14);
ASSERT_LE(0, this->fetch(object));
journal::ObjectPlayer::Entries entries;
object->get_entries(&entries);
ASSERT_EQ(1U, entries.size());
journal::ObjectPlayer::Entries expected_entries = {entry2};
ASSERT_EQ(expected_entries, entries);
}
// Fetching an empty object succeeds and yields no entries.
TYPED_TEST(TestObjectPlayer, FetchEmpty) {
std::string oid = this->get_temp_oid();
bufferlist bl;
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
auto object = this->create_object(oid, 14);
ASSERT_EQ(0, this->fetch(object));
ASSERT_TRUE(object->empty());
}
// Corruption between two valid entries makes fetch fail with -EBADMSG;
// only the entries preceding the corruption are retained.
TYPED_TEST(TestObjectPlayer, FetchCorrupt) {
std::string oid = this->get_temp_oid();
journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
journal::Entry entry2(234, 124, this->create_payload(std::string(24, '2')));
bufferlist bl;
encode(entry1, bl);
// Inject garbage between the two valid entries.
encode(this->create_payload("corruption" + std::string(1024, 'X')), bl);
encode(entry2, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
auto object = this->create_object(oid, 14);
ASSERT_EQ(-EBADMSG, this->fetch(object));
journal::ObjectPlayer::Entries entries;
object->get_entries(&entries);
ASSERT_EQ(1U, entries.size());
journal::ObjectPlayer::Entries expected_entries = {entry1};
ASSERT_EQ(expected_entries, entries);
}
// A second fetch after more data is appended picks up the new entry while
// keeping the previously fetched one.
TYPED_TEST(TestObjectPlayer, FetchAppend) {
std::string oid = this->get_temp_oid();
journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
journal::Entry entry2(234, 124, this->create_payload(std::string(24, '2')));
bufferlist bl;
encode(entry1, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
auto object = this->create_object(oid, 14);
ASSERT_LE(0, this->fetch(object));
journal::ObjectPlayer::Entries entries;
object->get_entries(&entries);
ASSERT_EQ(1U, entries.size());
journal::ObjectPlayer::Entries expected_entries = {entry1};
ASSERT_EQ(expected_entries, entries);
bl.clear();
encode(entry2, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
ASSERT_LE(0, this->fetch(object));
object->get_entries(&entries);
ASSERT_EQ(2U, entries.size());
expected_entries = {entry1, entry2};
ASSERT_EQ(expected_entries, entries);
}
// front()/pop_front() walk the fetched entries in order and leave the
// player empty afterwards.
TYPED_TEST(TestObjectPlayer, PopEntry) {
std::string oid = this->get_temp_oid();
journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
journal::Entry entry2(234, 124, this->create_payload(std::string(24, '1')));
bufferlist bl;
encode(entry1, bl);
encode(entry2, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
auto object = this->create_object(oid, 14);
ASSERT_LE(0, this->fetch(object));
journal::ObjectPlayer::Entries entries;
object->get_entries(&entries);
ASSERT_EQ(2U, entries.size());
journal::Entry entry;
object->front(&entry);
object->pop_front();
ASSERT_EQ(entry1, entry);
object->front(&entry);
object->pop_front();
ASSERT_EQ(entry2, entry);
ASSERT_TRUE(object->empty());
}
// A watch fires when data is appended to the object; each new append is
// observed as an additional entry.
TYPED_TEST(TestObjectPlayer, Watch) {
std::string oid = this->get_temp_oid();
auto object = this->create_object(oid, 14);
C_SaferCond cond1;
object->watch(&cond1, 0.1);
journal::Entry entry1(234, 123, this->create_payload(std::string(24, '1')));
journal::Entry entry2(234, 124, this->create_payload(std::string(24, '1')));
bufferlist bl;
encode(entry1, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
ASSERT_LE(0, cond1.wait());
journal::ObjectPlayer::Entries entries;
ASSERT_EQ(0, this->watch_and_wait_for_entries(object, &entries, 1U));
ASSERT_EQ(1U, entries.size());
journal::ObjectPlayer::Entries expected_entries;
expected_entries = {entry1};
ASSERT_EQ(expected_entries, entries);
C_SaferCond cond2;
object->watch(&cond2, 0.1);
bl.clear();
encode(entry2, bl);
ASSERT_EQ(0, this->append(this->get_object_name(oid), bl));
ASSERT_LE(0, cond2.wait());
ASSERT_EQ(0, this->watch_and_wait_for_entries(object, &entries, 2U));
ASSERT_EQ(2U, entries.size());
expected_entries = {entry1, entry2};
ASSERT_EQ(expected_entries, entries);
}
// Verify that unwatch() cancels a pending watch: the watch context is
// completed with -ECANCELED instead of firing normally.
TYPED_TEST(TestObjectPlayer, Unwatch) {
  std::string oid = this->get_temp_oid();
  auto object = this->create_object(oid, 14);
  C_SaferCond watch_ctx;
  object->watch(&watch_ctx, 600);
  // Give the watch a moment to be registered before cancelling it.
  usleep(200000);
  object->unwatch();
  ASSERT_EQ(-ECANCELED, watch_ctx.wait());
}
| 8,088 | 27.684397 | 89 |
cc
|
null |
ceph-main/src/test/journal/test_ObjectRecorder.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/ObjectRecorder.h"
#include "common/Cond.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "gtest/gtest.h"
#include "test/librados/test.h"
#include "test/journal/RadosTestFixture.h"
#include <limits>
using namespace std::chrono_literals;
using std::shared_ptr;
// Fixture for ObjectRecorder tests.  Provides:
//  - Handler: records closed()/overflow() callbacks from the recorder and
//    signals waiters via a condition variable;
//  - ObjectRecorderFlusher: owns the recorders created during a test and,
//    in its destructor, flushes pending appends and closes each recorder.
class TestObjectRecorder : public RadosTestFixture {
public:
  TestObjectRecorder() = default;
  struct Handler : public journal::ObjectRecorder::Handler {
    ceph::mutex lock = ceph::make_mutex("lock");
    // Lock guarding the recorder that is currently being handled; set by
    // the flusher when a recorder is created.
    ceph::mutex* object_lock = nullptr;
    ceph::condition_variable cond;
    bool is_closed = false;
    uint32_t overflows = 0;
    Handler() = default;
    void closed(journal::ObjectRecorder *object_recorder) override {
      std::lock_guard locker{lock};
      is_closed = true;
      cond.notify_all();
    }
    void overflow(journal::ObjectRecorder *object_recorder) override {
      std::lock_guard locker{lock};
      journal::AppendBuffers append_buffers;
      // claim_append_buffers() requires the recorder's own lock held.
      object_lock->lock();
      object_recorder->claim_append_buffers(&append_buffers);
      object_lock->unlock();
      ++overflows;
      cond.notify_all();
    }
  };
  // flush the pending buffers in dtor
  class ObjectRecorderFlusher {
  public:
    ObjectRecorderFlusher(librados::IoCtx& ioctx,
                          ContextWQ* work_queue)
      : m_ioctx{ioctx},
        m_work_queue{work_queue}
    {}
    ObjectRecorderFlusher(librados::IoCtx& ioctx,
                          ContextWQ* work_queue,
                          uint32_t flush_interval,
                          uint16_t flush_bytes,
                          double flush_age,
                          int max_in_flight)
      : m_ioctx{ioctx},
        m_work_queue{work_queue},
        m_flush_interval{flush_interval},
        m_flush_bytes{flush_bytes},
        m_flush_age{flush_age},
        // A negative max_in_flight means "unlimited".
        m_max_in_flight_appends{max_in_flight < 0 ?
            std::numeric_limits<uint64_t>::max() :
            static_cast<uint64_t>(max_in_flight)}
    {}
    ~ObjectRecorderFlusher() {
      // Drain and close every recorder so no async callbacks outlive the test.
      for (auto& [object_recorder, m] : m_object_recorders) {
        C_SaferCond cond;
        object_recorder->flush(&cond);
        cond.wait();
        std::scoped_lock l{*m};
        if (!object_recorder->is_closed()) {
          object_recorder->close();
        }
      }
    }
    auto create_object(std::string_view oid, uint8_t order, ceph::mutex* lock) {
      auto object = ceph::make_ref<journal::ObjectRecorder>(
        m_ioctx, oid, 0, lock, m_work_queue, &m_handler,
        order, m_max_in_flight_appends);
      {
        std::lock_guard locker{*lock};
        object->set_append_batch_options(m_flush_interval,
                                         m_flush_bytes,
                                         m_flush_age);
      }
      m_object_recorders.emplace_back(object, lock);
      m_handler.object_lock = lock;
      return object;
    }
    bool wait_for_closed() {
      std::unique_lock locker{m_handler.lock};
      return m_handler.cond.wait_for(locker, 10s,
                                     [this] { return m_handler.is_closed; });
    }
    // Returns true if an overflow callback arrived within 10s; resets the
    // counter so the next call waits for a fresh overflow.
    bool wait_for_overflow() {
      std::unique_lock locker{m_handler.lock};
      if (m_handler.cond.wait_for(locker, 10s,
                                  [this] { return m_handler.overflows > 0; })) {
        m_handler.overflows = 0;
        return true;
      } else {
        return false;
      }
    }
  private:
    librados::IoCtx& m_ioctx;
    ContextWQ *m_work_queue;
    // Defaults effectively disable count/byte-based batching; age default is
    // long enough to never trigger during a test.
    uint32_t m_flush_interval = std::numeric_limits<uint32_t>::max();
    uint64_t m_flush_bytes = std::numeric_limits<uint64_t>::max();
    double m_flush_age = 600;
    uint64_t m_max_in_flight_appends = 0;
    using ObjectRecorders =
      std::list<std::pair<ceph::ref_t<journal::ObjectRecorder>, ceph::mutex*>>;
    ObjectRecorders m_object_recorders;
    Handler m_handler;
  };
  // Build an (unattached) future + payload pair ready to hand to append().
  journal::AppendBuffer create_append_buffer(uint64_t tag_tid,
                                             uint64_t entry_tid,
                                             const std::string &payload) {
    auto future = ceph::make_ref<journal::FutureImpl>(tag_tid, entry_tid, 456);
    future->init(ceph::ref_t<journal::FutureImpl>());
    bufferlist bl;
    bl.append(payload);
    return std::make_pair(future, bl);
  }
};
// With all batching thresholds set to zero, each append is flushed
// immediately, so no appends are ever left pending.
TEST_F(TestObjectRecorder, Append) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 0, 0, 0);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  // append() requires the recorder lock held by the caller.
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(0U, object->get_pending_appends());
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              "payload");
  append_buffers = {append_buffer2};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(0U, object->get_pending_appends());
  C_SaferCond cond;
  append_buffer2.first->flush(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(0U, object->get_pending_appends());
}
// With flush_interval=2, the first append stays pending and the second
// append reaches the count threshold and triggers a flush.
TEST_F(TestObjectRecorder, AppendFlushByCount) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 2, 0, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(1U, object->get_pending_appends());
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              "payload");
  append_buffers = {append_buffer2};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(0U, object->get_pending_appends());
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
}
// With flush_bytes=10, a single 7-byte payload stays pending and the second
// append crosses the byte threshold, triggering a flush.
TEST_F(TestObjectRecorder, AppendFlushByBytes) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 10, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(1U, object->get_pending_appends());
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              "payload");
  append_buffers = {append_buffer2};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(0U, object->get_pending_appends());
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
}
// With a very small flush_age, keep appending until the age-based timer
// flushes the first buffer, then wait for the last append to complete.
TEST_F(TestObjectRecorder, AppendFlushByAge) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 0, 0.0005, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  // Poll until the age-based flush has picked up the first buffer; each
  // iteration appends another entry with a distinct entry tid.
  uint32_t offset = 0;
  journal::AppendBuffer append_buffer2;
  while (!append_buffer1.first->is_flush_in_progress() &&
         !append_buffer1.first->is_complete()) {
    usleep(1000);
    append_buffer2 = create_append_buffer(234, 124 + offset, "payload");
    ++offset;
    append_buffers = {append_buffer2};
    lock.lock();
    ASSERT_FALSE(object->append(std::move(append_buffers)));
    lock.unlock();
  }
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(0U, object->get_pending_appends());
}
// Appending past the object's capacity (order 12 => 4096 bytes) makes
// append() return true, signalling the object is full.
TEST_F(TestObjectRecorder, AppendFilledObject) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 0, 0.0, -1);
  auto object = flusher.create_object(oid, 12, &lock);
  std::string payload(2048, '1');
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              payload);
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              payload);
  append_buffers = {append_buffer2};
  lock.lock();
  // Second 2K payload (plus entry overhead) exceeds the 4K object.
  ASSERT_TRUE(object->append(std::move(append_buffers)));
  lock.unlock();
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(0U, object->get_pending_appends());
}
// An explicit flush() drains a pending append that would otherwise wait for
// the byte threshold to be reached.
TEST_F(TestObjectRecorder, Flush) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 10, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(1U, object->get_pending_appends());
  C_SaferCond cond1;
  object->flush(&cond1);
  ASSERT_EQ(0, cond1.wait());
  C_SaferCond cond2;
  append_buffer1.first->wait(&cond2);
  ASSERT_EQ(0, cond2.wait());
  ASSERT_EQ(0U, object->get_pending_appends());
}
// Flushing a specific future forces its pending append to be written even
// though the batching threshold has not been reached.
TEST_F(TestObjectRecorder, FlushFuture) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 0, 10, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer = create_append_buffer(234, 123,
                                                             "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(1U, object->get_pending_appends());
  C_SaferCond cond;
  append_buffer.first->wait(&cond);
  object->flush(append_buffer.first);
  ASSERT_TRUE(append_buffer.first->is_flush_in_progress() ||
              append_buffer.first->is_complete());
  ASSERT_EQ(0, cond.wait());
}
// Flushing a future before it is attached to the recorder is a no-op; the
// flush request takes effect once the buffer is appended.
TEST_F(TestObjectRecorder, FlushDetachedFuture) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer = create_append_buffer(234, 123,
                                                             "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer};
  object->flush(append_buffer.first);
  ASSERT_FALSE(append_buffer.first->is_flush_in_progress());
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  // should automatically flush once its attached to the object
  C_SaferCond cond;
  append_buffer.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
}
// close() with a pending append returns false (close is asynchronous); the
// handler's closed() callback fires once the append has drained.
TEST_F(TestObjectRecorder, Close) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock = ceph::make_mutex("object_recorder_lock");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue, 2, 0, 0, -1);
  auto object = flusher.create_object(oid, 24, &lock);
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              "payload");
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1};
  lock.lock();
  ASSERT_FALSE(object->append(std::move(append_buffers)));
  lock.unlock();
  ASSERT_EQ(1U, object->get_pending_appends());
  lock.lock();
  ASSERT_FALSE(object->close());
  // close() must not release the caller's lock.
  ASSERT_TRUE(ceph_mutex_is_locked(lock));
  lock.unlock();
  ASSERT_TRUE(flusher.wait_for_closed());
  ASSERT_EQ(0U, object->get_pending_appends());
}
// Fill one object to capacity (append() returns true), then show that a
// flush on a second recorder for the same oid raises the overflow callback.
TEST_F(TestObjectRecorder, Overflow) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));
  ceph::mutex lock1 = ceph::make_mutex("object_recorder_lock_1");
  ceph::mutex lock2 = ceph::make_mutex("object_recorder_lock_2");
  ObjectRecorderFlusher flusher(m_ioctx, m_work_queue);
  auto object1 = flusher.create_object(oid, 12, &lock1);
  // Two 2K payloads completely fill the 4K (order 12) object.
  std::string payload(1 << 11, '1');
  journal::AppendBuffer append_buffer1 = create_append_buffer(234, 123,
                                                              payload);
  journal::AppendBuffer append_buffer2 = create_append_buffer(234, 124,
                                                              payload);
  journal::AppendBuffers append_buffers;
  append_buffers = {append_buffer1, append_buffer2};
  lock1.lock();
  ASSERT_TRUE(object1->append(std::move(append_buffers)));
  lock1.unlock();
  C_SaferCond cond;
  append_buffer2.first->wait(&cond);
  ASSERT_EQ(0, cond.wait());
  ASSERT_EQ(0U, object1->get_pending_appends());
  auto object2 = flusher.create_object(oid, 12, &lock2);
  journal::AppendBuffer append_buffer3 = create_append_buffer(456, 123,
                                                              payload);
  append_buffers = {append_buffer3};
  lock2.lock();
  ASSERT_FALSE(object2->append(std::move(append_buffers)));
  lock2.unlock();
  append_buffer3.first->flush(NULL);
  ASSERT_TRUE(flusher.wait_for_overflow());
}
| 15,613 | 32.578495 | 80 |
cc
|
null |
ceph-main/src/test/journal/test_main.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "gtest/gtest.h"
#include "common/ceph_argparse.h"
#include "common/ceph_crypto.h"
#include "common/config_proxy.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include <vector>
int main(int argc, char **argv)
{
  // Let gtest consume its own flags before ceph argument parsing runs.
  ::testing::InitGoogleTest(&argc, argv);
  auto args = argv_to_vec(argc, argv);
  // Keep the CephContext alive for the whole test run; lockdep is enabled
  // so lock-ordering violations in the journal code fail the tests.
  auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_OSD,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_MON_CONFIG);
  g_conf().set_val("lockdep", "true");
  common_init_finish(g_ceph_context);
  return RUN_ALL_TESTS();
}
| 668 | 23.777778 | 70 |
cc
|
null |
ceph-main/src/test/journal/mock/MockJournaler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "MockJournaler.h"
namespace journal {
// Storage for the per-test singleton pointers used by the mock proxies;
// each mock registers itself in its constructor.
MockFuture *MockFuture::s_instance = nullptr;
MockReplayEntry *MockReplayEntry::s_instance = nullptr;
MockJournaler *MockJournaler::s_instance = nullptr;
// The proxy carries no printable state, so stream nothing.
std::ostream &operator<<(std::ostream &os, const MockJournalerProxy &) {
  return os;
}
} // namespace journal
| 415 | 23.470588 | 72 |
cc
|
null |
ceph-main/src/test/journal/mock/MockJournaler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef TEST_RBD_MIRROR_MOCK_JOURNALER_H
#define TEST_RBD_MIRROR_MOCK_JOURNALER_H
#include <gmock/gmock.h>
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "cls/journal/cls_journal_types.h"
#include "journal/Journaler.h"
#include <iosfwd>
#include <string>
class Context;
namespace journal {
struct ReplayHandler;
struct Settings;
// Singleton gmock stand-in for journal::Future; MockFutureProxy forwards
// every call to the most recently constructed instance.
struct MockFuture {
  static MockFuture *s_instance;
  static MockFuture &get_instance() {
    ceph_assert(s_instance != nullptr);
    return *s_instance;
  }
  MockFuture() {
    // Register this instance as the active singleton for the test.
    s_instance = this;
  }
  MOCK_CONST_METHOD0(is_valid, bool());
  MOCK_METHOD1(flush, void(Context *));
  MOCK_METHOD1(wait, void(Context *));
};
// Copyable value type handed to the code under test; delegates all calls to
// the MockFuture singleton so expectations can be set in one place.
struct MockFutureProxy {
  bool is_valid() const {
    return MockFuture::get_instance().is_valid();
  }
  void flush(Context *on_safe) {
    MockFuture::get_instance().flush(on_safe);
  }
  void wait(Context *on_safe) {
    MockFuture::get_instance().wait(on_safe);
  }
};
// Singleton gmock stand-in for journal::ReplayEntry.
struct MockReplayEntry {
  static MockReplayEntry *s_instance;
  static MockReplayEntry &get_instance() {
    ceph_assert(s_instance != nullptr);
    return *s_instance;
  }
  MockReplayEntry() {
    // Register this instance as the active singleton for the test.
    s_instance = this;
  }
  MOCK_CONST_METHOD0(get_commit_tid, uint64_t());
  MOCK_CONST_METHOD0(get_data, bufferlist());
};
// Copyable value type delegating to the MockReplayEntry singleton.
struct MockReplayEntryProxy {
  uint64_t get_commit_tid() const {
    return MockReplayEntry::get_instance().get_commit_tid();
  }
  bufferlist get_data() const {
    return MockReplayEntry::get_instance().get_data();
  }
};
// Singleton gmock mirror of journal::Journaler's interface; tests set
// expectations here and MockJournalerProxy forwards the real calls.
struct MockJournaler {
  static MockJournaler *s_instance;
  static MockJournaler &get_instance() {
    ceph_assert(s_instance != nullptr);
    return *s_instance;
  }
  MockJournaler() {
    // Register this instance as the active singleton for the test.
    s_instance = this;
  }
  MOCK_METHOD0(construct, void());
  MOCK_METHOD1(init, void(Context *));
  MOCK_METHOD0(shut_down, void());
  MOCK_METHOD1(shut_down, void(Context *));
  MOCK_CONST_METHOD0(is_initialized, bool());
  MOCK_METHOD3(get_metadata, void(uint8_t *order, uint8_t *splay_width,
                                  int64_t *pool_id));
  MOCK_METHOD4(get_mutable_metadata, void(uint64_t*, uint64_t*,
                                          std::set<cls::journal::Client> *,
                                          Context*));
  MOCK_METHOD2(register_client, void(const bufferlist &, Context *));
  MOCK_METHOD1(unregister_client, void(Context *));
  MOCK_METHOD3(get_client, void(const std::string &, cls::journal::Client *,
                                Context *));
  MOCK_METHOD2(get_cached_client, int(const std::string&, cls::journal::Client*));
  MOCK_METHOD2(update_client, void(const bufferlist &, Context *));
  MOCK_METHOD4(allocate_tag, void(uint64_t, const bufferlist &,
                                  cls::journal::Tag*, Context *));
  MOCK_METHOD3(get_tag, void(uint64_t, cls::journal::Tag *, Context *));
  MOCK_METHOD3(get_tags, void(uint64_t, journal::Journaler::Tags*, Context*));
  MOCK_METHOD4(get_tags, void(uint64_t, uint64_t, journal::Journaler::Tags*,
                              Context*));
  MOCK_METHOD1(start_replay, void(::journal::ReplayHandler *replay_handler));
  MOCK_METHOD2(start_live_replay, void(ReplayHandler *, double));
  MOCK_METHOD1(try_pop_front, bool(MockReplayEntryProxy *));
  MOCK_METHOD2(try_pop_front, bool(MockReplayEntryProxy *, uint64_t *));
  MOCK_METHOD0(stop_replay, void());
  MOCK_METHOD1(stop_replay, void(Context *on_finish));
  MOCK_METHOD1(start_append, void(uint64_t));
  MOCK_METHOD3(set_append_batch_options, void(int, uint64_t, double));
  MOCK_CONST_METHOD0(get_max_append_size, uint64_t());
  MOCK_METHOD2(append, MockFutureProxy(uint64_t tag_id,
                                       const bufferlist &bl));
  MOCK_METHOD1(flush, void(Context *on_safe));
  MOCK_METHOD1(stop_append, void(Context *on_safe));
  MOCK_METHOD1(committed, void(const MockReplayEntryProxy &));
  MOCK_METHOD1(committed, void(const MockFutureProxy &future));
  MOCK_METHOD1(flush_commit_position, void(Context*));
  MOCK_METHOD1(add_listener, void(JournalMetadataListener *));
  MOCK_METHOD1(remove_listener, void(JournalMetadataListener *));
};
// Drop-in replacement for journal::Journaler used via template substitution
// in tests; every operation is forwarded to the MockJournaler singleton.
struct MockJournalerProxy {
  MockJournalerProxy() {
    MockJournaler::get_instance().construct();
  }
  template <typename IoCtxT>
  MockJournalerProxy(IoCtxT &header_ioctx, const std::string &,
                     const std::string &, const Settings&,
                     journal::CacheManagerHandler *) {
    MockJournaler::get_instance().construct();
  }
  template <typename WorkQueue, typename Timer>
  MockJournalerProxy(WorkQueue *work_queue, Timer *timer, ceph::mutex *timer_lock,
                     librados::IoCtx &header_ioctx,
                     const std::string &journal_id,
                     const std::string &client_id, const Settings&,
                     journal::CacheManagerHandler *) {
    MockJournaler::get_instance().construct();
  }
  // The following operations are unsupported by the mock and always report
  // failure (-EINVAL) to the supplied completion.
  void exists(Context *on_finish) const {
    on_finish->complete(-EINVAL);
  }
  void create(uint8_t order, uint8_t splay_width, int64_t pool_id, Context *on_finish) {
    on_finish->complete(-EINVAL);
  }
  void remove(bool force, Context *on_finish) {
    on_finish->complete(-EINVAL);
  }
  int register_client(const bufferlist &data) {
    return -EINVAL;
  }
  // Everything below simply forwards to the singleton mock.
  void allocate_tag(uint64_t tag_class, const bufferlist &tag_data,
                    cls::journal::Tag* tag, Context *on_finish) {
    MockJournaler::get_instance().allocate_tag(tag_class, tag_data, tag,
                                               on_finish);
  }
  void init(Context *on_finish) {
    MockJournaler::get_instance().init(on_finish);
  }
  void shut_down() {
    MockJournaler::get_instance().shut_down();
  }
  void shut_down(Context *on_finish) {
    MockJournaler::get_instance().shut_down(on_finish);
  }
  bool is_initialized() const {
    return MockJournaler::get_instance().is_initialized();
  }
  void get_metadata(uint8_t *order, uint8_t *splay_width, int64_t *pool_id) {
    MockJournaler::get_instance().get_metadata(order, splay_width, pool_id);
  }
  void get_mutable_metadata(uint64_t *min, uint64_t *active,
                            std::set<cls::journal::Client> *clients,
                            Context *on_finish) {
    MockJournaler::get_instance().get_mutable_metadata(min, active, clients,
                                                       on_finish);
  }
  void register_client(const bufferlist &data, Context *on_finish) {
    MockJournaler::get_instance().register_client(data, on_finish);
  }
  void unregister_client(Context *on_finish) {
    MockJournaler::get_instance().unregister_client(on_finish);
  }
  void get_client(const std::string &client_id, cls::journal::Client *client,
                  Context *on_finish) {
    MockJournaler::get_instance().get_client(client_id, client, on_finish);
  }
  int get_cached_client(const std::string& client_id,
                        cls::journal::Client* client) {
    return MockJournaler::get_instance().get_cached_client(client_id, client);
  }
  void update_client(const bufferlist &client_data, Context *on_finish) {
    MockJournaler::get_instance().update_client(client_data, on_finish);
  }
  void get_tag(uint64_t tag_tid, cls::journal::Tag *tag, Context *on_finish) {
    MockJournaler::get_instance().get_tag(tag_tid, tag, on_finish);
  }
  void get_tags(uint64_t tag_class, journal::Journaler::Tags *tags,
                Context *on_finish) {
    MockJournaler::get_instance().get_tags(tag_class, tags, on_finish);
  }
  void get_tags(uint64_t start_after_tag_tid, uint64_t tag_class,
                journal::Journaler::Tags *tags, Context *on_finish) {
    MockJournaler::get_instance().get_tags(start_after_tag_tid, tag_class, tags,
                                           on_finish);
  }
  void start_replay(::journal::ReplayHandler *replay_handler) {
    MockJournaler::get_instance().start_replay(replay_handler);
  }
  void start_live_replay(ReplayHandler *handler, double interval) {
    MockJournaler::get_instance().start_live_replay(handler, interval);
  }
  bool try_pop_front(MockReplayEntryProxy *replay_entry) {
    return MockJournaler::get_instance().try_pop_front(replay_entry);
  }
  bool try_pop_front(MockReplayEntryProxy *entry, uint64_t *tag_tid) {
    return MockJournaler::get_instance().try_pop_front(entry, tag_tid);
  }
  void stop_replay() {
    MockJournaler::get_instance().stop_replay();
  }
  void stop_replay(Context *on_finish) {
    MockJournaler::get_instance().stop_replay(on_finish);
  }
  void start_append(uint64_t max_in_flight_appends) {
    MockJournaler::get_instance().start_append(max_in_flight_appends);
  }
  void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
                                double flush_age) {
    MockJournaler::get_instance().set_append_batch_options(
      flush_interval, flush_bytes, flush_age);
  }
  uint64_t get_max_append_size() const {
    return MockJournaler::get_instance().get_max_append_size();
  }
  MockFutureProxy append(uint64_t tag_id, const bufferlist &bl) {
    return MockJournaler::get_instance().append(tag_id, bl);
  }
  void flush(Context *on_safe) {
    MockJournaler::get_instance().flush(on_safe);
  }
  void stop_append(Context *on_safe) {
    MockJournaler::get_instance().stop_append(on_safe);
  }
  void committed(const MockReplayEntryProxy &entry) {
    MockJournaler::get_instance().committed(entry);
  }
  void committed(const MockFutureProxy &future) {
    MockJournaler::get_instance().committed(future);
  }
  void flush_commit_position(Context *on_finish) {
    MockJournaler::get_instance().flush_commit_position(on_finish);
  }
  void add_listener(JournalMetadataListener *listener) {
    MockJournaler::get_instance().add_listener(listener);
  }
  void remove_listener(JournalMetadataListener *listener) {
    MockJournaler::get_instance().remove_listener(listener);
  }
};
std::ostream &operator<<(std::ostream &os, const MockJournalerProxy &);
} // namespace journal
#endif // TEST_RBD_MIRROR_MOCK_JOURNALER_H
| 10,179 | 31.420382 | 88 |
h
|
null |
ceph-main/src/test/lazy-omap-stats/lazy_omap_stats_test.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <algorithm>
#include <boost/algorithm/string/trim.hpp>
#include <boost/tokenizer.hpp>
#include <boost/uuid/uuid.hpp> // uuid class
#include <boost/uuid/uuid_generators.hpp> // generators
#include <boost/uuid/uuid_io.hpp> // streaming operators etc.
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>
#include "common/ceph_json.h"
#include "global/global_init.h"
#include "include/compat.h"
#include "lazy_omap_stats_test.h"
using namespace std;
// Connect to the running cluster, create the test pool and an ioctx for it,
// and record the pool's id.  Any failure is fatal: the (positive) error code
// is reported and the process exits with it.
void LazyOmapStatsTest::init(const int argc, const char** argv)
{
  int ret = rados.init("admin");
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to initialise rados! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }
  ret = rados.conf_parse_argv(argc, argv);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to parse command line config options! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }
  // Bug fix: the return values of conf_parse_env() and conf_read_file() were
  // previously discarded, so the error checks below tested a stale 'ret'.
  ret = rados.conf_parse_env(NULL);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to parse environment! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }
  ret = rados.conf_read_file(NULL);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to read config file! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }
  ret = rados.connect();
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to connect to running cluster! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }
  // Create a replicated pool for the test objects via the mon.
  string command = R"(
{
  "prefix": "osd pool create",
  "pool": ")" + conf.pool_name +
                   R"(",
  "pool_type": "replicated",
  "size": )" + to_string(conf.replica_count) +
                   R"(
})";
  librados::bufferlist inbl;
  string output;
  ret = rados.mon_command(command, inbl, nullptr, &output);
  if (output.length()) cout << output << endl;
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to create pool! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }
  ret = rados.ioctx_create(conf.pool_name.c_str(), io_ctx);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to create ioctx! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }
  get_pool_id(conf.pool_name);
}
// Remove the test pool and tear down the cluster connection.
void LazyOmapStatsTest::shutdown()
{
  rados.pool_delete(conf.pool_name.c_str());
  rados.shutdown();
}
// Create an (empty) object and attach the precomputed omap payload to it.
// Fatal on failure: prints the error and exits with the positive errno.
void LazyOmapStatsTest::write_omap(const string& object_name)
{
  librados::bufferlist bl;
  // write_full() with an empty bufferlist just creates the object.
  int ret = io_ctx.write_full(object_name, bl);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to create object! Error: " << ret << " " << strerror(ret)
         << endl;
    exit(ret);
  }
  ret = io_ctx.omap_set(object_name, payload);
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to write omap payload! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }
  cout << "Wrote " << conf.keys << " omap keys of " << conf.payload_size
       << " bytes to "
       << "the " << object_name << " object" << endl;
}
// Produce a unique, collision-free name (a random UUID) for a test object
// or omap key.
const string LazyOmapStatsTest::get_name() const
{
  return boost::uuids::to_string(boost::uuids::random_generator()());
}
// Create the requested number of uniquely named objects, each carrying the
// standard omap payload.
void LazyOmapStatsTest::write_many(uint how_many)
{
  for (uint remaining = how_many; remaining > 0; --remaining) {
    write_omap(get_name());
  }
}
// Build the per-object omap payload: conf.keys entries, each mapping a
// randomly named key to the same Lorem-ipsum value buffer, and record the
// derived totals in conf.
void LazyOmapStatsTest::create_payload()
{
  librados::bufferlist Lorem;
  Lorem.append(
      "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
      "eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut "
      "enim ad minim veniam, quis nostrud exercitation ullamco laboris "
      "nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in "
      "reprehenderit in voluptate velit esse cillum dolore eu fugiat "
      "nulla pariatur. Excepteur sint occaecat cupidatat non proident, "
      "sunt in culpa qui officia deserunt mollit anim id est laborum.");
  conf.payload_size = Lorem.length();
  conf.total_bytes = conf.keys * conf.payload_size * conf.how_many;
  conf.total_keys = conf.keys * conf.how_many;
  for (uint key_idx = 1; key_idx < conf.keys + 1; ++key_idx) {
    payload[get_name()] = Lorem;
  }
  cout << "Created payload with " << conf.keys << " keys of "
       << conf.payload_size
       << " bytes each. Total size in bytes = " << conf.keys * conf.payload_size
       << endl;
}
// Trigger a cluster-wide deep scrub and block until every PG's
// last_deep_scrub_stamp has advanced past its pre-scrub value.
void LazyOmapStatsTest::scrub()
{
  cout << "Scrubbing" << endl;
  cout << "Before scrub stamps:" << endl;
  // PG ids are "<pool_id>.<shard>"; use the "<pool_id>." prefix to confirm
  // the target pool's PGs are present in the dump.
  string target_pool(conf.pool_id);
  target_pool.append(".");
  bool target_pool_found = false;
  map<string, string> before_scrub = get_scrub_stamps();
  for (auto [pg, stamp] : before_scrub) {
    cout << "pg = " << pg << " stamp = " << stamp << endl;
    if (pg.rfind(target_pool, 0) == 0) {
      target_pool_found = true;
    }
  }
  if (!target_pool_found) {
    cout << "Error: Target pool " << conf.pool_name << ":" << conf.pool_id
         << " not found!" << endl;
    exit(2); // ENOENT
  }
  cout << endl;
  // Short sleep to make sure the new pool is visible
  sleep(5);
  string command = R"({"prefix": "osd deep-scrub", "who": "all"})";
  auto output = get_output(command);
  cout << output << endl;
  cout << "Waiting for deep-scrub to complete..." << endl;
  // Poll once a second until every PG's stamp differs from its baseline.
  while (sleep(1) == 0) {
    cout << "Current scrub stamps:" << endl;
    bool complete = true;
    map<string, string> current_stamps = get_scrub_stamps();
    for (auto [pg, stamp] : current_stamps) {
      cout << "pg = " << pg << " stamp = " << stamp << endl;
      if (stamp == before_scrub[pg]) {
        // See if stamp for each pg has changed
        // If not, we haven't completed the deep-scrub
        complete = false;
      }
    }
    cout << endl;
    if (complete) {
      break;
    }
  }
  cout << "Scrubbing complete" << endl;
}
// Count the regex matches in the given output, echoing each match's first
// capture group as it is found.  Returns the number of matches.
const int LazyOmapStatsTest::find_matches(string& output, regex& reg) const
{
  int hits = 0;
  for (sregex_iterator match(output.begin(), output.end(), reg), last;
       match != last; ++match) {
    cout << (*match)[1].str() << endl;
    ++hits;
  }
  return hits;
}
// Run a mon or mgr admin command and return its raw output buffer.  The
// command's human-readable status string is echoed unless 'silent' is set.
// Fatal on failure: prints the error and exits with the positive errno.
const string LazyOmapStatsTest::get_output(const string command,
                                           const bool silent,
                                           const CommandTarget target)
{
  librados::bufferlist inbl, outbl;
  string output;
  int ret = 0;
  if (target == CommandTarget::TARGET_MON) {
    ret = rados.mon_command(command, inbl, &outbl, &output);
  } else {
    ret = rados.mgr_command(command, inbl, &outbl, &output);
  }
  if (output.length() && !silent) {
    cout << output << endl;
  }
  if (ret < 0) {
    ret = -ret;
    cerr << "Failed to get " << command << "! Error: " << ret << " "
         << strerror(ret) << endl;
    exit(ret);
  }
  return string(outbl.c_str(), outbl.length());
}
// Look up the numeric id of the test pool via "osd pool ls detail" and store
// it in conf.pool_id.  Exits with ENOENT if the pool cannot be found.
void LazyOmapStatsTest::get_pool_id(const string& pool)
{
  cout << R"(Querying pool id)" << endl;
  string command = R"({"prefix": "osd pool ls", "detail": "detail", "format": "json"})";
  auto output = get_output(command, false, CommandTarget::TARGET_MON);
  JSONParser parser;
  parser.parse(output.c_str(), output.size());
  // Renamed from 'pool' to avoid shadowing the parameter.
  for (const auto& pool_json : parser.get_array_elements()) {
    JSONParser parser2;
    parser2.parse(pool_json.c_str(), static_cast<int>(pool_json.size()));
    auto* obj = parser2.find_obj("pool_name");
    if (obj == nullptr) {
      // Malformed or unexpected entry; skip instead of dereferencing null.
      continue;
    }
    if (obj->get_data().compare(conf.pool_name) == 0) {
      obj = parser2.find_obj("pool_id");
      if (obj != nullptr) {
        conf.pool_id = obj->get_data();
        break; // found our pool; no need to scan the rest
      }
    }
  }
  if (conf.pool_id.empty()) {
    cout << "Failed to find pool ID for pool " << conf.pool_name << "!" << endl;
    exit(2); // ENOENT
  } else {
    cout << "Found pool ID: " << conf.pool_id << endl;
  }
}
// Collect the last deep-scrub timestamp of every PG from "pg dump".
// Returns a map of pgid -> last_deep_scrub_stamp.
map<string, string> LazyOmapStatsTest::get_scrub_stamps() {
  map<string, string> result;
  const string cmd = R"({"prefix": "pg dump", "format": "json"})";
  const auto dump = get_output(cmd);
  JSONParser parser;
  parser.parse(dump.c_str(), dump.size());
  auto* stats = parser.find_obj("pg_map")->find_obj("pg_stats");
  for (auto it = stats->find_first(); !it.end(); ++it) {
    result.emplace((*it)->find_obj("pgid")->get_data(),
                   (*it)->find_obj("last_deep_scrub_stamp")->get_data());
  }
  return result;
}
// After writing omap data to a single object, verify "pg dump" reports our
// exact key count and byte count. Expects 6 matching table rows in total —
// presumably one per replica for each of the two columns (replica_count
// defaults to 3; TODO confirm against cluster config). Exits with EINVAL
// on any other count.
void LazyOmapStatsTest::check_one()
{
  string full_output = get_output();
  cout << full_output << endl;
  regex reg(
      "\n"
      R"((PG_STAT[\s\S]*)"
      "\n)OSD_STAT"); // Strip OSD_STAT table so we don't find matches there
  smatch match;
  regex_search(full_output, match, reg);
  auto truncated_output = match[1].str();
  cout << truncated_output << endl;
  // Rows whose OMAP_KEYS column equals the number of keys we wrote
  reg = regex(
      "\n"
      R"(([0-9,s].*\s)" +
      to_string(conf.keys) +
      R"(\s.*))"
      "\n");
  cout << "Checking number of keys " << conf.keys << endl;
  cout << "Found the following lines" << endl;
  cout << "*************************" << endl;
  uint result = find_matches(truncated_output, reg);
  cout << "**********************" << endl;
  cout << "Found " << result << " matching line(s)" << endl;
  uint total = result;
  // Rows whose OMAP_BYTES column equals the total payload we wrote
  reg = regex(
      "\n"
      R"(([0-9,s].*\s)" +
      to_string(conf.payload_size * conf.keys) +
      R"(\s.*))"
      "\n");
  cout << "Checking number of bytes "
       << conf.payload_size * conf.keys << endl;
  cout << "Found the following lines" << endl;
  cout << "*************************" << endl;
  result = find_matches(truncated_output, reg);
  cout << "**********************" << endl;
  cout << "Found " << result << " matching line(s)" << endl;
  total += result;
  if (total != 6) {
    cout << "Error: Found " << total << " matches, expected 6! Exiting..."
         << endl;
    exit(22); // EINVAL
  }
  cout << "check_one successful. Found " << total << " matches as expected"
       << endl;
}
// Return the zero-based column position of 'label' within the
// whitespace-separated header line captured (group 1) by 'needle' in
// 'haystack'. Exits with ENOENT when the label is absent.
const int LazyOmapStatsTest::find_index(string& haystack, regex& needle,
                                        string label) const
{
  smatch header_match;
  regex_search(haystack, header_match, needle);
  string header = header_match[1].str();
  boost::algorithm::trim(header);
  boost::char_separator<char> delim{" "};
  boost::tokenizer<boost::char_separator<char>> fields(header, delim);
  vector<string> columns(fields.begin(), fields.end());
  for (size_t i = 0; i < columns.size(); ++i) {
    if (columns[i] == label) {
      return i;
    }
  }
  cerr << "find_index failed to find index for " << label << endl;
  exit(2); // ENOENT
  return -1; // Unreachable
}
const uint LazyOmapStatsTest::tally_column(const uint omap_bytes_index,
const string& table,
bool header) const
{
istringstream buffer(table);
string line;
uint64_t total = 0;
while (std::getline(buffer, line)) {
if (header) {
header = false;
continue;
}
boost::char_separator<char> sep{" "};
boost::tokenizer<boost::char_separator<char>> tok(line, sep);
vector<string> tokens(tok.begin(), tok.end());
total += stoi(tokens.at(omap_bytes_index));
}
return total;
}
void LazyOmapStatsTest::check_column(const int index, const string& table,
const string& type, bool header) const
{
uint expected;
string errormsg;
if (type.compare("bytes") == 0) {
expected = conf.total_bytes;
errormsg = "Error. Got unexpected byte count!";
} else {
expected = conf.total_keys;
errormsg = "Error. Got unexpected key count!";
}
uint sum = tally_column(index, table, header);
cout << "Got: " << sum << " Expected: " << expected << endl;
if (sum != expected) {
cout << errormsg << endl;
exit(22); // EINVAL
}
}
// Locate the OMAP_BYTES* and OMAP_KEYS* columns in the header line
// matched by 'reg' and return both positions.
index_t LazyOmapStatsTest::get_indexes(regex& reg, string& output) const
{
  index_t result;
  result.byte_index = find_index(output, reg, "OMAP_BYTES*");
  result.key_index = find_index(output, reg, "OMAP_KEYS*");
  return result;
}
// Validate the per-PG table of plain "pg dump": the summed OMAP byte and
// key columns must equal the totals we wrote.
void LazyOmapStatsTest::check_pg_dump()
{
  cout << R"(Checking "pg dump" output)" << endl;
  string dump_output = get_output();
  cout << dump_output << endl;
  // Header line gives us the column positions
  regex reg(
      "\n"
      R"((PG_STAT\s.*))"
      "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // Capture the whole PG_STAT table (up to the blank line before the sums)
  reg =
      "\n"
      R"((PG_STAT[\s\S]*))"
      "\n +\n[0-9]";
  smatch match;
  regex_search(dump_output, match, reg);
  auto table = match[1].str();
  cout << "Checking bytes" << endl;
  check_column(indexes.byte_index, table, string("bytes"));
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, table, string("keys"));
  cout << endl;
}
// Validate the "sum" row of "pg dump summary": its OMAP byte and key
// cells must match the totals we wrote.
void LazyOmapStatsTest::check_pg_dump_summary()
{
  cout << R"(Checking "pg dump summary" output)" << endl;
  string command = R"({"prefix": "pg dump", "dumpcontents": ["summary"]})";
  string dump_output = get_output(command);
  cout << dump_output << endl;
  // Header line gives us the column positions
  regex reg(
      "\n"
      R"((PG_STAT\s.*))"
      "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // Single summary row starting with "sum"
  reg =
      "\n"
      R"((sum\s.*))"
      "\n";
  smatch match;
  regex_search(dump_output, match, reg);
  auto table = match[1].str();
  cout << "Checking bytes" << endl;
  // header=false: the captured text is a lone data row, nothing to skip
  check_column(indexes.byte_index, table, string("bytes"), false);
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, table, string("keys"), false);
  cout << endl;
}
// Validate the per-PG table of "pg dump pgs": summed OMAP byte and key
// columns must equal the totals we wrote.
void LazyOmapStatsTest::check_pg_dump_pgs()
{
  cout << R"(Checking "pg dump pgs" output)" << endl;
  string command = R"({"prefix": "pg dump", "dumpcontents": ["pgs"]})";
  string dump_output = get_output(command);
  cout << dump_output << endl;
  // Header is the first line of this dump variant
  regex reg(R"(^(PG_STAT\s.*))"
            "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // Capture the PG_STAT table up to the first empty line
  reg = R"(^(PG_STAT[\s\S]*))"
        "\n\n";
  smatch match;
  regex_search(dump_output, match, reg);
  auto table = match[1].str();
  cout << "Checking bytes" << endl;
  check_column(indexes.byte_index, table, string("bytes"));
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, table, string("keys"));
  cout << endl;
}
// Validate our pool's row of "pg dump pools": its OMAP byte and key cells
// must match the totals we wrote.
void LazyOmapStatsTest::check_pg_dump_pools()
{
  cout << R"(Checking "pg dump pools" output)" << endl;
  string command = R"({"prefix": "pg dump", "dumpcontents": ["pools"]})";
  string dump_output = get_output(command);
  cout << dump_output << endl;
  // Header is the first line of this dump variant
  regex reg(R"(^(POOLID\s.*))"
            "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // Single row whose first column is our pool id
  reg =
      "\n"
      R"(()" +
      conf.pool_id +
      R"(\s.*))"
      "\n";
  smatch match;
  regex_search(dump_output, match, reg);
  auto line = match[1].str();
  cout << "Checking bytes" << endl;
  // header=false: we captured a lone data row, nothing to skip
  check_column(indexes.byte_index, line, string("bytes"), false);
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, line, string("keys"), false);
  cout << endl;
}
// Validate the "pg ls" table: summed OMAP byte and key columns must equal
// the totals we wrote.
void LazyOmapStatsTest::check_pg_ls()
{
  cout << R"(Checking "pg ls" output)" << endl;
  string command = R"({"prefix": "pg ls"})";
  string dump_output = get_output(command);
  cout << dump_output << endl;
  // Header is the first line ("PG ...")
  regex reg(R"(^(PG\s.*))"
            "\n");
  index_t indexes = get_indexes(reg, dump_output);
  // Capture the table up to the first empty line
  reg = R"(^(PG[\s\S]*))"
        "\n\n";
  smatch match;
  regex_search(dump_output, match, reg);
  auto table = match[1].str();
  cout << "Checking bytes" << endl;
  check_column(indexes.byte_index, table, string("bytes"));
  cout << "Checking keys" << endl;
  check_column(indexes.key_index, table, string("keys"));
  cout << endl;
}
// Block until every PG listed by "pg dump" reports state active+clean.
// Polls every 250 ms, printing a progress dot per iteration.
void LazyOmapStatsTest::wait_for_active_clean()
{
  cout << "Waiting for active+clean" << endl;
  int index = -1;
  regex reg(
      "\n"
      R"((PG_STAT[\s\S]*))"
      "\n +\n[0-9]");
  string command = R"({"prefix": "pg dump"})";
  int num_not_clean;
  do {
    string dump_output = get_output(command, true);
    if (index == -1) {
      // Locate the STATE column once; the layout is stable across polls
      regex ireg(
          "\n"
          R"((PG_STAT\s.*))"
          "\n");
      index = find_index(dump_output, ireg, "STATE");
    }
    smatch match;
    regex_search(dump_output, match, reg);
    istringstream buffer(match[1].str());
    string line;
    num_not_clean = 0;
    while (std::getline(buffer, line)) {
      if (line.compare(0, 1, "P") == 0) continue; // skip PG_STAT header row
      boost::char_separator<char> sep{" "};
      boost::tokenizer<boost::char_separator<char>> tok(line, sep);
      vector<string> tokens(tok.begin(), tok.end());
      // BUGFIX: the old code summed string::compare() results, which are
      // signed three-way values; a negative and a positive result could
      // cancel to zero and end the wait while PGs were still unclean.
      // Count inequality explicitly instead.
      if (tokens.at(index) != "active+clean") {
        ++num_not_clean;
      }
    }
    cout << "." << flush;
    this_thread::sleep_for(chrono::milliseconds(250));
  } while (num_not_clean);
  cout << endl;
}
// Test entry point: write omap payloads, force deep-scrubs so the lazy
// omap statistics get collected, then verify the stats through every
// reporting path ("pg dump", its summary/pgs/pools variants, "pg ls").
// Returns 0 on success; failing checks exit() directly.
const int LazyOmapStatsTest::run(const int argc, const char** argv)
{
  init(argc, argv);
  create_payload();
  wait_for_active_clean();
  write_omap(get_name());
  scrub();
  check_one();
  write_many(conf.how_many - 1); // Since we already wrote one
  scrub();
  check_pg_dump();
  check_pg_dump_summary();
  check_pg_dump_pgs();
  check_pg_dump_pools();
  check_pg_ls();
  cout << "All tests passed. Success!" << endl;
  shutdown();
  return 0;
}
| 17,314 | 26.837621 | 88 |
cc
|
null |
ceph-main/src/test/lazy-omap-stats/lazy_omap_stats_test.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LAZY_OMAP_STATS_TEST_H
#define CEPH_LAZY_OMAP_STATS_TEST_H
#include <map>
#include <regex>
#include <string>
#include "include/compat.h"
#include "include/rados/librados.hpp"
// Column positions of the OMAP statistics within a "pg dump" style table.
struct index_t {
  unsigned byte_index = 0;  // column holding OMAP_BYTES*
  unsigned key_index = 0;   // column holding OMAP_KEYS*
};
// Integration test driver that writes omap data and verifies the lazily
// collected omap statistics reported by the mon/mgr "pg dump"/"pg ls"
// command family. Non-copyable; drive it via run().
class LazyOmapStatsTest
{
  librados::IoCtx io_ctx;
  librados::Rados rados;
  // Object name -> omap payload written to it
  std::map<std::string, librados::bufferlist> payload;
  // Test configuration and running totals
  struct lazy_omap_test_t {
    unsigned payload_size = 0;           // bytes per omap value
    unsigned replica_count = 3;
    unsigned keys = 2000;                // omap keys per object
    unsigned how_many = 50;              // number of objects to write
    std::string pool_name = "lazy_omap_test_pool";
    std::string pool_id;                 // resolved by get_pool_id()
    unsigned total_bytes = 0;            // expected cluster-wide omap bytes
    unsigned total_keys = 0;             // expected cluster-wide omap keys
  } conf;
  // Which daemon a command should be sent to
  typedef enum {
    TARGET_MON,
    TARGET_MGR
  } CommandTarget;
  LazyOmapStatsTest(LazyOmapStatsTest&) = delete;
  void operator=(LazyOmapStatsTest) = delete;
  void init(const int argc, const char** argv);
  void shutdown();
  void write_omap(const std::string& object_name);
  const std::string get_name() const;
  void create_payload();
  void write_many(const unsigned how_many);
  void scrub();
  const int find_matches(std::string& output, std::regex& reg) const;
  void check_one();
  const int find_index(std::string& haystack, std::regex& needle,
                       std::string label) const;
  const unsigned tally_column(const unsigned omap_bytes_index,
                              const std::string& table, bool header) const;
  void check_column(const int index, const std::string& table,
                    const std::string& type, bool header = true) const;
  index_t get_indexes(std::regex& reg, std::string& output) const;
  void check_pg_dump();
  void check_pg_dump_summary();
  void check_pg_dump_pgs();
  void check_pg_dump_pools();
  void check_pg_ls();
  const std::string get_output(
      const std::string command = R"({"prefix": "pg dump"})",
      const bool silent = false,
      const CommandTarget target = CommandTarget::TARGET_MGR);
  void get_pool_id(const std::string& pool);
  std::map<std::string, std::string> get_scrub_stamps();
  void wait_for_active_clean();
 public:
  LazyOmapStatsTest() = default;
  const int run(const int argc, const char** argv);
};
#endif // CEPH_LAZY_OMAP_STATS_TEST_H
| 2,632 | 28.58427 | 71 |
h
|
null |
ceph-main/src/test/lazy-omap-stats/main.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "lazy_omap_stats_test.h"
// Entry point: delegate everything to the test driver. Exit status is
// whatever LazyOmapStatsTest::run() returns (failing checks exit earlier).
int main(const int argc, const char** argv)
{
  LazyOmapStatsTest app;
  return app.run(argc, argv);
}
| 537 | 23.454545 | 70 |
cc
|
null |
ceph-main/src/test/libcephfs/access.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "common/ceph_argparse.h"
#include "include/buffer.h"
#include "include/stringify.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "include/rados/librados.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <iostream>
#include <vector>
#include "json_spirit/json_spirit.h"
#include "include/fs_types.h"
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
using namespace std;
// Cluster handle shared by all tests in this file; connected in main().
rados_t cluster;
// File-scope key string. NOTE(review): the tests below declare their own
// local 'key' variables which shadow this one — confirm it is still needed.
string key;
// Send a single mon command (JSON in 's'), print its output, and extract
// the "key" field of the first element of the returned JSON array into
// '*key'. Returns the rados_mon_command() result, or -CEPHFS_EINVAL when
// the command produced no output buffer.
//
// BUGFIX: the early -CEPHFS_EINVAL return used to leak the 'outs' status
// buffer; it is now printed and freed on that path too. Inner locals no
// longer shadow the parameter 's'.
int do_mon_command(string s, string *key)
{
  char *outs, *outbuf;
  size_t outs_len, outbuf_len;
  const char *ss = s.c_str();
  int r = rados_mon_command(cluster, (const char **)&ss, 1,
			    0, 0,
			    &outbuf, &outbuf_len,
			    &outs, &outs_len);
  if (outbuf_len) {
    string out(outbuf, outbuf_len);
    std::cout << "out: " << out << std::endl;
    // parse out the key
    json_spirit::mValue v, k;
    json_spirit::read_or_throw(out, v);
    k = v.get_array()[0].get_obj().find("key")->second;
    *key = k.get_str();
    std::cout << "key: " << *key << std::endl;
    free(outbuf);
  } else {
    // No output buffer: still release the status buffer before bailing out
    if (outs_len) {
      std::cout << "outs: " << string(outs, outs_len) << std::endl;
      free(outs);
    }
    return -CEPHFS_EINVAL;
  }
  if (outs_len) {
    string status(outs, outs_len);
    std::cout << "outs: " << status << std::endl;
    free(outs);
  }
  return r;
}
// Build a random per-invocation directory name under the filesystem root.
string get_unique_dir()
{
  string dir = "/ceph_test_libcephfs_access.";
  dir += stringify(rand());
  return dir;
}
// Smoke test: a freshly created client with blanket mon/osd/mds caps can
// mount the filesystem.
TEST(AccessTest, Foo) {
  string dir = get_unique_dir();
  string user = "libcephfs_foo_test." + stringify(rand());
  // admin mount to set up test
  struct ceph_mount_info *admin;
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  ASSERT_EQ(0, ceph_mkdir(admin, dir.c_str(), 0755));
  // create access key
  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow *\", \"osd\", \"allow rw\", "
      "\"mds\", \"allow rw\""
      "], \"format\": \"json\"}", &key));
  // mount with the newly minted credentials
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ceph_shutdown(cmount);
  // clean up
  ASSERT_EQ(0, ceph_rmdir(admin, dir.c_str()));
  ceph_shutdown(admin);
}
// Path-restricted caps: a client with "allow r, allow rw path=<good>" may
// modify under <good>, may only read under <bad>, and open file handles
// keep their original capabilities across unlink/rename.
TEST(AccessTest, Path) {
  string good = get_unique_dir();
  string bad = get_unique_dir();
  string user = "libcephfs_path_test." + stringify(rand());
  // admin mount: build fixtures in both trees
  struct ceph_mount_info *admin;
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  ASSERT_EQ(0, ceph_mkdir(admin, good.c_str(), 0755));
  ASSERT_EQ(0, ceph_mkdir(admin, string(good + "/p").c_str(), 0755));
  ASSERT_EQ(0, ceph_mkdir(admin, bad.c_str(), 0755));
  ASSERT_EQ(0, ceph_mkdir(admin, string(bad + "/p").c_str(), 0755));
  int fd = ceph_open(admin, string(good + "/q").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_close(admin, fd);
  fd = ceph_open(admin, string(bad + "/q").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_close(admin, fd);
  fd = ceph_open(admin, string(bad + "/z").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_write(admin, fd, "TEST FAILED", 11, 0);
  ceph_close(admin, fd);
  // read everywhere, write only under 'good'
  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow r\", \"osd\", \"allow rwx\", "
      "\"mds\", \"allow r, allow rw path=" + good + "\""
      "], \"format\": \"json\"}", &key));
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  // allowed
  ASSERT_GE(ceph_mkdir(cmount, string(good + "/x").c_str(), 0755), 0);
  ASSERT_GE(ceph_rmdir(cmount, string(good + "/p").c_str()), 0);
  ASSERT_GE(ceph_unlink(cmount, string(good + "/q").c_str()), 0);
  fd = ceph_open(cmount, string(good + "/y").c_str(), O_CREAT|O_WRONLY, 0755);
  ASSERT_GE(fd, 0);
  ceph_write(cmount, fd, "bar", 3, 0);
  ceph_close(cmount, fd);
  ASSERT_GE(ceph_unlink(cmount, string(good + "/y").c_str()), 0);
  ASSERT_GE(ceph_rmdir(cmount, string(good + "/x").c_str()), 0);
  fd = ceph_open(cmount, string(bad + "/z").c_str(), O_RDONLY, 0644);
  ASSERT_GE(fd, 0);
  ceph_close(cmount, fd);
  // not allowed
  ASSERT_LT(ceph_mkdir(cmount, string(bad + "/x").c_str(), 0755), 0);
  ASSERT_LT(ceph_rmdir(cmount, string(bad + "/p").c_str()), 0);
  ASSERT_LT(ceph_unlink(cmount, string(bad + "/q").c_str()), 0);
  fd = ceph_open(cmount, string(bad + "/y").c_str(), O_CREAT|O_WRONLY, 0755);
  ASSERT_LT(fd, 0);
  // unlink open file: the handle stays writable after the unlink
  fd = ceph_open(cmount, string(good + "/unlinkme").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_unlink(cmount, string(good + "/unlinkme").c_str());
  ASSERT_GE(ceph_write(cmount, fd, "foo", 3, 0), 0);
  ASSERT_GE(ceph_fchmod(cmount, fd, 0777), 0);
  ASSERT_GE(ceph_ftruncate(cmount, fd, 0), 0);
  ASSERT_GE(ceph_fsetxattr(cmount, fd, "user.any", "bar", 3, 0), 0);
  ceph_close(cmount, fd);
  // rename open file out of the writable tree: writes still work on the
  // open handle, metadata ops now fail with EACCES
  fd = ceph_open(cmount, string(good + "/renameme").c_str(), O_CREAT|O_WRONLY, 0755);
  ASSERT_EQ(ceph_rename(admin, string(good + "/renameme").c_str(),
			string(bad + "/asdf").c_str()), 0);
  ASSERT_GE(ceph_write(cmount, fd, "foo", 3, 0), 0);
  ASSERT_GE(ceph_fchmod(cmount, fd, 0777), -CEPHFS_EACCES);
  ASSERT_GE(ceph_ftruncate(cmount, fd, 0), -CEPHFS_EACCES);
  ASSERT_GE(ceph_fsetxattr(cmount, fd, "user.any", "bar", 3, 0), -CEPHFS_EACCES);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
  // clean up
  ASSERT_EQ(0, ceph_unlink(admin, string(bad + "/q").c_str()));
  ASSERT_EQ(0, ceph_unlink(admin, string(bad + "/z").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, string(bad + "/p").c_str()));
  ASSERT_EQ(0, ceph_unlink(admin, string(bad + "/asdf").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, good.c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, bad.c_str()));
  ceph_shutdown(admin);
}
// Read-only MDS caps: the client may open existing files for reading but
// cannot create files or directories.
TEST(AccessTest, ReadOnly) {
  string dir = get_unique_dir();
  string dir2 = get_unique_dir();
  string user = "libcephfs_readonly_test." + stringify(rand());
  // admin mount: create a file the read-only client will open
  struct ceph_mount_info *admin;
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  ASSERT_EQ(0, ceph_mkdir(admin, dir.c_str(), 0755));
  int fd = ceph_open(admin, string(dir + "/out").c_str(), O_CREAT|O_WRONLY, 0755);
  ceph_write(admin, fd, "foo", 3, 0);
  ceph_close(admin,fd);
  // client with "mds allow r"
  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow r\", \"osd\", \"allow rw\", "
      "\"mds\", \"allow r\""
      "], \"format\": \"json\"}", &key));
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  // allowed
  fd = ceph_open(cmount, string(dir + "/out").c_str(), O_RDONLY, 0644);
  ASSERT_GE(fd, 0);
  ceph_close(cmount,fd);
  // not allowed
  fd = ceph_open(cmount, string(dir + "/bar").c_str(), O_CREAT|O_WRONLY, 0755);
  ASSERT_LT(fd, 0);
  ASSERT_LT(ceph_mkdir(cmount, dir2.c_str(), 0755), 0);
  ceph_shutdown(cmount);
  // clean up
  ASSERT_EQ(0, ceph_unlink(admin, string(dir + "/out").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, dir.c_str()));
  ceph_shutdown(admin);
}
// uid/gid-restricted caps ("allow rw uid=123 gids=456,789"): the MDS
// enforces classic POSIX permission checks against the cap's identity,
// and chown/chgrp is only permitted within that identity.
TEST(AccessTest, User) {
  string dir = get_unique_dir();
  string user = "libcephfs_user_test." + stringify(rand());
  // admin mount to set up test
  struct ceph_mount_info *admin;
  ASSERT_EQ(0, ceph_create(&admin, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
  ASSERT_EQ(0, ceph_conf_set(admin, "client_permissions", "0"));
  ASSERT_EQ(0, ceph_mount(admin, "/"));
  ASSERT_EQ(0, ceph_mkdir(admin, dir.c_str(), 0755));
  // create access key
  string key;
  ASSERT_EQ(0, do_mon_command(
      "{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
      "\"caps\": [\"mon\", \"allow *\", \"osd\", \"allow rw\", "
      "\"mds\", \"allow rw uid=123 gids=456,789\""
      "], \"format\": \"json\"}", &key));
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
  // mounting with default (root) identity must be rejected by the cap
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_init(cmount));
  // adopt uid 123 / gid 456 to match the cap
  UserPerm *perms = ceph_userperm_new(123, 456, 0, NULL);
  ASSERT_NE(nullptr, perms);
  ASSERT_EQ(0, ceph_mount_perms_set(cmount, perms));
  ceph_userperm_destroy(perms);
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_permissions", "0"));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  // user bits
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0700));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 456));
  ASSERT_EQ(0, ceph_mkdir(cmount, string(dir + "/u1").c_str(), 0755));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  // group bits
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0770));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 456));
  ASSERT_EQ(0, ceph_mkdir(cmount, string(dir + "/u2").c_str(), 0755));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 2));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  // user overrides group
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0470));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  // other
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0777));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 1));
  ASSERT_EQ(0, ceph_mkdir(cmount, string(dir + "/u3").c_str(), 0755));
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0770));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  // user and group overrides other
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 07));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_mkdir(cmount, string(dir + "/no").c_str(), 0755));
  // chown and chgrp
  ASSERT_EQ(0, ceph_chmod(admin, dir.c_str(), 0700));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 456));
  // FIXME: Re-enable these 789 tests once we can set multiple GIDs via libcephfs/config
  // ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), 123, 789));
  ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), 123, 456));
  // ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), -1, 789));
  ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), -1, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, 1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 1, 456));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, -1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), -1, 456));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 1, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, 456));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), 123, -1));
  ASSERT_EQ(-CEPHFS_EACCES, ceph_chown(cmount, dir.c_str(), -1, 456));
  ASSERT_EQ(0, ceph_chown(admin, dir.c_str(), 123, 1));
  ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), -1, 456));
  // ASSERT_EQ(0, ceph_chown(cmount, dir.c_str(), 123, 789));
  ceph_shutdown(cmount);
  // clean up
  ASSERT_EQ(0, ceph_rmdir(admin, string(dir + "/u1").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, string(dir + "/u2").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, string(dir + "/u3").c_str()));
  ASSERT_EQ(0, ceph_rmdir(admin, dir.c_str()));
  ceph_shutdown(admin);
}
// Relax the mode of "/" to 0777 so the unprivileged test identities can
// create entries under the root. Returns 0 on success or the first failing
// ceph_* return code.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int r = ceph_create(&admin, NULL);
  if (r < 0)
    return r;
  ceph_conf_read_file(admin, NULL);
  ceph_conf_parse_env(admin, NULL);
  ceph_conf_set(admin, "client_permissions", "false");
  r = ceph_mount(admin, "/");
  if (r >= 0) {
    r = ceph_chmod(admin, "/", 0777);
  }
  ceph_shutdown(admin);
  return r;
}
// Entry point: widen root permissions, connect the shared rados cluster
// handle used by do_mon_command(), then run the gtest suite. Exits
// non-zero if any setup step fails.
int main(int argc, char **argv)
{
  int r = update_root_mode();
  if (r < 0)
    exit(1);
  ::testing::InitGoogleTest(&argc, argv);
  // Seed rand() for the unique dir/user names generated by the tests
  srand(getpid());
  r = rados_create(&cluster, NULL);
  if (r < 0)
    exit(1);
  r = rados_conf_read_file(cluster, NULL);
  if (r < 0)
    exit(1);
  rados_conf_parse_env(cluster, NULL);
  r = rados_connect(cluster);
  if (r < 0)
    exit(1);
  r = RUN_ALL_TESTS();
  rados_shutdown(cluster);
  return r;
}
| 14,016 | 34.0425 | 88 |
cc
|
null |
ceph-main/src/test/libcephfs/acl.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/types.h"
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "include/ceph_fs.h"
#include "client/posix_acl.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#ifdef __linux__
#include <sys/xattr.h>
#endif
// Size in bytes of an ACL xattr blob holding 'count' entries.
static size_t acl_ea_size(int count)
{
  size_t bytes = sizeof(acl_ea_header);
  bytes += count * sizeof(acl_ea_entry);
  return bytes;
}
// Number of ACL entries encoded in an xattr blob of 'size' bytes, or -1
// when the size cannot correspond to a whole number of entries.
static int acl_ea_count(size_t size)
{
  if (size < sizeof(acl_ea_header)) {
    return -1;
  }
  const size_t payload = size - sizeof(acl_ea_header);
  if (payload % sizeof(acl_ea_entry) != 0) {
    return -1;
  }
  return payload / sizeof(acl_ea_entry);
}
// Verify an ACL xattr blob is consistent with a file mode: the owner bits
// must equal the ACL_USER_OBJ entry, the "other" bits the ACL_OTHER entry,
// and the group bits the ACL_MASK entry (or ACL_GROUP_OBJ when no mask is
// present). Returns 0 on success, -CEPHFS_EINVAL on mismatch, and
// -CEPHFS_EIO on a malformed ACL (unknown tag, or neither mask nor group
// entry found).
static int check_acl_and_mode(const void *buf, size_t size, mode_t mode)
{
  const acl_ea_entry *group_entry = NULL, *mask_entry = NULL;
  const acl_ea_header *header = reinterpret_cast<const acl_ea_header*>(buf);
  const acl_ea_entry *entry = header->a_entries;
  int count = (size - sizeof(*header)) / sizeof(*entry);
  for (int i = 0; i < count; ++i) {
    __u16 tag = entry->e_tag;
    __u16 perm = entry->e_perm;
    switch(tag) {
    case ACL_USER_OBJ:
      // owner permission must mirror the mode's user bits
      if (perm != ((mode >> 6) & 7))
	return -CEPHFS_EINVAL;
      break;
    case ACL_USER:
    case ACL_GROUP:
      // named user/group entries are not checked against the mode
      break;
    case ACL_GROUP_OBJ:
      group_entry = entry;
      break;
    case ACL_OTHER:
      if (perm != (mode & 7))
	return -CEPHFS_EINVAL;
      break;
    case ACL_MASK:
      mask_entry = entry;
      break;
    default:
      return -CEPHFS_EIO;
    }
    ++entry;
  }
  // With a mask entry the mode's group bits reflect the mask, not the
  // owning-group entry; without one they reflect ACL_GROUP_OBJ.
  if (mask_entry) {
    __u16 perm = mask_entry->e_perm;
    if (perm != ((mode >> 3) & 7))
      return -CEPHFS_EINVAL;
  } else {
    if (!group_entry)
      return -CEPHFS_EIO;
    __u16 perm = group_entry->e_perm;
    if (perm != ((mode >> 3) & 7))
      return -CEPHFS_EINVAL;
  }
  return 0;
}
// Fill 'buf' with a 5-entry access ACL derived from 'mode': user/group/
// other entries taken from the mode bits, plus an rwx entry for the
// current uid and an rwx mask. Returns 0, or -1 when 'size' does not hold
// exactly 5 entries.
static int generate_test_acl(void *buf, size_t size, mode_t mode)
{
  if (acl_ea_count(size) != 5)
    return -1;
  acl_ea_header *header = reinterpret_cast<acl_ea_header*>(buf);
  header->a_version = (__u32)ACL_EA_VERSION;
  acl_ea_entry *entry = header->a_entries;
  entry->e_tag = ACL_USER_OBJ;
  entry->e_perm = (mode >> 6) & 7;
  ++entry;
  // named-user entry granting rwx to the caller
  entry->e_tag = ACL_USER;
  entry->e_perm = 7;
  entry->e_id = getuid();
  ++entry;
  entry->e_tag = ACL_GROUP_OBJ;
  entry->e_perm = (mode >> 3) & 7;
  ++entry;
  entry->e_tag = ACL_MASK;
  entry->e_perm = 7;
  ++entry;
  entry->e_tag = ACL_OTHER;
  entry->e_perm = mode & 7;
  return 0;
}
// Fill 'buf' with the minimal 3-entry ACL (user/group/other) that is
// exactly equivalent to 'mode'. Returns 0, or -1 when 'size' does not
// hold exactly 3 entries.
static int generate_empty_acl(void *buf, size_t size, mode_t mode)
{
  if (acl_ea_count(size) != 3)
    return -1;
  acl_ea_header *header = reinterpret_cast<acl_ea_header*>(buf);
  header->a_version = (__u32)ACL_EA_VERSION;
  acl_ea_entry *entry = header->a_entries;
  entry->e_tag = ACL_USER_OBJ;
  entry->e_perm = (mode >> 6) & 7;
  ++entry;
  entry->e_tag = ACL_GROUP_OBJ;
  entry->e_perm = (mode >> 3) & 7;
  ++entry;
  entry->e_tag = ACL_OTHER;
  entry->e_perm = mode & 7;
  return 0;
}
// Setting an access ACL updates the file mode to match; setting an ACL
// equivalent to the plain mode removes the ACL xattr. Default ACLs are
// rejected on non-directories.
TEST(ACL, SetACL) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", "posix_acl"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_permissions", "0"));
  char test_file[256];
  sprintf(test_file, "file1_setacl_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0600);
  ASSERT_GT(fd, 0);
  // change ownership to nobody -- we assume nobody exists and id is always 65534
  ASSERT_EQ(ceph_fchown(cmount, fd, 65534, 65534), 0);
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_permissions", "1"));
  // "nobody" will be ignored on Windows
#ifndef _WIN32
  ASSERT_EQ(ceph_open(cmount, test_file, O_RDWR, 0), -CEPHFS_EACCES);
#endif
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_permissions", "0"));
  size_t acl_buf_size = acl_ea_size(5);
  void *acl_buf = malloc(acl_buf_size);
  ASSERT_EQ(generate_test_acl(acl_buf, acl_buf_size, 0750), 0);
  // can't set default acl for non-directory
  ASSERT_EQ(ceph_fsetxattr(cmount, fd, ACL_EA_DEFAULT, acl_buf, acl_buf_size, 0), -CEPHFS_EACCES);
  ASSERT_EQ(ceph_fsetxattr(cmount, fd, ACL_EA_ACCESS, acl_buf, acl_buf_size, 0), 0);
  // with the rwx named-user entry in place we can reopen the file
  int tmpfd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_GT(tmpfd, 0);
  ceph_close(cmount, tmpfd);
  struct ceph_statx stx;
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_MODE, 0), 0);
  // mode was modified according to ACL
  ASSERT_EQ(stx.stx_mode & 0777u, 0770u);
  ASSERT_EQ(check_acl_and_mode(acl_buf, acl_buf_size, stx.stx_mode), 0);
  acl_buf_size = acl_ea_size(3);
  // setting ACL that is equivalent to file mode
  ASSERT_EQ(generate_empty_acl(acl_buf, acl_buf_size, 0600), 0);
  ASSERT_EQ(ceph_fsetxattr(cmount, fd, ACL_EA_ACCESS, acl_buf, acl_buf_size, 0), 0);
  // ACL was deleted
  ASSERT_EQ(ceph_fgetxattr(cmount, fd, ACL_EA_ACCESS, NULL, 0), -CEPHFS_ENODATA);
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_MODE, 0), 0);
  // mode was modified according to ACL
  ASSERT_EQ(stx.stx_mode & 0777u, 0600u);
  free(acl_buf);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// chmod on a file carrying an ACL keeps the two in sync: setting the ACL
// updates the mode, and a subsequent fchmod updates the stored ACL.
TEST(ACL, Chmod) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", "posix_acl"));
  char test_file[256];
  sprintf(test_file, "file1_acl_chmod_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0600);
  ASSERT_GT(fd, 0);
  int acl_buf_size = acl_ea_size(5);
  void *acl_buf = malloc(acl_buf_size);
  ASSERT_EQ(generate_test_acl(acl_buf, acl_buf_size, 0775), 0);
  ASSERT_EQ(ceph_fsetxattr(cmount, fd, ACL_EA_ACCESS, acl_buf, acl_buf_size, 0), 0);
  struct ceph_statx stx;
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_MODE, 0), 0);
  // mode was updated according to ACL
  ASSERT_EQ(stx.stx_mode & 0777u, 0775u);
  // change mode
  ASSERT_EQ(ceph_fchmod(cmount, fd, 0640), 0);
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_MODE, 0), 0);
  ASSERT_EQ(stx.stx_mode & 0777u, 0640u);
  // ACL was updated according to mode
  ASSERT_EQ(ceph_fgetxattr(cmount, fd, ACL_EA_ACCESS, acl_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(check_acl_and_mode(acl_buf, acl_buf_size, stx.stx_mode), 0);
  free(acl_buf);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// A directory's default ACL is inherited: subdirectories copy it as their
// own default and access ACL; files get only an access ACL; in both cases
// the effective mode is masked by the ACL permissions.
TEST(ACL, DefaultACL) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", "posix_acl"));
  int acl_buf_size = acl_ea_size(5);
  void *acl1_buf = malloc(acl_buf_size);
  void *acl2_buf = malloc(acl_buf_size);
  ASSERT_EQ(generate_test_acl(acl1_buf, acl_buf_size, 0750), 0);
  char test_dir1[256];
  sprintf(test_dir1, "dir1_acl_default_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0750), 0);
  // set default acl
  ASSERT_EQ(ceph_setxattr(cmount, test_dir1, ACL_EA_DEFAULT, acl1_buf, acl_buf_size, 0), 0);
  char test_dir2[262];
  sprintf(test_dir2, "%s/dir2", test_dir1);
  ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0755), 0);
  // inherit default acl
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2, ACL_EA_DEFAULT, acl2_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(memcmp(acl1_buf, acl2_buf, acl_buf_size), 0);
  // mode and ACL are updated
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2, ACL_EA_ACCESS, acl2_buf, acl_buf_size), acl_buf_size);
  {
    struct ceph_statx stx;
    ASSERT_EQ(ceph_statx(cmount, test_dir2, &stx, CEPH_STATX_MODE, 0), 0);
    // other bits of mode &= acl other perm
    ASSERT_EQ(stx.stx_mode & 0777u, 0750u);
    ASSERT_EQ(check_acl_and_mode(acl2_buf, acl_buf_size, stx.stx_mode), 0);
  }
  char test_file1[262];
  sprintf(test_file1, "%s/file1", test_dir1);
  int fd = ceph_open(cmount, test_file1, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  // no default acl
  ASSERT_EQ(ceph_fgetxattr(cmount, fd, ACL_EA_DEFAULT, NULL, 0), -CEPHFS_ENODATA);
  // mode and ACL are updated
  ASSERT_EQ(ceph_fgetxattr(cmount, fd, ACL_EA_ACCESS, acl2_buf, acl_buf_size), acl_buf_size);
  {
    struct ceph_statx stx;
    ASSERT_EQ(ceph_statx(cmount, test_file1, &stx, CEPH_STATX_MODE, 0), 0);
    // other bits of mode &= acl other perm
    ASSERT_EQ(stx.stx_mode & 0777u, 0660u);
    ASSERT_EQ(check_acl_and_mode(acl2_buf, acl_buf_size, stx.stx_mode), 0);
  }
  free(acl1_buf);
  free(acl2_buf);
  ASSERT_EQ(ceph_unlink(cmount, test_file1), 0);
  ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
  ASSERT_EQ(ceph_rmdir(cmount, test_dir1), 0);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// Verify that when ACL support is disabled (empty client_acl_type), all ACL
// xattr operations fail with EOPNOTSUPP rather than silently succeeding.
TEST(ACL, Disabled) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  // explicitly disable ACL handling on this mount
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", ""));
  size_t acl_buf_size = acl_ea_size(3);
  void *acl_buf = malloc(acl_buf_size);
  ASSERT_EQ(generate_empty_acl(acl_buf, acl_buf_size, 0755), 0);
  char test_dir[256];
  sprintf(test_dir, "dir1_acl_disabled_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir, 0750), 0);
  ASSERT_EQ(ceph_setxattr(cmount, test_dir, ACL_EA_DEFAULT, acl_buf, acl_buf_size, 0), -CEPHFS_EOPNOTSUPP);
  ASSERT_EQ(ceph_setxattr(cmount, test_dir, ACL_EA_ACCESS, acl_buf, acl_buf_size, 0), -CEPHFS_EOPNOTSUPP);
  ASSERT_EQ(ceph_getxattr(cmount, test_dir, ACL_EA_DEFAULT, acl_buf, acl_buf_size), -CEPHFS_EOPNOTSUPP);
  ASSERT_EQ(ceph_getxattr(cmount, test_dir, ACL_EA_ACCESS, acl_buf, acl_buf_size), -CEPHFS_EOPNOTSUPP);
  free(acl_buf);
  ceph_shutdown(cmount);
}
// Verify that a directory's .snap pseudo-directory reports the same default
// and access ACLs as the directory itself.
TEST(ACL, SnapdirACL) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_acl_type", "posix_acl"));
  int acl_buf_size = acl_ea_size(5);
  void *acl1_buf = malloc(acl_buf_size);
  void *acl2_buf = malloc(acl_buf_size);
  void *acl3_buf = malloc(acl_buf_size);
  ASSERT_EQ(generate_test_acl(acl1_buf, acl_buf_size, 0750), 0);
  char test_dir1[256];
  sprintf(test_dir1, "dir1_acl_default_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0750), 0);
  // set default acl
  ASSERT_EQ(ceph_setxattr(cmount, test_dir1, ACL_EA_DEFAULT, acl1_buf, acl_buf_size, 0), 0);
  char test_dir2[262];
  sprintf(test_dir2, "%s/dir2", test_dir1);
  ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0755), 0);
  // inherit default acl
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2, ACL_EA_DEFAULT, acl2_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(memcmp(acl1_buf, acl2_buf, acl_buf_size), 0);
  char test_dir2_snapdir[512];
  sprintf(test_dir2_snapdir, "%s/dir2/.snap", test_dir1);
  // inherit default acl
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2_snapdir, ACL_EA_DEFAULT, acl3_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(memcmp(acl2_buf, acl3_buf, acl_buf_size), 0);
  // clear the buffers so the next comparison cannot pass on stale data
  memset(acl2_buf, 0, acl_buf_size);
  memset(acl3_buf, 0, acl_buf_size);
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2, ACL_EA_ACCESS, acl2_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(ceph_getxattr(cmount, test_dir2_snapdir, ACL_EA_ACCESS, acl3_buf, acl_buf_size), acl_buf_size);
  ASSERT_EQ(memcmp(acl2_buf, acl3_buf, acl_buf_size), 0);
  free(acl1_buf);
  free(acl2_buf);
  free(acl3_buf);
  ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
  ASSERT_EQ(ceph_rmdir(cmount, test_dir1), 0);
  ceph_shutdown(cmount);
}
| 11,920 | 31.394022 | 108 |
cc
|
null |
ceph-main/src/test/libcephfs/caps.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/int_types.h"
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/ceph_fs.h"
#include "include/cephfs/libcephfs.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#ifdef __linux__
#include <sys/xattr.h>
#endif
#include <signal.h>
// Create a batch of empty files, then re-read them and check the client's
// capability state before and after each zero-length read.
TEST(Caps, ReadZero) {
  int mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  int i = 0;
  // Phase 1: create 30 empty files and check write-side caps are granted.
  for(; i < 30; ++i) {
    char c_path[1024];
    sprintf(c_path, "/caps_rzfile_%d_%d", mypid, i);
    int fd = ceph_open(cmount, c_path, O_CREAT|O_TRUNC|O_WRONLY, 0644);
    ASSERT_LT(0, fd);
    int expect = CEPH_CAP_FILE_EXCL | CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER;
    int caps = ceph_debug_get_fd_caps(cmount, fd);
    ASSERT_EQ(expect, caps & expect);
    ASSERT_EQ(0, ceph_close(cmount, fd));
    caps = ceph_debug_get_file_caps(cmount, c_path);
    ASSERT_EQ(expect, caps & expect);
    char cw_path[1024];
    sprintf(cw_path, "/caps_wzfile_%d_%d", mypid, i);
    int wfd = ceph_open(cmount, cw_path, O_CREAT|O_TRUNC|O_WRONLY, 0644);
    ASSERT_LT(0, wfd);
    // wbuf is deliberately left uninitialized; only the written size is
    // asserted, the contents are never read back.
    char wbuf[4096];
    ASSERT_EQ(4096, ceph_write(cmount, wfd, wbuf, 4096, 0));
    ASSERT_EQ(0, ceph_close(cmount, wfd));
    struct ceph_statx stx;
    ASSERT_EQ(0, ceph_statx(cmount, c_path, &stx, CEPH_STATX_MTIME, 0));
    caps = ceph_debug_get_file_caps(cmount, c_path);
    ASSERT_EQ(expect, caps & expect);
  }
  // Phase 2: delay cap-releasing ticks, then re-open the empty files
  // read-only; each read must return 0 bytes and keep the read caps.
  ASSERT_EQ(0, ceph_conf_set(cmount, "client_debug_inject_tick_delay", "20"));
  for(i = 0; i < 30; ++i) {
    char c_path[1024];
    sprintf(c_path, "/caps_rzfile_%d_%d", mypid, i);
    int fd = ceph_open(cmount, c_path, O_RDONLY, 0);
    ASSERT_LT(0, fd);
    char buf[256];
    // NOTE(review): CEPH_STAT_CAP_SIZE looks like a statx mask mixed into a
    // cap-bit mask here — confirm against the cap constant definitions.
    int expect = CEPH_CAP_FILE_RD | CEPH_STAT_CAP_SIZE | CEPH_CAP_FILE_CACHE;
    int caps = ceph_debug_get_fd_caps(cmount, fd);
    ASSERT_EQ(expect, caps & expect);
    // files were created empty, so the read must return 0 bytes
    ASSERT_EQ(0, ceph_read(cmount, fd, buf, 256, 0));
    caps = ceph_debug_get_fd_caps(cmount, fd);
    ASSERT_EQ(expect, caps & expect);
    ASSERT_EQ(0, ceph_close(cmount, fd));
  }
  ceph_shutdown(cmount);
}
| 2,644 | 26.552083 | 78 |
cc
|
null |
ceph-main/src/test/libcephfs/ceph_pthread_self.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBCEPHFS_PTHREAD_SELF
#define CEPH_TEST_LIBCEPHFS_PTHREAD_SELF
#include <pthread.h>
#include <type_traits>
/*
* There is a difference between libc shipped with FreeBSD and
* glibc shipped with GNU/Linux for the return type of pthread_self().
*
* Introduced a conversion function in include/compat.h
* (uint64_t)ceph_pthread_self()
*
* libc returns an opague pthread_t that is not default convertable
* to a uint64_t, which is what gtest expects.
* And tests using gtest will not compile because of this difference.
*
*/
/// Return the calling thread's pthread id widened to uint64_t, the type that
/// gtest assertions (and the flock "owner" parameter) can compare directly.
static uint64_t ceph_pthread_self() {
  const auto self_id = pthread_self();
  using id_t = decltype(self_id);
  static_assert(std::is_convertible_v<id_t, uint64_t> ||
                std::is_pointer_v<id_t>,
                "we need to use pthread_self() for the owner parameter");
  return static_cast<uint64_t>(self_id);
}
#endif
| 958 | 28.96875 | 73 |
h
|
null |
ceph-main/src/test/libcephfs/deleg.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Tests for Ceph delegation handling
*
* (c) 2017, Jeff Layton <[email protected]>
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
#include <map>
#include <vector>
#include <thread>
#include <atomic>
#include "include/ceph_assert.h"
/* in ms -- 1 minute */
#define MAX_WAIT (60 * 1000)
// Spin (1ms at a time) until `recalled` becomes true, failing the enclosing
// test if it has not flipped within MAX_WAIT iterations.
static void wait_for_atomic_bool(std::atomic_bool &recalled)
{
  for (int waited = 0; !recalled.load(); ++waited) {
    ASSERT_LT(waited, MAX_WAIT);
    usleep(1000);
  }
}
// Request a delegation, retrying on EAGAIN while the MDS settles.
// Retries at 10ms intervals for up to ~10s; returns the last result.
static int ceph_ll_delegation_wait(struct ceph_mount_info *cmount, Fh *fh,
				   unsigned cmd, ceph_deleg_cb_t cb, void *priv)
{
  int retries = 0;
  int ret;
  for (;;) {
    ret = ceph_ll_delegation(cmount, fh, cmd, cb, priv);
    usleep(10000);
    if (ret != -CEPHFS_EAGAIN || retries++ >= 1000)
      break;
  }
  return ret;
}
// Set the delegation timeout to one second under the client's cap-return
// timeout, the largest value the client will accept.
static int set_default_deleg_timeout(struct ceph_mount_info *cmount)
{
  const uint32_t timeout = ceph_get_cap_return_timeout(cmount);
  return ceph_set_deleg_timeout(cmount, timeout - 1);
}
/*
 * Delegation-recall callback used by all tests in this file: just record
 * that the recall fired.  @priv points at the caller's std::atomic_bool.
 */
static void dummy_deleg_cb(Fh *fh, void *priv)
{
  // static_cast is the correct named cast for void* -> object pointer;
  // the original C-style cast hid that intent.
  auto *recalled = static_cast<std::atomic_bool *>(priv);
  recalled->store(true);
}
/*
 * Worker thread body: open `filename` with `flags`, which is expected to
 * force a delegation recall on the mount holding the delegation.  If
 * `cmount` is NULL a private mount is created (multi-client case) and torn
 * down on exit.  Sets *opened once the open finally succeeds.
 */
static void open_breaker_func(struct ceph_mount_info *cmount, const char *filename, int flags, std::atomic_bool *opened)
{
  bool do_shutdown = false;
  if (!cmount) {
    ASSERT_EQ(ceph_create(&cmount, NULL), 0);
    ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
    ASSERT_EQ(ceph_conf_parse_env(cmount, NULL), 0);
    ASSERT_EQ(ceph_mount(cmount, "/"), 0);
    ASSERT_EQ(set_default_deleg_timeout(cmount), 0);
    do_shutdown = true;
  }
  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);
  ASSERT_EQ(ceph_ll_lookup(cmount, root, filename, &file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
  int ret, i = 0;
  // the open returns EAGAIN until the delegation is recalled; poll every 1ms
  for (;;) {
    ASSERT_EQ(ceph_ll_getattr(cmount, file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
    ret = ceph_ll_open(cmount, file, flags, &fh, perms);
    if (ret != -CEPHFS_EAGAIN)
      break;
    ASSERT_LT(i++, MAX_WAIT);
    usleep(1000);
  }
  ASSERT_EQ(ret, 0);
  opened->store(true);
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  if (do_shutdown)
    ceph_shutdown(cmount);
}
/* Namespace-altering operations driven by namespace_breaker_func(). */
enum {
  DelegTestLink,
  DelegTestRename,
  DelegTestUnlink
};
/*
 * Worker thread body: perform a namespace operation (link/rename/unlink per
 * `cmd`) on `oldname`, which should force a delegation recall.  If `cmount`
 * is NULL a private mount is created and torn down on exit.  Retries on
 * EAGAIN until the delegation is returned.
 */
static void namespace_breaker_func(struct ceph_mount_info *cmount, int cmd, const char *oldname, const char *newname)
{
  bool do_shutdown = false;
  if (!cmount) {
    ASSERT_EQ(ceph_create(&cmount, NULL), 0);
    ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
    ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
    ASSERT_EQ(ceph_mount(cmount, "/"), 0);
    ASSERT_EQ(set_default_deleg_timeout(cmount), 0);
    do_shutdown = true;
  }
  Inode *root, *file = nullptr;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);
  int ret, i = 0;
  for (;;) {
    switch (cmd) {
    case DelegTestRename:
      ret = ceph_ll_rename(cmount, root, oldname, root, newname, perms);
      break;
    case DelegTestLink:
      // lazily resolve the source inode once, on the first attempt
      if (!file) {
	ASSERT_EQ(ceph_ll_lookup(cmount, root, oldname, &file, &stx, 0, 0, perms), 0);
      }
      ret = ceph_ll_link(cmount, file, root, newname, perms);
      break;
    case DelegTestUnlink:
      ret = ceph_ll_unlink(cmount, root, oldname, perms);
      break;
    default:
      // Bad command
      ceph_abort();
    }
    if (ret != -CEPHFS_EAGAIN)
      break;
    ASSERT_LT(i++, MAX_WAIT);
    usleep(1000);
  }
  ASSERT_EQ(ret, 0);
  if (do_shutdown)
    ceph_shutdown(cmount);
}
/*
 * Core delegation scenario driver, shared by the single- and multi-client
 * tests.  `cmount` holds the delegations; `tcmount` is the mount used by the
 * breaker threads (NULL means each breaker creates its own mount).  Each
 * scenario acquires a delegation, triggers a conflicting operation from
 * another thread, waits for the recall, and returns the delegation.
 */
static void simple_deleg_test(struct ceph_mount_info *cmount, struct ceph_mount_info *tcmount)
{
  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  char filename[32];
  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);
  std::atomic_bool recalled(false);
  std::atomic_bool opened(false);
  // ensure r/w open breaks a r/w delegation
  sprintf(filename, "deleg.rwrw.%x", getpid());
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
	  O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker1(open_breaker_func, tcmount, filename, O_RDWR, &opened);
  wait_for_atomic_bool(recalled);
  // the conflicting open must stay blocked until we return the delegation
  ASSERT_EQ(opened.load(), false);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker1.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, filename, perms), 0);
  // ensure r/o open breaks a r/w delegation
  recalled.store(false);
  opened.store(false);
  sprintf(filename, "deleg.rorw.%x", getpid());
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
	  O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker2(open_breaker_func, tcmount, filename, O_RDONLY, &opened);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(opened.load(), false);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker2.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, filename, perms), 0);
  // ensure r/o open does not break a r/o delegation
  sprintf(filename, "deleg.rwro.%x", getpid());
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
	  O_RDONLY|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  recalled.store(false);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_RD, dummy_deleg_cb, &recalled), 0);
  std::thread breaker3(open_breaker_func, tcmount, filename, O_RDONLY, &opened);
  breaker3.join();
  ASSERT_EQ(recalled.load(), false);
  // ensure that r/w open breaks r/o delegation
  opened.store(false);
  std::thread breaker4(open_breaker_func, tcmount, filename, O_WRONLY, &opened);
  wait_for_atomic_bool(recalled);
  usleep(1000);
  ASSERT_EQ(opened.load(), false);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker4.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, filename, perms), 0);
  // ensure hardlinking breaks a r/w delegation
  recalled.store(false);
  char newname[32];
  sprintf(filename, "deleg.old.%x", getpid());
  sprintf(newname, "deleg.new.%x", getpid());
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
	  O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker5(namespace_breaker_func, tcmount, DelegTestLink, filename, newname);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker5.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, filename, perms), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, newname, perms), 0);
  // ensure renaming breaks a r/w delegation
  recalled.store(false);
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
	  O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker6(namespace_breaker_func, tcmount, DelegTestRename, filename, newname);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker6.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_unlink(cmount, root, newname, perms), 0);
  // ensure unlinking breaks a r/w delegation
  recalled.store(false);
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
	  O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_WR, dummy_deleg_cb, &recalled), 0);
  std::thread breaker7(namespace_breaker_func, tcmount, DelegTestUnlink, filename, nullptr);
  wait_for_atomic_bool(recalled);
  ASSERT_EQ(ceph_ll_delegation(cmount, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, &recalled), 0);
  breaker7.join();
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
}
// Run the delegation scenarios with the breakers on their own mounts
// (tcmount == nullptr), i.e. conflicts come from different clients.
TEST(LibCephFS, DelegMultiClient) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  ASSERT_EQ(set_default_deleg_timeout(cmount), 0);
  simple_deleg_test(cmount, nullptr);
  ceph_shutdown(cmount);
}
// Run the delegation scenarios with the breakers sharing the delegation
// holder's mount, i.e. conflicts come from the same client.
TEST(LibCephFS, DelegSingleClient) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  ASSERT_EQ(set_default_deleg_timeout(cmount), 0);
  simple_deleg_test(cmount, cmount);
  ceph_shutdown(cmount);
}
// Verify that a client that never returns a recalled delegation gets cut
// off: after the (shortened) timeout the session is dead and further calls
// fail with ENOTCONN.
TEST(LibCephFS, DelegTimeout) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  // tweak timeout to run quickly, since we don't plan to return it anyway
  ASSERT_EQ(ceph_set_deleg_timeout(cmount, 2), 0);
  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  char filename[32];
  sprintf(filename, "delegtimeo%x", getpid());
  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
	  O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  /* Reopen read-only */
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ASSERT_EQ(ceph_ll_open(cmount, file, O_RDONLY, &fh, perms), 0);
  std::atomic_bool recalled(false);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount, fh, CEPH_DELEGATION_RD, dummy_deleg_cb, &recalled), 0);
  std::atomic_bool opened(false);
  std::thread breaker1(open_breaker_func, nullptr, filename, O_RDWR, &opened);
  breaker1.join();
  ASSERT_EQ(recalled.load(), true);
  // session was torn down by the MDS; note the deliberate ceph_release
  // without unmount, since the mount is no longer connected
  ASSERT_EQ(ceph_ll_getattr(cmount, root, &stx, 0, 0, perms), -CEPHFS_ENOTCONN);
  ceph_release(cmount);
}
// Verify that getattr keeps working on a client while its read delegation is
// being recalled (regression test for getattr stalling during recall).
TEST(LibCephFS, RecalledGetattr) {
  struct ceph_mount_info *cmount1;
  ASSERT_EQ(ceph_create(&cmount1, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount1, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount1, NULL));
  ASSERT_EQ(ceph_mount(cmount1, "/"), 0);
  ASSERT_EQ(set_default_deleg_timeout(cmount1), 0);
  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount1, &root), 0);
  char filename[32];
  sprintf(filename, "recalledgetattr%x", getpid());
  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount1);
  ASSERT_EQ(ceph_ll_create(cmount1, root, filename, 0666,
	  O_RDWR|O_CREAT|O_EXCL, &file, &fh, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_write(cmount1, fh, 0, sizeof(filename), filename),
	    static_cast<int>(sizeof(filename)));
  ASSERT_EQ(ceph_ll_close(cmount1, fh), 0);
  /* New mount for read delegation */
  struct ceph_mount_info *cmount2;
  ASSERT_EQ(ceph_create(&cmount2, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount2, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount2, NULL));
  ASSERT_EQ(ceph_mount(cmount2, "/"), 0);
  ASSERT_EQ(set_default_deleg_timeout(cmount2), 0);
  ASSERT_EQ(ceph_ll_lookup_root(cmount2, &root), 0);
  perms = ceph_mount_perms(cmount2);
  ASSERT_EQ(ceph_ll_lookup(cmount2, root, filename, &file, &stx, 0, 0, perms), 0);
  ASSERT_EQ(ceph_ll_open(cmount2, file, O_WRONLY, &fh, perms), 0);
  ASSERT_EQ(ceph_ll_write(cmount2, fh, 0, sizeof(filename), filename),
	    static_cast<int>(sizeof(filename)));
  ASSERT_EQ(ceph_ll_close(cmount2, fh), 0);
  ASSERT_EQ(ceph_ll_open(cmount2, file, O_RDONLY, &fh, perms), 0);
  /* Break delegation */
  std::atomic_bool recalled(false);
  ASSERT_EQ(ceph_ll_delegation_wait(cmount2, fh, CEPH_DELEGATION_RD, dummy_deleg_cb, &recalled), 0);
  ASSERT_EQ(ceph_ll_read(cmount2, fh, 0, sizeof(filename), filename),
	    static_cast<int>(sizeof(filename)));
  ASSERT_EQ(ceph_ll_getattr(cmount2, file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
  std::atomic_bool opened(false);
  std::thread breaker1(open_breaker_func, cmount1, filename, O_WRONLY, &opened);
  int i = 0;
  // keep issuing getattrs while the recall is in flight; each must succeed
  do {
    ASSERT_EQ(ceph_ll_getattr(cmount2, file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
    ASSERT_LT(i++, MAX_WAIT);
    usleep(1000);
  } while (!recalled.load());
  ASSERT_EQ(opened.load(), false);
  ASSERT_EQ(ceph_ll_getattr(cmount2, file, &stx, CEPH_STATX_ALL_STATS, 0, perms), 0);
  ASSERT_EQ(ceph_ll_delegation(cmount2, fh, CEPH_DELEGATION_NONE, dummy_deleg_cb, nullptr), 0);
  breaker1.join();
  ASSERT_EQ(ceph_ll_close(cmount2, fh), 0);
  ceph_unmount(cmount2);
  ceph_release(cmount2);
  ceph_unmount(cmount1);
  ceph_release(cmount1);
}
| 13,454 | 32.470149 | 120 |
cc
|
null |
ceph-main/src/test/libcephfs/flock.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <pthread.h>
#include "gtest/gtest.h"
#ifndef GTEST_IS_THREADSAFE
#error "!GTEST_IS_THREADSAFE"
#endif
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <stdlib.h>
#include <semaphore.h>
#include <time.h>
#ifndef _WIN32
#include <sys/mman.h>
#endif
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#elif __FreeBSD__
#include <sys/types.h>
#include <sys/wait.h>
#endif
#include "include/ceph_assert.h"
#include "ceph_pthread_self.h"
// Startup common: create and mount ceph fs
// NOTE: expands gtest ASSERTs, so it must be used inside a test body, and it
// expects a local `struct ceph_mount_info *cmount` to be in scope.
#define STARTUP_CEPH() do { \
  ASSERT_EQ(0, ceph_create(&cmount, NULL)); \
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL)); \
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL)); \
  ASSERT_EQ(0, ceph_mount(cmount, NULL)); \
} while(0)
// Cleanup common: unmount and release ceph fs
#define CLEANUP_CEPH() do { \
  ASSERT_EQ(0, ceph_unmount(cmount)); \
  ASSERT_EQ(0, ceph_release(cmount)); \
} while(0)
// 0777: rwx for user/group/other on the test files
static const mode_t fileMode = S_IRWXU | S_IRWXG | S_IRWXO;
// Default wait time for normal and "slow" operations
// (5" should be enough in case of network congestion)
static const long waitMs = 10;
static const long waitSlowMs = 5000;
// Get the absolute struct timespec reference from now + 'ms' milliseconds.
// Fills `ts` and returns its address (suitable for sem_timedwait, which
// takes an absolute CLOCK_REALTIME deadline).
static const struct timespec* abstime(struct timespec &ts, long ms) {
  if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
    ceph_abort();
  }
  // Split ms into whole seconds and sub-second milliseconds before scaling:
  // the previous `ms * 1000000` overflows a 32-bit long for ms >= ~2148
  // (waitSlowMs is 5000), yielding a deadline in the past.
  ts.tv_sec += ms / 1000;
  ts.tv_nsec += (ms % 1000) * 1000000;
  // normalize so tv_nsec stays in [0, 1e9) as POSIX requires
  ts.tv_sec += ts.tv_nsec / 1000000000;
  ts.tv_nsec %= 1000000000;
  return &ts;
}
/* Basic locking */
// Single-mount flock semantics: exclusive locks conflict, shared locks
// stack, and POSIX upgrade/downgrade between SH and EX works in place.
// The last ceph_flock argument is the lock owner id.
TEST(LibCephFS, BasicLocking) {
  struct ceph_mount_info *cmount = NULL;
  STARTUP_CEPH();
  char c_file[1024];
  sprintf(c_file, "/flock_test_%d", getpid());
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  // Lock exclusively twice
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 43));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 44));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 43));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 44));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 43));
  // Lock shared three times
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 43));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 44));
  // And then attempt to lock exclusively
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 44));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 43));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 45));
  // Lock shared with upgrade to exclusive (POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
  // Lock exclusive with downgrade to shared (POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
/* Locking in different threads */
// Used by ConcurrentLocking test
struct str_ConcurrentLocking {
const char *file;
struct ceph_mount_info *cmount; // !NULL if shared
sem_t sem[2];
sem_t semReply[2];
void sem_init(int pshared) {
ASSERT_EQ(0, ::sem_init(&sem[0], pshared, 0));
ASSERT_EQ(0, ::sem_init(&sem[1], pshared, 0));
ASSERT_EQ(0, ::sem_init(&semReply[0], pshared, 0));
ASSERT_EQ(0, ::sem_init(&semReply[1], pshared, 0));
}
void sem_destroy() {
ASSERT_EQ(0, ::sem_destroy(&sem[0]));
ASSERT_EQ(0, ::sem_destroy(&sem[1]));
ASSERT_EQ(0, ::sem_destroy(&semReply[0]));
ASSERT_EQ(0, ::sem_destroy(&semReply[1]));
}
};
// The PING_*/WAIT_* macros below implement the main<->worker handshake.
// They expect locals named `s` (str_ConcurrentLocking) and `ts` (timespec)
// in the enclosing scope; the step number n selects one of the two
// semaphores so adjacent steps cannot alias.
// Wakeup main (for (N) steps)
#define PING_MAIN(n) ASSERT_EQ(0, sem_post(&s.sem[n%2]))
// Wait for main to wake us up (for (RN) steps)
#define WAIT_MAIN(n) \
  ASSERT_EQ(0, sem_timedwait(&s.semReply[n%2], abstime(ts, waitSlowMs)))
// Wakeup worker (for (RN) steps)
#define PING_WORKER(n) ASSERT_EQ(0, sem_post(&s.semReply[n%2]))
// Wait for worker to wake us up (for (N) steps)
#define WAIT_WORKER(n) \
  ASSERT_EQ(0, sem_timedwait(&s.sem[n%2], abstime(ts, waitSlowMs)))
// Worker shall not wake us up (for (N) steps)
#define NOT_WAIT_WORKER(n) \
  ASSERT_EQ(-1, sem_timedwait(&s.sem[n%2], abstime(ts, waitMs)))
// Do twice an operation
#define TWICE(EXPR) do { \
  EXPR; \
  EXPR; \
} while(0)
/* Locking in different threads */
// Used by ConcurrentLocking test
/*
 * Worker-thread half of the ConcurrentLocking handshake.  Each numbered
 * PING/WAIT step pairs with the matching step in the main test body; the
 * lock owner is this thread's id.
 */
static void thread_ConcurrentLocking(str_ConcurrentLocking& s) {
  struct ceph_mount_info *const cmount = s.cmount;
  struct timespec ts;
  const int fd = ceph_open(cmount, s.file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  // main currently holds LOCK_EX, so a non-blocking attempt must fail
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  PING_MAIN(1); // (1)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  PING_MAIN(2); // (2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(3); // (3)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, ceph_pthread_self()));
  PING_MAIN(4); // (4)
  WAIT_MAIN(1); // (R1)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(5); // (5)
  WAIT_MAIN(2); // (R2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  PING_MAIN(6); // (6)
  WAIT_MAIN(3); // (R3)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(7); // (7)
}
// Used by ConcurrentLocking test
// pthread entry shim: unpack the shared-state argument and run the worker.
static void* thread_ConcurrentLocking_(void *arg) {
  // static_cast is the correct named cast for void* -> object pointer;
  // reinterpret_cast was stronger than needed here.
  str_ConcurrentLocking *const s =
    static_cast<str_ConcurrentLocking*>(arg);
  thread_ConcurrentLocking(*s);
  return NULL;
}
// Main half of the two-thread flock handshake: drives one worker thread
// through the numbered steps of thread_ConcurrentLocking.
TEST(LibCephFS, ConcurrentLocking) {
  const pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();
  char c_file[1024];
  sprintf(c_file, "/flock_test_%d", mypid);
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  // Lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  // Start locker thread
  pthread_t thread;
  struct timespec ts;
  str_ConcurrentLocking s = { c_file, cmount };
  s.sem_init(0);
  ASSERT_EQ(0, pthread_create(&thread, NULL, thread_ConcurrentLocking_, &s));
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(1); // (1)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)
  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  // Shall have lock
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(2); // (2)
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(3); // (3)
  // Wait for thread to share lock
  WAIT_WORKER(4); // (4)
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));
  // Wake up thread to unlock shared lock
  PING_WORKER(1); // (R1)
  WAIT_WORKER(5); // (5)
  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  // Wake up thread to lock shared lock
  PING_WORKER(2); // (R2)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(6); // (6)
  // Release lock ; thread will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  WAIT_WORKER(6); // (6)
  // We no longer have the lock
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));
  // Wake up thread to unlock exclusive lock
  PING_WORKER(3); // (R3)
  WAIT_WORKER(7); // (7)
  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  // Cleanup
  void *retval = (void*) (uintptr_t) -1;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  ASSERT_EQ(NULL, retval);
  s.sem_destroy();
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
// Same handshake as ConcurrentLocking but with two worker threads; TWICE()
// expects each step's signal from both workers.
TEST(LibCephFS, ThreesomeLocking) {
  const pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();
  char c_file[1024];
  sprintf(c_file, "/flock_test_%d", mypid);
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  // Lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  // Start locker thread
  pthread_t thread[2];
  struct timespec ts;
  str_ConcurrentLocking s = { c_file, cmount };
  s.sem_init(0);
  ASSERT_EQ(0, pthread_create(&thread[0], NULL, thread_ConcurrentLocking_, &s));
  ASSERT_EQ(0, pthread_create(&thread[1], NULL, thread_ConcurrentLocking_, &s));
  // Synchronization point with thread (failure: thread is dead)
  TWICE(WAIT_WORKER(1)); // (1)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)
  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  // Shall have lock
  TWICE(// Synchronization point with thread (failure: thread is dead)
	WAIT_WORKER(2); // (2)
	// Synchronization point with thread (failure: thread is dead)
	WAIT_WORKER(3)); // (3)
  // Wait for thread to share lock
  TWICE(WAIT_WORKER(4)); // (4)
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));
  // Wake up thread to unlock shared lock
  TWICE(PING_WORKER(1); // (R1)
	WAIT_WORKER(5)); // (5)
  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  TWICE( // Wake up thread to lock shared lock
	PING_WORKER(2); // (R2)
	// Shall not have lock immediately
	NOT_WAIT_WORKER(6)); // (6)
  // Release lock ; thread will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  TWICE(WAIT_WORKER(6); // (6)
	// We no longer have the lock
	ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
	ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));
	// Wake up thread to unlock exclusive lock
	PING_WORKER(3); // (R3)
	WAIT_WORKER(7); // (7)
	);
  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  // Cleanup
  void *retval = (void*) (uintptr_t) -1;
  ASSERT_EQ(0, pthread_join(thread[0], &retval));
  ASSERT_EQ(NULL, retval);
  ASSERT_EQ(0, pthread_join(thread[1], &retval));
  ASSERT_EQ(NULL, retval);
  s.sem_destroy();
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
/* Locking in different processes */
// Declares a scope-local delay constant (milliseconds) — presumably consumed
// by the WAIT_*/NOT_WAIT_* timing macros (defined elsewhere); confirm against
// their definitions.  The (void) cast silences the unused-variable warning
// when a test body never references waitMs directly.
#define PROCESS_SLOW_MS() \
  static const long waitMs = 100; \
  (void) waitMs
// Used by ConcurrentLocking test
// Child-process body for the inter-process flock tests.  It walks a fixed
// lock/unlock protocol in lockstep with the parent: each WAIT_MAIN(n) blocks
// until the parent calls PING_WORKER(n), and each PING_MAIN(n) signals the
// parent's matching WAIT_WORKER(n).  The "(n)"/"(Rn)" trailing comments are
// the step numbers referenced at the call sites in the parent tests.
static void process_ConcurrentLocking(str_ConcurrentLocking& s) {
  const pid_t mypid = getpid();
  PROCESS_SLOW_MS();
  struct ceph_mount_info *cmount = NULL;
  struct timespec ts;  // presumably used by the WAIT_MAIN macros — confirm
  STARTUP_CEPH();
  s.cmount = cmount;
  const int fd = ceph_open(cmount, s.file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  WAIT_MAIN(1); // (R1)
  // Parent holds LOCK_EX at this point, so a non-blocking request must fail.
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  PING_MAIN(1); // (1)
  // Blocking request: succeeds once the parent releases its lock.
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  PING_MAIN(2); // (2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(3); // (3)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, mypid));
  PING_MAIN(4); // (4)
  WAIT_MAIN(2); // (R2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(5); // (5)
  WAIT_MAIN(3); // (R3)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  PING_MAIN(6); // (6)
  WAIT_MAIN(4); // (R4)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(7); // (7)
  CLEANUP_CEPH();
  // NOTE(review): the child destroys the shared semaphores here, yet the
  // parent calls s.sem_destroy() again after waitpid() — looks like a double
  // destroy; confirm against str_ConcurrentLocking::sem_destroy.
  s.sem_destroy();
  exit(EXIT_SUCCESS);
}
#ifndef _WIN32
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
//
// Drives one child process (process_ConcurrentLocking) through a fixed flock
// lock/unlock protocol.  The numbered PING_WORKER/WAIT_WORKER steps must
// stay in lockstep with the "(n)"/"(Rn)" annotations in the child body.
TEST(LibCephFS, DISABLED_InterProcessLocking) {
  PROCESS_SLOW_MS();
  // Process synchronization
  char c_file[1024];
  const pid_t mypid = getpid();
  // Bounded formatting of the per-run test file name (was sprintf()).
  snprintf(c_file, sizeof(c_file), "/flock_test_%d", mypid);
  // Note: the semaphores MUST be on a shared memory segment
  void *const shmem =
    mmap(0, sizeof(str_ConcurrentLocking), PROT_READ | PROT_WRITE,
	 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, shmem);  // mmap() failure was previously unchecked
  str_ConcurrentLocking *const shs =
    reinterpret_cast<str_ConcurrentLocking*>(shmem);
  str_ConcurrentLocking &s = *shs;
  s.file = c_file;
  s.sem_init(1);
  // Start locker process
  const pid_t pid = fork();
  ASSERT_GE(pid, 0);
  if (pid == 0) {
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);
  }
  struct timespec ts;
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  // Lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  // Synchronization point with process (failure: process is dead)
  PING_WORKER(1); // (R1)
  WAIT_WORKER(1); // (1)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)
  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  // Shall have lock
  // Synchronization point with process (failure: process is dead)
  WAIT_WORKER(2); // (2)
  // Synchronization point with process (failure: process is dead)
  WAIT_WORKER(3); // (3)
  // Wait for process to share lock
  WAIT_WORKER(4); // (4)
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));
  // Wake up process to unlock shared lock
  PING_WORKER(2); // (R2)
  WAIT_WORKER(5); // (5)
  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  // Wake up process to lock shared lock
  PING_WORKER(3); // (R3)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(6); // (6)
  // Release lock ; process will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  WAIT_WORKER(6); // (6)
  // We no longer have the lock
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));
  // Wake up process to unlock exclusive lock
  PING_WORKER(4); // (R4)
  WAIT_WORKER(7); // (7)
  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  // Wait pid
  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);
  // Cleanup
  s.sem_destroy();
  ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
#endif
#ifndef _WIN32
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
//
// Same protocol as DISABLED_InterProcessLocking but with two child
// processes; TWICE() repeats each synchronization step once per child.
TEST(LibCephFS, DISABLED_ThreesomeInterProcessLocking) {
  PROCESS_SLOW_MS();
  // Process synchronization
  char c_file[1024];
  const pid_t mypid = getpid();
  // Bounded formatting of the per-run test file name (was sprintf()).
  snprintf(c_file, sizeof(c_file), "/flock_test_%d", mypid);
  // Note: the semaphores MUST be on a shared memory segment
  void *const shmem =
    mmap(0, sizeof(str_ConcurrentLocking), PROT_READ | PROT_WRITE,
	 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, shmem);  // mmap() failure was previously unchecked
  str_ConcurrentLocking *const shs =
    reinterpret_cast<str_ConcurrentLocking*>(shmem);
  str_ConcurrentLocking &s = *shs;
  s.file = c_file;
  s.sem_init(1);
  // Start locker processes
  pid_t pid[2];
  pid[0] = fork();
  ASSERT_GE(pid[0], 0);
  if (pid[0] == 0) {
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);
  }
  pid[1] = fork();
  ASSERT_GE(pid[1], 0);
  if (pid[1] == 0) {
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);
  }
  struct timespec ts;
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  // Lock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  // Synchronization point with process (failure: process is dead)
  TWICE(PING_WORKER(1)); // (R1)
  TWICE(WAIT_WORKER(1)); // (1)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)
  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  // Shall have lock
  TWICE(// Synchronization point with process (failure: process is dead)
	WAIT_WORKER(2); // (2)
	// Synchronization point with process (failure: process is dead)
	WAIT_WORKER(3)); // (3)
  // Wait for process to share lock
  TWICE(WAIT_WORKER(4)); // (4)
  ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));
  // Wake up process to unlock shared lock
  TWICE(PING_WORKER(2); // (R2)
	WAIT_WORKER(5)); // (5)
  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  TWICE( // Wake up process to lock shared lock
	PING_WORKER(3); // (R3)
	// Shall not have lock immediately
	NOT_WAIT_WORKER(6)); // (6)
  // Release lock ; process will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  TWICE(WAIT_WORKER(6); // (6)
	// We no longer have the lock
	ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
	ASSERT_EQ(-CEPHFS_EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));
	// Wake up process to unlock exclusive lock
	PING_WORKER(4); // (R4)
	WAIT_WORKER(7); // (7)
	);
  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  // Wait pids
  int status;
  ASSERT_EQ(pid[0], waitpid(pid[0], &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);
  ASSERT_EQ(pid[1], waitpid(pid[1], &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);
  // Cleanup
  s.sem_destroy();
  ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
#endif
| 19,512 | 28.79084 | 83 |
cc
|
null |
ceph-main/src/test/libcephfs/lazyio.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat Ltd
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/rados/librados.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#if defined(__linux__)
#include <sys/xattr.h>
#endif
// Global RADOS cluster handle; created/connected in main() and shut down
// after RUN_ALL_TESTS().
rados_t cluster;
// One lazy-io writer (client a, O_RDWR) and one lazy-io reader (client b,
// O_RDONLY): writes become visible to the reader only after the writer
// propagates and the reader synchronizes.  ("Muliple" in the test name is a
// historical typo; kept so test filters keep matching.)
TEST(LibCephFS, LazyIOOneWriterMulipleReaders) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);
  char name[20];
  snprintf(name, sizeof(name), "foo.%d", getpid());
  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDONLY, 0644);
  ASSERT_LE(0, fdb);
  // Enable lazy I/O on both handles.
  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));
  char out_buf[] = "fooooooooo";
  /* Client a issues a write and propagates/flushes the buffer */
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 0));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));
  /* Client a issues a write and propagates/flushes the buffer */
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 10));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));
  char in_buf[40];
  /* Calling ceph_lazyio_synchronize here will invalidate client b's cache and hence enable client b to fetch the propagated writes of client a in the subsequent read */
  ASSERT_EQ(0, ceph_lazyio_synchronize(cb, fdb, 0, 0));
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");
  /* Client a does not need to call ceph_lazyio_synchronize here because it is the latest writer and fda holds the updated inode*/
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");
  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
// Two lazy-io clients that both write and read: each writer's data becomes
// visible to the other only after propagate + synchronize; the most recent
// writer can read back without synchronizing.
TEST(LibCephFS, LazyIOMultipleWritersMulipleReaders) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);
  char name[20];
  snprintf(name, sizeof(name), "foo2.%d", getpid());
  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDWR, 0644);
  ASSERT_LE(0, fdb);
  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));
  char out_buf[] = "fooooooooo";
  /* Client a issues a write and propagates/flushes the buffer */
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 0));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));
  /* Client b issues a write and propagates/flushes the buffer*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 10));
  ASSERT_EQ(0, ceph_lazyio_propagate(cb, fdb, 0, 0));
  char in_buf[40];
  /* Calling ceph_lazyio_synchronize here will invalidate client a's cache and hence enable client a to fetch the propagated writes of client b in the subsequent read */
  ASSERT_EQ(0, ceph_lazyio_synchronize(ca, fda, 0, 0));
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");
  /* Client b does not need to call ceph_lazyio_synchronize here because it is the latest writer and the writes before it have already been propagated*/
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");
  /* Client a issues a write */
  char wait_out_buf[] = "foobarbars";
  ASSERT_EQ((int)sizeof(wait_out_buf), ceph_write(ca, fda, wait_out_buf, sizeof(wait_out_buf), 20));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));
  /* Client a does not need to call ceph_lazyio_synchronize here because it is the latest writer and the writes before it have already been propagated*/
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), (2*(strlen(out_buf)))+strlen(wait_out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooofoobarbars");
  /* Calling ceph_lazyio_synchronize here will invalidate client b's cache and hence enable client b to fetch the propagated write of client a in the subsequent read */
  ASSERT_EQ(0, ceph_lazyio_synchronize(cb, fdb, 0, 0));
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), (2*(strlen(out_buf)))+strlen(wait_out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooofoobarbars");
  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
// A reader that has NOT synchronized only sees its own propagated writes;
// after ceph_lazyio_synchronize its cache is invalidated and it sees all
// propagated writes from other clients as well.
TEST(LibCephFS, LazyIOMultipleWritersOneReader) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);
  char name[20];
  snprintf(name, sizeof(name), "foo3.%d", getpid());
  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDWR, 0644);
  ASSERT_LE(0, fdb);
  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));
  char out_buf[] = "fooooooooo";
  /* Client a issues a write and propagates/flushes the buffer */
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 0));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));
  /* Client b issues a write and propagates/flushes the buffer*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 10));
  ASSERT_EQ(0, ceph_lazyio_propagate(cb, fdb, 0, 0));
  char in_buf[40];
  /* Client a reads the file and verifies that it only reads it's propagated writes and not Client b's*/
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooo");
  /* Client a reads the file again, this time with a lazyio_synchronize to check if the cache gets invalidated and data is refetched i.e all the propagated writes are being read*/
  ASSERT_EQ(0, ceph_lazyio_synchronize(ca, fda, 0, 0));
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), 2*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooo");
  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
TEST(LibCephFS, LazyIOSynchronizeFlush) {
  /* Test to make sure lazyio_synchronize flushes dirty buffers */
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);
  char name[20];
  snprintf(name, sizeof(name), "foo4.%d", getpid());
  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDWR, 0644);
  ASSERT_LE(0, fdb);
  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));
  char out_buf[] = "fooooooooo";
  /* Client a issues a write and propagates it*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 0));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));
  /* Client b issues writes and without lazyio_propagate*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 10));
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 20));
  char in_buf[40];
  /* Calling ceph_lazyio_synchronize here will first flush the possibly pending buffered write of client b and invalidate client b's cache and hence enable client b to fetch all the propagated writes */
  ASSERT_EQ(0, ceph_lazyio_synchronize(cb, fdb, 0, 0));
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), 3*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooofooooooooo");
  /* Required to call ceph_lazyio_synchronize here since client b is the latest writer and client a is out of sync with updated file*/
  ASSERT_EQ(0, ceph_lazyio_synchronize(ca, fda, 0, 0));
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), 3*strlen(out_buf)+1);
  ASSERT_STREQ(in_buf, "fooooooooofooooooooofooooooooo");
  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
// Mixes ordinary (non-lazy) writes with lazy-io writes on the same file and
// verifies both are visible after propagate + synchronize.
TEST(LibCephFS, WithoutandWithLazyIO) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);
  char name[20];
  snprintf(name, sizeof(name), "foo5.%d", getpid());
  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_RDWR, 0644);
  ASSERT_LE(0, fdb);
  char out_buf_w[] = "1234567890";
  /* Doing some non lazyio writes and read*/
  ASSERT_EQ((int)sizeof(out_buf_w), ceph_write(ca, fda, out_buf_w, sizeof(out_buf_w), 0));
  ASSERT_EQ((int)sizeof(out_buf_w), ceph_write(cb, fdb, out_buf_w, sizeof(out_buf_w), 10));
  char in_buf_w[30];
  ASSERT_EQ(ceph_read(ca, fda, in_buf_w, sizeof(in_buf_w), 0), 2*strlen(out_buf_w)+1);
  /* Enable lazyio*/
  ASSERT_EQ(0, ceph_lazyio(ca, fda, 1));
  ASSERT_EQ(0, ceph_lazyio(cb, fdb, 1));
  char out_buf[] = "fooooooooo";
  /* Client a issues a write and propagates/flushes the buffer*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(ca, fda, out_buf, sizeof(out_buf), 20));
  ASSERT_EQ(0, ceph_lazyio_propagate(ca, fda, 0, 0));
  /* Client b issues a write and propagates/flushes the buffer*/
  ASSERT_EQ((int)sizeof(out_buf), ceph_write(cb, fdb, out_buf, sizeof(out_buf), 30));
  ASSERT_EQ(0, ceph_lazyio_propagate(cb, fdb, 0, 0));
  char in_buf[50];
  /* Calling ceph_lazyio_synchronize here will invalidate client a's cache and hence enable client a to fetch the propagated writes of client b in the subsequent read */
  ASSERT_EQ(0, ceph_lazyio_synchronize(ca, fda, 0, 0));
  ASSERT_EQ(ceph_read(ca, fda, in_buf, sizeof(in_buf), 0), (2*(strlen(out_buf)))+(2*(strlen(out_buf_w)))+1);
  ASSERT_STREQ(in_buf, "12345678901234567890fooooooooofooooooooo");
  /* Client b does not need to call ceph_lazyio_synchronize here because it is the latest writer and the writes before it have already been propagated*/
  ASSERT_EQ(ceph_read(cb, fdb, in_buf, sizeof(in_buf), 0), (2*(strlen(out_buf)))+(2*(strlen(out_buf_w)))+1);
  ASSERT_STREQ(in_buf, "12345678901234567890fooooooooofooooooooo");
  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
// Best-effort helper run before the tests: mounts "/" with client
// permission checks disabled and chmods the root to 0777 so unprivileged
// test clients can create files there.  Returns a negative error code on
// failure, otherwise the result of the chmod.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int r = ceph_create(&admin, NULL);
  if (r < 0)
    return r;
  // Config-load results are intentionally ignored (best effort).
  ceph_conf_read_file(admin, NULL);
  ceph_conf_parse_env(admin, NULL);
  ceph_conf_set(admin, "client_permissions", "false");
  r = ceph_mount(admin, "/");
  if (r >= 0)
    r = ceph_chmod(admin, "/", 0777);
  ceph_shutdown(admin);
  return r;
}
// Test-suite entry point: relaxes root permissions, initializes gtest, and
// connects the global RADOS cluster handle before running the tests.
int main(int argc, char **argv)
{
  // Make the FS root world-writable (chmod 0777 via update_root_mode) so
  // unprivileged test clients can create files; bail out if that fails.
  int r = update_root_mode();
  if (r < 0)
    exit(1);
  ::testing::InitGoogleTest(&argc, argv);
  srand(getpid());
  // Set up and connect the global `cluster` handle declared above.
  r = rados_create(&cluster, NULL);
  if (r < 0)
    exit(1);
  r = rados_conf_read_file(cluster, NULL);
  if (r < 0)
    exit(1);
  rados_conf_parse_env(cluster, NULL);
  r = rados_connect(cluster);
  if (r < 0)
    exit(1);
  r = RUN_ALL_TESTS();
  rados_shutdown(cluster);
  return r;
}
| 12,730 | 34.661064 | 202 |
cc
|
null |
ceph-main/src/test/libcephfs/main.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
// Best-effort helper run before the tests: mounts "/" with client
// permission checks disabled and chmods the root to 01777 (sticky,
// world-writable) so unprivileged test clients can create files there.
// Returns a negative error code on failure, otherwise the chmod result.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int r = ceph_create(&admin, NULL);
  if (r < 0)
    return r;
  // Config-load results are intentionally ignored (best effort).
  ceph_conf_read_file(admin, NULL);
  ceph_conf_parse_env(admin, NULL);
  ceph_conf_set(admin, "client_permissions", "false");
  r = ceph_mount(admin, "/");
  if (r >= 0)
    r = ceph_chmod(admin, "/", 01777);
  ceph_shutdown(admin);
  return r;
}
// Test-suite entry point: relax root permissions, then hand control to
// googletest.
int main(int argc, char **argv)
{
  const int rc = update_root_mode();
  if (rc < 0)
    exit(1);
  ::testing::InitGoogleTest(&argc, argv);
  srand(getpid());
  return RUN_ALL_TESTS();
}
| 1,108 | 20.745098 | 70 |
cc
|
null |
ceph-main/src/test/libcephfs/monconfig.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "common/ceph_context.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
// Fixture that hands each test an *unmounted* client handle (ca) so the
// test can manipulate mon-address configuration before calling ceph_mount().
class MonConfig : public ::testing::Test
{
  protected:
    // Created in SetUp(), not yet mounted; shut down in TearDown().
    struct ceph_mount_info *ca;
    void SetUp() override {
      ASSERT_EQ(0, ceph_create(&ca, NULL));
      ASSERT_EQ(0, ceph_conf_read_file(ca, NULL));
      ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
    }
    void TearDown() override {
      ceph_shutdown(ca);
    }
    // Helper to remove/unset all possible mon information from ConfigProxy
    void clear_mon_config(CephContext *cct) {
      auto& conf = cct->_conf;
      // Clear safe_to_start_threads, allowing updates to config values
      conf._clear_safe_to_start_threads();
      ASSERT_EQ(0, conf.set_val("monmap", "", nullptr));
      ASSERT_EQ(0, conf.set_val("mon_host", "", nullptr));
      ASSERT_EQ(0, conf.set_val("mon_dns_srv_name", "", nullptr));
      conf.set_safe_to_start_threads();
    }
    // Helper to test basic operation on a mount
    // (creates and closes a per-pid file named "<prefix>.<pid>").
    void use_mount(struct ceph_mount_info *mnt, std::string name_prefix) {
      char name[20];
      snprintf(name, sizeof(name), "%s.%d", name_prefix.c_str(), getpid());
      int fd = ceph_open(mnt, name, O_CREAT|O_RDWR, 0644);
      ASSERT_LE(0, fd);
      ceph_close(mnt, fd);
    }
};
// Mount must fail with ENOENT when every mon-address config source
// (monmap, mon_host, mon_dns_srv_name) has been cleared.
TEST_F(MonConfig, MonAddrsMissing) {
  CephContext *cct;
  // Test mount failure when there is no known mon config source
  cct = ceph_get_mount_context(ca);
  ASSERT_NE(nullptr, cct);
  clear_mon_config(cct);
  ASSERT_EQ(-CEPHFS_ENOENT, ceph_mount(ca, NULL));
}
// Baseline: with the default ConfigProxy mon sources untouched the mount
// succeeds and the mount is usable.
TEST_F(MonConfig, MonAddrsInConfigProxy) {
  // Test a successful mount with default mon config source in ConfigProxy
  ASSERT_EQ(0, ceph_mount(ca, NULL));
  use_mount(ca, "foo");
}
// A second mount created from an already-bootstrapped CephContext must
// succeed even after all ConfigProxy mon sources are cleared, i.e. the
// mon addresses cached in the CephContext alone are sufficient.
TEST_F(MonConfig, MonAddrsInCct) {
  struct ceph_mount_info *cb;
  CephContext *cct;
  // Perform mount to bootstrap mon addrs in CephContext
  ASSERT_EQ(0, ceph_mount(ca, NULL));
  // Reuse bootstrapped CephContext, clearing ConfigProxy mon addr sources
  cct = ceph_get_mount_context(ca);
  ASSERT_NE(nullptr, cct);
  clear_mon_config(cct);
  ASSERT_EQ(0, ceph_create_with_context(&cb, cct));
  // Test a successful mount with only mon values in CephContext
  ASSERT_EQ(0, ceph_mount(cb, NULL));
  use_mount(ca, "bar");
  use_mount(cb, "bar");
  ceph_shutdown(cb);
}
| 2,854 | 26.990196 | 75 |
cc
|
null |
ceph-main/src/test/libcephfs/multiclient.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <thread>
#ifdef __linux__
#include <sys/xattr.h>
#endif
// Two independent mounts ping-pong interleaved 4-byte writes/reads on the
// same file, verifying each client immediately observes the other's
// (non-lazy) writes.
TEST(LibCephFS, MulticlientSimple) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);
  char name[20];
  snprintf(name, sizeof(name), "foo.%d", getpid());
  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fdb);
  char bufa[4] = "foo";
  char bufb[4];
  // Alternate writer/reader roles at offsets i*6 and i*6+3.
  for (int i=0; i<10; i++) {
    strcpy(bufa, "foo");
    ASSERT_EQ((int)sizeof(bufa), ceph_write(ca, fda, bufa, sizeof(bufa), i*6));
    ASSERT_EQ((int)sizeof(bufa), ceph_read(cb, fdb, bufb, sizeof(bufa), i*6));
    ASSERT_EQ(0, memcmp(bufa, bufb, sizeof(bufa)));
    strcpy(bufb, "bar");
    ASSERT_EQ((int)sizeof(bufb), ceph_write(cb, fdb, bufb, sizeof(bufb), i*6+3));
    ASSERT_EQ((int)sizeof(bufb), ceph_read(ca, fda, bufa, sizeof(bufb), i*6+3));
    ASSERT_EQ(0, memcmp(bufa, bufb, sizeof(bufa)));
  }
  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
// Client a truncates the file out to 1MB (creating a hole); client b must
// read zeros near EOF and get a short read (2 bytes) at offset EOF-2.
TEST(LibCephFS, MulticlientHoleEOF) {
  struct ceph_mount_info *ca, *cb;
  ASSERT_EQ(ceph_create(&ca, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
  ASSERT_EQ(ceph_mount(ca, NULL), 0);
  ASSERT_EQ(ceph_create(&cb, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
  ASSERT_EQ(ceph_mount(cb, NULL), 0);
  char name[20];
  snprintf(name, sizeof(name), "foo.%d", getpid());
  int fda = ceph_open(ca, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fda);
  int fdb = ceph_open(cb, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fdb);
  ASSERT_EQ(3, ceph_write(ca, fda, "foo", 3, 0));
  // Extend to 1MB; bytes past offset 3 are a hole (read back as zeros).
  ASSERT_EQ(0, ceph_ftruncate(ca, fda, 1000000));
  char buf[4];
  ASSERT_EQ(2, ceph_read(cb, fdb, buf, sizeof(buf), 1000000-2));
  ASSERT_EQ(0, buf[0]);
  ASSERT_EQ(0, buf[1]);
  ceph_close(ca, fda);
  ceph_close(cb, fdb);
  ceph_shutdown(ca);
  ceph_shutdown(cb);
}
static void write_func(bool *stop)
{
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char name[20];
snprintf(name, sizeof(name), "foo.%d", getpid());
int fd = ceph_open(cmount, name, O_CREAT|O_RDWR, 0644);
ASSERT_LE(0, fd);
int buf_size = 4096;
char *buf = (char *)malloc(buf_size);
if (!buf) {
*stop = true;
printf("write_func failed to allocate buffer!");
return;
}
memset(buf, 1, buf_size);
while (!(*stop)) {
int i;
// truncate the file size to 4096 will set the max_size to 4MB.
ASSERT_EQ(0, ceph_ftruncate(cmount, fd, 4096));
// write 4MB + extra 64KB data will make client to trigger to
// call check_cap() to report new size. And if MDS is revoking
// the Fsxrw caps and we are still holding the Fw caps and will
// trigger tracker#57244.
for (i = 0; i < 1040; i++) {
ASSERT_EQ(ceph_write(cmount, fd, buf, buf_size, 0), buf_size);
}
}
ceph_shutdown(cmount);
}
// Lock-churn thread body for MulticlientRevokeCaps: repeatedly issues a
// size setattr on the same per-pid file as write_func so the MDS keeps
// xlocking the filelock and revoking caps from the writer client.  Runs
// until *stop becomes true.
static void setattr_func(bool *stop)
{
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char name[20];
  // Same "foo.<pid>" name as write_func, so both threads hit one file.
  snprintf(name, sizeof(name), "foo.%d", getpid());
  int fd = ceph_open(cmount, name, O_CREAT|O_RDWR, 0644);
  ASSERT_LE(0, fd);
  while (!(*stop)) {
    // setattr will make the MDS to acquire xlock for the filelock and
    // force to revoke caps from clients
    struct ceph_statx stx = {.stx_size = 0};
    ASSERT_EQ(ceph_fsetattrx(cmount, fd, &stx, CEPH_SETATTR_SIZE), 0);
  }
  // NOTE(review): fd is never ceph_close()d here — presumably
  // ceph_shutdown() reclaims open handles; confirm.
  ceph_shutdown(cmount);
}
// Stress test: run a writer thread and a setattr thread against the same
// file for 60 seconds to exercise MDS cap revocation (see tracker#57244
// note in write_func).
TEST(LibCephFS, MulticlientRevokeCaps) {
  std::thread thread1, thread2;
  // NOTE(review): `stop` is written here and read concurrently by both
  // worker threads without synchronization — strictly a data race; a
  // std::atomic<bool> fix would require changing the worker signatures.
  bool stop = false;
  int wait = 60; // in second
  thread1 = std::thread(write_func, &stop);
  thread2 = std::thread(setattr_func, &stop);
  printf(" Will run test for %d seconds!\n", wait);
  sleep(wait);
  stop = true;
  thread1.join();
  thread2.join();
}
| 5,089 | 27.121547 | 81 |
cc
|
null |
ceph-main/src/test/libcephfs/newops.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
#include "gmock/gmock-matchers.h"
#include "gmock/gmock-more-matchers.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "mds/mdstypes.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
#include <fmt/format.h>
#include <map>
#include <vector>
#include <thread>
#include <regex>
#include <string>
using ::testing::AnyOf;
using ::testing::Gt;
using ::testing::Eq;
using namespace std;
/*
* Test this with different ceph versions
*/
// Exercises newer MDS ops ("ceph.dir.pin.random" get/set vxattr) in a way
// that tolerates older clusters: each call accepts either success or the
// specific error an older ceph returns when the op is unsupported.
TEST(LibCephFS, NewOPs)
{
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  const char *test_path = "test_newops_dir";
  ASSERT_EQ(0, ceph_mkdirs(cmount, test_path, 0777));
  {
    char value[1024] = "";
    int r = ceph_getxattr(cmount, test_path, "ceph.dir.pin.random", (void*)value, sizeof(value));
    // Clients will return -CEPHFS_ENODATA if new getvxattr op not support yet.
    EXPECT_THAT(r, AnyOf(Gt(0), Eq(-CEPHFS_ENODATA)));
  }
  {
    double val = (double)1.0/(double)128.0;
    std::stringstream ss;
    ss << val;
    // Fix: materialize ss.str() once.  The old code passed the data pointer
    // from one ss.str() temporary while computing the length via strlen()
    // on a *different* ss.str() temporary.
    const std::string sval = ss.str();
    int r = ceph_setxattr(cmount, test_path, "ceph.dir.pin.random", (void*)sval.c_str(), sval.size(), XATTR_CREATE);
    // Old cephs will return -CEPHFS_EINVAL if not support "ceph.dir.pin.random" yet.
    EXPECT_THAT(r, AnyOf(Eq(0), Eq(-CEPHFS_EINVAL)));
    char value[1024] = "";
    r = ceph_getxattr(cmount, test_path, "ceph.dir.pin.random", (void*)value, sizeof(value));
    // Clients will return -CEPHFS_ENODATA if new getvxattr op not support yet.
    EXPECT_THAT(r, AnyOf(Gt(0), Eq(-CEPHFS_ENODATA)));
  }
  ASSERT_EQ(0, ceph_rmdir(cmount, test_path));
  ceph_shutdown(cmount);
}
| 2,418 | 26.488636 | 133 |
cc
|
null |
ceph-main/src/test/libcephfs/quota.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "mds/mdstypes.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/resource.h>
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
// Verify that quota vxattrs (ceph.quota.max_bytes) set on directories are
// readable through a snapshot of those directories, that a directory without
// a quota reports ENODATA under the snapshot, and that listxattr() does not
// enumerate the quota vxattr.
TEST(LibCephFS, SnapQuota) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_snap_dir_quota_xattr[256];
  char test_snap_subdir_quota_xattr[256];
  char test_snap_subdir_noquota_xattr[256];
  char xattrk[128];
  char xattrv[128];
  char c_temp[PATH_MAX];
  char gxattrv[128];
  int xbuflen = sizeof(gxattrv);
  pid_t mypid = getpid(); // make test paths unique per run
  // create dir and set quota (value length 5 == strlen("65536"))
  sprintf(test_snap_dir_quota_xattr, "test_snap_dir_quota_xattr_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, test_snap_dir_quota_xattr, 0777));
  sprintf(xattrk, "ceph.quota.max_bytes");
  sprintf(xattrv, "65536");
  ASSERT_EQ(0, ceph_setxattr(cmount, test_snap_dir_quota_xattr, xattrk, (void *)xattrv, 5, XATTR_CREATE));
  // create subdir and set a smaller quota on it
  sprintf(test_snap_subdir_quota_xattr, "test_snap_dir_quota_xattr_%d/subdir_quota", mypid);
  ASSERT_EQ(0, ceph_mkdirs(cmount, test_snap_subdir_quota_xattr, 0777));
  sprintf(xattrk, "ceph.quota.max_bytes");
  sprintf(xattrv, "32768");
  ASSERT_EQ(0, ceph_setxattr(cmount, test_snap_subdir_quota_xattr, xattrk, (void *)xattrv, 5, XATTR_CREATE));
  // create subdir with no quota
  sprintf(test_snap_subdir_noquota_xattr, "test_snap_dir_quota_xattr_%d/subdir_noquota", mypid);
  ASSERT_EQ(0, ceph_mkdirs(cmount, test_snap_subdir_noquota_xattr, 0777));
  // snapshot dir (snapshots of "/" live under /.snap)
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d", mypid);
  ASSERT_EQ(0, ceph_mkdirs(cmount, c_temp, 0777));
  // check dir quota under snap
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d/test_snap_dir_quota_xattr_%d", mypid, mypid);
  int alen = ceph_getxattr(cmount, c_temp, "ceph.quota.max_bytes", (void *)gxattrv, xbuflen);
  ASSERT_LT(0, alen);
  ASSERT_LT(alen, xbuflen); // leave room for the terminator added below
  gxattrv[alen] = '\0';
  ASSERT_STREQ(gxattrv, "65536");
  // check subdir quota under snap
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d/test_snap_dir_quota_xattr_%d/subdir_quota", mypid, mypid);
  alen = ceph_getxattr(cmount, c_temp, "ceph.quota.max_bytes", (void *)gxattrv, xbuflen);
  ASSERT_LT(0, alen);
  ASSERT_LT(alen, xbuflen);
  gxattrv[alen] = '\0';
  ASSERT_STREQ(gxattrv, "32768");
  // ensure subdir with no quota reports ENODATA under snap
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d/test_snap_dir_quota_xattr_%d/subdir_noquota", mypid, mypid);
  EXPECT_EQ(-CEPHFS_ENODATA, ceph_getxattr(cmount, c_temp, "ceph.quota.max_bytes", (void *)gxattrv, xbuflen));
  // listxattr() shouldn't return ceph.quota.max_bytes vxattr
  sprintf(c_temp, "/.snap/test_snap_dir_quota_xattr_snap_%d/test_snap_dir_quota_xattr_%d", mypid, mypid);
  char xattrlist[512];
  int len = ceph_listxattr(cmount, c_temp, xattrlist, sizeof(xattrlist));
  ASSERT_GE(sizeof(xattrlist), (size_t)len);
  // walk the NUL-separated name list returned by listxattr()
  char *p = xattrlist;
  int found = 0;
  while (len > 0) {
    if (strcmp(p, "ceph.quota.max_bytes") == 0)
      found++;
    len -= strlen(p) + 1;
    p += strlen(p) + 1;
  }
  ASSERT_EQ(found, 0);
  ceph_shutdown(cmount);
}
void statfs_quota_size_check(struct ceph_mount_info *cmount, const char *path,
int blocks, int bsize)
{
struct statvfs stvfs;
ASSERT_EQ(0, ceph_statfs(cmount, path, &stvfs));
ASSERT_EQ(blocks, stvfs.f_blocks);
ASSERT_EQ(bsize, stvfs.f_bsize);
ASSERT_EQ(bsize, stvfs.f_frsize);
}
// Verify that a quota realm is inherited across nested directories: a child
// dir with only a max_files quota must still report the parent's max_bytes
// quota through statfs() when mounted at either directory.
TEST(LibCephFS, QuotaRealm) {
  struct ceph_mount_info *cmount, *pmount1, *pmount2;
  char test_quota_realm_pdir[128];
  char test_quota_realm_cdir[256];
  char xattrk[32];
  char xattrv[16];
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  pid_t mypid = getpid(); // make test paths unique per run
  // create parent directory and set quota size (length 7 == strlen("8388608"))
  sprintf(test_quota_realm_pdir, "/test_quota_realm_pdir_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, test_quota_realm_pdir, 0777));
  sprintf(xattrk, "ceph.quota.max_bytes");
  sprintf(xattrv, "8388608"); // 8MB
  ASSERT_EQ(0, ceph_setxattr(cmount, test_quota_realm_pdir, xattrk, (void *)xattrv, 7, XATTR_CREATE));
  // create child directory and set quota file (length 4 == strlen("1024"))
  sprintf(test_quota_realm_cdir, "%s/test_quota_realm_cdir", test_quota_realm_pdir);
  ASSERT_EQ(0, ceph_mkdir(cmount, test_quota_realm_cdir, 0777));
  sprintf(xattrk, "ceph.quota.max_files");
  sprintf(xattrv, "1024"); // 1K files
  ASSERT_EQ(0, ceph_setxattr(cmount, test_quota_realm_cdir, xattrk, (void *)xattrv, 4, XATTR_CREATE));
  // mount at the parent: statfs must reflect the 8MB byte quota
  ASSERT_EQ(ceph_create(&pmount1, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(pmount1, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(pmount1, NULL));
  ASSERT_EQ(ceph_mount(pmount1, test_quota_realm_pdir), 0);
  statfs_quota_size_check(pmount1, "/", 2, 4194304); // 8MB
  // mount at the child: byte quota is inherited from the parent realm
  ASSERT_EQ(ceph_create(&pmount2, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(pmount2, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(pmount2, NULL));
  ASSERT_EQ(ceph_mount(pmount2, test_quota_realm_cdir), 0);
  statfs_quota_size_check(pmount2, "/", 2, 4194304); // 8MB
  ceph_shutdown(pmount1);
  ceph_shutdown(pmount2);
  ceph_shutdown(cmount);
}
| 6,023 | 34.857143 | 120 |
cc
|
null |
ceph-main/src/test/libcephfs/readdir_r_cb.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include <errno.h>
#include <fcntl.h>
// Exercise ceph_getdnames(): empty directory (only "." and ".."), directory
// with one entry, undersized buffer (-CEPHFS_ERANGE), and a listing that must
// be split across two calls.
TEST(LibCephFS, ReaddirRCB) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  // consistency fix: every other libcephfs test also applies env overrides
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  char c_dir[256];
  sprintf(c_dir, "/readdir_r_cb_tests_%d", getpid());
  struct ceph_dir_result *dirp;
  ASSERT_EQ(0, ceph_mkdirs(cmount, c_dir, 0777));
  ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp));
  // dir is empty, check that it only contains . and ..
  int buflen = 100;
  char *buf = new char[buflen];
  // . is 2, .. is 3 (for null terminators)
  ASSERT_EQ(5, ceph_getdnames(cmount, dirp, buf, buflen));
  char c_file[256];
  sprintf(c_file, "/readdir_r_cb_tests_%d/foo", getpid());
  int fd = ceph_open(cmount, c_file, O_CREAT, 0777);
  ASSERT_LT(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // check correctness with one entry
  ASSERT_LE(0, ceph_closedir(cmount, dirp));
  ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp));
  ASSERT_EQ(9, ceph_getdnames(cmount, dirp, buf, buflen)); // ., .., foo
  // check correctness if buffer is too small
  ASSERT_LE(0, ceph_closedir(cmount, dirp));
  // bug fix: was ASSERT_GE(0, ...), which asserted opendir <= 0 and therefore
  // also passed on failure; opendir must succeed (>= 0) like every other call
  ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp));
  ASSERT_EQ(-CEPHFS_ERANGE, ceph_getdnames(cmount, dirp, buf, 1));
  //check correctness if it needs to split listing
  ASSERT_LE(0, ceph_closedir(cmount, dirp));
  ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp));
  ASSERT_EQ(5, ceph_getdnames(cmount, dirp, buf, 6));
  ASSERT_EQ(4, ceph_getdnames(cmount, dirp, buf, 6));
  delete[] buf; // bug fix: buffer was leaked
  // free cmount after finishing testing
  ASSERT_LE(0, ceph_closedir(cmount, dirp));
  ASSERT_EQ(0, ceph_unmount(cmount));
  ASSERT_EQ(0, ceph_release(cmount));
}
| 2,223 | 32.69697 | 72 |
cc
|
null |
ceph-main/src/test/libcephfs/reclaim.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Tests for Ceph delegation handling
*
* (c) 2017, Jeff Layton <[email protected]>
*/
#include "gtest/gtest.h"
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <libgen.h>
#include <stdlib.h>
#ifdef __linux__
#include <sys/xattr.h>
#include <limits.h>
#endif
#ifdef __FreeBSD__
#include <sys/wait.h>
#endif
#include <map>
#include <vector>
#include <thread>
#include <atomic>
#define CEPHFS_RECLAIM_TIMEOUT 60
// Child-process half of the ReclaimReset test: mount with the given uuid,
// create a file named after the uuid while holding it open, then exit
// WITHOUT unmounting or closing anything, deliberately leaving a dead
// session for the parent to reclaim. Returns 0 on success, 1 on any failure
// (plain returns, not gtest asserts, because this runs in a separate exec).
static int dying_client(int argc, char **argv)
{
  struct ceph_mount_info *cmount;
  /* Caller must pass in the uuid */
  if (argc < 2)
    return 1;
  if (ceph_create(&cmount, nullptr) != 0)
    return 1;
  if (ceph_conf_read_file(cmount, nullptr) != 0)
    return 1;
  if (ceph_conf_parse_env(cmount, nullptr) != 0)
    return 1;
  if (ceph_init(cmount) != 0)
    return 1;
  ceph_set_session_timeout(cmount, CEPHFS_RECLAIM_TIMEOUT);
  // nothing to reclaim yet: the uuid must be unknown to the MDS
  if (ceph_start_reclaim(cmount, argv[1], CEPH_RECLAIM_RESET) != -CEPHFS_ENOENT)
    return 1;
  ceph_set_uuid(cmount, argv[1]);
  if (ceph_mount(cmount, "/") != 0)
    return 1;
  Inode *root, *file;
  if (ceph_ll_lookup_root(cmount, &root) != 0)
    return 1;
  Fh *fh;
  struct ceph_statx stx;
  UserPerm *perms = ceph_mount_perms(cmount);
  if (ceph_ll_create(cmount, root, argv[1], 0666, O_RDWR|O_CREAT|O_EXCL,
                     &file, &fh, &stx, 0, 0, perms) != 0)
    return 1;
  // intentionally no close/unmount: exit with the session still live
  return 0;
}
// Fork+exec a "dying client" (this same binary re-run with a uuid argument,
// handled by main()), let it exit without unmounting, then reclaim its
// session by uuid and verify the file it created is accessible.
TEST(LibCephFS, ReclaimReset) {
  pid_t pid;
  char uuid[256];
  const char *exe = "/proc/self/exe"; // re-exec ourselves (Linux)
  sprintf(uuid, "simplereclaim:%x", getpid());
  pid = fork();
  ASSERT_GE(pid, 0);
  if (pid == 0) {
    errno = 0;
    execl(exe, exe, uuid, nullptr);
    /* It won't be zero of course, which is sort of the point... */
    ASSERT_EQ(errno, 0); // only reached if execl failed
  }
  /* parent - wait for child to exit */
  int ret;
  pid_t wp = wait(&ret);
  ASSERT_GE(wp, 0);
  ASSERT_EQ(WIFEXITED(ret), true);
  ASSERT_EQ(WEXITSTATUS(ret), 0); // dying_client() must have returned 0
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, nullptr), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, nullptr), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, nullptr));
  ASSERT_EQ(ceph_init(cmount), 0);
  ceph_set_session_timeout(cmount, CEPHFS_RECLAIM_TIMEOUT);
  // take over the dead client's session, then mount under the same uuid
  ASSERT_EQ(ceph_start_reclaim(cmount, uuid, CEPH_RECLAIM_RESET), 0);
  ceph_set_uuid(cmount, uuid);
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  Inode *root, *file;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  UserPerm *perms = ceph_mount_perms(cmount);
  struct ceph_statx stx;
  // the file created by the dead client must exist and be openable
  ASSERT_EQ(ceph_ll_lookup(cmount, root, uuid, &file, &stx, 0, 0, perms), 0);
  Fh *fh;
  ASSERT_EQ(ceph_ll_open(cmount, file, O_WRONLY, &fh, perms), 0);
  ceph_unmount(cmount);
  ceph_release(cmount);
}
// One-shot admin mount that chmods "/" to 01777 (sticky, world-writable) so
// unprivileged test clients can create entries there. Returns a negative
// error from create/mount/chmod, or the chmod result on success.
static int update_root_mode()
{
  struct ceph_mount_info *admin;
  int r = ceph_create(&admin, nullptr);
  if (r < 0)
    return r;
  ceph_conf_read_file(admin, nullptr);
  ceph_conf_parse_env(admin, nullptr);
  ceph_conf_set(admin, "client_permissions", "false");
  r = ceph_mount(admin, "/");
  if (r >= 0)
    r = ceph_chmod(admin, "/", 01777);
  ceph_shutdown(admin);
  return r;
}
// Entry point doubles as the "dying client" when re-exec'd with a uuid
// argument (see ReclaimReset). InitGoogleTest runs first so gtest flags are
// stripped from argv before the argc check.
int main(int argc, char **argv)
{
  int r = update_root_mode();
  if (r < 0)
    exit(1);
  ::testing::InitGoogleTest(&argc, argv);
  if (argc > 1)
    return dying_client(argc, argv);
  srand(getpid());
  return RUN_ALL_TESTS();
}
| 3,627 | 21.121951 | 80 |
cc
|
null |
ceph-main/src/test/libcephfs/recordlock.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
* 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <pthread.h>
#include "gtest/gtest.h"
#ifndef GTEST_IS_THREADSAFE
#error "!GTEST_IS_THREADSAFE"
#endif
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include <errno.h>
#include <sys/fcntl.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <stdlib.h>
#include <semaphore.h>
#include <time.h>
#ifndef _WIN32
#include <sys/mman.h>
#endif
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#elif __FreeBSD__
#include <sys/types.h>
#include <sys/wait.h>
#endif
#include "include/ceph_assert.h"
#include "ceph_pthread_self.h"
// Startup common: create and mount ceph fs
// (expects a 'struct ceph_mount_info *cmount' in the caller's scope)
#define STARTUP_CEPH() do { \
ASSERT_EQ(0, ceph_create(&cmount, NULL)); \
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL)); \
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL)); \
ASSERT_EQ(0, ceph_mount(cmount, NULL)); \
} while(0)
// Cleanup common: unmount and release ceph fs
#define CLEANUP_CEPH() do { \
ASSERT_EQ(0, ceph_unmount(cmount)); \
ASSERT_EQ(0, ceph_release(cmount)); \
} while(0)
// Mode for files created by these tests
static const mode_t fileMode = S_IRWXU | S_IRWXG | S_IRWXO;
// Default wait time for normal and "slow" operations
// (5" should be enough in case of network congestion)
static const long waitMs = 10;
static const long waitSlowMs = 5000;
// Get the absolute struct timespec reference from now + 'ms' milliseconds.
// Fills 'ts' and returns its address (suitable for sem_timedwait()).
static const struct timespec* abstime(struct timespec &ts, long ms) {
  if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
    ceph_abort();
  }
  // Split ms into whole seconds and a sub-second remainder before scaling:
  // the original 'ms * 1000000' overflows a 32-bit long for waitSlowMs=5000
  // (5e9 > LONG_MAX on ILP32 targets). Then renormalize tv_nsec into [0, 1e9).
  ts.tv_sec += ms / 1000;
  ts.tv_nsec += (ms % 1000) * 1000000;
  ts.tv_sec += ts.tv_nsec / 1000000000;
  ts.tv_nsec %= 1000000000;
  return &ts;
}
/* Basic locking */
// Single-client POSIX record-lock semantics through the low-level API:
// conflict detection between two owners (42 and 43), range extension and
// splitting on partial unlock, and read/write downgrade/upgrade. The last
// ceph_ll_setlk argument is 'sleep': false == non-blocking (EAGAIN on
// conflict), true == wait for the lock.
TEST(LibCephFS, BasicRecordLocking) {
  struct ceph_mount_info *cmount = NULL;
  STARTUP_CEPH();
  char c_file[1024];
  sprintf(c_file, "recordlock_test_%d", getpid());
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  int rc;
  struct flock lock1, lock2;
  UserPerm *perms = ceph_mount_perms(cmount);
  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);
  // Get the inode and Fh corresponding to c_file
  rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
                      &inode, &fh, &stx, 0, 0, perms);
  ASSERT_EQ(rc, 0);
  // write lock twice: owner 42 takes [0,1024), owner 43 must get EAGAIN
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));
  lock2.l_type = F_WRLCK;
  lock2.l_whence = SEEK_SET;
  lock2.l_start = 0;
  lock2.l_len = 1024;
  lock2.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock2, 43, false));
  // Now try a conflicting read lock
  lock2.l_type = F_RDLCK;
  lock2.l_whence = SEEK_SET;
  lock2.l_start = 100;
  lock2.l_len = 100;
  lock2.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock2, 43, false));
  // Now do a getlk: lock2 is overwritten with the blocking lock's extent
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 1024);
  ASSERT_EQ(lock2.l_pid, getpid());
  // Extend the range of the write lock: [0,1024) + [1024,2048) coalesce
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 1024;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));
  // Now do a getlk
  lock2.l_type = F_RDLCK;
  lock2.l_whence = SEEK_SET;
  lock2.l_start = 100;
  lock2.l_len = 100;
  lock2.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 2048);
  ASSERT_EQ(lock2.l_pid, getpid());
  // Now release part of the range: [512,1536) splits the lock in two
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 512;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));
  // Now do a getlk to check 1st part
  lock2.l_type = F_RDLCK;
  lock2.l_whence = SEEK_SET;
  lock2.l_start = 100;
  lock2.l_len = 100;
  lock2.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 512);
  ASSERT_EQ(lock2.l_pid, getpid());
  // Now do a getlk to check 2nd part
  lock2.l_type = F_RDLCK;
  lock2.l_whence = SEEK_SET;
  lock2.l_start = 2000;
  lock2.l_len = 100;
  lock2.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 1536);
  ASSERT_EQ(lock2.l_len, 512);
  ASSERT_EQ(lock2.l_pid, getpid());
  // Now do a getlk to check released part
  lock2.l_type = F_RDLCK;
  lock2.l_whence = SEEK_SET;
  lock2.l_start = 512;
  lock2.l_len = 1024;
  lock2.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_UNLCK);
  ASSERT_EQ(lock2.l_start, 512);
  ASSERT_EQ(lock2.l_len, 1024);
  ASSERT_EQ(lock2.l_pid, getpid());
  // Now downgrade the 1st part of the lock to a read lock
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 512;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));
  // Now do a getlk to check 1st part
  lock2.l_type = F_WRLCK;
  lock2.l_whence = SEEK_SET;
  lock2.l_start = 100;
  lock2.l_len = 100;
  lock2.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_RDLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 512);
  ASSERT_EQ(lock2.l_pid, getpid());
  // Now upgrade the 1st part of the lock back to a write lock
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 512;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, 42, false));
  // Now do a getlk to check 1st part
  lock2.l_type = F_WRLCK;
  lock2.l_whence = SEEK_SET;
  lock2.l_start = 100;
  lock2.l_len = 100;
  lock2.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_getlk(cmount, fh, &lock2, 43));
  ASSERT_EQ(lock2.l_type, F_WRLCK);
  ASSERT_EQ(lock2.l_start, 0);
  ASSERT_EQ(lock2.l_len, 512);
  ASSERT_EQ(lock2.l_pid, getpid());
  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
  ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
  CLEANUP_CEPH();
}
/* Locking in different threads */
// Used by ConcurrentLocking test
// Shared state for the concurrent-locking tests: the file to lock, an
// optional shared mount, and two ping-pong semaphore pairs (worker->main in
// sem[], main->worker in semReply[]) used by the PING_/WAIT_ macros below.
struct str_ConcurrentRecordLocking {
  const char *file;
  struct ceph_mount_info *cmount; // !NULL if shared
  sem_t sem[2];
  sem_t semReply[2];
  // pshared != 0 when the struct lives in shared memory (fork-based tests)
  void sem_init(int pshared) {
    ASSERT_EQ(0, ::sem_init(&sem[0], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&sem[1], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&semReply[0], pshared, 0));
    ASSERT_EQ(0, ::sem_init(&semReply[1], pshared, 0));
  }
  void sem_destroy() {
    ASSERT_EQ(0, ::sem_destroy(&sem[0]));
    ASSERT_EQ(0, ::sem_destroy(&sem[1]));
    ASSERT_EQ(0, ::sem_destroy(&semReply[0]));
    ASSERT_EQ(0, ::sem_destroy(&semReply[1]))
;
  }
};
// Handshake helpers between the test (main) and its worker thread/process;
// steps are numbered (N) worker-side and (RN) main-side, alternating between
// the two semaphores in each pair. All expect 's' and 'ts' in scope.
// Wakeup main (for (N) steps)
#define PING_MAIN(n) ASSERT_EQ(0, sem_post(&s.sem[n%2]))
// Wait for main to wake us up (for (RN) steps)
#define WAIT_MAIN(n) \
ASSERT_EQ(0, sem_timedwait(&s.semReply[n%2], abstime(ts, waitSlowMs)))
// Wakeup worker (for (RN) steps)
#define PING_WORKER(n) ASSERT_EQ(0, sem_post(&s.semReply[n%2]))
// Wait for worker to wake us up (for (N) steps)
#define WAIT_WORKER(n) \
ASSERT_EQ(0, sem_timedwait(&s.sem[n%2], abstime(ts, waitSlowMs)))
// Worker shall not wake us up (for (N) steps)
#define NOT_WAIT_WORKER(n) \
ASSERT_EQ(-1, sem_timedwait(&s.sem[n%2], abstime(ts, waitMs)))
// Do twice an operation
#define TWICE(EXPR) do { \
EXPR; \
EXPR; \
} while(0)
/* Locking in different threads */
// Used by ConcurrentLocking test
// Worker side of ConcurrentRecordLocking: opens the shared mount's test file
// and walks a fixed lock/unlock script, reporting each completed step to
// main via PING_MAIN(n) and pausing at WAIT_MAIN(n) checkpoints. Statement
// order is the protocol; do not reorder.
static void thread_ConcurrentRecordLocking(str_ConcurrentRecordLocking& s) {
  struct ceph_mount_info *const cmount = s.cmount;
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  struct flock lock1;
  int rc;
  struct timespec ts;
  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);
  // Get the inode and Fh corresponding to c_file
  rc = ceph_ll_create(cmount, root, s.file, fileMode, O_RDWR | O_CREAT,
                      &inode, &fh, &stx, 0, 0, ceph_mount_perms(cmount));
  ASSERT_EQ(rc, 0);
  // main holds the write lock: non-blocking attempt must fail
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  PING_MAIN(1); // (1)
  // blocking attempt: succeeds once main unlocks
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  PING_MAIN(2); // (2)
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  PING_MAIN(3); // (3)
  // take a shared (read) lock, held until main says otherwise
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  PING_MAIN(4); // (4)
  WAIT_MAIN(1); // (R1)
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  PING_MAIN(5); // (5)
  WAIT_MAIN(2); // (R2)
  // blocking write lock while main holds it exclusively
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  PING_MAIN(6); // (6)
  WAIT_MAIN(3); // (R3)
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  PING_MAIN(7); // (7)
  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
}
// Used by ConcurrentRecordLocking test
static void* thread_ConcurrentRecordLocking_(void *arg) {
str_ConcurrentRecordLocking *const s =
reinterpret_cast<str_ConcurrentRecordLocking*>(arg);
thread_ConcurrentRecordLocking(*s);
return NULL;
}
// Main side of the two-thread record-locking dance: drives one worker thread
// (script in thread_ConcurrentRecordLocking) through blocking/non-blocking
// lock transitions over the same Fh, synchronizing via the PING_/WAIT_
// semaphore macros. Statement order is the protocol; do not reorder.
TEST(LibCephFS, ConcurrentRecordLocking) {
  const pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();
  char c_file[1024];
  sprintf(c_file, "recordlock_test_%d", mypid);
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  struct flock lock1;
  int rc;
  UserPerm *perms = ceph_mount_perms(cmount);
  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);
  // Get the inode and Fh corresponding to c_file
  rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
                      &inode, &fh, &stx, 0, 0, perms);
  ASSERT_EQ(rc, 0);
  // Lock (exclusive, before the worker starts)
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  // Start locker thread
  pthread_t thread;
  struct timespec ts;
  str_ConcurrentRecordLocking s = { c_file, cmount };
  s.sem_init(0);
  ASSERT_EQ(0, pthread_create(&thread, NULL, thread_ConcurrentRecordLocking_, &s));
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(1); // (1)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)
  // Unlock
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  // Shall have lock
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(2); // (2)
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(3); // (3)
  // Wait for thread to share lock
  WAIT_WORKER(4); // (4)
  // worker holds a read lock: write conflicts, read is compatible
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  // Wake up thread to unlock shared lock
  PING_WORKER(1); // (R1)
  WAIT_WORKER(5); // (5)
  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  // Wake up thread to lock shared lock
  PING_WORKER(2); // (R2)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(6); // (6)
  // Release lock ; thread will get it
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  WAIT_WORKER(6); // (6)
  // We no longer have the lock
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  // Wake up thread to unlock exclusive lock
  PING_WORKER(3); // (R3)
  WAIT_WORKER(7); // (7)
  // We can lock it again
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  // Cleanup
  void *retval = (void*) (uintptr_t) -1;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  ASSERT_EQ(NULL, retval);
  s.sem_destroy();
  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
  ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
  CLEANUP_CEPH();
}
// Same protocol as ConcurrentRecordLocking but with TWO worker threads
// running the same script against one main; worker-side steps are consumed
// twice via the TWICE() macro. Statement order is the protocol; do not
// reorder.
TEST(LibCephFS, ThreesomeRecordLocking) {
  const pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();
  char c_file[1024];
  sprintf(c_file, "recordlock_test_%d", mypid);
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  struct flock lock1;
  int rc;
  UserPerm *perms = ceph_mount_perms(cmount);
  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);
  // Get the inode and Fh corresponding to c_file
  rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
                      &inode, &fh, &stx, 0, 0, perms);
  ASSERT_EQ(rc, 0);
  // Lock (exclusive, before the workers start)
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  // Start locker thread
  pthread_t thread[2];
  struct timespec ts;
  str_ConcurrentRecordLocking s = { c_file, cmount };
  s.sem_init(0);
  ASSERT_EQ(0, pthread_create(&thread[0], NULL, thread_ConcurrentRecordLocking_, &s));
  ASSERT_EQ(0, pthread_create(&thread[1], NULL, thread_ConcurrentRecordLocking_, &s));
  // Synchronization point with thread (failure: thread is dead)
  TWICE(WAIT_WORKER(1)); // (1)
  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)
  // Unlock
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  // Shall have lock
  TWICE(// Synchronization point with thread (failure: thread is dead)
        WAIT_WORKER(2); // (2)
        // Synchronization point with thread (failure: thread is dead)
        WAIT_WORKER(3)); // (3)
  // Wait for thread to share lock
  TWICE(WAIT_WORKER(4)); // (4)
  // workers hold read locks: write conflicts, read is compatible
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  // Wake up thread to unlock shared lock
  TWICE(PING_WORKER(1); // (R1)
        WAIT_WORKER(5)); // (5)
  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), true));
  TWICE( // Wake up thread to lock shared lock
        PING_WORKER(2); // (R2)
        // Shall not have lock immediately
        NOT_WAIT_WORKER(6)); // (6)
  // Release lock ; thread will get it
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  TWICE(WAIT_WORKER(6); // (6)
        // We no longer have the lock
        lock1.l_type = F_WRLCK;
        lock1.l_whence = SEEK_SET;
        lock1.l_start = 0;
        lock1.l_len = 1024;
        lock1.l_pid = getpid();
        ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
        lock1.l_type = F_RDLCK;
        lock1.l_whence = SEEK_SET;
        lock1.l_start = 0;
        lock1.l_len = 1024;
        lock1.l_pid = getpid();
        ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
        // Wake up thread to unlock exclusive lock
        PING_WORKER(3); // (R3)
        WAIT_WORKER(7); // (7)
        );
  // We can lock it again
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, ceph_pthread_self(), false));
  // Cleanup
  void *retval = (void*) (uintptr_t) -1;
  ASSERT_EQ(0, pthread_join(thread[0], &retval));
  ASSERT_EQ(NULL, retval);
  ASSERT_EQ(0, pthread_join(thread[1], &retval));
  ASSERT_EQ(NULL, retval);
  s.sem_destroy();
  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
  ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
  CLEANUP_CEPH();
}
/* Locking in different processes */
// Shadow the file-level waitMs with a larger per-function value for the
// fork-based tests (process startup is slower than thread startup).
#define PROCESS_SLOW_MS() \
static const long waitMs = 100; \
(void) waitMs
// Used by ConcurrentLocking test
// Child-process variant of the worker script: creates its own mount (lock
// ownership is per-client here, so the owner argument is the pid), walks the
// same PING_/WAIT_ protocol via semaphores in shared memory, and exits with
// EXIT_SUCCESS so the parent can detect assertion failures via wait status.
static void process_ConcurrentRecordLocking(str_ConcurrentRecordLocking& s) {
  const pid_t mypid = getpid();
  PROCESS_SLOW_MS();
  struct ceph_mount_info *cmount = NULL;
  struct timespec ts;
  Fh *fh = NULL;
  Inode *root = NULL, *inode = NULL;
  struct ceph_statx stx;
  int rc;
  struct flock lock1;
  STARTUP_CEPH();
  s.cmount = cmount;
  // Get the root inode
  rc = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(rc, 0);
  // Get the inode and Fh corresponding to c_file
  rc = ceph_ll_create(cmount, root, s.file, fileMode, O_RDWR | O_CREAT,
                      &inode, &fh, &stx, 0, 0, ceph_mount_perms(cmount));
  ASSERT_EQ(rc, 0);
  WAIT_MAIN(1); // (R1)
  // parent holds the write lock: non-blocking attempt must fail
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  PING_MAIN(1); // (1)
  // blocking attempt: succeeds once the parent unlocks
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
  PING_MAIN(2); // (2)
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  PING_MAIN(3); // (3)
  // take a shared (read) lock, held until the parent says otherwise
  lock1.l_type = F_RDLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
  PING_MAIN(4); // (4)
  WAIT_MAIN(2); // (R2)
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  PING_MAIN(5); // (5)
  WAIT_MAIN(3); // (R3)
  lock1.l_type = F_WRLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
  PING_MAIN(6); // (6)
  WAIT_MAIN(4); // (R4)
  lock1.l_type = F_UNLCK;
  lock1.l_whence = SEEK_SET;
  lock1.l_start = 0;
  lock1.l_len = 1024;
  lock1.l_pid = getpid();
  ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
  PING_MAIN(7); // (7)
  ASSERT_EQ(0, ceph_ll_close(cmount, fh));
  CLEANUP_CEPH();
  s.sem_destroy();
  exit(EXIT_SUCCESS);
}
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
#ifndef _WIN32
// Scenario: the parent and one forked child contend for POSIX record locks
// on a shared file. The PING_*/WAIT_*/NOT_WAIT_* semaphore macros (defined
// earlier in this file) sequence the two sides; the "(N)"/"(RN)" trailing
// comments refer to the worker's numbered synchronization points.
TEST(LibCephFS, DISABLED_InterProcessRecordLocking) {
PROCESS_SLOW_MS();
// Process synchronization
char c_file[1024];
const pid_t mypid = getpid(); // also used as the lock owner in setlk calls
sprintf(c_file, "recordlock_test_%d", mypid);
Fh *fh = NULL;
Inode *root = NULL, *inode = NULL;
struct ceph_statx stx;
struct flock lock1;
int rc;
// Note: the semaphores MUST be on a shared memory segment
str_ConcurrentRecordLocking *const shs =
reinterpret_cast<str_ConcurrentRecordLocking*>
(mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0));
str_ConcurrentRecordLocking &s = *shs;
s.file = c_file;
s.sem_init(1);
// Start locker process
const pid_t pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0) {
process_ConcurrentRecordLocking(s);
// only reached if the worker returns instead of exiting on its own
exit(EXIT_FAILURE);
}
// NOTE(review): 'ts' looks unused here, but the WAIT_*/NOT_WAIT_* macros
// presumably expand to a timed wait that references it — confirm before
// removing.
struct timespec ts;
struct ceph_mount_info *cmount;
STARTUP_CEPH();
UserPerm *perms = ceph_mount_perms(cmount);
// Get the root inode
rc = ceph_ll_lookup_root(cmount, &root);
ASSERT_EQ(rc, 0);
// Get the inode and Fh corresponding to c_file
rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
&inode, &fh, &stx, 0, 0, perms);
ASSERT_EQ(rc, 0);
// Lock: take an exclusive (write) lock over [0, 1024)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
// Synchronization point with process (failure: process is dead)
PING_WORKER(1); // (R1)
WAIT_WORKER(1); // (1)
// Shall not have lock immediately
NOT_WAIT_WORKER(2); // (2)
// Unlock
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Shall have lock
// Synchronization point with process (failure: process is dead)
WAIT_WORKER(2); // (2)
// Synchronization point with process (failure: process is dead)
WAIT_WORKER(3); // (3)
// Wait for process to share lock
WAIT_WORKER(4); // (4)
// Worker holds a shared lock: a non-blocking write lock must fail ...
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// ... but a shared (read) lock can be acquired alongside it
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wake up process to unlock shared lock
PING_WORKER(2); // (R2)
WAIT_WORKER(5); // (5)
// Now we can lock exclusively
// Upgrade to exclusive lock (as per POSIX)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
// Wake up process to lock shared lock
PING_WORKER(3); // (R3)
// Shall not have lock immediately
NOT_WAIT_WORKER(6); // (6)
// Release lock ; process will get it
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
WAIT_WORKER(6); // (6)
// We no longer have the lock: both write and read attempts must fail
// while the worker holds its exclusive lock
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wake up process to unlock exclusive lock
PING_WORKER(4); // (R4)
WAIT_WORKER(7); // (7)
// We can lock it again
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wait pid: the worker must have exited successfully
int status;
ASSERT_EQ(pid, waitpid(pid, &status, 0));
ASSERT_EQ(EXIT_SUCCESS, status);
// Cleanup
s.sem_destroy();
ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
ASSERT_EQ(0, ceph_ll_close(cmount, fh));
ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
CLEANUP_CEPH();
}
#endif
#ifndef _WIN32
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
// Scenario: the parent and TWO forked children contend for POSIX record
// locks on a shared file. Identical to DISABLED_InterProcessRecordLocking
// except that worker-side synchronization steps are doubled via TWICE().
TEST(LibCephFS, DISABLED_ThreesomeInterProcessRecordLocking) {
PROCESS_SLOW_MS();
// Process synchronization
char c_file[1024];
const pid_t mypid = getpid(); // also used as the lock owner in setlk calls
snprintf(c_file, sizeof(c_file), "recordlock_test_%d", mypid);
Fh *fh = NULL;
Inode *root = NULL, *inode = NULL;
struct ceph_statx stx;
struct flock lock1;
int rc;
// Note: the semaphores MUST be on a shared memory segment
str_ConcurrentRecordLocking *const shs =
reinterpret_cast<str_ConcurrentRecordLocking*>
(mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1, 0));
str_ConcurrentRecordLocking &s = *shs;
s.file = c_file;
s.sem_init(1);
// Start locker processes
pid_t pid[2];
pid[0] = fork();
ASSERT_GE(pid[0], 0);
if (pid[0] == 0) {
process_ConcurrentRecordLocking(s);
exit(EXIT_FAILURE);
}
pid[1] = fork();
ASSERT_GE(pid[1], 0);
if (pid[1] == 0) {
process_ConcurrentRecordLocking(s);
exit(EXIT_FAILURE);
}
// NOTE(review): 'ts' looks unused here, but the WAIT_*/NOT_WAIT_* macros
// presumably expand to a timed wait that references it — confirm before
// removing.
struct timespec ts;
struct ceph_mount_info *cmount;
STARTUP_CEPH();
// Get the root inode
rc = ceph_ll_lookup_root(cmount, &root);
ASSERT_EQ(rc, 0);
// Get the inode and Fh corresponding to c_file
UserPerm *perms = ceph_mount_perms(cmount);
rc = ceph_ll_create(cmount, root, c_file, fileMode, O_RDWR | O_CREAT,
&inode, &fh, &stx, 0, 0, perms);
ASSERT_EQ(rc, 0);
// Lock: take an exclusive (write) lock over [0, 1024)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
// Synchronization point with process (failure: process is dead)
TWICE(PING_WORKER(1)); // (R1)
TWICE(WAIT_WORKER(1)); // (1)
// Shall not have lock immediately
NOT_WAIT_WORKER(2); // (2)
// Unlock
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Shall have lock
TWICE(// Synchronization point with process (failure: process is dead)
WAIT_WORKER(2); // (2)
// Synchronization point with process (failure: process is dead)
WAIT_WORKER(3)); // (3)
// Wait for process to share lock
TWICE(WAIT_WORKER(4)); // (4)
// Workers hold shared locks: a non-blocking write lock must fail ...
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// ... but a shared (read) lock can be acquired alongside them
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wake up process to unlock shared lock
TWICE(PING_WORKER(2); // (R2)
WAIT_WORKER(5)); // (5)
// Now we can lock exclusively
// Upgrade to exclusive lock (as per POSIX)
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, true));
TWICE( // Wake up process to lock shared lock
PING_WORKER(3); // (R3)
// Shall not have lock immediately
NOT_WAIT_WORKER(6)); // (6)
// Release lock ; process will get it
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
TWICE(WAIT_WORKER(6); // (6)
// We no longer have the lock.
// Fix: use 'mypid' as the lock owner as everywhere else in this
// inter-process test; the previous ceph_pthread_self() was a leftover
// from the threaded variant of this test and identified a different
// (thread-id based) owner.
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
lock1.l_type = F_RDLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(-CEPHFS_EAGAIN, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wake up process to unlock exclusive lock
PING_WORKER(4); // (R4)
WAIT_WORKER(7); // (7)
);
// We can lock it again
lock1.l_type = F_WRLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
lock1.l_type = F_UNLCK;
lock1.l_whence = SEEK_SET;
lock1.l_start = 0;
lock1.l_len = 1024;
lock1.l_pid = getpid();
ASSERT_EQ(0, ceph_ll_setlk(cmount, fh, &lock1, mypid, false));
// Wait pids: both workers must have exited successfully
int status;
ASSERT_EQ(pid[0], waitpid(pid[0], &status, 0));
ASSERT_EQ(EXIT_SUCCESS, status);
ASSERT_EQ(pid[1], waitpid(pid[1], &status, 0));
ASSERT_EQ(EXIT_SUCCESS, status);
// Cleanup
s.sem_destroy();
ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
ASSERT_EQ(0, ceph_ll_close(cmount, fh));
ASSERT_EQ(0, ceph_ll_unlink(cmount, root, c_file, perms));
CLEANUP_CEPH();
}
#endif
| 31,209 | 27.218807 | 91 |
cc
|
null |
ceph-main/src/test/libcephfs/snapdiff.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "include/stat.h"
#include "include/ceph_assert.h"
#include "include/object.h"
#include "include/stringify.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string>
#include <vector>
#include <algorithm>
#include <limits.h>
#include <dirent.h>
using namespace std;
// Test fixture helper: mounts the cluster, creates a per-process working
// directory and exposes small wrappers (file/dir/snapshot manipulation,
// readdir and snapdiff listing verification) used by the SnapDiff tests.
// All relative paths are resolved against the working directory.
class TestMount {
ceph_mount_info* cmount = nullptr;
char dir_path[64];
public:
// Mount and create the unique working directory "/<root_dir_name>_<pid>".
TestMount( const char* root_dir_name = "dir0") {
ceph_create(&cmount, NULL);
ceph_conf_read_file(cmount, NULL);
ceph_conf_parse_env(cmount, NULL);
ceph_assert(0 == ceph_mount(cmount, NULL));
// snprintf: never overflow the fixed-size path buffer
snprintf(dir_path, sizeof(dir_path), "/%s_%d", root_dir_name, getpid());
ceph_assert(0 == ceph_mkdir(cmount, dir_path, 0777));
}
// Purge the working directory and unmount. All teardown is guarded by
// 'cmount' so a null mount handle is never dereferenced.
~TestMount()
{
if (cmount) {
ceph_assert(0 == purge_dir(""));
ceph_rmdir(cmount, dir_path);
ceph_shutdown(cmount);
}
}
int conf_get(const char *option, char *buf, size_t len) {
return ceph_conf_get(cmount, option, buf, len);
}
// Absolute path for 'relpath' under the working directory.
string make_file_path(const char* relpath) {
char path[PATH_MAX];
snprintf(path, sizeof(path), "%s/%s", dir_path, relpath);
return path;
}
// Decorate a snapshot name with the pid so concurrent test runs don't
// collide; a null/empty input yields an empty name (used by negative
// tests to simulate a missing snapshot name).
string make_snap_name(const char* name) {
char snap_name[64];
if (name && *name) {
snprintf(snap_name, sizeof(snap_name), "%s_%d", name, getpid());
} else {
// just simulate empty snapname
snap_name[0] = 0;
}
return snap_name;
}
// ".snap/<decorated name>[/subdir]", relative to the working directory.
string make_snap_path(const char* sname, const char* subdir = nullptr) {
char snap_path[PATH_MAX];
string snap_name = subdir ?
concat_path(make_snap_name(sname), subdir) :
make_snap_name(sname);
snprintf(snap_path, sizeof(snap_path), ".snap/%s", snap_name.c_str());
return snap_path;
}
int mksnap(const char* name) {
string snap_name = make_snap_name(name);
return ceph_mksnap(cmount, dir_path, snap_name.c_str(),
0755, nullptr, 0);
}
int rmsnap(const char* name) {
string snap_name = make_snap_name(name);
return ceph_rmsnap(cmount, dir_path, snap_name.c_str());
}
// Resolve the snapshot id for the given (undecorated) snapshot name.
// Returns 0 and fills *res on success, negative error otherwise.
int get_snapid(const char* name, uint64_t* res)
{
ceph_assert(res);
snap_info snap_info;
char snap_path[PATH_MAX];
string snap_name = make_snap_name(name);
snprintf(snap_path, sizeof(snap_path), "%s/.snap/%s",
dir_path, snap_name.c_str());
int r = ceph_get_snap_info(cmount, snap_path, &snap_info);
if (r >= 0) {
*res = snap_info.id;
r = 0;
}
return r;
}
// Create/overwrite 'relpath' with 'data', truncate to the exact size and
// fsync. Returns the written byte count (>= 0) or a negative error code.
int write_full(const char* relpath, const string& data)
{
auto file_path = make_file_path(relpath);
int fd = ceph_open(cmount, file_path.c_str(), O_WRONLY | O_CREAT, 0666);
if (fd < 0) {
// propagate the real error instead of masking everything as EACCES
return fd;
}
int r = ceph_write(cmount, fd, data.c_str(), data.size(), 0);
if (r >= 0) {
ceph_truncate(cmount, file_path.c_str(), data.size());
ceph_fsync(cmount, fd, 0);
}
ceph_close(cmount, fd);
return r;
}
// Join 'path' and 'name' with exactly one '/' separator.
string concat_path(string_view path, string_view name) {
string s(path);
if (s.empty() || s.back() != '/') {
s += '/';
}
s += name;
return s;
}
int unlink(const char* relpath)
{
auto file_path = make_file_path(relpath);
return ceph_unlink(cmount, file_path.c_str());
}
// Verify 'relpath' can be opened (and closed) as a directory.
int test_open(const char* relpath)
{
auto subdir_path = make_file_path(relpath);
struct ceph_dir_result* ls_dir;
int r = ceph_opendir(cmount, subdir_path.c_str(), &ls_dir);
if (r != 0) {
return r;
}
ceph_assert(0 == ceph_closedir(cmount, ls_dir));
return r;
}
// Invoke 'fn' for every entry of 'relpath' except "." and "..".
// Returns 0 on success, -EINTR if 'fn' returned false, or a negative
// error from opendir/readdirplus.
int for_each_readdir(const char* relpath,
std::function<bool(const dirent*, const struct ceph_statx*)> fn)
{
auto subdir_path = make_file_path(relpath);
struct ceph_dir_result* ls_dir;
int r = ceph_opendir(cmount, subdir_path.c_str(), &ls_dir);
if (r != 0) {
return r;
}
while (1) {
struct dirent result;
struct ceph_statx stx;
r = ceph_readdirplus_r(
cmount, ls_dir, &result, &stx, CEPH_STATX_BASIC_STATS,
0,
NULL);
if (!r)
break;
if (r < 0) {
std::cerr << "ceph_readdirplus_r failed, error: "
<< r << std::endl;
// break (not return) so ls_dir is closed below — the early
// return here used to leak the open dir handle
break;
}
if (strcmp(result.d_name, ".") == 0 ||
strcmp(result.d_name, "..") == 0) {
continue;
}
if (!fn(&result, &stx)) {
r = -EINTR;
break;
}
}
ceph_assert(0 == ceph_closedir(cmount, ls_dir));
return r;
}
// List 'relpath' and verify the entry set matches 'expected0' exactly
// (order-insensitive). Returns 0 on match, -EINTR on an unexpected
// entry, -ENOTEMPTY when expected entries are missing.
int readdir_and_compare(const char* relpath,
const vector<string>& expected0)
{
vector<string> expected(expected0);
int r = for_each_readdir(relpath,
[&](const dirent* dire, const struct ceph_statx* stx) {
std::string name(dire->d_name);
// re-evaluate end() on every call: erase() below invalidates
// previously obtained iterators, including a cached end()
auto it = std::find(expected.begin(), expected.end(), name);
if (it == expected.end()) {
std::cerr << "readdir_and_compare error: unexpected name:"
<< name << std::endl;
return false;
}
expected.erase(it);
return true;
});
if (r == 0 && !expected.empty()) {
std::cerr << __func__ << " error: left entries:" << std::endl;
for (auto& e : expected) {
std::cerr << e << std::endl;
}
std::cerr << __func__ << " ************" << std::endl;
r = -ENOTEMPTY;
}
return r;
}
// Invoke 'fn' for every snap1-vs-snap2 delta entry of 'relpath'
// (except "." and ".."). Returns 0 on success, -EINTR if 'fn' returned
// false, or a negative error from the snapdiff API.
int for_each_readdir_snapdiff(const char* relpath,
const char* snap1,
const char* snap2,
std::function<bool(const dirent*, uint64_t)> fn)
{
auto s1 = make_snap_name(snap1);
auto s2 = make_snap_name(snap2);
ceph_snapdiff_info info;
ceph_snapdiff_entry_t res_entry;
int r = ceph_open_snapdiff(cmount,
dir_path,
relpath,
s1.c_str(),
s2.c_str(),
&info);
if (r != 0) {
std::cerr << " Failed to open snapdiff, ret:" << r << std::endl;
return r;
}
while (0 < (r = ceph_readdir_snapdiff(&info,
&res_entry))) {
if (strcmp(res_entry.dir_entry.d_name, ".") == 0 ||
strcmp(res_entry.dir_entry.d_name, "..") == 0) {
continue;
}
if (!fn(&res_entry.dir_entry, res_entry.snapid)) {
r = -EINTR;
break;
}
}
ceph_assert(0 == ceph_close_snapdiff(&info));
if (r != 0) {
std::cerr << " Failed to readdir snapdiff, ret:" << r
<< " " << relpath << ", " << snap1 << " vs. " << snap2
<< std::endl;
}
return r;
}
// Verify the snap1-vs-snap2 delta of 'relpath' matches 'expected0'
// (name/snapid pairs, order-insensitive). Same result convention as
// readdir_and_compare().
int readdir_snapdiff_and_compare(const char* relpath,
const char* snap1,
const char* snap2,
const vector<pair<string, uint64_t>>& expected0)
{
vector<pair<string, uint64_t>> expected(expected0);
int r = for_each_readdir_snapdiff(relpath, snap1, snap2,
[&](const dirent* dire, uint64_t snapid) {
pair<string, uint64_t> p = std::make_pair(dire->d_name, snapid);
// re-evaluate end() on every call: erase() below invalidates
// previously obtained iterators, including a cached end()
auto it = std::find(expected.begin(), expected.end(), p);
if (it == expected.end()) {
std::cerr << "readdir_snapdiff_and_compare error: unexpected name:"
<< dire->d_name << "/" << snapid << std::endl;
return false;
}
expected.erase(it);
return true;
});
if (r == 0 && !expected.empty()) {
std::cerr << __func__ << " error: left entries:" << std::endl;
for (auto& e : expected) {
std::cerr << e.first << "/" << e.second << std::endl;
}
std::cerr << __func__ << " ************" << std::endl;
r = -ENOTEMPTY;
}
return r;
}
int mkdir(const char* relpath)
{
auto path = make_file_path(relpath);
return ceph_mkdir(cmount, path.c_str(), 0777);
}
int rmdir(const char* relpath)
{
auto path = make_file_path(relpath);
return ceph_rmdir(cmount, path.c_str());
}
// Recursively remove the content of 'relpath0'; when 'inclusive' the
// directory itself is removed as well (the root "" is never removed).
// Nested removals are best-effort: their errors are ignored.
int purge_dir(const char* relpath0, bool inclusive = true)
{
int r =
for_each_readdir(relpath0,
[&](const dirent* dire, const struct ceph_statx* stx) {
string relpath = concat_path(relpath0, dire->d_name);
if (S_ISDIR(stx->stx_mode)) {
purge_dir(relpath.c_str());
rmdir(relpath.c_str());
} else {
unlink(relpath.c_str());
}
return true;
});
if (r != 0) {
return r;
}
if (*relpath0 != 0) {
r = rmdir(relpath0);
}
return r;
}
// Best-effort removal of everything under the working directory.
void remove_all() {
purge_dir("/", false);
}
ceph_mount_info* get_cmount() {
return cmount;
}
// Defined out-of-class below.
void verify_snap_diff(vector<pair<string, uint64_t>>& expected,
const char* relpath,
const char* snap1,
const char* snap2);
void print_snap_diff(const char* relpath,
const char* snap1,
const char* snap2);
void prepareSnapDiffLib1Cases();
void prepareSnapDiffLib2Cases();
void prepareSnapDiffLib3Cases();
void prepareHugeSnapDiff(const std::string& name_prefix_start,
const std::string& name_prefix_bulk,
const std::string& name_prefix_end,
size_t file_count,
bool bulk_diff);
};
// Helper function to verify readdir_snapdiff returns expected results
// Assert that the snap1-vs-snap2 delta of 'relpath' equals 'expected'.
void TestMount::verify_snap_diff(vector<pair<string, uint64_t>>& expected,
                                 const char* relpath,
                                 const char* snap1,
                                 const char* snap2)
{
  const char* where = relpath ? relpath : "";
  std::cout << "---------" << snap1 << " vs. " << snap2
            << " diff listing verification for /" << where << std::endl;
  int rc = readdir_snapdiff_and_compare(relpath, snap1, snap2, expected);
  ASSERT_EQ(0, rc);
}
// Helper function to print readdir_snapdiff results
// Dump every entry of the snap1-vs-snap2 delta of 'relpath' to stdout.
void TestMount::print_snap_diff(const char* relpath,
                                const char* snap1,
                                const char* snap2)
{
  const char* where = relpath ? relpath : "";
  std::cout << "---------" << snap1 << " vs. " << snap2
            << " diff listing for /" << where << std::endl;
  auto dump_entry = [](const dirent* dire, uint64_t snapid) {
    std::cout << dire->d_name << " snap " << snapid << std::endl;
    return true;
  };
  ASSERT_EQ(0, for_each_readdir_snapdiff(relpath, snap1, snap2, dump_entry));
}
/* The following method creates some files/folders/snapshots layout,
described in the sheet below.
We're to test SnapDiff readdir API against that structure.
* where:
- xN denotes file 'x' version N.
- X denotes folder name
- * denotes no/removed file/folder
# snap1 snap2
# fileA1 | fileA2 |
# * | fileB2 |
# fileC1 | * |
# fileD1 | fileD1 |
# dirA | dirA |
# dirA/fileA1 | dirA/fileA2 |
# * | dirB |
# * | dirB/fileb2 |
# dirC | * |
# dirC/filec1 | * |
# dirD | dirD |
# dirD/fileD1 | dirD/fileD1 |
*/
// Build the two-snapshot layout from the table above:
//  - fileA / dirA/fileA: present in both snaps, modified in snap2
//  - fileB / dirB: created between the snaps (snap2 only)
//  - fileC / dirC: removed between the snaps (snap1 only)
//  - fileD / dirD: present and untouched in both snaps
void TestMount::prepareSnapDiffLib1Cases()
{
  //************ snap1 *************
  ASSERT_LE(0, write_full("fileA", "hello world"));
  ASSERT_LE(0, write_full("fileC", "hello world to be removed"));
  ASSERT_LE(0, write_full("fileD", "hello world unmodified"));
  ASSERT_EQ(0, mkdir("dirA"));
  ASSERT_LE(0, write_full("dirA/fileA", "file 'A/a' v1"));
  ASSERT_EQ(0, mkdir("dirC"));
  ASSERT_LE(0, write_full("dirC/filec", "file 'C/c' v1"));
  ASSERT_EQ(0, mkdir("dirD"));
  ASSERT_LE(0, write_full("dirD/filed", "file 'D/d' v1"));
  ASSERT_EQ(0, mksnap("snap1"));
  //************ snap2 *************
  ASSERT_LE(0, write_full("fileA", "hello world again in A"));
  ASSERT_LE(0, write_full("fileB", "hello world in B"));
  ASSERT_EQ(0, unlink("fileC"));
  ASSERT_LE(0, write_full("dirA/fileA", "file 'A/a' v2"));
  ASSERT_EQ(0, purge_dir("dirC"));
  ASSERT_EQ(0, mkdir("dirB"));
  ASSERT_LE(0, write_full("dirB/fileb", "file 'B/b' v2"));
  ASSERT_EQ(0, mksnap("snap2"));
}
/*
* Basic functionality testing for the SnapDiff readdir API
*/
TEST(LibCephFS, SnapDiffLib)
{
  TestMount test_mount;
  // Create simple directory tree with a couple of snapshots
  // to test against
  test_mount.prepareSnapDiffLib1Cases();
  uint64_t snapid1;
  uint64_t snapid2;
  // learn snapshot ids and do basic verification
  ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
  ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
  ASSERT_GT(snapid1, 0);
  ASSERT_GT(snapid2, 0);
  ASSERT_GT(snapid2, snapid1);
  std::cout << snapid1 << " vs. " << snapid2 << std::endl;
  //
  // Make sure root listing for snapshot snap1 is as expected
  //
  {
    std::cout << "---------snap1 listing verification---------" << std::endl;
    string snap_path = test_mount.make_snap_path("snap1");
    vector<string> expected = {
      "fileA", "fileC", "fileD", "dirA", "dirC", "dirD"
    };
    ASSERT_EQ(0,
      test_mount.readdir_and_compare(snap_path.c_str(), expected));
  }
  //
  // Make sure root listing for snapshot snap2 is as expected
  //
  {
    std::cout << "---------snap2 listing verification---------" << std::endl;
    string snap_path = test_mount.make_snap_path("snap2");
    vector<string> expected = {
      "fileA", "fileB", "fileD", "dirA", "dirB", "dirD"
    };
    ASSERT_EQ(0,
      test_mount.readdir_and_compare(snap_path.c_str(), expected));
  }
  //
  // Print snap1 vs. snap2 delta for the root
  //
  test_mount.print_snap_diff("", "snap1", "snap2");
  //
  // Make sure snap1 vs. snap2 delta for the root is as expected:
  // entries removed in snap2 are reported with snapid1, everything
  // else (created/modified) with snapid2
  //
  {
    vector<pair<string, uint64_t>> expected = {
      {"fileA", snapid2},
      {"fileB", snapid2},
      {"fileC", snapid1},
      {"dirA", snapid2},
      {"dirB", snapid2},
      {"dirC", snapid1},
      {"dirD", snapid2},
    };
    test_mount.verify_snap_diff(expected, "", "snap1", "snap2");
  }
  //
  // Make sure snap1 vs. snap2 delta for /dirA is as expected
  //
  {
    vector<pair<string, uint64_t>> expected = {
      {"fileA", snapid2},
    };
    test_mount.verify_snap_diff(expected, "dirA", "snap1", "snap2");
  }
  //
  // Make sure snap1 vs. snap2 delta for /dirB is as expected
  //
  {
    vector<pair<string, uint64_t>> expected = {
      {"fileb", snapid2},
    };
    test_mount.verify_snap_diff(expected, "dirB", "snap1", "snap2");
  }
  //
  // Make sure snap1 vs. snap2 delta for /dirC is as expected
  // (snapshot arguments intentionally reversed: the delta is symmetric)
  //
  {
    vector<pair<string, uint64_t>> expected = {
      {"filec", snapid1},
    };
    test_mount.verify_snap_diff(expected, "dirC", "snap2", "snap1");
  }
  //
  // Make sure snap1 vs. snap2 delta for /dirD is as expected (empty)
  //
  {
    vector<pair<string, uint64_t>> expected;
    test_mount.verify_snap_diff(expected, "dirD", "snap1", "snap2");
  }
  // Make sure SnapDiff returns an error when provided with the same
  // snapshot name for both parties A and B.
  {
    std::cout << "---------invalid snapdiff params, the same snaps---------" << std::endl;
    ASSERT_EQ(-EINVAL, test_mount.for_each_readdir_snapdiff(
      "",
      "snap2",
      "snap2",
      [&](const dirent* dire, uint64_t snapid) {
        return true;
      }));
  }
  // Make sure SnapDiff returns an error when provided with an empty
  // snapshot name for one of the parties
  {
    std::cout << "---------invalid snapdiff params, no snap_other ---------" << std::endl;
    ASSERT_EQ(-EINVAL, test_mount.for_each_readdir_snapdiff(
      "",
      "snap2",
      "",
      [&](const dirent* dire, uint64_t snapid) {
        return true;
      }));
  }
  std::cout << "------------- closing -------------" << std::endl;
  ASSERT_EQ(0, test_mount.purge_dir(""));
  ASSERT_EQ(0, test_mount.rmsnap("snap1"));
  ASSERT_EQ(0, test_mount.rmsnap("snap2"));
}
/* The following method creates some files/folders/snapshots layout,
described in the sheet below.
We're to test SnapDiff readdir API against that structure.
* where:
- xN denotes file 'x' version N.
- X denotes folder name
- * denotes no/removed file/folder
# snap1 snap2 snap3 head
# fileA1 | fileA2 | fileA2
# * | fileB2 | fileB2
# fileC1 | * | fileC3
# fileD1 | fileD1 | fileD3
# * | * | fileE3
# fileF1 | * | *
# fileG1 | fileG2 | *
# dirA | dirA | *
# dirA/fileA1 | dirA/fileA2 | *
# * | dirB | *
# * | dirB/fileb2 | *
# dirC | * | *
# dirC/filec1 | * | *
# dirD | dirD | dirD
# dirD/filed1 | dirD/filed1 | dirD/filed1
*/
// Build the three-snapshot layout from the table above:
// files/dirs are created, modified, removed and (for fileC) re-created
// across snap1..snap3 so the SnapDiff tests can exercise every
// add/modify/remove combination between any pair of snapshots.
void TestMount::prepareSnapDiffLib2Cases()
{
  //************ snap1 *************
  ASSERT_LE(0, write_full("fileA", "hello world"));
  ASSERT_LE(0, write_full("fileC", "hello world to be removed temporarily"));
  ASSERT_LE(0, write_full("fileD", "hello world unmodified"));
  ASSERT_LE(0, write_full("fileF", "hello world to be removed completely"));
  ASSERT_LE(0, write_full("fileG", "hello world to be overwritten at snap2"));
  ASSERT_EQ(0, mkdir("dirA"));
  ASSERT_LE(0, write_full("dirA/fileA", "file 'A/a' v1"));
  ASSERT_EQ(0, mkdir("dirC"));
  ASSERT_LE(0, write_full("dirC/filec", "file 'C/c' v1"));
  ASSERT_EQ(0, mkdir("dirD"));
  ASSERT_LE(0, write_full("dirD/filed", "file 'D/d' v1"));
  ASSERT_EQ(0, mksnap("snap1"));
  //************ snap2 *************
  ASSERT_LE(0, write_full("fileA", "hello world again in A"));
  ASSERT_LE(0, write_full("fileB", "hello world in B"));
  ASSERT_LE(0, write_full("fileG", "hello world to be removed at snap3"));
  ASSERT_EQ(0, unlink("fileC"));
  ASSERT_EQ(0, unlink("fileF"));
  ASSERT_LE(0, write_full("dirA/fileA", "file 'A/a' v2"));
  ASSERT_EQ(0, mkdir("dirB"));
  ASSERT_LE(0, write_full("dirB/fileb", "file 'B/b' v2"));
  ASSERT_EQ(0, purge_dir("dirC"));
  ASSERT_EQ(0, mksnap("snap2"));
  //************ snap3 *************
  // fileC comes back, fileD gets its first modification, fileE appears
  ASSERT_LE(0, write_full("fileC", "hello world in C recovered"));
  ASSERT_LE(0, write_full("fileD", "hello world in D now modified"));
  ASSERT_LE(0, write_full("fileE", "file 'E' created at snap3"));
  ASSERT_EQ(0, unlink("fileG"));
  ASSERT_EQ(0, purge_dir("dirA"));
  ASSERT_EQ(0, purge_dir("dirB"));
  ASSERT_EQ(0, mksnap("snap3"));
}
/* The following method creates a folder with tons of file
updated between two snapshots
We're to test SnapDiff readdir API against that structure.
* where:
- xN denotes file 'x' version N.
- X denotes folder name
- * denotes no/removed file/folder
# snap1 snap2
* aaaaA1 | aaaaA1 |
* aaaaB1 | * |
* * | aaaaC2 |
* aaaaD1 | aaaaD2 |
# file<NNN>1 | file<NNN>2|
* fileZ1 | fileA1 |
* zzzzA1 | zzzzA1 |
* zzzzB1 | * |
* * | zzzzC2 |
* zzzzD1 | zzzzD2 |
*/
// Build a directory with 'file_count' bulk files plus a handful of
// "sentinel" files sorting before/after them, then take snap1 and snap2.
// When 'bulk_diff' is set, every bulk file is rewritten between the
// snapshots so it shows up in the delta; otherwise only the sentinels
// change.
void TestMount::prepareHugeSnapDiff(const std::string& name_prefix_start,
                                    const std::string& name_prefix_bulk,
                                    const std::string& name_prefix_end,
                                    size_t file_count,
                                    bool bulk_diff)
{
  // Sentinel names placed alphabetically around the bulk set.
  const std::string sA = name_prefix_start + "A";
  const std::string sB = name_prefix_start + "B";
  const std::string sC = name_prefix_start + "C";
  const std::string sD = name_prefix_start + "D";
  const std::string eA = name_prefix_end + "A";
  const std::string eB = name_prefix_end + "B";
  const std::string eC = name_prefix_end + "C";
  const std::string eD = name_prefix_end + "D";
  //************ snap1 *************
  ASSERT_LE(0, write_full(sA.c_str(), "hello world"));
  ASSERT_LE(0, write_full(sB.c_str(), "hello world"));
  ASSERT_LE(0, write_full(sD.c_str(), "hello world"));
  for (size_t idx = 0; idx < file_count; ++idx) {
    const std::string bulk_name = name_prefix_bulk + stringify(idx);
    ASSERT_LE(0, write_full(bulk_name.c_str(), "hello world"));
  }
  ASSERT_LE(0, write_full(eA.c_str(), "hello world"));
  ASSERT_LE(0, write_full(eB.c_str(), "hello world"));
  ASSERT_LE(0, write_full(eD.c_str(), "hello world"));
  ASSERT_EQ(0, mksnap("snap1"));
  //************ snap2 *************
  ASSERT_LE(0, unlink(sB.c_str()));
  ASSERT_LE(0, write_full(sC.c_str(), "hello world2"));
  ASSERT_LE(0, write_full(sD.c_str(), "hello world2"));
  if (bulk_diff) {
    // rewrite every bulk file so each one appears in the snap1..snap2 delta
    for (size_t idx = 0; idx < file_count; ++idx) {
      const std::string bulk_name = name_prefix_bulk + stringify(idx);
      ASSERT_LE(0, write_full(bulk_name.c_str(), "hello world2"));
    }
  }
  ASSERT_LE(0, unlink(eB.c_str()));
  ASSERT_LE(0, write_full(eC.c_str(), "hello world2"));
  ASSERT_LE(0, write_full(eD.c_str(), "hello world2"));
  ASSERT_EQ(0, mksnap("snap2"));
}
/*
* More versatile SnapDiff readdir API verification,
* includes 3 different snapshots and interleaving/repetitive calls to make sure
* the results aren't spoiled due to caching.
*/
TEST(LibCephFS, SnapDiffLib2)
{
  TestMount test_mount;
  test_mount.prepareSnapDiffLib2Cases();
  // Create simple directory tree with a couple of snapshots to test against
  uint64_t snapid1;
  uint64_t snapid2;
  uint64_t snapid3;
  ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
  ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
  ASSERT_EQ(0, test_mount.get_snapid("snap3", &snapid3));
  std::cout << snapid1 << " vs. " << snapid2 << " vs. " << snapid3 << std::endl;
  // snapshot ids must be valid and strictly increasing
  ASSERT_GT(snapid1, 0);
  ASSERT_GT(snapid2, 0);
  ASSERT_GT(snapid3, 0);
  ASSERT_GT(snapid2, snapid1);
  ASSERT_GT(snapid3, snapid2);
  // define a lambda which verifies snap1/snap2/snap3 listings
  auto verify_snap_listing = [&]()
  {
    {
      string snap_path = test_mount.make_snap_path("snap1");
      std::cout << "---------snap1 listing verification---------" << std::endl;
      vector<string> expected;
      expected.push_back("fileA");
      expected.push_back("fileC");
      expected.push_back("fileD");
      expected.push_back("fileF");
      expected.push_back("fileG");
      expected.push_back("dirA");
      expected.push_back("dirC");
      expected.push_back("dirD");
      ASSERT_EQ(0,
        test_mount.readdir_and_compare(snap_path.c_str(), expected));
    }
    {
      std::cout << "---------snap2 listing verification---------" << std::endl;
      string snap_path = test_mount.make_snap_path("snap2");
      vector<string> expected;
      expected.push_back("fileA");
      expected.push_back("fileB");
      expected.push_back("fileD");
      expected.push_back("fileG");
      expected.push_back("dirA");
      expected.push_back("dirB");
      expected.push_back("dirD");
      ASSERT_EQ(0,
        test_mount.readdir_and_compare(snap_path.c_str(), expected));
    }
    {
      std::cout << "---------snap3 listing verification---------" << std::endl;
      string snap_path = test_mount.make_snap_path("snap3");
      vector<string> expected;
      expected.push_back("fileA");
      expected.push_back("fileB");
      expected.push_back("fileC");
      expected.push_back("fileD");
      expected.push_back("fileE");
      expected.push_back("dirD");
      ASSERT_EQ(0,
        test_mount.readdir_and_compare(snap_path.c_str(), expected));
    }
  };
  // Prepare expected delta for snap1 vs. snap2
  // (removed entries carry the older snapid, changed/added the newer one)
  vector<pair<string, uint64_t>> snap1_2_diff_expected;
  snap1_2_diff_expected.emplace_back("fileA", snapid2);
  snap1_2_diff_expected.emplace_back("fileB", snapid2);
  snap1_2_diff_expected.emplace_back("fileC", snapid1);
  snap1_2_diff_expected.emplace_back("fileF", snapid1);
  snap1_2_diff_expected.emplace_back("fileG", snapid2);
  snap1_2_diff_expected.emplace_back("dirA", snapid2);
  snap1_2_diff_expected.emplace_back("dirB", snapid2);
  snap1_2_diff_expected.emplace_back("dirC", snapid1);
  snap1_2_diff_expected.emplace_back("dirD", snapid2);
  // Prepare expected delta for snap1 vs. snap3
  vector<pair<string, uint64_t>> snap1_3_diff_expected;
  snap1_3_diff_expected.emplace_back("fileA", snapid3);
  snap1_3_diff_expected.emplace_back("fileB", snapid3);
  snap1_3_diff_expected.emplace_back("fileC", snapid3);
  snap1_3_diff_expected.emplace_back("fileD", snapid3);
  snap1_3_diff_expected.emplace_back("fileE", snapid3);
  snap1_3_diff_expected.emplace_back("fileF", snapid1);
  snap1_3_diff_expected.emplace_back("fileG", snapid1);
  snap1_3_diff_expected.emplace_back("dirA", snapid1);
  snap1_3_diff_expected.emplace_back("dirC", snapid1);
  snap1_3_diff_expected.emplace_back("dirD", snapid3);
  // Prepare expected delta for snap2 vs. snap3
  vector<pair<string, uint64_t>> snap2_3_diff_expected;
  snap2_3_diff_expected.emplace_back("fileC", snapid3);
  snap2_3_diff_expected.emplace_back("fileD", snapid3);
  snap2_3_diff_expected.emplace_back("fileE", snapid3);
  snap2_3_diff_expected.emplace_back("fileG", snapid2);
  snap2_3_diff_expected.emplace_back("dirA", snapid2);
  snap2_3_diff_expected.emplace_back("dirB", snapid2);
  snap2_3_diff_expected.emplace_back("dirD", snapid3);
  // Check snapshot listings on a cold cache
  verify_snap_listing();
  // Check snapshot listings on a warm cache
  verify_snap_listing(); // served from cache
  // Print snap1 vs. snap2 delta against the root folder
  test_mount.print_snap_diff("", "snap1", "snap2");
  // Verify snap1 vs. snap2 delta for the root
  test_mount.verify_snap_diff(snap1_2_diff_expected, "", "snap1", "snap2");
  // Check snapshot listings on a warm cache once again
  // to make sure it wasn't spoiled by SnapDiff
  verify_snap_listing(); // served from cache
  // Verify snap2 vs. snap1 delta (same expectation: the delta is symmetric)
  test_mount.verify_snap_diff(snap1_2_diff_expected, "", "snap2", "snap1");
  // Check snapshot listings on a warm cache once again
  // to make sure it wasn't spoiled by SnapDiff
  verify_snap_listing(); // served from cache
  // Verify snap1 vs. snap3 delta for the root
  test_mount.verify_snap_diff(snap1_3_diff_expected, "", "snap1", "snap3");
  // Verify snap2 vs. snap3 delta for the root
  test_mount.verify_snap_diff(snap2_3_diff_expected, "", "snap2", "snap3");
  // Check snapshot listings on a warm cache once again
  // to make sure it wasn't spoiled by SnapDiff
  verify_snap_listing(); // served from cache
  // Print snap1 vs. snap2 delta against /dirA folder
  test_mount.print_snap_diff("dirA", "snap1", "snap2");
  // Verify snap1 vs. snap2 delta for /dirA
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back("fileA", snapid2);
    test_mount.verify_snap_diff(expected, "dirA", "snap1", "snap2");
  }
  // Print snap1 vs. snap2 delta against /dirB folder
  test_mount.print_snap_diff("dirB", "snap1", "snap2");
  // Verify snap1 vs. snap2 delta for /dirB
  {
    vector<pair<string, uint64_t>> expected;
    expected.emplace_back("fileb", snapid2);
    test_mount.verify_snap_diff(expected, "dirB", "snap1", "snap2");
  }
  // Print snap1 vs. snap2 delta against /dirD folder
  test_mount.print_snap_diff("dirD", "snap1", "snap2");
  // Verify snap1 vs. snap2 delta for /dirD (expected to be empty)
  {
    vector<pair<string, uint64_t>> expected;
    test_mount.verify_snap_diff(expected, "dirD", "snap1", "snap2");
  }
  // Check snapshot listings on a warm cache once again
  // to make sure it wasn't spoiled by SnapDiff
  verify_snap_listing(); // served from cache
  // Verify snap1 vs. snap2 delta for the root once again
  test_mount.verify_snap_diff(snap1_2_diff_expected, "", "snap1", "snap2");
  // Verify snap2 vs. snap3 delta for the root once again
  // (arguments intentionally reversed to exercise the symmetric path)
  test_mount.verify_snap_diff(snap2_3_diff_expected, "", "snap3", "snap2");
  // Verify snap1 vs. snap3 delta for the root once again
  test_mount.verify_snap_diff(snap1_3_diff_expected, "", "snap1", "snap3");
  std::cout << "------------- closing -------------" << std::endl;
  ASSERT_EQ(0, test_mount.purge_dir(""));
  ASSERT_EQ(0, test_mount.rmsnap("snap1"));
  ASSERT_EQ(0, test_mount.rmsnap("snap2"));
  ASSERT_EQ(0, test_mount.rmsnap("snap3"));
}
/* The following method creates some files/folders/snapshots layout,
described in the sheet below.
We're to test SnapDiff against that structure.
* where:
- xN denotes file 'x' version N.
- X denotes folder name
- * denotes no/removed file/folder
# snap1 snap2 snap3 head
# a1 | a1 | a3 | a4
# b1 | b2 | b3 | b3
#  c1     | *       | *      | *
#  e1     | e2      | *      | *
#  ~e1    | ~e2     | *      | *
# * | d2 | d3 | d3
# f1 | f2 | * | *
# ff1 | ff1 | * | *
# g1 | * | g3 | g3
# * | * | * | h4
# i1 | i1 | i1 | i1
# S | S | S | S
# S/sa1 | S/sa2 | S/sa3 | S/sa3
# * | * | * | S/sh4
# * | T | T | T
# * | T/td2 | T/td3 | T/td3
# C | * | * | *
# C/cc1 | * | * | *
#  C/CC    | *       | *      | *
#  C/CC/c1 | *       | *      | *
# G | * | G | G
# G/gg1 | * | G/gg3 | G/gg3
# * | k2 | * | *
# * | l2 | l2 | *
# * | K | * | *
# * | K/kk2 | * | *
# * | * | H | H
# * | * | H/hh3 | H/hh3
# I | I | I | *
# I/ii1 | I/ii2 | I/ii3 | *
# I/iii1 | I/iii1 | I/iii3| *
# * | * | I/iiii3| *
# * | I/J | I/J | *
# * | I/J/i2 | I/J/i3 | *
# * | I/J/j2 | I/J/j2 | *
# * | I/J/k2 | * | *
# * | * | I/J/l3 | *
# L | L | L | L
# L/ll1 | L/ll1 | L/ll3 | L/ll3
# L/LL | L/LL | L/LL | L/LL
# * | L/LL/ll2| L/LL/ll3| L/LL/ll4
# * | L/LM | * | *
# * | L/LM/lm2| * | *
# * | L/LN | L/LN | *
*/
// Seed the multi-snapshot FS layout described in the table above:
// three snapshots (snap1..snap3) plus head, exercising file updates,
// removals, re-creations and nested directory changes for SnapDiff tests.
// NOTE(review): files 'e' and '~e' (created in snap1, updated in snap2,
// removed in snap3) are created here in addition to the table's rows.
// The statement order is significant: each section mutates the tree
// relative to the previous snapshot before taking the next one.
void TestMount::prepareSnapDiffLib3Cases()
{
//************ snap1 *************
ASSERT_LE(0, write_full("a", "file 'a' v1"));
ASSERT_LE(0, write_full("b", "file 'b' v1"));
ASSERT_LE(0, write_full("c", "file 'c' v1"));
ASSERT_LE(0, write_full("e", "file 'e' v1"));
ASSERT_LE(0, write_full("~e", "file '~e' v1"));
ASSERT_LE(0, write_full("f", "file 'f' v1"));
ASSERT_LE(0, write_full("ff", "file 'ff' v1"));
ASSERT_LE(0, write_full("g", "file 'g' v1"));
ASSERT_LE(0, write_full("i", "file 'i' v1"));
ASSERT_EQ(0, mkdir("S"));
ASSERT_LE(0, write_full("S/sa", "file 'S/sa' v1"));
ASSERT_EQ(0, mkdir("C"));
ASSERT_LE(0, write_full("C/cc", "file 'C/cc' v1"));
ASSERT_EQ(0, mkdir("C/CC"));
ASSERT_LE(0, write_full("C/CC/c", "file 'C/CC/c' v1"));
ASSERT_EQ(0, mkdir("G"));
ASSERT_LE(0, write_full("G/gg", "file 'G/gg' v1"));
ASSERT_EQ(0, mkdir("I"));
ASSERT_LE(0, write_full("I/ii", "file 'I/ii' v1"));
ASSERT_LE(0, write_full("I/iii", "file 'I/iii' v1"));
ASSERT_EQ(0, mkdir("L"));
ASSERT_LE(0, write_full("L/ll", "file 'L/ll' v1"));
ASSERT_EQ(0, mkdir("L/LL"));
ASSERT_EQ(0, mksnap("snap1"));
//************ snap2 *************
ASSERT_LE(0, write_full("b", "file 'b' v2"));
ASSERT_EQ(0, unlink("c"));
ASSERT_LE(0, write_full("d", "file 'd' v2"));
ASSERT_LE(0, write_full("e", "file 'e' v2"));
ASSERT_LE(0, write_full("~e", "file '~e' v2"));
ASSERT_LE(0, write_full("f", "file 'f' v2"));
ASSERT_EQ(0, unlink("g"));
ASSERT_LE(0, write_full("S/sa", "file 'S/sa' v2"));
ASSERT_EQ(0, mkdir("T"));
ASSERT_LE(0, write_full("T/td", "file 'T/td' v2"));
// whole subtrees 'C' and 'G' disappear in snap2
ASSERT_EQ(0, purge_dir("C"));
ASSERT_EQ(0, purge_dir("G"));
ASSERT_LE(0, write_full("k", "file 'k' v2"));
ASSERT_LE(0, write_full("l", "file 'l' v2"));
ASSERT_EQ(0, mkdir("K"));
ASSERT_LE(0, write_full("K/kk", "file 'K/kk' v2"));
ASSERT_LE(0, write_full("I/ii", "file 'I/ii' v2"));
ASSERT_EQ(0, mkdir("I/J"));
ASSERT_LE(0, write_full("I/J/i", "file 'I/J/i' v2"));
ASSERT_LE(0, write_full("I/J/j", "file 'I/J/j' v2"));
ASSERT_LE(0, write_full("I/J/k", "file 'I/J/k' v2"));
ASSERT_LE(0, write_full("L/LL/ll", "file 'L/LL/ll' v2"));
ASSERT_EQ(0, mkdir("L/LM"));
ASSERT_LE(0, write_full("L/LM/lm", "file 'L/LM/lm' v2"));
ASSERT_EQ(0, mkdir("L/LN"));
ASSERT_EQ(0, mksnap("snap2"));
//************ snap3 *************
ASSERT_LE(0, write_full("a", "file 'a' v3"));
ASSERT_LE(0, write_full("b", "file 'b' v3"));
ASSERT_LE(0, write_full("d", "file 'd' v3"));
ASSERT_EQ(0, unlink("e"));
ASSERT_EQ(0, unlink("~e"));
ASSERT_EQ(0, unlink("f"));
ASSERT_EQ(0, unlink("ff"));
// 'g' and subtree 'G' re-appear after being removed in snap2
ASSERT_LE(0, write_full("g", "file 'g' v3"));
ASSERT_LE(0, write_full("S/sa", "file 'S/sa' v3"));
ASSERT_LE(0, write_full("T/td", "file 'T/td' v3"));
ASSERT_EQ(0, mkdir("G"));
ASSERT_LE(0, write_full("G/gg", "file 'G/gg' v3"));
ASSERT_EQ(0, unlink("k"));
ASSERT_EQ(0, purge_dir("K"));
ASSERT_EQ(0, mkdir("H"));
ASSERT_LE(0, write_full("H/hh", "file 'H/hh' v3"));
ASSERT_LE(0, write_full("I/ii", "file 'I/ii' v3"));
ASSERT_LE(0, write_full("I/iii", "file 'I/iii' v3"));
ASSERT_LE(0, write_full("I/iiii", "file 'I/iiii' v3"));
ASSERT_LE(0, write_full("I/J/i", "file 'I/J/i' v3"));
ASSERT_EQ(0, unlink("I/J/k"));
ASSERT_LE(0, write_full("I/J/l", "file 'I/J/l' v3"));
ASSERT_LE(0, write_full("L/ll", "file 'L/ll' v3"));
ASSERT_LE(0, write_full("L/LL/ll", "file 'L/LL/ll' v3"));
ASSERT_EQ(0, purge_dir("L/LM"));
ASSERT_EQ(0, mksnap("snap3"));
//************ head *************
ASSERT_LE(0, write_full("a", "file 'a' head"));
ASSERT_LE(0, write_full("h", "file 'h' head"));
ASSERT_LE(0, write_full("S/sh", "file 'S/sh' head"));
ASSERT_EQ(0, unlink("l"));
ASSERT_EQ(0, purge_dir("I"));
ASSERT_LE(0, write_full("L/LL/ll", "file 'L/LL/ll' head"));
ASSERT_EQ(0, purge_dir("L/LN"));
}
//
// This case tests SnapDiff functionality for snap1/snap2 snapshot delta
// It operates against FS layout created by prepareSnapDiffCases() method,
// see relevant table before that function for FS state overview.
//
TEST(LibCephFS, SnapDiffCases1_2)
{
TestMount test_mount;
// Create directory tree evolving through a bunch of snapshots
test_mount.prepareSnapDiffLib3Cases();
uint64_t snapid1;
uint64_t snapid2;
ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
std::cout << snapid1 << " vs. " << snapid2 << std::endl;
// snapshot ids are positive and monotonically increasing
ASSERT_GT(snapid1, 0);
ASSERT_GT(snapid2, 0);
ASSERT_GT(snapid2, snapid1);
// Print snapshot delta (snap1 vs. snap2) results for root in a
// human-readable form.
test_mount.print_snap_diff("", "snap1", "snap2");
{
// Make sure the root delta is as expected
// One should use columns snap1 and snap2 from
// the table preceding prepareSnapDiffLib3Cases() function
// to learn which names to expect in the delta.
//
// - file 'a' is unchanged hence not present in delta
// - file 'ff' is unchanged hence not present in delta
// - file 'i' is unchanged hence not present in delta
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("b", snapid2); // file 'b' is updated in snap2
expected.emplace_back("c", snapid1); // file 'c' is removed in snap2
expected.emplace_back("d", snapid2); // file 'd' is created in snap2
expected.emplace_back("e", snapid2); // file 'e' is updated in snap2
expected.emplace_back("~e", snapid2); // file '~e' is updated in snap2
expected.emplace_back("f", snapid2); // file 'f' is updated in snap2
expected.emplace_back("g", snapid1); // file 'g' is removed in snap2
expected.emplace_back("S", snapid2); // folder 'S' is present in snap2 hence reported
expected.emplace_back("T", snapid2); // folder 'T' is created in snap2
expected.emplace_back("C", snapid1); // folder 'C' is removed in snap2
expected.emplace_back("G", snapid1); // folder 'G' is removed in snap2
expected.emplace_back("k", snapid2); // file 'k' is created in snap2
expected.emplace_back("l", snapid2); // file 'l' is created in snap2
expected.emplace_back("K", snapid2); // folder 'K' is created in snap2
expected.emplace_back("I", snapid2); // folder 'I' is created in snap2
expected.emplace_back("L", snapid2); // folder 'L' is present in snap2 but got more
// subfolders
test_mount.verify_snap_diff(expected, "", "snap1", "snap2");
}
{
//
// Make sure snapshot delta for /S (existed at both snap1 and snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("sa", snapid2);
test_mount.verify_snap_diff(expected, "S", "snap1", "snap2");
}
{
//
// Make sure snapshot delta for /T (created at snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("td", snapid2);
test_mount.verify_snap_diff(expected, "T", "snap1", "snap2");
}
{
//
// Make sure snapshot delta for /C (removed at snap2)
// is as expected
// NOTE: snapshot arguments are deliberately reversed ("snap2", "snap1")
// since /C only exists at snap1; see the /G discussion in
// SnapDiffCases1_3 for why the order matters for removed entries.
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("cc", snapid1);
expected.emplace_back("CC", snapid1);
test_mount.verify_snap_diff(expected, "C", "snap2", "snap1");
}
{
//
// Make sure snapshot delta for /C/CC (removed at snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("c", snapid1);
test_mount.verify_snap_diff(expected, "C/CC", "snap2", "snap1");
}
{
//
// Make sure snapshot delta for /I (created at snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("ii", snapid2);
expected.emplace_back("J", snapid2);
test_mount.verify_snap_diff(expected, "I", "snap1", "snap2");
}
{
//
// Make sure snapshot delta for /I/J (created at snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("i", snapid2);
expected.emplace_back("j", snapid2);
expected.emplace_back("k", snapid2);
test_mount.verify_snap_diff(expected, "I/J", "snap1", "snap2");
}
{
//
// Make sure snapshot delta for /L (extended at snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("LL", snapid2);
expected.emplace_back("LM", snapid2);
expected.emplace_back("LN", snapid2);
test_mount.verify_snap_diff(expected, "L", "snap1", "snap2");
}
{
//
// Make sure snapshot delta for /L/LL (updated at snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("ll", snapid2);
test_mount.verify_snap_diff(expected, "L/LL", "snap1", "snap2");
}
{
//
// Make sure snapshot delta for /L/LN (created empty at snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
test_mount.verify_snap_diff(expected, "L/LN", "snap1", "snap2");
}
{
// Make sure snapshot delta for /L/LM (created at snap2)
// is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("lm", snapid2);
test_mount.verify_snap_diff(expected, "L/LM", "snap1", "snap2");
}
std::cout << "-------------" << std::endl;
test_mount.remove_all();
test_mount.rmsnap("snap1");
test_mount.rmsnap("snap2");
test_mount.rmsnap("snap3");
}
//
// This case tests SnapDiff functionality for snap2/snap3 snapshot delta
// retrieved through .snap path-based query API.
// It operates against FS layout created by prepareSnapDiffCases() method,
// see relevant table before that function for FS state overview.
//
TEST(LibCephFS, SnapDiffCases2_3)
{
TestMount test_mount;
// Create directory tree evolving through a bunch of snapshots
test_mount.prepareSnapDiffLib3Cases();
uint64_t snapid2;
uint64_t snapid3;
ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
ASSERT_EQ(0, test_mount.get_snapid("snap3", &snapid3));
std::cout << snapid2 << " vs. " << snapid3 << std::endl;
// Fix: the original asserted snapid3 > 0 twice (copy-paste),
// leaving snapid2 unvalidated.
ASSERT_GT(snapid2, 0);
ASSERT_GT(snapid3, 0);
ASSERT_GT(snapid3, snapid2);
// Print snapshot delta (snap2 vs. snap3) results for root in a
// human-readable form.
test_mount.print_snap_diff("", "snap2", "snap3");
{
// Make sure the root delta is as expected
// One should use columns snap2 and snap3 from
// the table preceding prepareSnapDiffLib3Cases() function
// to learn which names to expect in the delta.
//
// - file 'c' is removed since snap1 hence not present in delta
// - file 'l' is unchanged hence not present in delta
// - file 'i' is unchanged hence not present in delta
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("a", snapid3); // file 'a' is updated in snap3
expected.emplace_back("b", snapid3); // file 'b' is updated in snap3
expected.emplace_back("d", snapid3); // file 'd' is updated in snap3
expected.emplace_back("~e", snapid2); // file '~e' is removed in snap3
expected.emplace_back("e", snapid2); // file 'e' is removed in snap3
expected.emplace_back("f", snapid2); // file 'f' is removed in snap3
expected.emplace_back("ff", snapid2); // file 'ff' is removed in snap3
expected.emplace_back("g", snapid3); // file 'g' re-appeared in snap3
expected.emplace_back("S", snapid3); // folder 'S' is present in snap3 hence reported
expected.emplace_back("T", snapid3); // folder 'T' is present in snap3 hence reported
expected.emplace_back("G", snapid3); // folder 'G' re-appeared in snap3 hence reported
expected.emplace_back("k", snapid2); // file 'k' is removed in snap3
expected.emplace_back("K", snapid2); // folder 'K' is removed in snap3
expected.emplace_back("H", snapid3); // folder 'H' is created in snap3 hence reported
expected.emplace_back("I", snapid3); // folder 'I' is present in snap3 hence reported
expected.emplace_back("L", snapid3); // folder 'L' is present in snap3 hence reported
test_mount.verify_snap_diff(expected, "", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /S (children updated) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("sa", snapid3);
test_mount.verify_snap_diff(expected, "S", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /T (children updated) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("td", snapid3);
test_mount.verify_snap_diff(expected, "T", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /G (re-appeared) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("gg", snapid3);
test_mount.verify_snap_diff(expected, "G", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /K (removed) is as expected
// Snapshot names are reversed since /K exists at snap2 only.
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("kk", snapid2);
test_mount.verify_snap_diff(expected, "K", "snap3", "snap2");
}
{
//
// Make sure snapshot delta for /H (created) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("hh", snapid3);
test_mount.verify_snap_diff(expected, "H", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /I (children updated) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("ii", snapid3);
expected.emplace_back("iii", snapid3);
expected.emplace_back("iiii", snapid3);
expected.emplace_back("J", snapid3);
test_mount.verify_snap_diff(expected, "I", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /I/J (children updated/removed) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("i", snapid3);
expected.emplace_back("k", snapid2);
expected.emplace_back("l", snapid3);
test_mount.verify_snap_diff(expected, "I/J", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /L (children updated/removed) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("ll", snapid3);
expected.emplace_back("LL", snapid3);
expected.emplace_back("LM", snapid2);
expected.emplace_back("LN", snapid3);
test_mount.verify_snap_diff(expected, "L", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /L/LL (children updated) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("ll", snapid3);
test_mount.verify_snap_diff(expected, "L/LL", "snap2", "snap3");
}
{
//
// Make sure snapshot delta for /L/LM (removed) is as expected
// Snapshot names are reversed since /L/LM exists at snap2 only.
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("lm", snapid2);
test_mount.verify_snap_diff(expected, "L/LM", "snap3", "snap2");
}
{
//
// Make sure snapshot delta for /L/LN (created empty) is as expected
//
vector<std::pair<string, uint64_t>> expected;
test_mount.verify_snap_diff(expected, "L/LN", "snap2", "snap3");
}
test_mount.remove_all();
test_mount.rmsnap("snap1");
test_mount.rmsnap("snap2");
test_mount.rmsnap("snap3");
}
//
// This case tests SnapDiff functionality for snap1/snap3 snapshot delta
// retrieved through .snap path-based query API.
// It operates against FS layout created by prepareSnapDiffCases() method,
// see relevant table before that function for FS state overview.
//
TEST(LibCephFS, SnapDiffCases1_3)
{
TestMount test_mount;
// Create directory tree evolving through a bunch of snapshots
test_mount.prepareSnapDiffLib3Cases();
uint64_t snapid1;
uint64_t snapid3;
ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
ASSERT_EQ(0, test_mount.get_snapid("snap3", &snapid3));
std::cout << snapid1 << " vs. " << snapid3 << std::endl;
// Fix: the original asserted snapid3 > 0 twice (copy-paste),
// leaving snapid1 unvalidated.
ASSERT_GT(snapid1, 0);
ASSERT_GT(snapid3, 0);
ASSERT_GT(snapid3, snapid1);
// Print snapshot delta (snap1 vs. snap3) results for root in a
// human-readable form.
test_mount.print_snap_diff("", "snap1", "snap3");
{
// Make sure the root delta is as expected
// One should use columns snap1 and snap3 from
// the table preceding prepareSnapDiffLib3Cases() function
// to learn which names to expect in the delta.
//
// - file 'i' is unchanged hence not present in delta
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("a", snapid3); // file 'a' is updated in snap3
expected.emplace_back("b", snapid3); // file 'b' is updated in snap3
expected.emplace_back("c", snapid1); // file 'c' is removed in snap2
expected.emplace_back("d", snapid3); // file 'd' is updated in snap3
expected.emplace_back("~e", snapid1); // file '~e' is removed in snap3
expected.emplace_back("e", snapid1); // file 'e' is removed in snap3
expected.emplace_back("f", snapid1); // file 'f' is removed in snap3
expected.emplace_back("ff", snapid1); // file 'ff' is removed in snap3
expected.emplace_back("g", snapid3); // file 'g' removed in snap2 and
// re-appeared in snap3
expected.emplace_back("S", snapid3); // folder 'S' is present in snap3 hence reported
expected.emplace_back("T", snapid3); // folder 'T' is present in snap3 hence reported
expected.emplace_back("C", snapid1); // folder 'C' is removed in snap2
// folder 'G' is removed in snap2 and re-appeared in snap3
// hence reporting it twice under different snapid
expected.emplace_back("G", snapid1);
expected.emplace_back("G", snapid3);
expected.emplace_back("l", snapid3); // file 'l' is created in snap2
expected.emplace_back("H", snapid3); // folder 'H' is created in snap3 hence reported
expected.emplace_back("I", snapid3); // folder 'I' is created in snap3 hence reported
expected.emplace_back("L", snapid3); // folder 'L' is created in snap3 hence reported
test_mount.verify_snap_diff(expected, "", "snap3", "snap1");
}
{
//
// Make sure snapshot delta for /S (children updated) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("sa", snapid3);
test_mount.verify_snap_diff(expected, "S", "snap3", "snap1");
}
{
//
// Make sure snapshot delta for /T (created and children updated) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("td", snapid3);
test_mount.verify_snap_diff(expected, "T", "snap3", "snap1");
}
{
//
// Make sure snapshot delta for /C (removed) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("cc", snapid1);
expected.emplace_back("CC", snapid1);
test_mount.verify_snap_diff(expected, "C", "snap3", "snap1");
}
{
//
// Make sure snapshot delta for /C/CC (removed) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("c", snapid1);
test_mount.verify_snap_diff(expected, "C/CC", "snap3", "snap1");
}
{
//
// Make sure snapshot delta for /G (removed) is as expected
// For this case (G@snap1 and G@snap3 are different entries)
// the order in which snapshot names are provided is crucial.
// Making G@snap1 vs. snap3 delta returns everything from G@snap1
// but omits any entries from G@snap3 (since it's a different entry).
// And making G@snap3 vs. snap1 delta returns everything from G@snap3
// but nothing from snap1.
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("gg", snapid1);
test_mount.verify_snap_diff(expected, "G", "snap1", "snap3");
}
{
//
// Make sure snapshot delta for /G (re-created) is as expected
// The snapshot names order is important, see above.
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("gg", snapid3);
test_mount.verify_snap_diff(expected, "G", "snap3", "snap1");
}
{
//
// Make sure snapshot delta for /H (created) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("hh", snapid3);
test_mount.verify_snap_diff(expected, "H", "snap1", "snap3");
}
{
//
// Make sure snapshot delta for /I (children updated) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("ii", snapid3);
expected.emplace_back("iii", snapid3);
expected.emplace_back("iiii", snapid3);
expected.emplace_back("J", snapid3);
test_mount.verify_snap_diff(expected, "I", "snap1", "snap3");
}
{
//
// Make sure snapshot delta for /I/J (created at snap2) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("i", snapid3);
expected.emplace_back("j", snapid3);
expected.emplace_back("l", snapid3);
test_mount.verify_snap_diff(expected, "I/J", "snap1", "snap3");
}
{
//
// Make sure snapshot delta for /L is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("ll", snapid3);
expected.emplace_back("LL", snapid3);
expected.emplace_back("LN", snapid3);
test_mount.verify_snap_diff(expected, "L", "snap1", "snap3");
}
{
//
// Make sure snapshot delta for /L/LL (children updated) is as expected
//
vector<std::pair<string, uint64_t>> expected;
expected.emplace_back("ll", snapid3);
test_mount.verify_snap_diff(expected, "L/LL", "snap1", "snap3");
}
{
// /L/LN was created empty at snap2 and is unchanged since;
// expect an empty delta.
vector<std::pair<string, uint64_t>> expected;
test_mount.verify_snap_diff(expected, "L/LN", "snap1", "snap3");
}
std::cout << "-------------" << std::endl;
test_mount.remove_all();
test_mount.rmsnap("snap1");
test_mount.rmsnap("snap2");
test_mount.rmsnap("snap3");
}
/*
* SnapDiff readdir API testing for huge dir
* when delta is minor.
*/
TEST(LibCephFS, HugeSnapDiffSmallDelta)
{
TestMount test_mount;
// Seed a large directory (bulk of identical files plus a few
// name-prefixed sentinels at both ends of the name space) with
// two snapshots; only the sentinels differ between snapshots.
const long int file_count = 10000;
printf("Seeding %ld files...\n", file_count);
const string prefix_head = "aaaa";
const string prefix_bulk = "file";
const string prefix_tail = "zzzz";
test_mount.prepareHugeSnapDiff(prefix_head,
prefix_bulk,
prefix_tail,
file_count,
false);
// Learn snapshot ids and sanity-check their ordering.
uint64_t snapid1 = 0;
uint64_t snapid2 = 0;
ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
ASSERT_GT(snapid1, 0);
ASSERT_GT(snapid2, 0);
ASSERT_GT(snapid2, snapid1);
std::cout << snapid1 << " vs. " << snapid2 << std::endl;
{
// Only the sentinel entries should appear in the snap1 vs. snap2
// delta; the bulk files are identical across snapshots.
vector<pair<string, uint64_t>> expected;
for (const auto& base : {prefix_head, prefix_tail}) {
expected.emplace_back(base + "B", snapid1);
expected.emplace_back(base + "C", snapid2);
expected.emplace_back(base + "D", snapid2);
}
test_mount.verify_snap_diff(expected, "", "snap1", "snap2");
}
std::cout << "------------- closing -------------" << std::endl;
ASSERT_EQ(0, test_mount.purge_dir(""));
ASSERT_EQ(0, test_mount.rmsnap("snap1"));
ASSERT_EQ(0, test_mount.rmsnap("snap2"));
}
/*
* SnapDiff readdir API testing for huge dir
* when delta is large
*/
TEST(LibCephFS, HugeSnapDiffLargeDelta)
{
TestMount test_mount;
// Calculate amount of files required to have multiple directory fragments
// using relevant config parameters:
// file_count = mds_bal_split_size * mds_bal_fragment_fast_factor + 100
char buf[256];
int r = test_mount.conf_get("mds_bal_split_size", buf, sizeof(buf));
ASSERT_GE(r, 0);
long int file_count = strtol(buf, nullptr, 10);
// Fix: the config key had a stray trailing space which relied on
// config-key normalization to work at all.
r = test_mount.conf_get("mds_bal_fragment_fast_factor", buf, sizeof(buf));
ASSERT_GE(r, 0);
double factor = strtod(buf, nullptr);
file_count *= factor;
file_count += 100;
printf("Seeding %ld files...\n", file_count);
// Create simple directory tree with a couple of snapshots
// to test against; 'true' makes every bulk file differ between
// the snapshots, producing a large delta.
string name_prefix_start = "aaaa";
string name_prefix_bulk = "file";
string name_prefix_end = "zzzz";
test_mount.prepareHugeSnapDiff(name_prefix_start,
name_prefix_bulk,
name_prefix_end,
file_count,
true);
uint64_t snapid1;
uint64_t snapid2;
// learn snapshot ids and do basic verification
ASSERT_EQ(0, test_mount.get_snapid("snap1", &snapid1));
ASSERT_EQ(0, test_mount.get_snapid("snap2", &snapid2));
ASSERT_GT(snapid1, 0);
ASSERT_GT(snapid2, 0);
ASSERT_GT(snapid2, snapid1);
std::cout << snapid1 << " vs. " << snapid2 << std::endl;
//
// Make sure snap1 vs. snap2 delta for the root is as expected:
// sentinels plus every bulk file.
//
{
vector<pair<string, uint64_t>> expected;
expected.emplace_back(name_prefix_start + "B", snapid1);
expected.emplace_back(name_prefix_start + "C", snapid2);
expected.emplace_back(name_prefix_start + "D", snapid2);
for (size_t i = 0; i < (size_t)file_count; i++) {
expected.emplace_back(name_prefix_bulk + stringify(i), snapid2);
}
expected.emplace_back(name_prefix_end + "B", snapid1);
expected.emplace_back(name_prefix_end + "C", snapid2);
expected.emplace_back(name_prefix_end + "D", snapid2);
test_mount.verify_snap_diff(expected, "", "snap1", "snap2");
}
std::cout << "------------- closing -------------" << std::endl;
ASSERT_EQ(0, test_mount.purge_dir(""));
ASSERT_EQ(0, test_mount.rmsnap("snap1"));
ASSERT_EQ(0, test_mount.rmsnap("snap2"));
}
| 57,060 | 32.864095 | 92 |
cc
|
null |
ceph-main/src/test/libcephfs/suidsgid.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2023 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "common/ceph_argparse.h"
#include "include/buffer.h"
#include "include/fs_types.h"
#include "include/stringify.h"
#include "include/cephfs/libcephfs.h"
#include "include/rados/librados.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <iostream>
#include <vector>
#include "json_spirit/json_spirit.h"
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
using namespace std;
// Privileged (admin) mount used to set up fixtures and chmod test files.
struct ceph_mount_info *admin;
// Unprivileged test-user mount whose writes should clear suid/sgid.
struct ceph_mount_info *cmount;
// Path of the shared test file; made unique per pid in the test body.
char filename[128];
void run_fallocate_test_case(int mode, int result, bool with_admin=false)
{
struct ceph_statx stx;
int flags = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
ASSERT_EQ(0, ceph_chmod(admin, filename, mode));
struct ceph_mount_info *_cmount = cmount;
if (with_admin) {
_cmount = admin;
}
int fd = ceph_open(_cmount, filename, O_RDWR, 0);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_fallocate(_cmount, fd, flags, 1024, 40960));
ASSERT_EQ(ceph_statx(_cmount, filename, &stx, CEPH_STATX_MODE, 0), 0);
std::cout << "After ceph_fallocate, mode: 0" << oct << mode << " -> 0"
<< (stx.stx_mode & 07777) << dec << std::endl;
ASSERT_EQ(stx.stx_mode & (S_ISUID|S_ISGID), result);
ceph_close(_cmount, fd);
}
// RADOS cluster handle used for mon commands; presumably created and
// connected elsewhere in this file (e.g. in main()) — not visible here.
rados_t cluster;
// Run a mon command `s` against the cluster and extract the "key" field
// from the first object of the JSON array returned in the output buffer.
// Returns the rados_mon_command() result, or -CEPHFS_EINVAL when the
// command produced no output payload.
// Fix: the original returned early on an empty payload without freeing
// `outs`, leaking the status buffer librados allocated.
int do_mon_command(string s, string *key)
{
char *outs, *outbuf;
size_t outs_len, outbuf_len;
const char *ss = s.c_str();
int r = rados_mon_command(cluster, (const char **)&ss, 1,
0, 0,
&outbuf, &outbuf_len,
&outs, &outs_len);
if (outbuf_len) {
string out(outbuf, outbuf_len);
std::cout << "out: " << out << std::endl;
// parse out the key
json_spirit::mValue v, k;
json_spirit::read_or_throw(out, v);
k = v.get_array()[0].get_obj().find("key")->second;
*key = k.get_str();
std::cout << "key: " << *key << std::endl;
free(outbuf);
} else {
// No payload to parse; still fall through so `outs` gets freed.
r = -CEPHFS_EINVAL;
}
if (outs_len) {
string status(outs, outs_len);
std::cout << "outs: " << status << std::endl;
free(outs);
}
return r;
}
void run_write_test_case(int mode, int result, bool with_admin=false)
{
struct ceph_statx stx;
ASSERT_EQ(0, ceph_chmod(admin, filename, mode));
struct ceph_mount_info *_cmount = cmount;
if (with_admin) {
_cmount = admin;
}
int fd = ceph_open(_cmount, filename, O_RDWR, 0);
ASSERT_LE(0, fd);
ASSERT_EQ(ceph_write(_cmount, fd, "foo", 3, 0), 3);
ASSERT_EQ(ceph_statx(_cmount, filename, &stx, CEPH_STATX_MODE, 0), 0);
std::cout << "After ceph_write, mode: 0" << oct << mode << " -> 0"
<< (stx.stx_mode & 07777) << dec << std::endl;
ASSERT_EQ(stx.stx_mode & (S_ISUID|S_ISGID), result);
ceph_close(_cmount, fd);
}
void run_truncate_test_case(int mode, int result, size_t size, bool with_admin=false)
{
struct ceph_statx stx;
ASSERT_EQ(0, ceph_chmod(admin, filename, mode));
struct ceph_mount_info *_cmount = cmount;
if (with_admin) {
_cmount = admin;
}
int fd = ceph_open(_cmount, filename, O_RDWR, 0);
ASSERT_LE(0, fd);
ASSERT_GE(ceph_ftruncate(_cmount, fd, size), 0);
ASSERT_EQ(ceph_statx(_cmount, filename, &stx, CEPH_STATX_MODE, 0), 0);
std::cout << "After ceph_truncate size " << size << " mode: 0" << oct
<< mode << " -> 0" << (stx.stx_mode & 07777) << dec << std::endl;
ASSERT_EQ(stx.stx_mode & (S_ISUID|S_ISGID), result);
ceph_close(_cmount, fd);
}
// End-to-end check that fallocate/write/truncate by an unprivileged
// user clears setuid/setgid bits while the same operations by a
// privileged (admin) mount leave them intact. The setup sequence is
// order-dependent: admin mount -> test file -> provision a fresh
// client key via a mon command -> unprivileged mount with uid/gid 123/456.
TEST(SuidsgidTest, WriteClearSetuid) {
ASSERT_EQ(0, ceph_create(&admin, NULL));
ASSERT_EQ(0, ceph_conf_read_file(admin, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(admin, NULL));
ASSERT_EQ(0, ceph_mount(admin, "/"));
// per-pid name so concurrent test runs don't collide
sprintf(filename, "/clear_suidsgid_file_%d", getpid());
int fd = ceph_open(admin, filename, O_CREAT|O_RDWR, 0766);
ASSERT_GE(ceph_ftruncate(admin, fd, 10000000), 0);
ceph_close(admin, fd);
string user = "clear_suidsgid_" + stringify(rand());
// create access key
string key;
ASSERT_EQ(0, do_mon_command(
"{\"prefix\": \"auth get-or-create\", \"entity\": \"client." + user + "\", "
"\"caps\": [\"mon\", \"allow *\", \"osd\", \"allow *\", \"mgr\", \"allow *\", "
"\"mds\", \"allow *\"], \"format\": \"json\"}", &key));
ASSERT_EQ(0, ceph_create(&cmount, user.c_str()));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_conf_set(cmount, "key", key.c_str()));
ASSERT_EQ(ceph_init(cmount), 0);
// make cmount act as an unprivileged uid/gid 123/456 user
UserPerm *perms = ceph_userperm_new(123, 456, 0, NULL);
ASSERT_NE(nullptr, perms);
ASSERT_EQ(0, ceph_mount_perms_set(cmount, perms));
ceph_userperm_destroy(perms);
ASSERT_EQ(0, ceph_mount(cmount, "/"));
// 1, Commit to a non-exec file by an unprivileged user clears suid and sgid.
run_fallocate_test_case(06666, 0); // a+rws
// 2, Commit to a group-exec file by an unprivileged user clears suid and sgid.
run_fallocate_test_case(06676, 0); // g+x,a+rws
// 3, Commit to a user-exec file by an unprivileged user clears suid and sgid.
run_fallocate_test_case(06766, 0); // u+x,a+rws,g-x
// 4, Commit to a all-exec file by an unprivileged user clears suid and sgid.
run_fallocate_test_case(06777, 0); // a+rwxs
// 5, Commit to a non-exec file by root leaves suid and sgid.
run_fallocate_test_case(06666, S_ISUID|S_ISGID, true); // a+rws
// 6, Commit to a group-exec file by root leaves suid and sgid.
run_fallocate_test_case(06676, S_ISUID|S_ISGID, true); // g+x,a+rws
// 7, Commit to a user-exec file by root leaves suid and sgid.
run_fallocate_test_case(06766, S_ISUID|S_ISGID, true); // u+x,a+rws,g-x
// 8, Commit to a all-exec file by root leaves suid and sgid.
run_fallocate_test_case(06777, S_ISUID|S_ISGID, true); // a+rwxs
// 9, Commit to a group-exec file by an unprivileged user clears sgid
run_fallocate_test_case(02676, 0); // a+rw,g+rwxs
// 10, Commit to a all-exec file by an unprivileged user clears sgid.
run_fallocate_test_case(02777, 0); // a+rwx,g+rwxs
// 11, Write by privileged user leaves the suid and sgid
run_write_test_case(06766, S_ISUID | S_ISGID, true);
// 12, Write by unprivileged user clears the suid and sgid
run_write_test_case(06766, 0);
// 13, Truncate by privileged user leaves the suid and sgid
run_truncate_test_case(06766, S_ISUID | S_ISGID, 10000, true);
// 14, Truncate by unprivileged user clears the suid and sgid
run_truncate_test_case(06766, 0, 100);
// clean up
ceph_shutdown(cmount);
ceph_shutdown(admin);
}
/*
 * Verify the POSIX rule that an ownership change on a file clears the
 * setuid/setgid bits: dropped when the file is executable (or changed by
 * a non-owner), but a gid change on a non-executable file leaves them.
 *
 * Fixes: the third setattr used CEPH_STATX_MODE (a statx field mask) where
 * CEPH_SETATTR_MODE (a setattr mask) is meant — the two constants happen to
 * share the value 0x1, so the original passed only by coincidence.
 * Also releases rootcred, which was previously leaked.
 */
TEST(LibCephFS, ChownClearSetuid) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  Inode *root;
  ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
  char filename[32];
  sprintf(filename, "clearsetuid%x", getpid());
  Fh *fh;
  Inode *in;
  struct ceph_statx stx;
  const mode_t after_mode = S_IRWXU;
  const mode_t before_mode = S_IRWXU | S_ISUID | S_ISGID;
  const unsigned want = CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_MODE;
  UserPerm *usercred = ceph_mount_perms(cmount);
  // best-effort removal of leftovers from a previous run; failure is fine
  ceph_ll_unlink(cmount, root, filename, usercred);
  ASSERT_EQ(ceph_ll_create(cmount, root, filename, before_mode,
			   O_RDWR|O_CREAT|O_EXCL, &in, &fh, &stx, want, 0,
			   usercred), 0);
  ASSERT_EQ(stx.stx_mode & (mode_t)ALLPERMS, before_mode);
  // chown -- for this we need to be "root"
  UserPerm *rootcred = ceph_userperm_new(0, 0, 0, NULL);
  ASSERT_TRUE(rootcred);
  stx.stx_uid++;
  stx.stx_gid++;
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_UID|CEPH_SETATTR_GID, rootcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, usercred), 0);
  ASSERT_TRUE(stx.stx_mask & CEPH_STATX_MODE);
  // ownership change must have stripped setuid/setgid
  ASSERT_EQ(stx.stx_mode & (mode_t)ALLPERMS, after_mode);
  /* test chown with supplementary groups, and chown with/without exe bit */
  uid_t u = 65534;
  gid_t g = 65534;
  gid_t gids[] = {65533,65532};
  UserPerm *altcred = ceph_userperm_new(u, g, sizeof gids / sizeof gids[0], gids);
  stx.stx_uid = u;
  stx.stx_gid = g;
  mode_t m = S_ISGID|S_ISUID|S_IRUSR|S_IWUSR;
  stx.stx_mode = m;
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_MODE|CEPH_SETATTR_UID|CEPH_SETATTR_GID, rootcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, altcred), 0);
  ASSERT_EQ(stx.stx_mode&(mode_t)ALLPERMS, m);
  /* not dropped without exe bit */
  stx.stx_gid = gids[0];
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_GID, altcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, altcred), 0);
  ASSERT_EQ(stx.stx_mode&(mode_t)ALLPERMS, m);
  /* now check dropped with exe bit */
  m = S_ISGID|S_ISUID|S_IRWXU;
  stx.stx_mode = m;
  // BUGFIX: was CEPH_STATX_MODE; use the setattr mask constant
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_MODE, altcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, altcred), 0);
  ASSERT_EQ(stx.stx_mode&(mode_t)ALLPERMS, m);
  stx.stx_gid = gids[1];
  ASSERT_EQ(ceph_ll_setattr(cmount, in, &stx, CEPH_SETATTR_GID, altcred), 0);
  ASSERT_EQ(ceph_ll_getattr(cmount, in, &stx, CEPH_STATX_MODE, 0, altcred), 0);
  ASSERT_EQ(stx.stx_mode&(mode_t)ALLPERMS, m&(S_IRWXU|S_IRWXG|S_IRWXO));
  ceph_userperm_destroy(altcred);
  ceph_userperm_destroy(rootcred);  // was leaked before
  ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
  ceph_shutdown(cmount);
}
// Relax the mode of the filesystem root to 0777 using an administrative
// mount with client permission checks disabled, so unprivileged test
// identities can create entries under "/". Returns the first fatal error
// (create/mount/chmod), or the chmod result on success.
static int update_root_mode()
{
  struct ceph_mount_info *admin = nullptr;
  int rc = ceph_create(&admin, NULL);
  if (rc < 0)
    return rc;
  // conf-loading failures are deliberately non-fatal here
  ceph_conf_read_file(admin, NULL);
  ceph_conf_parse_env(admin, NULL);
  ceph_conf_set(admin, "client_permissions", "false");
  rc = ceph_mount(admin, "/");
  if (rc >= 0)
    rc = ceph_chmod(admin, "/", 0777);
  ceph_shutdown(admin);
  return rc;
}
// Test entry point: opens up the root directory mode so unprivileged test
// identities can create entries, then establishes the global RADOS cluster
// handle before running the gtest suite.
int main(int argc, char **argv)
{
  int r = update_root_mode();
  if (r < 0)
    exit(1);
  ::testing::InitGoogleTest(&argc, argv);
  srand(getpid());  // some tests (e.g. DirLs) size fixtures with rand()
  r = rados_create(&cluster, NULL);
  if (r < 0)
    exit(1);
  r = rados_conf_read_file(cluster, NULL);
  if (r < 0)
    exit(1);
  rados_conf_parse_env(cluster, NULL);
  r = rados_connect(cluster);
  if (r < 0)
    exit(1);
  r = RUN_ALL_TESTS();
  rados_shutdown(cluster);
  return r;
}
// NOTE: extraction artifact (dataset metadata row for
// ceph-main/src/test/libcephfs/test.cc) removed; the original file
// continues below with its license header.
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "mds/mdstypes.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <sys/time.h>
#ifndef _WIN32
#include <sys/resource.h>
#endif
#include "common/Clock.h"
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
#include <fmt/format.h>
#include <map>
#include <vector>
#include <thread>
#include <regex>
using namespace std;
// A path with an empty component ("a//b") must resolve like "a/b", both in
// the creating session and again from a freshly created client (no caches).
TEST(LibCephFS, OpenEmptyComponent) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  char c_dir[1024];
  sprintf(c_dir, "/open_test_%d", mypid);
  struct ceph_dir_result *dirp;
  ASSERT_EQ(0, ceph_mkdirs(cmount, c_dir, 0777));
  ASSERT_EQ(0, ceph_opendir(cmount, c_dir, &dirp));
  char c_path[1024];
  // note the deliberate double slash: the empty component is the test
  sprintf(c_path, "/open_test_%d//created_file_%d", mypid, mypid);
  int fd = ceph_open(cmount, c_path, O_RDONLY|O_CREAT, 0666);
  ASSERT_LT(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_closedir(cmount, dirp));
  ceph_shutdown(cmount);
  // remount with a brand-new client and re-open the same path
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  fd = ceph_open(cmount, c_path, O_RDONLY, 0666);
  ASSERT_LT(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // cleanup
  ASSERT_EQ(0, ceph_unlink(cmount, c_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, c_dir));
  ceph_shutdown(cmount);
}
// ftruncate through a read-only file descriptor must fail with EBADF.
TEST(LibCephFS, OpenReadTruncate) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  auto path = fmt::format("test_open_rdt_{}", getpid());
  int fd = ceph_open(cmount, path.c_str(), O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd);
  auto data = std::string("hello world");
  ASSERT_EQ(ceph_write(cmount, fd, data.c_str(), data.size(), 0), (int)data.size());
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // reopen read-only: truncation (to 0 or to 1) must be rejected
  fd = ceph_open(cmount, path.c_str(), O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(ceph_ftruncate(cmount, fd, 0), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_ftruncate(cmount, fd, 1), -CEPHFS_EBADF);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ceph_shutdown(cmount);
}
// I/O in the direction not permitted by the open flags must fail with
// EBADF: no reads on O_WRONLY fds, no writes on O_RDONLY fds; O_RDWR
// allows both.
TEST(LibCephFS, OpenReadWrite) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  char c_path[1024];
  sprintf(c_path, "test_open_rdwr_%d", getpid());
  int fd = ceph_open(cmount, c_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LT(0, fd);
  const char *out_buf = "hello world";
  size_t size = strlen(out_buf);
  char in_buf[100];
  ASSERT_EQ(ceph_write(cmount, fd, out_buf, size, 0), (int)size);
  ASSERT_EQ(ceph_read(cmount, fd, in_buf, sizeof(in_buf), 0), -CEPHFS_EBADF);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  fd = ceph_open(cmount, c_path, O_RDONLY, 0);
  ASSERT_LT(0, fd);
  ASSERT_EQ(ceph_write(cmount, fd, out_buf, size, 0), -CEPHFS_EBADF);
  ASSERT_EQ(ceph_read(cmount, fd, in_buf, sizeof(in_buf), 0), (int)size);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  fd = ceph_open(cmount, c_path, O_RDWR, 0);
  ASSERT_LT(0, fd);
  ASSERT_EQ(ceph_write(cmount, fd, out_buf, size, 0), (int)size);
  ASSERT_EQ(ceph_read(cmount, fd, in_buf, sizeof(in_buf), 0), (int)size);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ceph_shutdown(cmount);
}
// Mounting a root path that does not exist in the filesystem must fail.
TEST(LibCephFS, MountNonExist) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(ceph_create(&mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
  // the mount itself is the operation under test: it must not succeed
  ASSERT_NE(ceph_mount(mnt, "/non-exist"), 0);
  ceph_shutdown(mnt);
}
// A second mount on an already-mounted handle must fail with EISCONN.
TEST(LibCephFS, MountDouble) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(ceph_create(&mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
  ASSERT_EQ(ceph_mount(mnt, "/"), 0);
  ASSERT_EQ(ceph_mount(mnt, "/"), -CEPHFS_EISCONN);
  ceph_shutdown(mnt);
}
// Unmount followed by a re-mount must succeed on the same handle, and the
// CephContext must be preserved across the cycle.
TEST(LibCephFS, MountRemount) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(ceph_create(&mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
  CephContext *ctx = ceph_get_mount_context(mnt);
  ASSERT_EQ(ceph_mount(mnt, "/"), 0);
  ASSERT_EQ(ceph_unmount(mnt), 0);
  ASSERT_EQ(ceph_mount(mnt, "/"), 0);
  // context object must survive unmount/mount
  ASSERT_EQ(ceph_get_mount_context(mnt), ctx);
  ceph_shutdown(mnt);
}
// Unmounting a handle that was never mounted must fail with ENOTCONN.
TEST(LibCephFS, UnmountUnmounted) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(ceph_create(&mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
  ASSERT_EQ(ceph_unmount(mnt), -CEPHFS_ENOTCONN);
  ceph_shutdown(mnt);
}
// Releasing a never-mounted handle is legal and must succeed.
TEST(LibCephFS, ReleaseUnmounted) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(ceph_create(&mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
  ASSERT_EQ(ceph_release(mnt), 0);
}
// Releasing a handle while it is still mounted must fail with EISCONN;
// once unmounted, release must succeed.
TEST(LibCephFS, ReleaseMounted) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(ceph_create(&mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
  ASSERT_EQ(ceph_mount(mnt, "/"), 0);
  ASSERT_EQ(ceph_release(mnt), -CEPHFS_EISCONN);
  ASSERT_EQ(ceph_unmount(mnt), 0);
  ASSERT_EQ(ceph_release(mnt), 0);
}
// The normal teardown sequence — mount, unmount, release — must succeed.
TEST(LibCephFS, UnmountRelease) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(ceph_create(&mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
  ASSERT_EQ(ceph_mount(mnt, "/"), 0);
  ASSERT_EQ(ceph_unmount(mnt), 0);
  ASSERT_EQ(ceph_release(mnt), 0);
}
// Basic mount of the default root (NULL path); run through two complete
// create/mount/shutdown cycles to catch state leaking between clients.
TEST(LibCephFS, Mount) {
  for (int cycle = 0; cycle < 2; ++cycle) {
    struct ceph_mount_info *mnt = nullptr;
    ASSERT_EQ(ceph_create(&mnt, NULL), 0);
    ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
    ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
    ASSERT_EQ(ceph_mount(mnt, NULL), 0);
    ceph_shutdown(mnt);
  }
}
// Exercises ceph_open_layout: valid layouts, rejection of layout changes on
// non-empty files, invalid stripe parameters, and data-pool validation.
TEST(LibCephFS, OpenLayout) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  /* valid layout */
  char test_layout_file[256];
  sprintf(test_layout_file, "test_layout_%d_b", getpid());
  int fd = ceph_open_layout(cmount, test_layout_file, O_CREAT|O_WRONLY, 0666, (1<<20), 7, (1<<20), NULL);
  ASSERT_GT(fd, 0);
  char poolname[80];
  ASSERT_LT(0, ceph_get_file_pool_name(cmount, fd, poolname, sizeof(poolname)));
  // buflen of 0 asks for the required buffer length instead of the name
  ASSERT_LT(0, ceph_get_file_pool_name(cmount, fd, poolname, 0));
  /* on already-written file (CEPHFS_ENOTEMPTY) */
  ceph_write(cmount, fd, "hello world", 11, 0);
  ceph_close(cmount, fd);
  char xattrk[128];
  char xattrv[128];
  sprintf(xattrk, "ceph.file.layout.stripe_unit");
  sprintf(xattrv, "65536");
  // layout is immutable once data exists
  ASSERT_EQ(-CEPHFS_ENOTEMPTY, ceph_setxattr(cmount, test_layout_file, xattrk, (void *)xattrv, 5, 0));
  /* invalid layout */
  sprintf(test_layout_file, "test_layout_%d_c", getpid());
  fd = ceph_open_layout(cmount, test_layout_file, O_CREAT, 0666, (1<<20), 1, 19, NULL);
  ASSERT_EQ(fd, -CEPHFS_EINVAL);
  /* with data pool */
  sprintf(test_layout_file, "test_layout_%d_d", getpid());
  fd = ceph_open_layout(cmount, test_layout_file, O_CREAT, 0666, (1<<20), 7, (1<<20), poolname);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);
  /* with metadata pool (invalid) */
  sprintf(test_layout_file, "test_layout_%d_e", getpid());
  fd = ceph_open_layout(cmount, test_layout_file, O_CREAT, 0666, (1<<20), 7, (1<<20), "metadata");
  ASSERT_EQ(fd, -CEPHFS_EINVAL);
  /* with metadata pool (does not exist) */
  sprintf(test_layout_file, "test_layout_%d_f", getpid());
  fd = ceph_open_layout(cmount, test_layout_file, O_CREAT, 0666, (1<<20), 7, (1<<20), "asdfjasdfjasdf");
  ASSERT_EQ(fd, -CEPHFS_EINVAL);
  ceph_shutdown(cmount);
}
// Exercises the whole directory-listing surface against one randomly sized
// directory: readdir, telldir/seekdir, rewinddir, getdents, readdir_r and
// readdirplus_r, all of which must agree on the same entry set.
TEST(LibCephFS, DirLs) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  struct ceph_dir_result *ls_dir = NULL;
  char foostr[256];
  sprintf(foostr, "dir_ls%d", mypid);
  // opendir on a nonexistent dir must fail before we create it
  ASSERT_EQ(ceph_opendir(cmount, foostr, &ls_dir), -CEPHFS_ENOENT);
  ASSERT_EQ(ceph_mkdir(cmount, foostr, 0777), 0);
  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, foostr, &stx, 0, 0), 0);
  ASSERT_NE(S_ISDIR(stx.stx_mode), 0);
  char barstr[256];
  sprintf(barstr, "dir_ls2%d", mypid);
  ASSERT_EQ(ceph_statx(cmount, barstr, &stx, 0, AT_SYMLINK_NOFOLLOW), -CEPHFS_ENOENT);
  // insert files into directory and test open
  char bazstr[256];
  // random entry count, overridable via LIBCEPHFS_RAND for reproducibility
  int i = 0, r = rand() % 4096;
  if (getenv("LIBCEPHFS_RAND")) {
    r = atoi(getenv("LIBCEPHFS_RAND"));
  }
  printf("rand: %d\n", r);
  for(; i < r; ++i) {
    sprintf(bazstr, "dir_ls%d/dirf%d", mypid, i);
    int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
    ASSERT_GT(fd, 0);
    ASSERT_EQ(ceph_close(cmount, fd), 0);
    // set file sizes for readdirplus
    ceph_truncate(cmount, bazstr, i);
  }
  ASSERT_EQ(ceph_opendir(cmount, foostr, &ls_dir), 0);
  // not guaranteed to get . and .. first, but its a safe assumption in this case
  struct dirent *result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  // record each entry together with the telldir offset it was read at
  std::vector<std::string> entries;
  std::map<std::string, int64_t> offset_map;
  int64_t offset = ceph_telldir(cmount, ls_dir);
  for(i = 0; i < r; ++i) {
    result = ceph_readdir(cmount, ls_dir);
    ASSERT_TRUE(result != NULL);
    entries.push_back(result->d_name);
    offset_map[result->d_name] = offset;
    offset = ceph_telldir(cmount, ls_dir);
  }
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) == NULL);
  offset = ceph_telldir(cmount, ls_dir);
  ASSERT_EQ(offset_map.size(), entries.size());
  for(i = 0; i < r; ++i) {
    sprintf(bazstr, "dirf%d", i);
    ASSERT_TRUE(offset_map.count(bazstr) == 1);
  }
  // test seekdir: each recorded offset must replay exactly its entry
  ceph_seekdir(cmount, ls_dir, offset);
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) == NULL);
  for (auto p = offset_map.begin(); p != offset_map.end(); ++p) {
    ceph_seekdir(cmount, ls_dir, p->second);
    result = ceph_readdir(cmount, ls_dir);
    ASSERT_TRUE(result != NULL);
    std::string d_name(result->d_name);
    ASSERT_EQ(p->first, d_name);
  }
  // test rewinddir
  ceph_rewinddir(cmount, ls_dir);
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  ceph_rewinddir(cmount, ls_dir);
  int t = ceph_telldir(cmount, ls_dir);
  ASSERT_GT(t, -1);
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) != NULL);
  // test seekdir - move back to the beginning
  ceph_seekdir(cmount, ls_dir, t);
  // test getdents
  struct dirent *getdents_entries;
  size_t getdents_entries_len = (r + 2) * sizeof(*getdents_entries);
  getdents_entries = (struct dirent *)malloc(getdents_entries_len);
  int count = 0;
  std::vector<std::string> found;
  while (true) {
    int len = ceph_getdents(cmount, ls_dir, (char *)getdents_entries, getdents_entries_len);
    if (len == 0)
      break;
    ASSERT_GT(len, 0);
    ASSERT_TRUE((len % sizeof(*getdents_entries)) == 0);
    int n = len / sizeof(*getdents_entries);
    int j;
    if (count == 0) {
      // first batch starts with the synthetic . and .. entries
      ASSERT_STREQ(getdents_entries[0].d_name, ".");
      ASSERT_STREQ(getdents_entries[1].d_name, "..");
      j = 2;
    } else {
      j = 0;
    }
    count += n;
    // NOTE(review): the ++i here looks like a stray leftover; it is harmless
    // because i is re-initialized before its next use below.
    for(; j < n; ++i, ++j) {
      const char *name = getdents_entries[j].d_name;
      found.push_back(name);
    }
  }
  ASSERT_EQ(found, entries);
  free(getdents_entries);
  // test readdir_r
  ceph_rewinddir(cmount, ls_dir);
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  found.clear();
  while (true) {
    struct dirent rdent;
    int len = ceph_readdir_r(cmount, ls_dir, &rdent);
    if (len == 0)
      break;
    ASSERT_EQ(len, 1);
    found.push_back(rdent.d_name);
  }
  ASSERT_EQ(found, entries);
  // test readdirplus: entry name encodes the size set via truncate above
  ceph_rewinddir(cmount, ls_dir);
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  found.clear();
  while (true) {
    struct dirent rdent;
    struct ceph_statx stx;
    int len = ceph_readdirplus_r(cmount, ls_dir, &rdent, &stx,
				 CEPH_STATX_SIZE, AT_STATX_DONT_SYNC, NULL);
    if (len == 0)
      break;
    ASSERT_EQ(len, 1);
    const char *name = rdent.d_name;
    found.push_back(name);
    int size;
    sscanf(name, "dirf%d", &size);
    ASSERT_TRUE(stx.stx_mask & CEPH_STATX_SIZE);
    ASSERT_EQ(stx.stx_size, (size_t)size);
    // On Windows, dirent uses long (4B) inodes, which get trimmed
    // and can't be used.
    // TODO: consider defining ceph_dirent.
#ifndef _WIN32
    ASSERT_EQ(stx.stx_ino, rdent.d_ino);
#endif
    //ASSERT_EQ(st.st_mode, (mode_t)0666);
  }
  ASSERT_EQ(found, entries);
  ASSERT_EQ(ceph_closedir(cmount, ls_dir), 0);
  // cleanup
  for(i = 0; i < r; ++i) {
    sprintf(bazstr, "dir_ls%d/dirf%d", mypid, i);
    ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  }
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  ceph_shutdown(cmount);
}
// Creates a 42-deep chain of "a" directories, walks it with chdir/opendir,
// verifies getcwd at depth 39, then tears the chain down from the bottom.
TEST(LibCephFS, ManyNestedDirs) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  const char *many_path = "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a";
  ASSERT_EQ(ceph_mkdirs(cmount, many_path, 0755), 0);
  int i = 0;
  for(; i < 39; ++i) {
    ASSERT_EQ(ceph_chdir(cmount, "a"), 0);
    struct ceph_dir_result *dirp;
    ASSERT_EQ(ceph_opendir(cmount, "a", &dirp), 0);
    struct dirent *dent = ceph_readdir(cmount, dirp);
    ASSERT_TRUE(dent != NULL);
    ASSERT_STREQ(dent->d_name, ".");
    dent = ceph_readdir(cmount, dirp);
    ASSERT_TRUE(dent != NULL);
    ASSERT_STREQ(dent->d_name, "..");
    // the only real entry at every level is the next "a"
    dent = ceph_readdir(cmount, dirp);
    ASSERT_TRUE(dent != NULL);
    ASSERT_STREQ(dent->d_name, "a");
    ASSERT_EQ(ceph_closedir(cmount, dirp), 0);
  }
  ASSERT_STREQ(ceph_getcwd(cmount), "/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a");
  ASSERT_EQ(ceph_chdir(cmount, "a/a/a"), 0);
  // remove the top 39 levels one at a time while backing out
  for(i = 0; i < 39; ++i) {
    ASSERT_EQ(ceph_chdir(cmount, ".."), 0);
    ASSERT_EQ(ceph_rmdir(cmount, "a"), 0);
  }
  ASSERT_EQ(ceph_chdir(cmount, "/"), 0);
  ASSERT_EQ(ceph_rmdir(cmount, "a/a/a"), 0);
  ceph_shutdown(cmount);
}
// Sets 26 user.* xattrs, then verifies listxattr length probing, ERANGE on
// a short buffer, full listing with values, and removal.
TEST(LibCephFS, Xattrs) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_xattr_file[256];
  sprintf(test_xattr_file, "test_xattr_%d", getpid());
  int fd = ceph_open(cmount, test_xattr_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  // test removing non-existent xattr
  ASSERT_EQ(-CEPHFS_ENODATA, ceph_removexattr(cmount, test_xattr_file, "user.nosuchxattr"));
  // one xattr per letter: user.test_xattr_a .. user.test_xattr_z
  char i = 'a';
  char xattrk[128];
  char xattrv[128];
  for(; i < 'a'+26; ++i) {
    sprintf(xattrk, "user.test_xattr_%c", i);
    int len = sprintf(xattrv, "testxattr%c", i);
    ASSERT_EQ(ceph_setxattr(cmount, test_xattr_file, xattrk, (void *) xattrv, len, XATTR_CREATE), 0);
  }
  // zero size should return required buffer length
  int len_needed = ceph_listxattr(cmount, test_xattr_file, NULL, 0);
  ASSERT_GT(len_needed, 0);
  // buffer size smaller than needed should fail
  char xattrlist[128*26];
  ASSERT_GT(sizeof(xattrlist), (size_t)len_needed);
  int len = ceph_listxattr(cmount, test_xattr_file, xattrlist, len_needed - 1);
  ASSERT_EQ(-CEPHFS_ERANGE, len);
  len = ceph_listxattr(cmount, test_xattr_file, xattrlist, sizeof(xattrlist));
  ASSERT_EQ(len, len_needed);
  // walk the NUL-separated name list, checking each name and its value
  char *p = xattrlist;
  char *n;
  i = 'a';
  while (len > 0) {
    // ceph.* xattrs should not be listed
    ASSERT_NE(strncmp(p, "ceph.", 5), 0);
    sprintf(xattrk, "user.test_xattr_%c", i);
    ASSERT_STREQ(p, xattrk);
    char gxattrv[128];
    std::cout << "getting attr " << p << std::endl;
    int alen = ceph_getxattr(cmount, test_xattr_file, p, (void *) gxattrv, 128);
    ASSERT_GT(alen, 0);
    sprintf(xattrv, "testxattr%c", i);
    ASSERT_TRUE(!strncmp(xattrv, gxattrv, alen));
    n = strchr(p, '\0');
    n++;
    len -= (n - p);
    p = n;
    ++i;
  }
  i = 'a';
  for(i = 'a'; i < 'a'+26; ++i) {
    sprintf(xattrk, "user.test_xattr_%c", i);
    ASSERT_EQ(ceph_removexattr(cmount, test_xattr_file, xattrk), 0);
  }
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// Round-trips one xattr through the low-level (inode-handle) API:
// ceph_ll_setxattr followed by ceph_ll_getxattr.
TEST(LibCephFS, Xattrs_ll) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_xattr_file[256];
  sprintf(test_xattr_file, "test_xattr_%d", getpid());
  int fd = ceph_open(cmount, test_xattr_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);
  // resolve the file to an Inode handle for the ll_* calls
  Inode *root = NULL;
  Inode *existent_file_handle = NULL;
  int res = ceph_ll_lookup_root(cmount, &root);
  ASSERT_EQ(res, 0);
  UserPerm *perms = ceph_mount_perms(cmount);
  struct ceph_statx stx;
  res = ceph_ll_lookup(cmount, root, test_xattr_file, &existent_file_handle,
		       &stx, 0, 0, perms);
  ASSERT_EQ(res, 0);
  const char *valid_name = "user.attrname";
  const char *value = "attrvalue";
  char value_buf[256] = { 0 };
  res = ceph_ll_setxattr(cmount, existent_file_handle, valid_name, value, strlen(value), 0, perms);
  ASSERT_EQ(res, 0);
  res = ceph_ll_getxattr(cmount, existent_file_handle, valid_name, value_buf, 256, perms);
  ASSERT_EQ(res, (int)strlen(value));
  value_buf[res] = '\0';
  ASSERT_STREQ(value_buf, value);
  ceph_shutdown(cmount);
}
// statx on "/." and on "." must both succeed (path normalization of the
// trailing-dot component).
TEST(LibCephFS, LstatSlashdot) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(ceph_create(&mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(mnt, NULL), 0);
  ASSERT_EQ(ceph_conf_parse_env(mnt, NULL), 0);
  ASSERT_EQ(ceph_mount(mnt, NULL), 0);
  struct ceph_statx stx;
  for (const char *path : {"/.", "."}) {
    ASSERT_EQ(ceph_statx(mnt, path, &stx, 0, AT_SYMLINK_NOFOLLOW), 0);
  }
  ceph_shutdown(mnt);
}
// Verifies directory link counts: an empty dir has nlink 2 (self + "."),
// each immediate subdirectory adds one, grandchildren do not, and removal
// decrements accordingly; an unlinked dir reports nlink 0 via its open fd.
TEST(LibCephFS, StatDirNlink) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_dir1[256];
  sprintf(test_dir1, "dir1_symlinks_%d", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0700), 0);
  int fd = ceph_open(cmount, test_dir1, O_DIRECTORY|O_RDONLY, 0);
  ASSERT_GT(fd, 0);
  struct ceph_statx stx;
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_EQ(stx.stx_nlink, 2u);
  {
    char test_dir2[296];
    sprintf(test_dir2, "%s/.", test_dir1);
    ASSERT_EQ(ceph_statx(cmount, test_dir2, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 2u);
  }
  {
    char test_dir2[296];
    sprintf(test_dir2, "%s/1", test_dir1);
    ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0700), 0);
    ASSERT_EQ(ceph_statx(cmount, test_dir2, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 2u);
    // parent gains one link per immediate subdir
    ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 3u);
    sprintf(test_dir2, "%s/2", test_dir1);
    ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0700), 0);
    ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 4u);
    // a grandchild must not change the grandparent's nlink
    sprintf(test_dir2, "%s/1/1", test_dir1);
    ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0700), 0);
    ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 4u);
    ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
    ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 4u);
    sprintf(test_dir2, "%s/1", test_dir1);
    ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
    ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 3u);
    sprintf(test_dir2, "%s/2", test_dir1);
    ASSERT_EQ(ceph_rmdir(cmount, test_dir2), 0);
    ASSERT_EQ(ceph_statx(cmount, test_dir1, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
    ASSERT_EQ(stx.stx_nlink, 2u);
  }
  ASSERT_EQ(ceph_rmdir(cmount, test_dir1), 0);
  // removed-but-open directory reports zero links
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_NLINK, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_EQ(stx.stx_nlink, 0u);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// chmod to 0400 must block O_RDWR opens and writes through read-only fds;
// chmod back to 0600 must restore write access, with statx confirming the
// mode in between.
TEST(LibCephFS, DoubleChmod) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_file[256];
  sprintf(test_file, "test_perms_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  // write some stuff
  const char *bytes = "foobarbaz";
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ceph_close(cmount, fd);
  // set perms to read but can't write
  ASSERT_EQ(ceph_chmod(cmount, test_file, 0400), 0);
  fd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_EQ(fd, -CEPHFS_EACCES);
  fd = ceph_open(cmount, test_file, O_RDONLY, 0);
  ASSERT_GT(fd, -1);
  char buf[100];
  int ret = ceph_read(cmount, fd, buf, 100, 0);
  ASSERT_EQ(ret, (int)strlen(bytes));
  buf[ret] = '\0';
  ASSERT_STREQ(buf, bytes);
  // write through an O_RDONLY fd is rejected regardless of file mode
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), -CEPHFS_EBADF);
  ceph_close(cmount, fd);
  // reset back to writeable
  ASSERT_EQ(ceph_chmod(cmount, test_file, 0600), 0);
  // ensure perms are correct
  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx, CEPH_STATX_MODE, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_EQ(stx.stx_mode, 0100600U);
  fd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_GT(fd, 0);
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// fchmod to 0400 on an open O_RDWR fd: the existing fd keeps its write
// capability, but a new O_RDWR open is denied until the mode is restored.
TEST(LibCephFS, Fchmod) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_file[256];
  sprintf(test_file, "test_perms_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  // write some stuff
  const char *bytes = "foobarbaz";
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  // set perms to read but can't write
  ASSERT_EQ(ceph_fchmod(cmount, fd, 0400), 0);
  char buf[100];
  int ret = ceph_read(cmount, fd, buf, 100, 0);
  ASSERT_EQ(ret, (int)strlen(bytes));
  buf[ret] = '\0';
  ASSERT_STREQ(buf, bytes);
  // the already-open O_RDWR fd can still write despite the new mode
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ceph_close(cmount, fd);
  ASSERT_EQ(ceph_open(cmount, test_file, O_RDWR, 0), -CEPHFS_EACCES);
  // reset back to writeable
  ASSERT_EQ(ceph_chmod(cmount, test_file, 0600), 0);
  fd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_GT(fd, 0);
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// ceph_lchmod must change the mode of the symlink itself, not its target:
// the link's lstat mode goes from 0777 to 0400.
TEST(LibCephFS, Lchmod) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_file[256];
  sprintf(test_file, "test_perms_lchmod_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  // write some stuff
  const char *bytes = "foobarbaz";
  ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
  ceph_close(cmount, fd);
  // Create symlink
  char test_symlink[256];
  sprintf(test_symlink, "test_lchmod_sym_%d", getpid());
  ASSERT_EQ(ceph_symlink(cmount, test_file, test_symlink), 0);
  // get symlink stat - lstat
  struct ceph_statx stx_orig1;
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_orig1, CEPH_STATX_ALL_STATS, AT_SYMLINK_NOFOLLOW), 0);
  // Change mode on symlink file
  ASSERT_EQ(ceph_lchmod(cmount, test_symlink, 0400), 0);
  struct ceph_statx stx_orig2;
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_orig2, CEPH_STATX_ALL_STATS, AT_SYMLINK_NOFOLLOW), 0);
  // Compare modes
  ASSERT_NE(stx_orig1.stx_mode, stx_orig2.stx_mode);
  static const int permbits = S_IRWXU|S_IRWXG|S_IRWXO;
  ASSERT_EQ(permbits&stx_orig1.stx_mode, 0777);
  ASSERT_EQ(permbits&stx_orig2.stx_mode, 0400);
  ceph_shutdown(cmount);
}
// fchown to "nobody" (65534) on a 0600 file: a subsequent O_RDWR open as
// the original user must be denied. Permission checks are briefly disabled
// so the chown itself is allowed.
TEST(LibCephFS, Fchown) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_file[256];
  sprintf(test_file, "test_fchown_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  // set perms to readable and writeable only by owner
  ASSERT_EQ(ceph_fchmod(cmount, fd, 0600), 0);
  // change ownership to nobody -- we assume nobody exists and id is always 65534
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
  ASSERT_EQ(ceph_fchown(cmount, fd, 65534, 65534), 0);
  ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
  ceph_close(cmount, fd);
  // "nobody" will be ignored on Windows
#ifndef _WIN32
  fd = ceph_open(cmount, test_file, O_RDWR, 0);
  ASSERT_EQ(fd, -CEPHFS_EACCES);
#endif
  ceph_shutdown(cmount);
}
#if defined(__linux__) && defined(O_PATH)
// O_PATH semantics: the flag cannot create a file, and an O_PATH fd
// rejects read/write/fchmod/fchown/fsync with EBADF while still allowing
// fstatx and close.
TEST(LibCephFS, FlagO_PATH) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));
  char test_file[PATH_MAX];
  sprintf(test_file, "test_oflag_%d", getpid());
  // O_PATH|O_CREAT on a missing file must not create it
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR|O_PATH, 0666);
  ASSERT_EQ(-CEPHFS_ENOENT, fd);
  fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // ok, the file has been created. perform real checks now
  fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR|O_PATH, 0666);
  ASSERT_GT(fd, 0);
  char buf[128];
  ASSERT_EQ(-CEPHFS_EBADF, ceph_read(cmount, fd, buf, sizeof(buf), 0));
  ASSERT_EQ(-CEPHFS_EBADF, ceph_write(cmount, fd, buf, sizeof(buf), 0));
  // set perms to readable and writeable only by owner
  ASSERT_EQ(-CEPHFS_EBADF, ceph_fchmod(cmount, fd, 0600));
  // change ownership to nobody -- we assume nobody exists and id is always 65534
  ASSERT_EQ(-CEPHFS_EBADF, ceph_fchown(cmount, fd, 65534, 65534));
  // try to sync
  ASSERT_EQ(-CEPHFS_EBADF, ceph_fsync(cmount, fd, false));
  struct ceph_statx stx;
  ASSERT_EQ(0, ceph_fstatx(cmount, fd, &stx, 0, 0));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ceph_shutdown(cmount);
}
#endif /* __linux */
// Symlink behavior: O_NOFOLLOW open fails with ELOOP, statx through a link
// (relative and absolute target) matches the target's statx exactly, and
// AT_SYMLINK_NOFOLLOW stats the link itself.
TEST(LibCephFS, Symlinks) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_file[256];
  sprintf(test_file, "test_symlinks_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);
  char test_symlink[256];
  sprintf(test_symlink, "test_symlinks_sym_%d", getpid());
  ASSERT_EQ(ceph_symlink(cmount, test_file, test_symlink), 0);
  // test the O_NOFOLLOW case
  fd = ceph_open(cmount, test_symlink, O_NOFOLLOW, 0);
  ASSERT_EQ(fd, -CEPHFS_ELOOP);
  // stat the original file
  struct ceph_statx stx_orig;
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx_orig, CEPH_STATX_ALL_STATS, 0), 0);
  // stat the symlink
  struct ceph_statx stx_symlink_orig;
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_symlink_orig, CEPH_STATX_ALL_STATS, 0), 0);
  // ensure the statx bufs are equal
  ASSERT_EQ(memcmp(&stx_orig, &stx_symlink_orig, sizeof(stx_orig)), 0);
  // repeat with an absolute-path target
  sprintf(test_file, "/test_symlinks_abs_%d", getpid());
  fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);
  sprintf(test_symlink, "/test_symlinks_abs_sym_%d", getpid());
  ASSERT_EQ(ceph_symlink(cmount, test_file, test_symlink), 0);
  // stat the original file
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx_orig, CEPH_STATX_ALL_STATS, 0), 0);
  // stat the symlink
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_symlink_orig, CEPH_STATX_ALL_STATS, 0), 0);
  // ensure the statx bufs are equal
  ASSERT_TRUE(!memcmp(&stx_orig, &stx_symlink_orig, sizeof(stx_orig)));
  // test lstat
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx_orig, CEPH_STATX_ALL_STATS, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_TRUE(S_ISLNK(stx_orig.stx_mode));
  ceph_shutdown(cmount);
}
// Verify path traversal through a symlink that points at a directory: a file
// created via the symlinked path must come back as a regular file on lstat.
TEST(LibCephFS, DirSyms) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char test_dir1[256];
sprintf(test_dir1, "dir1_symlinks_%d", getpid());
ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0700), 0);
char test_symdir[256];
sprintf(test_symdir, "symdir_symlinks_%d", getpid());
ASSERT_EQ(ceph_symlink(cmount, test_dir1, test_symdir), 0);
char test_file[256];
sprintf(test_file, "/symdir_symlinks_%d/test_symdir_file", getpid());
// create the file by traversing through the directory symlink
int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0600);
ASSERT_GT(fd, 0);
ceph_close(cmount, fd);
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, test_file, &stx, 0, AT_SYMLINK_NOFOLLOW), 0);
// ensure that its a file not a directory we get back
ASSERT_TRUE(S_ISREG(stx.stx_mode));
ceph_shutdown(cmount);
}
// Verify that symlink loops are detected during path resolution: both a
// self-referential symlink and a three-link cycle must fail with ELOOP.
TEST(LibCephFS, LoopSyms) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char test_dir1[256];
sprintf(test_dir1, "dir1_loopsym_%d", getpid());
ASSERT_EQ(ceph_mkdir(cmount, test_dir1, 0700), 0);
char test_dir2[256];
sprintf(test_dir2, "/dir1_loopsym_%d/loop_dir", getpid());
ASSERT_EQ(ceph_mkdir(cmount, test_dir2, 0700), 0);
// symlink it itself: /path/to/mysym -> /path/to/mysym
char test_symdir[256];
sprintf(test_symdir, "/dir1_loopsym_%d/loop_dir/symdir", getpid());
ASSERT_EQ(ceph_symlink(cmount, test_symdir, test_symdir), 0);
char test_file[256];
sprintf(test_file, "/dir1_loopsym_%d/loop_dir/symdir/test_loopsym_file", getpid());
// opening a path that traverses the self-loop must fail with ELOOP
int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0600);
ASSERT_EQ(fd, -CEPHFS_ELOOP);
// loop: /a -> /b, /b -> /c, /c -> /a
char a[264], b[264], c[264];
sprintf(a, "/%s/a", test_dir1);
sprintf(b, "/%s/b", test_dir1);
sprintf(c, "/%s/c", test_dir1);
ASSERT_EQ(ceph_symlink(cmount, a, b), 0);
ASSERT_EQ(ceph_symlink(cmount, b, c), 0);
ASSERT_EQ(ceph_symlink(cmount, c, a), 0);
ASSERT_EQ(ceph_open(cmount, a, O_RDWR, 0), -CEPHFS_ELOOP);
ceph_shutdown(cmount);
}
// Verify that a hard link remains usable after the original name is unlinked,
// even across a full shutdown/remount cycle of the client.
TEST(LibCephFS, HardlinkNoOriginal) {
int mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char dir[256];
sprintf(dir, "/test_rmdirfail%d", mypid);
ASSERT_EQ(ceph_mkdir(cmount, dir, 0777), 0);
ASSERT_EQ(ceph_chdir(cmount, dir), 0);
int fd = ceph_open(cmount, "f1", O_CREAT, 0644);
ASSERT_GT(fd, 0);
ceph_close(cmount, fd);
// create hard link
ASSERT_EQ(ceph_link(cmount, "f1", "hardl1"), 0);
// remove file link points to
ASSERT_EQ(ceph_unlink(cmount, "f1"), 0);
ceph_shutdown(cmount);
// now cleanup: remount and remove the surviving link and the directory
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
ASSERT_EQ(ceph_chdir(cmount, dir), 0);
ASSERT_EQ(ceph_unlink(cmount, "hardl1"), 0);
ASSERT_EQ(ceph_rmdir(cmount, dir), 0);
ceph_shutdown(cmount);
}
// Exercise read/write argument handling: a full-buffer write succeeds and a
// zero-length read is valid, returning 0 rather than an error.
TEST(LibCephFS, BadArgument) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
int fd = ceph_open(cmount, "test_file", O_CREAT|O_RDWR, 0666);
ASSERT_GT(fd, 0);
// zero-initialize so we don't write uninitialized stack bytes to the file
// (keeps valgrind/MSan quiet; the test only checks the byte counts)
char buf[100] = { 0 };
ASSERT_EQ(ceph_write(cmount, fd, buf, sizeof(buf), 0), (int)sizeof(buf));
// a read of length 0 must return 0 bytes, not an error
ASSERT_EQ(ceph_read(cmount, fd, buf, 0, 5), 0);
ceph_close(cmount, fd);
ASSERT_EQ(ceph_unlink(cmount, "test_file"), 0);
ceph_shutdown(cmount);
}
// Verify that every fd-taking API rejects an invalid file descriptor (-1)
// with -EBADF instead of crashing or succeeding.
TEST(LibCephFS, BadFileDesc) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
ASSERT_EQ(ceph_fchmod(cmount, -1, 0655), -CEPHFS_EBADF);
ASSERT_EQ(ceph_close(cmount, -1), -CEPHFS_EBADF);
ASSERT_EQ(ceph_lseek(cmount, -1, 0, SEEK_SET), -CEPHFS_EBADF);
// a length of 0 is passed below, so a 1-byte buffer suffices; `char buf[0]`
// was a zero-length array, which is a GNU extension and ill-formed ISO C++
char buf[1];
ASSERT_EQ(ceph_read(cmount, -1, buf, 0, 0), -CEPHFS_EBADF);
ASSERT_EQ(ceph_write(cmount, -1, buf, 0, 0), -CEPHFS_EBADF);
ASSERT_EQ(ceph_ftruncate(cmount, -1, 0), -CEPHFS_EBADF);
ASSERT_EQ(ceph_fsync(cmount, -1, 0), -CEPHFS_EBADF);
struct ceph_statx stx;
ASSERT_EQ(ceph_fstatx(cmount, -1, &stx, 0, 0), -CEPHFS_EBADF);
struct sockaddr_storage addr;
ASSERT_EQ(ceph_get_file_stripe_address(cmount, -1, 0, &addr, 1), -CEPHFS_EBADF);
ASSERT_EQ(ceph_get_file_stripe_unit(cmount, -1), -CEPHFS_EBADF);
ASSERT_EQ(ceph_get_file_pool(cmount, -1), -CEPHFS_EBADF);
char poolname[80];
ASSERT_EQ(ceph_get_file_pool_name(cmount, -1, poolname, sizeof(poolname)), -CEPHFS_EBADF);
ASSERT_EQ(ceph_get_file_replication(cmount, -1), -CEPHFS_EBADF);
ASSERT_EQ(ceph_get_file_object_size(cmount, -1), -CEPHFS_EBADF);
int stripe_unit, stripe_count, object_size, pg_pool;
ASSERT_EQ(ceph_get_file_layout(cmount, -1, &stripe_unit, &stripe_count, &object_size, &pg_pool), -CEPHFS_EBADF);
ASSERT_EQ(ceph_get_file_stripe_count(cmount, -1), -CEPHFS_EBADF);
ceph_shutdown(cmount);
}
// Verify that reading a zero-length file returns 0 bytes, forcing the
// client's synchronous read path via client_debug_force_sync_read.
TEST(LibCephFS, ReadEmptyFile) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
// test the read_sync path in the client for zero files
ASSERT_EQ(ceph_conf_set(cmount, "client_debug_force_sync_read", "true"), 0);
int mypid = getpid();
char testf[256];
sprintf(testf, "test_reademptyfile%d", mypid);
int fd = ceph_open(cmount, testf, O_CREAT|O_TRUNC|O_WRONLY, 0644);
ASSERT_GT(fd, 0);
ceph_close(cmount, fd);
fd = ceph_open(cmount, testf, O_RDONLY, 0);
ASSERT_GT(fd, 0);
char buf[4096];
// reading an empty file must return 0 (EOF), not an error
ASSERT_EQ(ceph_read(cmount, fd, buf, 4096, 0), 0);
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// Round-trip a two-element iovec through ceph_pwritev/ceph_preadv and verify
// the data read back matches what was written.
TEST(LibCephFS, PreadvPwritev) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
int mypid = getpid();
char testf[256];
sprintf(testf, "test_preadvpwritevfile%d", mypid);
int fd = ceph_open(cmount, testf, O_CREAT|O_RDWR, 0666);
ASSERT_GT(fd, 0);
// sizeof() includes the trailing NUL, so the terminators are written too
char out0[] = "hello ";
char out1[] = "world\n";
struct iovec iov_out[2] = {
{out0, sizeof(out0)},
{out1, sizeof(out1)},
};
char in0[sizeof(out0)];
char in1[sizeof(out1)];
struct iovec iov_in[2] = {
{in0, sizeof(in0)},
{in1, sizeof(in1)},
};
ssize_t nwritten = iov_out[0].iov_len + iov_out[1].iov_len;
ssize_t nread = iov_in[0].iov_len + iov_in[1].iov_len;
ASSERT_EQ(ceph_pwritev(cmount, fd, iov_out, 2, 0), nwritten);
ASSERT_EQ(ceph_preadv(cmount, fd, iov_in, 2, 0), nread);
// compare each iovec segment read back against what was written
ASSERT_EQ(0, strncmp((const char*)iov_in[0].iov_base, (const char*)iov_out[0].iov_base, iov_out[0].iov_len));
ASSERT_EQ(0, strncmp((const char*)iov_in[1].iov_base, (const char*)iov_out[1].iov_base, iov_out[1].iov_len));
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// Same round-trip as PreadvPwritev but through the low-level (ll) interface:
// create via ceph_ll_create, then ceph_ll_writev/ceph_ll_readv on the Fh.
TEST(LibCephFS, LlreadvLlwritev) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
int mypid = getpid();
char filename[256];
sprintf(filename, "test_llreadvllwritevfile%u", mypid);
Inode *root, *file;
ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
Fh *fh;
struct ceph_statx stx;
UserPerm *perms = ceph_mount_perms(cmount);
ASSERT_EQ(ceph_ll_create(cmount, root, filename, 0666,
O_RDWR|O_CREAT|O_TRUNC, &file, &fh, &stx, 0, 0, perms), 0);
/* prepare a two-element iovec payload (sizeof includes the trailing NUL) */
char out0[] = "hello ";
char out1[] = "world\n";
struct iovec iov_out[2] = {
{out0, sizeof(out0)},
{out1, sizeof(out1)},
};
char in0[sizeof(out0)];
char in1[sizeof(out1)];
struct iovec iov_in[2] = {
{in0, sizeof(in0)},
{in1, sizeof(in1)},
};
ssize_t nwritten = iov_out[0].iov_len + iov_out[1].iov_len;
ssize_t nread = iov_in[0].iov_len + iov_in[1].iov_len;
ASSERT_EQ(ceph_ll_writev(cmount, fh, iov_out, 2, 0), nwritten);
ASSERT_EQ(ceph_ll_readv(cmount, fh, iov_in, 2, 0), nread);
// compare each iovec segment read back against what was written
ASSERT_EQ(0, strncmp((const char*)iov_in[0].iov_base, (const char*)iov_out[0].iov_base, iov_out[0].iov_len));
ASSERT_EQ(0, strncmp((const char*)iov_in[1].iov_base, (const char*)iov_out[1].iov_base, iov_out[1].iov_len));
ceph_ll_close(cmount, fh);
ceph_shutdown(cmount);
}
// Sanity check: the stripe unit granularity reported by the client is a
// positive value.
TEST(LibCephFS, StripeUnitGran) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
ASSERT_GT(ceph_get_stripe_unit_granularity(cmount), 0);
ceph_shutdown(cmount);
}
// Verify rename semantics: after a rename the destination exists, the source
// does not, and renaming a non-existent source fails with ENOENT.
TEST(LibCephFS, Rename) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
int mypid = getpid();
char path_src[256];
char path_dst[256];
/* make a source file */
sprintf(path_src, "test_rename_src%d", mypid);
int fd = ceph_open(cmount, path_src, O_CREAT|O_TRUNC|O_WRONLY, 0777);
ASSERT_GT(fd, 0);
ASSERT_EQ(0, ceph_close(cmount, fd));
/* rename to a new dest path */
sprintf(path_dst, "test_rename_dst%d", mypid);
ASSERT_EQ(0, ceph_rename(cmount, path_src, path_dst));
/* test that dest path exists */
struct ceph_statx stx;
ASSERT_EQ(0, ceph_statx(cmount, path_dst, &stx, 0, 0));
/* test that src path doesn't exist */
ASSERT_EQ(-CEPHFS_ENOENT, ceph_statx(cmount, path_src, &stx, 0, AT_SYMLINK_NOFOLLOW));
/* rename with non-existent source path */
ASSERT_EQ(-CEPHFS_ENOENT, ceph_rename(cmount, path_src, path_dst));
ASSERT_EQ(0, ceph_unlink(cmount, path_dst));
ceph_shutdown(cmount);
}
// Verify that every filesystem-touching API returns -ENOTCONN when called on
// a created-but-never-mounted handle, instead of crashing.
TEST(LibCephFS, UseUnmounted) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
struct statvfs stvfs;
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_statfs(cmount, "/", &stvfs));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_local_osd(cmount));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_chdir(cmount, "/"));
struct ceph_dir_result *dirp;
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_opendir(cmount, "/", &dirp));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_closedir(cmount, dirp));
// ceph_readdir reports errors via errno rather than a return code
ceph_readdir(cmount, dirp);
EXPECT_EQ(CEPHFS_ENOTCONN, errno);
struct dirent rdent;
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_readdir_r(cmount, dirp, &rdent));
struct ceph_statx stx;
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_readdirplus_r(cmount, dirp, &rdent, &stx, 0, 0, NULL));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_getdents(cmount, dirp, NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_getdnames(cmount, dirp, NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_telldir(cmount, dirp));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_link(cmount, "/", "/link"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_unlink(cmount, "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_rename(cmount, "/path", "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_mkdir(cmount, "/", 0655));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_mkdirs(cmount, "/", 0655));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_rmdir(cmount, "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_readlink(cmount, "/path", NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_symlink(cmount, "/path", "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_statx(cmount, "/path", &stx, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_setattrx(cmount, "/path", &stx, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_getxattr(cmount, "/path", "name", NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lgetxattr(cmount, "/path", "name", NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_listxattr(cmount, "/path", NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_llistxattr(cmount, "/path", NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_removexattr(cmount, "/path", "name"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lremovexattr(cmount, "/path", "name"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_setxattr(cmount, "/path", "name", NULL, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lsetxattr(cmount, "/path", "name", NULL, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fsetattrx(cmount, 0, &stx, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_chmod(cmount, "/path", 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fchmod(cmount, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_chown(cmount, "/path", 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lchown(cmount, "/path", 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fchown(cmount, 0, 0, 0));
struct utimbuf utb;
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_utime(cmount, "/path", &utb));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_truncate(cmount, "/path", 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_mknod(cmount, "/path", 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_open(cmount, "/path", 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_open_layout(cmount, "/path", 0, 0, 0, 0, 0, "pool"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_close(cmount, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_lseek(cmount, 0, 0, SEEK_SET));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_read(cmount, 0, NULL, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_write(cmount, 0, NULL, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_ftruncate(cmount, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fsync(cmount, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_fstatx(cmount, 0, &stx, 0, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_sync_fs(cmount));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_stripe_unit(cmount, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_stripe_count(cmount, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_layout(cmount, 0, NULL, NULL ,NULL ,NULL));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_object_size(cmount, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_pool(cmount, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_pool_name(cmount, 0, NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_replication(cmount, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_replication(cmount, "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_layout(cmount, "/path", NULL, NULL, NULL, NULL));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_object_size(cmount, "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_stripe_count(cmount, "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_stripe_unit(cmount, "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_pool(cmount, "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_path_pool_name(cmount, "/path", NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_pool_name(cmount, 0, NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_stripe_address(cmount, 0, 0, NULL, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_localize_reads(cmount, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_debug_get_fd_caps(cmount, 0));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_debug_get_file_caps(cmount, "/path"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_stripe_unit_granularity(cmount));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_pool_id(cmount, "data"));
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_pool_replication(cmount, 1));
// never mounted, so release (not shutdown) the handle
ceph_release(cmount);
}
// Verify pool-id lookup: the root's data pool resolves to a non-negative id
// and a bogus pool name fails with ENOENT.
TEST(LibCephFS, GetPoolId) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char name[80];
memset(name, 0, sizeof(name));
ASSERT_LE(0, ceph_get_path_pool_name(cmount, "/", name, sizeof(name)));
ASSERT_GE(ceph_get_pool_id(cmount, name), 0);
ASSERT_EQ(ceph_get_pool_id(cmount, "weflkjwelfjwlkejf"), -CEPHFS_ENOENT);
ceph_shutdown(cmount);
}
// Verify pool replication lookup: invalid pool ids fail with ENOENT and the
// root's data pool reports a positive replication count.
TEST(LibCephFS, GetPoolReplication) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
/* negative pools */
ASSERT_EQ(ceph_get_pool_replication(cmount, -10), -CEPHFS_ENOENT);
/* valid pool */
int pool_id;
int stripe_unit, stripe_count, object_size;
ASSERT_EQ(0, ceph_get_path_layout(cmount, "/", &stripe_unit, &stripe_count,
&object_size, &pool_id));
ASSERT_GE(pool_id, 0);
ASSERT_GT(ceph_get_pool_replication(cmount, pool_id), 0);
ceph_shutdown(cmount);
}
// Verify ceph_get_file_extent_osds: extent lengths are clipped to stripe-unit
// boundaries at several offsets, and an undersized OSD array yields ERANGE.
TEST(LibCephFS, GetExtentOsds) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
// must fail before mounting
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_file_extent_osds(cmount, 0, 0, NULL, NULL, 0));
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
int stripe_unit = (1<<18);
/* make a file! */
char test_file[256];
sprintf(test_file, "test_extent_osds_%d", getpid());
int fd = ceph_open_layout(cmount, test_file, O_CREAT|O_RDWR, 0666,
stripe_unit, 2, stripe_unit*2, NULL);
ASSERT_GT(fd, 0);
/* get back how many osds > 0 */
int ret = ceph_get_file_extent_osds(cmount, fd, 0, NULL, NULL, 0);
EXPECT_GT(ret, 0);
int64_t len;
int osds[ret]; // VLA sized by the probe above (GCC/Clang extension)
/* full stripe extent */
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, 0, &len, osds, ret));
EXPECT_EQ(len, (int64_t)stripe_unit);
/* half stripe extent */
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, stripe_unit/2, &len, osds, ret));
EXPECT_EQ(len, (int64_t)stripe_unit/2);
/* 1.5 stripe unit offset -1 byte */
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, 3*stripe_unit/2-1, &len, osds, ret));
EXPECT_EQ(len, (int64_t)stripe_unit/2+1);
/* 1.5 stripe unit offset +1 byte */
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, 3*stripe_unit/2+1, &len, osds, ret));
EXPECT_EQ(len, (int64_t)stripe_unit/2-1);
/* only when more than 1 osd */
if (ret > 1) {
EXPECT_EQ(-CEPHFS_ERANGE, ceph_get_file_extent_osds(cmount, fd, 0, NULL, osds, 1));
}
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// Verify ceph_get_osd_crush_location argument checking and that each OSD's
// location decodes as a sequence of NUL-separated (type, name) string pairs.
TEST(LibCephFS, GetOsdCrushLocation) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
// must fail before mounting
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_osd_crush_location(cmount, 0, NULL, 0));
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
// NULL buffer with nonzero length is invalid
ASSERT_EQ(ceph_get_osd_crush_location(cmount, 0, NULL, 1), -CEPHFS_EINVAL);
char path[256];
ASSERT_EQ(ceph_get_osd_crush_location(cmount, 9999999, path, 0), -CEPHFS_ENOENT);
ASSERT_EQ(ceph_get_osd_crush_location(cmount, -1, path, 0), -CEPHFS_EINVAL);
char test_file[256];
sprintf(test_file, "test_osds_loc_%d", getpid());
int fd = ceph_open(cmount, test_file, O_CREAT|O_RDWR, 0666);
ASSERT_GT(fd, 0);
/* get back how many osds > 0 */
int ret = ceph_get_file_extent_osds(cmount, fd, 0, NULL, NULL, 0);
EXPECT_GT(ret, 0);
/* full stripe extent */
int osds[ret];
EXPECT_EQ(ret, ceph_get_file_extent_osds(cmount, fd, 0, NULL, osds, ret));
// length-0 probe returns required size; a too-small buffer gives ERANGE
ASSERT_GT(ceph_get_osd_crush_location(cmount, 0, path, 0), 0);
ASSERT_EQ(ceph_get_osd_crush_location(cmount, 0, path, 1), -CEPHFS_ERANGE);
for (int i = 0; i < ret; i++) {
int len = ceph_get_osd_crush_location(cmount, osds[i], path, sizeof(path));
ASSERT_GT(len, 0);
// walk the buffer: alternating NUL-terminated type and name strings
int pos = 0;
while (pos < len) {
std::string type(path + pos);
ASSERT_GT((int)type.size(), 0);
pos += type.size() + 1;
std::string name(path + pos);
ASSERT_GT((int)name.size(), 0);
pos += name.size() + 1;
}
}
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// Verify ceph_get_osd_addr argument checking (NULL addr, bad OSD ids) and
// that a valid OSD id yields an address.
TEST(LibCephFS, GetOsdAddr) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
// must fail before mounting
EXPECT_EQ(-CEPHFS_ENOTCONN, ceph_get_osd_addr(cmount, 0, NULL));
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
ASSERT_EQ(-CEPHFS_EINVAL, ceph_get_osd_addr(cmount, 0, NULL));
struct sockaddr_storage addr;
ASSERT_EQ(-CEPHFS_ENOENT, ceph_get_osd_addr(cmount, -1, &addr));
ASSERT_EQ(-CEPHFS_ENOENT, ceph_get_osd_addr(cmount, 9999999, &addr));
ASSERT_EQ(0, ceph_get_osd_addr(cmount, 0, &addr));
ceph_shutdown(cmount);
}
// Verify that ceph_shutdown cleanly tears down a mount that still has an
// open directory handle and an open file descriptor.
TEST(LibCephFS, OpenNoClose) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
pid_t mypid = getpid();
char str_buf[256];
sprintf(str_buf, "open_no_close_dir%d", mypid);
ASSERT_EQ(0, ceph_mkdirs(cmount, str_buf, 0777));
struct ceph_dir_result *ls_dir = NULL;
ASSERT_EQ(ceph_opendir(cmount, str_buf, &ls_dir), 0);
sprintf(str_buf, "open_no_close_file%d", mypid);
int fd = ceph_open(cmount, str_buf, O_RDONLY|O_CREAT, 0666);
ASSERT_LT(0, fd);
// shutdown should force close opened file/dir
ceph_shutdown(cmount);
}
// Verify stx_nlink accounting through the low-level interface: 1 after
// create, 2 after hard-linking, back to 1 after removing the link.
TEST(LibCephFS, Nlink) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
Inode *root, *dir, *file;
ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
char dirname[32], filename[32], linkname[32];
sprintf(dirname, "nlinkdir%x", getpid());
sprintf(filename, "nlinkorig%x", getpid());
sprintf(linkname, "nlinklink%x", getpid());
struct ceph_statx stx;
Fh *fh;
UserPerm *perms = ceph_mount_perms(cmount);
ASSERT_EQ(ceph_ll_mkdir(cmount, root, dirname, 0755, &dir, &stx, 0, 0, perms), 0);
ASSERT_EQ(ceph_ll_create(cmount, dir, filename, 0666, O_RDWR|O_CREAT|O_EXCL,
&file, &fh, &stx, CEPH_STATX_NLINK, 0, perms), 0);
ASSERT_EQ(ceph_ll_close(cmount, fh), 0);
// freshly created file has exactly one link
ASSERT_EQ(stx.stx_nlink, (nlink_t)1);
ASSERT_EQ(ceph_ll_link(cmount, file, dir, linkname, perms), 0);
ASSERT_EQ(ceph_ll_getattr(cmount, file, &stx, CEPH_STATX_NLINK, 0, perms), 0);
// hard link bumps the count to two
ASSERT_EQ(stx.stx_nlink, (nlink_t)2);
ASSERT_EQ(ceph_ll_unlink(cmount, dir, linkname, perms), 0);
ASSERT_EQ(ceph_ll_lookup(cmount, dir, filename, &file, &stx,
CEPH_STATX_NLINK, 0, perms), 0);
// removing the link restores the count
ASSERT_EQ(stx.stx_nlink, (nlink_t)1);
ceph_shutdown(cmount);
}
// Verify "." and ".." handling: at the root they are the same inode, ".."
// of an unlinked cwd still resolves, and the behavior holds when a subtree
// is mounted as the root.
TEST(LibCephFS, SlashDotDot) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, "/.", &stx, CEPH_STATX_INO, 0), 0);
ino_t ino = stx.stx_ino;
ASSERT_EQ(ceph_statx(cmount, "/..", &stx, CEPH_STATX_INO, 0), 0);
/* At root, "." and ".." should be the same inode */
ASSERT_EQ(ino, stx.stx_ino);
/* Test accessing the parent of an unlinked directory */
char dir1[32], dir2[56];
sprintf(dir1, "/sldotdot%x", getpid());
sprintf(dir2, "%s/sub%x", dir1, getpid());
ASSERT_EQ(ceph_mkdir(cmount, dir1, 0755), 0);
ASSERT_EQ(ceph_mkdir(cmount, dir2, 0755), 0);
ASSERT_EQ(ceph_chdir(cmount, dir2), 0);
/* Test behavior when unlinking cwd */
struct ceph_dir_result *rdir;
ASSERT_EQ(ceph_opendir(cmount, ".", &rdir), 0);
ASSERT_EQ(ceph_rmdir(cmount, dir2), 0);
/* get "." entry; check the result before dereferencing (was an unchecked
* NULL deref, inconsistent with the checks done below) */
struct dirent *result = ceph_readdir(cmount, rdir);
ASSERT_TRUE(result != NULL);
ino = result->d_ino;
/* get ".." entry */
result = ceph_readdir(cmount, rdir);
ASSERT_TRUE(result != NULL);
ASSERT_EQ(ino, result->d_ino);
ceph_closedir(cmount, rdir);
/* Make sure it works same way when mounting subtree */
ASSERT_EQ(ceph_unmount(cmount), 0);
ASSERT_EQ(ceph_mount(cmount, dir1), 0);
ASSERT_EQ(ceph_statx(cmount, "/..", &stx, CEPH_STATX_INO, 0), 0);
/* Test readdir behavior */
ASSERT_EQ(ceph_opendir(cmount, "/", &rdir), 0);
result = ceph_readdir(cmount, rdir);
ASSERT_TRUE(result != NULL);
ASSERT_STREQ(result->d_name, ".");
ino = result->d_ino;
result = ceph_readdir(cmount, rdir);
ASSERT_TRUE(result != NULL);
ASSERT_STREQ(result->d_name, "..");
ASSERT_EQ(ino, result->d_ino);
ceph_shutdown(cmount);
}
// Exact equality of two timespec values: both the seconds and the
// nanoseconds fields must match.
static inline bool
timespec_eq(timespec const& lhs, timespec const& rhs)
{
if (lhs.tv_sec != rhs.tv_sec)
return false;
return lhs.tv_nsec == rhs.tv_nsec;
}
// Verify birth-time reporting: a new file has btime == ctime, and a later
// chmod advances ctime while leaving btime untouched.
TEST(LibCephFS, Btime) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char filename[32];
sprintf(filename, "/getattrx%x", getpid());
ceph_unlink(cmount, filename);
int fd = ceph_open(cmount, filename, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
/* make sure fstatx works */
struct ceph_statx stx;
ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_CTIME|CEPH_STATX_BTIME, 0), 0);
ASSERT_TRUE(stx.stx_mask & (CEPH_STATX_CTIME|CEPH_STATX_BTIME));
ASSERT_TRUE(timespec_eq(stx.stx_ctime, stx.stx_btime));
ceph_close(cmount, fd);
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_CTIME|CEPH_STATX_BTIME, 0), 0);
ASSERT_TRUE(timespec_eq(stx.stx_ctime, stx.stx_btime));
ASSERT_TRUE(stx.stx_mask & (CEPH_STATX_CTIME|CEPH_STATX_BTIME));
struct timespec old_btime = stx.stx_btime;
/* Now sleep, do a chmod and verify that the ctime changed, but btime didn't */
sleep(1);
ASSERT_EQ(ceph_chmod(cmount, filename, 0644), 0);
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_CTIME|CEPH_STATX_BTIME, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_BTIME);
ASSERT_TRUE(timespec_eq(stx.stx_btime, old_btime));
ASSERT_FALSE(timespec_eq(stx.stx_ctime, stx.stx_btime));
ceph_shutdown(cmount);
}
// Verify that the birth time can be set explicitly via ceph_setattrx with
// CEPH_SETATTR_BTIME and read back unchanged.
TEST(LibCephFS, SetBtime) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char filename[32];
sprintf(filename, "/setbtime%x", getpid());
ceph_unlink(cmount, filename);
int fd = ceph_open(cmount, filename, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
ceph_close(cmount, fd);
struct ceph_statx stx;
// only stx_btime is consumed by setattrx because only the BTIME bit is set
struct timespec old_btime = { 1, 2 };
stx.stx_btime = old_btime;
ASSERT_EQ(ceph_setattrx(cmount, filename, &stx, CEPH_SETATTR_BTIME, 0), 0);
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_BTIME, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_BTIME);
ASSERT_TRUE(timespec_eq(stx.stx_btime, old_btime));
ceph_shutdown(cmount);
}
// Verify AT_STATX_DONT_SYNC: with two independent clients, a getattr that
// skips synchronization may return the cached (stale) ctime after the other
// client has changed the inode.
TEST(LibCephFS, LazyStatx) {
struct ceph_mount_info *cmount1, *cmount2;
ASSERT_EQ(ceph_create(&cmount1, NULL), 0);
ASSERT_EQ(ceph_create(&cmount2, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount1, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount2, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount1, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount2, NULL));
ASSERT_EQ(ceph_mount(cmount1, "/"), 0);
ASSERT_EQ(ceph_mount(cmount2, "/"), 0);
char filename[32];
sprintf(filename, "lazystatx%x", getpid());
Inode *root1, *file1, *root2, *file2;
struct ceph_statx stx;
Fh *fh;
UserPerm *perms1 = ceph_mount_perms(cmount1);
UserPerm *perms2 = ceph_mount_perms(cmount2);
ASSERT_EQ(ceph_ll_lookup_root(cmount1, &root1), 0);
// best-effort removal of a leftover from an earlier run
ceph_ll_unlink(cmount1, root1, filename, perms1);
ASSERT_EQ(ceph_ll_create(cmount1, root1, filename, 0666, O_RDWR|O_CREAT|O_EXCL,
&file1, &fh, &stx, 0, 0, perms1), 0);
ASSERT_EQ(ceph_ll_close(cmount1, fh), 0);
ASSERT_EQ(ceph_ll_lookup_root(cmount2, &root2), 0);
ASSERT_EQ(ceph_ll_lookup(cmount2, root2, filename, &file2, &stx, CEPH_STATX_CTIME, 0, perms2), 0);
struct timespec old_ctime = stx.stx_ctime;
/*
* Now sleep, do a chmod on the first client and the see whether we get a
* different ctime with a statx that uses AT_STATX_DONT_SYNC
*/
sleep(1);
stx.stx_mode = 0644;
ASSERT_EQ(ceph_ll_setattr(cmount1, file1, &stx, CEPH_SETATTR_MODE, perms1), 0);
ASSERT_EQ(ceph_ll_getattr(cmount2, file2, &stx, CEPH_STATX_CTIME, AT_STATX_DONT_SYNC, perms2), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_CTIME);
// the unsynchronized getattr must still report the cached ctime
ASSERT_TRUE(stx.stx_ctime.tv_sec == old_ctime.tv_sec &&
stx.stx_ctime.tv_nsec == old_ctime.tv_nsec);
ceph_shutdown(cmount1);
ceph_shutdown(cmount2);
}
// Verify that the change attribute (stx_version) advances on each kind of
// inode modification: chmod, write, and truncate.
TEST(LibCephFS, ChangeAttr) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char filename[32];
sprintf(filename, "/changeattr%x", getpid());
ceph_unlink(cmount, filename);
int fd = ceph_open(cmount, filename, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
uint64_t old_change_attr = stx.stx_version;
/* do chmod, and check whether change_attr changed */
ASSERT_EQ(ceph_chmod(cmount, filename, 0644), 0);
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_NE(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* now do a write and see if it changed again */
ASSERT_EQ(3, ceph_write(cmount, fd, "foo", 3, 0));
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_NE(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* Now truncate and check again */
ASSERT_EQ(0, ceph_ftruncate(cmount, fd, 0));
ASSERT_EQ(ceph_statx(cmount, filename, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_NE(stx.stx_version, old_change_attr);
ceph_close(cmount, fd);
ceph_shutdown(cmount);
}
// Verify that a directory's change attribute (stx_version) increases when
// its ctime changes (setxattr) and again when a child file is created.
TEST(LibCephFS, DirChangeAttrCreateFile) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dirpath[32], filepath[56];
sprintf(dirpath, "/dirchange%x", getpid());
sprintf(filepath, "%s/foo", dirpath);
ASSERT_EQ(ceph_mkdir(cmount, dirpath, 0755), 0);
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
uint64_t old_change_attr = stx.stx_version;
/* Important: Follow an operation that changes the directory's ctime (setxattr)
* with one that changes the directory's mtime and ctime (create).
* Check that directory's change_attr is updated everytime ctime changes.
*/
/* set xattr on dir, and check whether dir's change_attr is incremented */
ASSERT_EQ(ceph_setxattr(cmount, dirpath, "user.name", (void*)"bob", 3, XATTR_CREATE), 0);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* create a file within dir, and check whether dir's change_attr is incremented */
int fd = ceph_open(cmount, filepath, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
ceph_close(cmount, fd);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
ASSERT_EQ(0, ceph_unlink(cmount, filepath));
ASSERT_EQ(0, ceph_rmdir(cmount, dirpath));
ceph_shutdown(cmount);
}
// Verify that a directory's change attribute (stx_version) increases when
// its ctime changes (chmod) and again when a child file is renamed.
TEST(LibCephFS, DirChangeAttrRenameFile) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dirpath[32], filepath[56], newfilepath[56];
sprintf(dirpath, "/dirchange%x", getpid());
sprintf(filepath, "%s/foo", dirpath);
ASSERT_EQ(ceph_mkdir(cmount, dirpath, 0755), 0);
int fd = ceph_open(cmount, filepath, O_RDWR|O_CREAT|O_EXCL, 0666);
ASSERT_LT(0, fd);
ceph_close(cmount, fd);
/* Important: Follow an operation that changes the directory's ctime (setattr)
* with one that changes the directory's mtime and ctime (rename).
* Check that directory's change_attr is updated everytime ctime changes.
*/
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
uint64_t old_change_attr = stx.stx_version;
/* chmod dir, and check whether dir's change_attr is incremented */
ASSERT_EQ(ceph_chmod(cmount, dirpath, 0777), 0);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
old_change_attr = stx.stx_version;
/* rename a file within dir, and check whether dir's change_attr is incremented */
sprintf(newfilepath, "%s/bar", dirpath);
ASSERT_EQ(ceph_rename(cmount, filepath, newfilepath), 0);
ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
ASSERT_GT(stx.stx_version, old_change_attr);
ASSERT_EQ(0, ceph_unlink(cmount, newfilepath));
ASSERT_EQ(0, ceph_rmdir(cmount, dirpath));
ceph_shutdown(cmount);
}
// Verify a directory's change attribute (stx_version) is bumped both by a
// ctime-only change (removexattr) and by an mtime+ctime change (removing a
// child file).
TEST(LibCephFS, DirChangeAttrRemoveFile) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char dirpath[32], filepath[56];
  sprintf(dirpath, "/dirchange%x", getpid());
  sprintf(filepath, "%s/foo", dirpath);
  ASSERT_EQ(ceph_mkdir(cmount, dirpath, 0755), 0);
  // Pre-set an xattr so it can be removed below; create the child to remove.
  ASSERT_EQ(ceph_setxattr(cmount, dirpath, "user.name", (void*)"bob", 3, XATTR_CREATE), 0);
  int fd = ceph_open(cmount, filepath, O_RDWR|O_CREAT|O_EXCL, 0666);
  ASSERT_LT(0, fd);
  ceph_close(cmount, fd);
  /* Important: Follow an operation that changes the directory's ctime (removexattr)
   * with one that changes the directory's mtime and ctime (remove a file).
   * Check that directory's change_attr is updated everytime ctime changes.
   */
  struct ceph_statx stx;
  ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
  ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
  // Baseline change attribute before any mutation.
  uint64_t old_change_attr = stx.stx_version;
  /* remove xattr, and check whether dir's change_attr is incremented */
  ASSERT_EQ(ceph_removexattr(cmount, dirpath, "user.name"), 0);
  ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
  ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
  ASSERT_GT(stx.stx_version, old_change_attr);
  old_change_attr = stx.stx_version;
  /* remove a file within dir, and check whether dir's change_attr is incremented */
  ASSERT_EQ(0, ceph_unlink(cmount, filepath));
  ASSERT_EQ(ceph_statx(cmount, dirpath, &stx, CEPH_STATX_VERSION, 0), 0);
  ASSERT_TRUE(stx.stx_mask & CEPH_STATX_VERSION);
  ASSERT_GT(stx.stx_version, old_change_attr);
  ASSERT_EQ(0, ceph_rmdir(cmount, dirpath));
  ceph_shutdown(cmount);
}
// Set a file's size via ceph_fsetattrx(CEPH_SETATTR_SIZE) and confirm
// ceph_fstatx reports the new size.
TEST(LibCephFS, SetSize) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  // Build a pid-unique path and remove any stale copy from a prior run
  // (best effort; the unlink result is deliberately ignored).
  char path[32];
  sprintf(path, "/setsize%x", getpid());
  ceph_unlink(cmount, path);
  int fd = ceph_open(cmount, path, O_RDWR|O_CREAT|O_EXCL, 0666);
  ASSERT_LT(0, fd);
  // Request an 8 MiB size, then re-stat and verify it took effect.
  const uint64_t target_size = 8388608;
  struct ceph_statx stx;
  stx.stx_size = target_size;
  ASSERT_EQ(ceph_fsetattrx(cmount, fd, &stx, CEPH_SETATTR_SIZE), 0);
  ASSERT_EQ(ceph_fstatx(cmount, fd, &stx, CEPH_STATX_SIZE, 0), 0);
  ASSERT_EQ(stx.stx_size, target_size);
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// Exercise namespace operations that target the filesystem root ("/") and
// check each returns the expected error instead of succeeding or crashing.
TEST(LibCephFS, OperationsOnRoot)
{
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char dirname[32];
  sprintf(dirname, "/somedir%x", getpid());
  ASSERT_EQ(ceph_mkdir(cmount, dirname, 0755), 0);
  // Root can never be removed, replaced, or overwritten.
  ASSERT_EQ(ceph_rmdir(cmount, "/"), -CEPHFS_EBUSY);
  ASSERT_EQ(ceph_link(cmount, "/", "/"), -CEPHFS_EEXIST);
  ASSERT_EQ(ceph_link(cmount, dirname, "/"), -CEPHFS_EEXIST);
  ASSERT_EQ(ceph_link(cmount, "nonExisitingDir", "/"), -CEPHFS_ENOENT);
  ASSERT_EQ(ceph_unlink(cmount, "/"), -CEPHFS_EISDIR);
  // Rename involving root in either position is rejected with EBUSY.
  ASSERT_EQ(ceph_rename(cmount, "/", "/"), -CEPHFS_EBUSY);
  ASSERT_EQ(ceph_rename(cmount, dirname, "/"), -CEPHFS_EBUSY);
  ASSERT_EQ(ceph_rename(cmount, "nonExistingDir", "/"), -CEPHFS_EBUSY);
  ASSERT_EQ(ceph_rename(cmount, "/", dirname), -CEPHFS_EBUSY);
  ASSERT_EQ(ceph_rename(cmount, "/", "nonExistingDir"), -CEPHFS_EBUSY);
  // Creating any new entry at "/" collides with the existing root.
  ASSERT_EQ(ceph_mkdir(cmount, "/", 0777), -CEPHFS_EEXIST);
  ASSERT_EQ(ceph_mknod(cmount, "/", 0, 0), -CEPHFS_EEXIST);
  ASSERT_EQ(ceph_symlink(cmount, "/", "/"), -CEPHFS_EEXIST);
  ASSERT_EQ(ceph_symlink(cmount, dirname, "/"), -CEPHFS_EEXIST);
  ASSERT_EQ(ceph_symlink(cmount, "nonExistingDir", "/"), -CEPHFS_EEXIST);
  ceph_shutdown(cmount);
}
// no rlimits on Windows
#ifndef _WIN32
static void shutdown_racer_func()
{
const int niter = 32;
struct ceph_mount_info *cmount;
int i;
for (i = 0; i < niter; ++i) {
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
ceph_shutdown(cmount);
}
}
// See tracker #20988
// Regression test: many threads concurrently mounting and shutting down
// clients must not race in global init/teardown state.
TEST(LibCephFS, ShutdownRace)
{
  const int nthreads = 32;
  std::thread threads[nthreads];
  // Need a bunch of fd's for this test: each of the 32 threads opens a full
  // client (sockets, admin socket, log files), so raise RLIMIT_NOFILE to the
  // hard limit for the duration of the test.
  struct rlimit rold, rnew;
  ASSERT_EQ(getrlimit(RLIMIT_NOFILE, &rold), 0);
  rnew = rold;
  rnew.rlim_cur = rnew.rlim_max;
  cout << "Setting RLIMIT_NOFILE from " << rold.rlim_cur <<
	  " to " << rnew.rlim_cur << std::endl;
  ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rnew), 0);
  for (int i = 0; i < nthreads; ++i)
    threads[i] = std::thread(shutdown_racer_func);
  for (int i = 0; i < nthreads; ++i)
    threads[i].join();
  /*
   * Let's just ignore restoring the open files limit,
   * the kernel will defer releasing the file descriptors
   * and then the process could possibly reach the open
   * files limit. For more detail, please see tracker #43039.
   */
  // ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rold), 0);
}
#endif
static void get_current_time_utimbuf(struct utimbuf *utb)
{
utime_t t = ceph_clock_now();
utb->actime = t.sec();
utb->modtime = t.sec();
}
static void get_current_time_timeval(struct timeval tv[2])
{
utime_t t = ceph_clock_now();
t.copy_to_timeval(&tv[0]);
t.copy_to_timeval(&tv[1]);
}
static void get_current_time_timespec(struct timespec ts[2])
{
utime_t t = ceph_clock_now();
t.to_timespec(&ts[0]);
t.to_timespec(&ts[1]);
}
// Verify ceph_utime() (path-based) and ceph_futime() (fd-based) both set a
// file's atime/mtime to the values supplied in a struct utimbuf.
TEST(LibCephFS, TestUtime) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_file[256];
  sprintf(test_file, "test_utime_file_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  struct utimbuf utb;
  struct ceph_statx stx;
  get_current_time_utimbuf(&utb);
  // ceph_utime()
  EXPECT_EQ(0, ceph_utime(cmount, test_file, &utb));
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx,
                       CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
  // utimbuf carries whole seconds only, so compare with zero nanoseconds.
  ASSERT_EQ(utime_t(stx.stx_atime), utime_t(utb.actime, 0));
  ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(utb.modtime, 0));
  // Refresh the timestamps so the futime() check observes a distinct value.
  get_current_time_utimbuf(&utb);
  // ceph_futime()
  EXPECT_EQ(0, ceph_futime(cmount, fd, &utb));
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx,
                       CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
  ASSERT_EQ(utime_t(stx.stx_atime), utime_t(utb.actime, 0));
  ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(utb.modtime, 0));
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// Verify the timeval-based time-setting calls: ceph_utimes() follows a
// symlink to its target, ceph_lutimes() updates the symlink itself, and
// ceph_futimes() operates on an open fd.
TEST(LibCephFS, TestUtimes) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_file[256];
  char test_symlink[256];
  sprintf(test_file, "test_utimes_file_%d", getpid());
  sprintf(test_symlink, "test_utimes_symlink_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  ASSERT_EQ(ceph_symlink(cmount, test_file, test_symlink), 0);
  struct timeval times[2];
  struct ceph_statx stx;
  get_current_time_timeval(times);
  // ceph_utimes() on symlink, validate target file time
  EXPECT_EQ(0, ceph_utimes(cmount, test_symlink, times));
  // statx without AT_SYMLINK_NOFOLLOW also follows the link, so this reads
  // the target's timestamps.
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx,
                       CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
  ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
  ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
  get_current_time_timeval(times);
  // ceph_lutimes() on symlink, validate symlink time
  EXPECT_EQ(0, ceph_lutimes(cmount, test_symlink, times));
  ASSERT_EQ(ceph_statx(cmount, test_symlink, &stx,
                       CEPH_STATX_MTIME|CEPH_STATX_ATIME, AT_SYMLINK_NOFOLLOW), 0);
  ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
  ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
  get_current_time_timeval(times);
  // ceph_futimes()
  EXPECT_EQ(0, ceph_futimes(cmount, fd, times));
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx,
                       CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
  ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
  ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// Verify ceph_futimens() sets a file's atime/mtime from a timespec pair
// (nanosecond resolution) through an open fd.
TEST(LibCephFS, TestFutimens) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_file[256];
  sprintf(test_file, "test_futimens_file_%d", getpid());
  int fd = ceph_open(cmount, test_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  struct timespec times[2];
  struct ceph_statx stx;
  get_current_time_timespec(times);
  // ceph_futimens()
  EXPECT_EQ(0, ceph_futimens(cmount, fd, times));
  // times[0] is atime, times[1] is mtime, mirroring futimens(2).
  ASSERT_EQ(ceph_statx(cmount, test_file, &stx,
                       CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
  ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
  ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
  ceph_close(cmount, fd);
  ceph_shutdown(cmount);
}
// Exercise namespace operations whose paths end in "." or ".." and verify
// they fail with the expected errors (can't create, remove, or rename the
// dot entries themselves).
TEST(LibCephFS, OperationsOnDotDot) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char c_dir[512], c_dir_dot[1024], c_dir_dotdot[1024];
  char c_non_existent_dir[1024], c_non_existent_dirs[1024];
  char c_temp[1024];
  pid_t mypid = getpid();
  // Note: c_dir already begins with '/', so the formats below yield paths
  // with a doubled leading slash (e.g. "//oodd_dir_N/."), which path
  // resolution tolerates.
  sprintf(c_dir, "/oodd_dir_%d", mypid);
  sprintf(c_dir_dot, "/%s/.", c_dir);
  sprintf(c_dir_dotdot, "/%s/..", c_dir);
  sprintf(c_non_existent_dir, "/%s/../oodd_nonexistent/..", c_dir);
  sprintf(c_non_existent_dirs,
          "/%s/../ood_nonexistent1_%d/oodd_nonexistent2_%d", c_dir, mypid, mypid);
  sprintf(c_temp, "/oodd_temp_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, c_dir, 0777));
  // mkdir on "dir/." or "dir/.." resolves to an existing directory.
  ASSERT_EQ(-CEPHFS_EEXIST, ceph_mkdir(cmount, c_dir_dot, 0777));
  ASSERT_EQ(-CEPHFS_EEXIST, ceph_mkdir(cmount, c_dir_dotdot, 0777));
  ASSERT_EQ(0, ceph_mkdirs(cmount, c_non_existent_dirs, 0777));
  // rmdir on "." / ".." targets a non-empty directory.
  ASSERT_EQ(-CEPHFS_ENOTEMPTY, ceph_rmdir(cmount, c_dir_dot));
  ASSERT_EQ(-CEPHFS_ENOTEMPTY, ceph_rmdir(cmount, c_dir_dotdot));
  // non existent directory should return -CEPHFS_ENOENT
  ASSERT_EQ(-CEPHFS_ENOENT, ceph_rmdir(cmount, c_non_existent_dir));
  ASSERT_EQ(-CEPHFS_EBUSY, ceph_rename(cmount, c_dir_dot, c_temp));
  ASSERT_EQ(0, ceph_chdir(cmount, c_dir));
  ASSERT_EQ(0, ceph_mkdir(cmount, c_temp, 0777));
  // Renaming onto ".." (the parent entry) is rejected.
  ASSERT_EQ(-CEPHFS_EBUSY, ceph_rename(cmount, c_temp, ".."));
  ceph_shutdown(cmount);
}
// Read the "ceph.caps" virtual xattr of a freshly created file and check
// that its value matches the expected "pA...L...X...F.../0x<hex>" format.
TEST(LibCephFS, Caps_vxattr) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_caps_vxattr_file[256];
  char gxattrv[128];
  int xbuflen = sizeof(gxattrv);
  pid_t mypid = getpid();
  sprintf(test_caps_vxattr_file, "test_caps_vxattr_%d", mypid);
  int fd = ceph_open(cmount, test_caps_vxattr_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);
  int alen = ceph_getxattr(cmount, test_caps_vxattr_file, "ceph.caps", (void *)gxattrv, xbuflen);
  ASSERT_GT(alen, 0);
  // Bug fix: if alen == xbuflen, writing gxattrv[alen] below would run one
  // byte past the end of the buffer. Require room for the terminating NUL
  // (the SnapXattrs test already applies the same check).
  ASSERT_LT(alen, xbuflen);
  gxattrv[alen] = '\0';
  char caps_regex[] = "pA[sx]*L[sx]*X[sx]*F[sxcrwbal]*/0x[0-9a-fA-f]+";
  ASSERT_TRUE(regex_match(gxattrv, regex(caps_regex)) == 1);
  ceph_shutdown(cmount);
}
// Verify the "ceph.snap.btime" virtual xattr: present and identical on a
// snapshot dir and files within it, absent on non-snap files, monotonically
// increasing across snapshots, and hidden from listxattr().
TEST(LibCephFS, SnapXattrs) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char test_snap_xattr_file[256];
  char c_temp[PATH_MAX];
  char gxattrv[128];
  char gxattrv2[128];
  int xbuflen = sizeof(gxattrv);
  pid_t mypid = getpid();
  sprintf(test_snap_xattr_file, "test_snap_xattr_%d", mypid);
  int fd = ceph_open(cmount, test_snap_xattr_file, O_CREAT, 0666);
  ASSERT_GT(fd, 0);
  ceph_close(cmount, fd);
  // First snapshot of the root.
  sprintf(c_temp, "/.snap/test_snap_xattr_snap_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, c_temp, 0777));
  int alen = ceph_getxattr(cmount, c_temp, "ceph.snap.btime", (void *)gxattrv, xbuflen);
  // xattr value is secs.nsecs (don't assume zero-term)
  ASSERT_LT(0, alen);
  ASSERT_LT(alen, xbuflen);
  gxattrv[alen] = '\0';
  // Split at the '.' to parse the seconds and nanoseconds parts separately.
  char *s = strchr(gxattrv, '.');
  char *q = NULL;
  ASSERT_NE(q, s);
  ASSERT_LT(s, gxattrv + alen);
  ASSERT_EQ('.', *s);
  *s = '\0';
  utime_t btime = utime_t(strtoull(gxattrv, NULL, 10), strtoull(s + 1, NULL, 10));
  *s = '.'; // restore for later strcmp
  // file within the snapshot should carry the same btime
  sprintf(c_temp, "/.snap/test_snap_xattr_snap_%d/%s", mypid, test_snap_xattr_file);
  int alen2 = ceph_getxattr(cmount, c_temp, "ceph.snap.btime", (void *)gxattrv2, xbuflen);
  ASSERT_EQ(alen, alen2);
  ASSERT_EQ(0, strncmp(gxattrv, gxattrv2, alen));
  // non-snap file shouldn't carry the xattr
  alen = ceph_getxattr(cmount, test_snap_xattr_file, "ceph.snap.btime", (void *)gxattrv2, xbuflen);
  ASSERT_EQ(-CEPHFS_ENODATA, alen);
  // create a second snapshot
  sprintf(c_temp, "/.snap/test_snap_xattr_snap2_%d", mypid);
  ASSERT_EQ(0, ceph_mkdir(cmount, c_temp, 0777));
  // check that the btime for the newer snapshot is > older
  alen = ceph_getxattr(cmount, c_temp, "ceph.snap.btime", (void *)gxattrv2, xbuflen);
  ASSERT_LT(0, alen);
  ASSERT_LT(alen, xbuflen);
  gxattrv2[alen] = '\0';
  s = strchr(gxattrv2, '.');
  ASSERT_NE(q, s);
  ASSERT_LT(s, gxattrv2 + alen);
  ASSERT_EQ('.', *s);
  *s = '\0';
  utime_t new_btime = utime_t(strtoull(gxattrv2, NULL, 10), strtoull(s + 1, NULL, 10));
#ifndef _WIN32
  // This assertion sometimes fails on Windows, possibly due to the clock precision.
  ASSERT_LT(btime, new_btime);
#endif
  // listxattr() shouldn't return snap.btime vxattr
  char xattrlist[512];
  int len = ceph_listxattr(cmount, test_snap_xattr_file, xattrlist, sizeof(xattrlist));
  ASSERT_GE(sizeof(xattrlist), (size_t)len);
  // Walk the NUL-separated name list looking for the vxattr.
  char *p = xattrlist;
  int found = 0;
  while (len > 0) {
    if (strcmp(p, "ceph.snap.btime") == 0)
      found++;
    len -= strlen(p) + 1;
    p += strlen(p) + 1;
  }
  ASSERT_EQ(found, 0);
  ceph_shutdown(cmount);
}
// Exercise ceph_lseek(): SEEK_SET/SEEK_END/SEEK_CUR basics plus the
// SEEK_HOLE/SEEK_DATA extensions, including out-of-range offsets.
TEST(LibCephFS, Lseek) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  char c_path[1024];
  sprintf(c_path, "test_lseek_%d", getpid());
  int fd = ceph_open(cmount, c_path, O_RDWR|O_CREAT|O_TRUNC, 0666);
  ASSERT_LT(0, fd);
  const char *out_buf = "hello world";
  size_t size = strlen(out_buf);
  ASSERT_EQ(ceph_write(cmount, fd, out_buf, size, 0), (int)size);
  /* basic SEEK_SET/END/CUR tests */
  ASSERT_EQ(0, ceph_lseek(cmount, fd, 0, SEEK_SET));
  ASSERT_EQ(size, ceph_lseek(cmount, fd, 0, SEEK_END));
  // Bug fix: negate through int64_t. `-size` negates an unsigned size_t,
  // relying on implementation-defined unsigned->signed conversion to reach
  // the intended negative offset; make the negative offset explicit.
  ASSERT_EQ(0, ceph_lseek(cmount, fd, -static_cast<int64_t>(size), SEEK_CUR));
  /* Test basic functionality and out of bounds conditions for SEEK_HOLE/DATA */
#ifdef SEEK_HOLE
  // File is fully written, so the first hole is at EOF.
  ASSERT_EQ(size, ceph_lseek(cmount, fd, 0, SEEK_HOLE));
  ASSERT_EQ(-CEPHFS_ENXIO, ceph_lseek(cmount, fd, -1, SEEK_HOLE));
  ASSERT_EQ(-CEPHFS_ENXIO, ceph_lseek(cmount, fd, size + 1, SEEK_HOLE));
#endif
#ifdef SEEK_DATA
  ASSERT_EQ(0, ceph_lseek(cmount, fd, 0, SEEK_DATA));
  ASSERT_EQ(-CEPHFS_ENXIO, ceph_lseek(cmount, fd, -1, SEEK_DATA));
  ASSERT_EQ(-CEPHFS_ENXIO, ceph_lseek(cmount, fd, size + 1, SEEK_DATA));
#endif
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ceph_shutdown(cmount);
}
// ceph_get_snap_info() on a path that is not a snapshot must fail with
// EINVAL rather than return bogus data.
TEST(LibCephFS, SnapInfoOnNonSnapshot) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  struct snap_info info;
  ASSERT_EQ(-CEPHFS_EINVAL, ceph_get_snap_info(cmount, "/", &info));
  ceph_shutdown(cmount);
}
// ceph_get_snap_info() on a snapshot created without custom metadata should
// report a valid snapshot id and zero metadata entries.
TEST(LibCephFS, EmptySnapInfo) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_path[64];
  char snap_path[PATH_MAX];
  sprintf(dir_path, "/dir0_%d", getpid());
  sprintf(snap_path, "%s/.snap/snap0_%d", dir_path, getpid());
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  // snapshot without custom metadata
  ASSERT_EQ(0, ceph_mkdir(cmount, snap_path, 0755));
  struct snap_info info;
  ASSERT_EQ(0, ceph_get_snap_info(cmount, snap_path, &info));
  ASSERT_GT(info.id, 0);
  ASSERT_EQ(info.nr_snap_metadata, 0);
  // Clean up: drop the snapshot, then the directory.
  ASSERT_EQ(0, ceph_rmdir(cmount, snap_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
// Create a snapshot with custom key/value metadata via ceph_mksnap() and
// verify ceph_get_snap_info() returns every supplied pair (order-agnostic).
TEST(LibCephFS, SnapInfo) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_path[64];
  char snap_name[64];
  char snap_path[PATH_MAX];
  sprintf(dir_path, "/dir0_%d", getpid());
  sprintf(snap_name, "snap0_%d", getpid());
  sprintf(snap_path, "%s/.snap/%s", dir_path, snap_name);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  // snapshot with custom metadata
  struct snap_metadata snap_meta[] = {{"foo", "bar"},{"this", "that"},{"abcdefg", "12345"}};
  ASSERT_EQ(0, ceph_mksnap(cmount, dir_path, snap_name, 0755, snap_meta, std::size(snap_meta)));
  struct snap_info info;
  ASSERT_EQ(0, ceph_get_snap_info(cmount, snap_path, &info));
  ASSERT_GT(info.id, 0);
  ASSERT_EQ(info.nr_snap_metadata, std::size(snap_meta));
  // Returned metadata order is not guaranteed; match each returned pair
  // against the original set.
  for (size_t i = 0; i < info.nr_snap_metadata; ++i) {
    auto &k1 = info.snap_metadata[i].key;
    auto &v1 = info.snap_metadata[i].value;
    bool found = false;
    for (size_t j = 0; j < info.nr_snap_metadata; ++j) {
      auto &k2 = snap_meta[j].key;
      auto &v2 = snap_meta[j].value;
      if (strncmp(k1, k2, strlen(k1)) == 0 && strncmp(v1, v2, strlen(v1)) == 0) {
        found = true;
        break;
      }
    }
    ASSERT_TRUE(found);
  }
  // The metadata buffer is allocated by the library; release it.
  ceph_free_snap_info_buffer(&info);
  ASSERT_EQ(0, ceph_rmsnap(cmount, dir_path, snap_name));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
// Looking up MDS-internal inodes (~mdsdir and stray dirs) by number must be
// refused with ESTALE; clients may not reach into MDS-private namespace.
TEST(LibCephFS, LookupInoMDSDir) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  Inode *inode;
  auto ino = inodeno_t(0x100); /* rank 0 ~mdsdir */
  ASSERT_EQ(-CEPHFS_ESTALE, ceph_ll_lookup_inode(cmount, ino, &inode));
  ino = inodeno_t(0x600); /* rank 0 first stray dir */
  ASSERT_EQ(-CEPHFS_ESTALE, ceph_ll_lookup_inode(cmount, ino, &inode));
  ceph_shutdown(cmount);
}
// Record the vinodeno (ino + snapid) of a dir, a file, a .snap dir, a
// snapshot, and a snapshotted file; then remount with a cold cache and
// verify ceph_ll_lookup_vino() can resolve each of them.
TEST(LibCephFS, LookupVino) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_path[64];
  char snap_name[64];
  char snapdir_path[128];
  char snap_path[256];
  char file_path[PATH_MAX];
  char snap_file[PATH_MAX];
  sprintf(dir_path, "/dir0_%d", getpid());
  sprintf(snap_name, "snap0_%d", getpid());
  sprintf(file_path, "%s/file_%d", dir_path, getpid());
  sprintf(snapdir_path, "%s/.snap", dir_path);
  sprintf(snap_path, "%s/%s", snapdir_path, snap_name);
  sprintf(snap_file, "%s/file_%d", snap_path, getpid());
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  int fd = ceph_open(cmount, file_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // Snapshot after the file exists so the snap captures it.
  ASSERT_EQ(0, ceph_mksnap(cmount, dir_path, snap_name, 0755, nullptr, 0));
  // record vinos for all of them
  struct ceph_statx stx;
  ASSERT_EQ(0, ceph_statx(cmount, dir_path, &stx, CEPH_STATX_INO, 0));
  vinodeno_t dir_vino(stx.stx_ino, stx.stx_dev);
  ASSERT_EQ(0, ceph_statx(cmount, file_path, &stx, CEPH_STATX_INO, 0));
  vinodeno_t file_vino(stx.stx_ino, stx.stx_dev);
  ASSERT_EQ(0, ceph_statx(cmount, snapdir_path, &stx, CEPH_STATX_INO, 0));
  vinodeno_t snapdir_vino(stx.stx_ino, stx.stx_dev);
  ASSERT_EQ(0, ceph_statx(cmount, snap_path, &stx, CEPH_STATX_INO, 0));
  vinodeno_t snap_vino(stx.stx_ino, stx.stx_dev);
  ASSERT_EQ(0, ceph_statx(cmount, snap_file, &stx, CEPH_STATX_INO, 0));
  vinodeno_t snap_file_vino(stx.stx_ino, stx.stx_dev);
  // Remount
  // A fresh client has none of these inodes cached, so the lookups below
  // must resolve them from the MDS.
  ASSERT_EQ(0, ceph_unmount(cmount));
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, NULL));
  // Find them all
  Inode *inode;
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, dir_vino, &inode));
  ceph_ll_put(cmount, inode);
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, file_vino, &inode));
  ceph_ll_put(cmount, inode);
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, snapdir_vino, &inode));
  ceph_ll_put(cmount, inode);
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, snap_vino, &inode));
  ceph_ll_put(cmount, inode);
  ASSERT_EQ(0, ceph_ll_lookup_vino(cmount, snap_file_vino, &inode));
  ceph_ll_put(cmount, inode);
  // cleanup
  ASSERT_EQ(0, ceph_rmsnap(cmount, dir_path, snap_name));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
// Verify ceph_openat(): open a directory relative to the root fd, then
// create/open a file relative to that directory fd, checking types via
// ceph_statxat along the way.
TEST(LibCephFS, Openat) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(0, ceph_create(&cmount, NULL));
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(0, ceph_mount(cmount, "/"));
  char c_rel_dir[64];
  char c_dir[128];
  sprintf(c_rel_dir, "open_test_%d", mypid);
  sprintf(c_dir, "/%s", c_rel_dir);
  ASSERT_EQ(0, ceph_mkdir(cmount, c_dir, 0777));
  // Directory fd for "/" serves as the base for relative lookups.
  int root_fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, root_fd);
  int dir_fd = ceph_openat(cmount, root_fd, c_rel_dir, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, dir_fd);
  struct ceph_statx stx;
  ASSERT_EQ(ceph_statxat(cmount, root_fd, c_rel_dir, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFDIR);
  char c_rel_path[64];
  char c_path[256];
  sprintf(c_rel_path, "created_file_%d", mypid);
  sprintf(c_path, "%s/created_file_%d", c_dir, mypid);
  // openat with O_CREAT relative to the directory fd.
  int file_fd = ceph_openat(cmount, dir_fd, c_rel_path, O_RDONLY | O_CREAT, 0666);
  ASSERT_LE(0, file_fd);
  ASSERT_EQ(ceph_statxat(cmount, dir_fd, c_rel_path, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
  ASSERT_EQ(0, ceph_close(cmount, file_fd));
  ASSERT_EQ(0, ceph_close(cmount, dir_fd));
  ASSERT_EQ(0, ceph_close(cmount, root_fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, c_dir));
  ceph_shutdown(cmount);
}
// Verify ceph_statxat() with paths relative to a root fd and to a directory
// fd, and that a lookup through a stale fd (its directory was deleted and
// recreated) fails with ENOENT.
TEST(LibCephFS, Statxat) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_name[64];
  char rel_file_name_1[128];
  char rel_file_name_2[256];
  char dir_path[512];
  char file_path[1024];
  // relative paths for *at() calls
  sprintf(dir_name, "dir0_%d", getpid());
  sprintf(rel_file_name_1, "file_%d", getpid());
  sprintf(rel_file_name_2, "%s/%s", dir_name, rel_file_name_1);
  sprintf(dir_path, "/%s", dir_name);
  sprintf(file_path, "%s/%s", dir_path, rel_file_name_1);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  int fd = ceph_open(cmount, file_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  struct ceph_statx stx;
  // test relative to root
  fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(ceph_statxat(cmount, fd, dir_name, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFDIR);
  ASSERT_EQ(ceph_statxat(cmount, fd, rel_file_name_2, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // test relative to dir
  fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  ASSERT_EQ(ceph_statxat(cmount, fd, rel_file_name_1, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
  // delete the dirtree, recreate and verify
  // fd still refers to the *old* (deleted) directory inode, so a lookup
  // through it must fail even though an identically-named tree now exists.
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  int fd1 = ceph_open(cmount, file_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd1);
  ASSERT_EQ(0, ceph_close(cmount, fd1));
  ASSERT_EQ(ceph_statxat(cmount, fd, rel_file_name_1, &stx, 0, 0), -CEPHFS_ENOENT);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
// Verify ceph_statxat() resolves paths relative to the current working
// directory when CEPHFS_AT_FDCWD is passed instead of a directory fd.
TEST(LibCephFS, StatxatATFDCWD) {
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, NULL), 0);
  char dir_name[64];
  char rel_file_name_1[128];
  char dir_path[512];
  char file_path[1024];
  // relative paths for *at() calls
  sprintf(dir_name, "dir0_%d", getpid());
  sprintf(rel_file_name_1, "file_%d", getpid());
  sprintf(dir_path, "/%s", dir_name);
  sprintf(file_path, "%s/%s", dir_path, rel_file_name_1);
  ASSERT_EQ(0, ceph_mkdir(cmount, dir_path, 0755));
  int fd = ceph_open(cmount, file_path, O_WRONLY|O_CREAT, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  struct ceph_statx stx;
  // chdir and test with CEPHFS_AT_FDCWD
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
  ASSERT_EQ(ceph_statxat(cmount, CEPHFS_AT_FDCWD, rel_file_name_1, &stx, 0, 0), 0);
  ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
  ASSERT_EQ(0, ceph_unlink(cmount, file_path));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
  ceph_shutdown(cmount);
}
// Verify ceph_fdopendir(): open a directory by fd, wrap it in a dir stream,
// and enumerate its entries (".", "..", and one file).
TEST(LibCephFS, Fdopendir) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char foostr[256];
  sprintf(foostr, "/dir_ls%d", mypid);
  ASSERT_EQ(ceph_mkdir(cmount, foostr, 0777), 0);
  char bazstr[512];
  sprintf(bazstr, "%s/elif", foostr);
  int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  fd = ceph_open(cmount, foostr, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  struct ceph_dir_result *ls_dir = NULL;
  ASSERT_EQ(ceph_fdopendir(cmount, fd, &ls_dir), 0);
  // not guaranteed to get . and .. first, but its a safe assumption in this case
  struct dirent *result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "elif");
  // Directory contains exactly one real entry, so the stream is exhausted.
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) == NULL);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_closedir(cmount, ls_dir));
  ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  ceph_shutdown(cmount);
}
// Verify ceph_fdopendir() accepts CEPHFS_AT_FDCWD and enumerates the
// current working directory.
TEST(LibCephFS, FdopendirATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char foostr[256];
  sprintf(foostr, "/dir_ls%d", mypid);
  ASSERT_EQ(ceph_mkdir(cmount, foostr, 0777), 0);
  char bazstr[512];
  sprintf(bazstr, "%s/elif", foostr);
  int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // Make the test directory the CWD so AT_FDCWD resolves to it.
  ASSERT_EQ(0, ceph_chdir(cmount, foostr));
  struct ceph_dir_result *ls_dir = NULL;
  ASSERT_EQ(ceph_fdopendir(cmount, CEPHFS_AT_FDCWD, &ls_dir), 0);
  // not guaranteed to get . and .. first, but its a safe assumption in this case
  struct dirent *result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "elif");
  ASSERT_TRUE(ceph_readdir(cmount, ls_dir) == NULL);
  ASSERT_EQ(0, ceph_closedir(cmount, ls_dir));
  ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  ceph_shutdown(cmount);
}
// Verify an open dir stream from ceph_fdopendir() stays readable after the
// underlying file and directory have been unlinked (POSIX-like semantics:
// open handles keep deleted objects alive).
TEST(LibCephFS, FdopendirReaddirTestWithDelete) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char foostr[256];
  sprintf(foostr, "/dir_ls%d", mypid);
  ASSERT_EQ(ceph_mkdir(cmount, foostr, 0777), 0);
  char bazstr[512];
  sprintf(bazstr, "%s/elif", foostr);
  int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  fd = ceph_open(cmount, foostr, O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  struct ceph_dir_result *ls_dir = NULL;
  ASSERT_EQ(ceph_fdopendir(cmount, fd, &ls_dir), 0);
  // Delete everything while the dir stream is still open.
  ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  // not guaranteed to get . and .. first, but its a safe assumption
  // in this case. also, note that we may or may not get other
  // entries.
  struct dirent *result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, ".");
  result = ceph_readdir(cmount, ls_dir);
  ASSERT_TRUE(result != NULL);
  ASSERT_STREQ(result->d_name, "..");
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_closedir(cmount, ls_dir));
  ceph_shutdown(cmount);
}
// ceph_fdopendir() on a regular-file fd must fail with ENOTDIR.
TEST(LibCephFS, FdopendirOnNonDir) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char foostr[256];
  sprintf(foostr, "/dir_ls%d", mypid);
  ASSERT_EQ(ceph_mkdir(cmount, foostr, 0777), 0);
  char bazstr[512];
  sprintf(bazstr, "%s/file", foostr);
  int fd = ceph_open(cmount, bazstr, O_CREAT|O_RDONLY, 0666);
  ASSERT_LE(0, fd);
  struct ceph_dir_result *ls_dir = NULL;
  ASSERT_EQ(ceph_fdopendir(cmount, fd, &ls_dir), -CEPHFS_ENOTDIR);
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, bazstr));
  ASSERT_EQ(0, ceph_rmdir(cmount, foostr));
  ceph_shutdown(cmount);
}
// Verify ceph_mkdirat() creates directories via paths relative to an open
// directory fd (both a single component and a nested relative path).
TEST(LibCephFS, Mkdirat) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char dir_name[128];
  char dir_path1[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path1, "/%s", dir_name);
  char dir_path2[512];
  char rel_dir_path2[512];
  sprintf(dir_path2, "%s/dir_%d", dir_path1, mypid);
  sprintf(rel_dir_path2, "%s/dir_%d", dir_name, mypid);
  int fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
  ASSERT_LE(0, fd);
  // One-level and two-level relative mkdir against the root fd.
  ASSERT_EQ(0, ceph_mkdirat(cmount, fd, dir_name, 0777));
  ASSERT_EQ(0, ceph_mkdirat(cmount, fd, rel_dir_path2, 0666));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  // Remove innermost first.
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path2));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path1));
  ceph_shutdown(cmount);
}
// Verify ceph_mkdirat() with CEPHFS_AT_FDCWD creates directories relative
// to the current working directory.
TEST(LibCephFS, MkdiratATFDCWD) {
  pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  ASSERT_EQ(ceph_create(&cmount, NULL), 0);
  ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
  ASSERT_EQ(ceph_mount(cmount, "/"), 0);
  char dir_name[128];
  char dir_path1[256];
  sprintf(dir_name, "dir_%d", mypid);
  sprintf(dir_path1, "/%s", dir_name);
  char dir_path2[512];
  sprintf(dir_path2, "%s/dir_%d", dir_path1, mypid);
  // First mkdir is relative to "/", then chdir and create the nested dir
  // with the same relative name — yielding /dir_N/dir_N.
  ASSERT_EQ(0, ceph_mkdirat(cmount, CEPHFS_AT_FDCWD, dir_name, 0777));
  ASSERT_EQ(0, ceph_chdir(cmount, dir_path1));
  ASSERT_EQ(0, ceph_mkdirat(cmount, CEPHFS_AT_FDCWD, dir_name, 0666));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path2));
  ASSERT_EQ(0, ceph_rmdir(cmount, dir_path1));
  ceph_shutdown(cmount);
}
// ceph_readlinkat(): read a symlink target relative to an open
// directory fd and verify the returned bytes and length.
TEST(LibCephFS, Readlinkat) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512];
sprintf(rel_file_path, "%s/elif", dir_name);
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_close(cmount, fd));
char link_path[128];
char rel_link_path[64];
sprintf(rel_link_path, "linkfile_%d", mypid);
sprintf(link_path, "/%s", rel_link_path);
// link target is the file's path relative to "/"
ASSERT_EQ(0, ceph_symlink(cmount, rel_file_path, link_path));
fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
ASSERT_LE(0, fd);
size_t target_len = strlen(rel_file_path);
char target[target_len+1];
// readlinkat returns the number of bytes placed in the buffer
ASSERT_EQ(target_len, ceph_readlinkat(cmount, fd, rel_link_path, target, target_len));
target[target_len] = '\0';
ASSERT_EQ(0, memcmp(target, rel_file_path, target_len));
ASSERT_EQ(0, ceph_close(cmount, fd));
ASSERT_EQ(0, ceph_unlink(cmount, link_path));
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_readlinkat() with CEPHFS_AT_FDCWD: link path resolution follows
// the mount's current working directory set via ceph_chdir().
TEST(LibCephFS, ReadlinkatATFDCWD) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "./elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_close(cmount, fd));
char link_path[PATH_MAX];
char rel_link_path[1024];
sprintf(rel_link_path, "./linkfile_%d", mypid);
sprintf(link_path, "%s/%s", dir_path, rel_link_path);
ASSERT_EQ(0, ceph_symlink(cmount, rel_file_path, link_path));
// cwd now inside dir_path so the "./" relative lookup succeeds
ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
size_t target_len = strlen(rel_file_path);
char target[target_len+1];
ASSERT_EQ(target_len, ceph_readlinkat(cmount, CEPHFS_AT_FDCWD, rel_link_path, target, target_len));
target[target_len] = '\0';
ASSERT_EQ(0, memcmp(target, rel_file_path, target_len));
ASSERT_EQ(0, ceph_unlink(cmount, link_path));
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_symlinkat(): create a symlink relative to an open directory fd
// and read it back through ceph_readlinkat() for verification.
TEST(LibCephFS, Symlinkat) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512];
sprintf(rel_file_path, "%s/elif", dir_name);
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_close(cmount, fd));
char link_path[128];
char rel_link_path[64];
sprintf(rel_link_path, "linkfile_%d", mypid);
sprintf(link_path, "/%s", rel_link_path);
// anchor fd at "/" for the *at calls
fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_symlinkat(cmount, rel_file_path, fd, rel_link_path));
size_t target_len = strlen(rel_file_path);
char target[target_len+1];
ASSERT_EQ(target_len, ceph_readlinkat(cmount, fd, rel_link_path, target, target_len));
target[target_len] = '\0';
ASSERT_EQ(0, memcmp(target, rel_file_path, target_len));
ASSERT_EQ(0, ceph_close(cmount, fd));
ASSERT_EQ(0, ceph_unlink(cmount, link_path));
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_symlinkat() with CEPHFS_AT_FDCWD: the link is created relative
// to the mount's current working directory.
TEST(LibCephFS, SymlinkatATFDCWD) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "./elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_close(cmount, fd));
char link_path[PATH_MAX];
char rel_link_path[1024];
sprintf(rel_link_path, "./linkfile_%d", mypid);
sprintf(link_path, "%s/%s", dir_path, rel_link_path);
// change cwd, then create and read the link via AT_FDCWD
ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
ASSERT_EQ(0, ceph_symlinkat(cmount, rel_file_path, CEPHFS_AT_FDCWD, rel_link_path));
size_t target_len = strlen(rel_file_path);
char target[target_len+1];
ASSERT_EQ(target_len, ceph_readlinkat(cmount, CEPHFS_AT_FDCWD, rel_link_path, target, target_len));
target[target_len] = '\0';
ASSERT_EQ(0, memcmp(target, rel_file_path, target_len));
ASSERT_EQ(0, ceph_unlink(cmount, link_path));
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_unlinkat(): verify flag handling relative to a directory fd —
// AT_REMOVEDIR on a file is ENOTDIR, unlinking a directory without
// AT_REMOVEDIR is EISDIR, and the correct flag removes each.
//
// Fixes: the root-directory fd was used before being validated
// (ASSERT_LE appeared after the unlinkat calls) and was never closed
// before shutdown.
TEST(LibCephFS, Unlinkat) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_close(cmount, fd));
fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
ASSERT_LE(0, fd);
// AT_REMOVEDIR on a regular file must be rejected
ASSERT_EQ(-CEPHFS_ENOTDIR, ceph_unlinkat(cmount, fd, rel_file_path, AT_REMOVEDIR));
ASSERT_EQ(0, ceph_unlinkat(cmount, fd, rel_file_path, 0));
ASSERT_EQ(0, ceph_close(cmount, fd));
fd = ceph_open(cmount, "/", O_DIRECTORY | O_RDONLY, 0);
// validate the fd before using it
ASSERT_LE(0, fd);
// removing a directory without AT_REMOVEDIR must be rejected
ASSERT_EQ(-CEPHFS_EISDIR, ceph_unlinkat(cmount, fd, dir_name, 0));
ASSERT_EQ(0, ceph_unlinkat(cmount, fd, dir_name, AT_REMOVEDIR));
// close the root fd instead of leaking it into shutdown
ASSERT_EQ(0, ceph_close(cmount, fd));
ceph_shutdown(cmount);
}
// ceph_unlinkat() with CEPHFS_AT_FDCWD: same flag semantics as the fd
// variant, but resolved against the mount's current working directory.
TEST(LibCephFS, UnlinkatATFDCWD) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDONLY, 0666);
ASSERT_LE(0, fd);
ASSERT_EQ(0, ceph_close(cmount, fd));
ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
// AT_REMOVEDIR on a regular file must fail
ASSERT_EQ(-CEPHFS_ENOTDIR, ceph_unlinkat(cmount, CEPHFS_AT_FDCWD, rel_file_path, AT_REMOVEDIR));
ASSERT_EQ(0, ceph_unlinkat(cmount, CEPHFS_AT_FDCWD, rel_file_path, 0));
ASSERT_EQ(0, ceph_chdir(cmount, "/"))
;
// removing a directory without AT_REMOVEDIR must fail
ASSERT_EQ(-CEPHFS_EISDIR, ceph_unlinkat(cmount, CEPHFS_AT_FDCWD, dir_name, 0));
ASSERT_EQ(0, ceph_unlinkat(cmount, CEPHFS_AT_FDCWD, dir_name, AT_REMOVEDIR));
ceph_shutdown(cmount);
}
// ceph_chownat() relative to an open directory fd: chown a 0600 file
// to nobody (65534) and verify the original owner can no longer open
// it. client_permissions is toggled off so the client may chown/unlink
// a file it does not own.
//
// Fixes: the directory fd returned by ceph_open() was used without
// being validated.
TEST(LibCephFS, Chownat) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
ASSERT_LE(0, fd);
// set perms to readable and writeable only by owner
ASSERT_EQ(ceph_fchmod(cmount, fd, 0600), 0);
ceph_close(cmount, fd);
fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
ASSERT_LE(0, fd);
// change ownership to nobody -- we assume nobody exists and id is always 65534
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
ASSERT_EQ(ceph_chownat(cmount, fd, rel_file_path, 65534, 65534, 0), 0);
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
ceph_close(cmount, fd);
// "nobody" will be ignored on Windows
#ifndef _WIN32
fd = ceph_open(cmount, file_path, O_RDWR, 0);
ASSERT_EQ(fd, -CEPHFS_EACCES);
#endif
// bypass permission checks so the unowned file can be removed
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_chownat() with CEPHFS_AT_FDCWD: chown a 0600 file to nobody
// (65534) relative to the current working directory and verify the
// original owner can no longer open it.
TEST(LibCephFS, ChownatATFDCWD) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
ASSERT_LE(0, fd);
// set perms to readable and writeable only by owner
ASSERT_EQ(ceph_fchmod(cmount, fd, 0600), 0);
ceph_close(cmount, fd);
ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
// change ownership to nobody -- we assume nobody exists and id is always 65534
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
ASSERT_EQ(ceph_chownat(cmount, CEPHFS_AT_FDCWD, rel_file_path, 65534, 65534, 0), 0);
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
// "nobody" will be ignored on Windows
#ifndef _WIN32
fd = ceph_open(cmount, file_path, O_RDWR, 0);
ASSERT_EQ(fd, -CEPHFS_EACCES);
#endif
// bypass permission checks so the unowned file can be removed
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_chmodat() relative to an open directory fd: drop write
// permission (0400), verify open(O_RDWR) fails with EACCES, restore
// 0600 and verify open succeeds again.
//
// Fixes: the directory fd returned by ceph_open() was used without
// being validated.
TEST(LibCephFS, Chmodat) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
ASSERT_LE(0, fd);
const char *bytes = "foobarbaz";
ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
ASSERT_EQ(0, ceph_close(cmount, fd));
fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
ASSERT_LE(0, fd);
// set perms to read but can't write
ASSERT_EQ(ceph_chmodat(cmount, fd, rel_file_path, 0400, 0), 0);
ASSERT_EQ(ceph_open(cmount, file_path, O_RDWR, 0), -CEPHFS_EACCES);
// reset back to writeable
ASSERT_EQ(ceph_chmodat(cmount, fd, rel_file_path, 0600, 0), 0);
int fd2 = ceph_open(cmount, file_path, O_RDWR, 0);
ASSERT_LE(0, fd2);
ASSERT_EQ(0, ceph_close(cmount, fd2));
ASSERT_EQ(0, ceph_close(cmount, fd));
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_chmodat() with CEPHFS_AT_FDCWD: drop write permission (0400)
// relative to the current working directory, verify open(O_RDWR)
// fails with EACCES, then restore 0600 and verify open succeeds.
TEST(LibCephFS, ChmodatATFDCWD) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, "/"), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
ASSERT_LE(0, fd);
const char *bytes = "foobarbaz";
ASSERT_EQ(ceph_write(cmount, fd, bytes, strlen(bytes), 0), (int)strlen(bytes));
ASSERT_EQ(0, ceph_close(cmount, fd));
// set perms to read but can't write
ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
ASSERT_EQ(ceph_chmodat(cmount, CEPHFS_AT_FDCWD, rel_file_path, 0400, 0), 0);
ASSERT_EQ(ceph_open(cmount, file_path, O_RDWR, 0), -CEPHFS_EACCES);
// reset back to writeable
ASSERT_EQ(ceph_chmodat(cmount, CEPHFS_AT_FDCWD, rel_file_path, 0600, 0), 0);
int fd2 = ceph_open(cmount, file_path, O_RDWR, 0);
ASSERT_LE(0, fd2);
ASSERT_EQ(0, ceph_close(cmount, fd2));
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_utimensat() relative to an open directory fd: set atime/mtime
// on a file and verify them through ceph_statx().
//
// Fixes: the file fd was leaked — the variable was reused for the
// directory open without closing the file handle first.
TEST(LibCephFS, Utimensat) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
ASSERT_LE(0, fd);
// close the file handle before reusing the variable below
ASSERT_EQ(0, ceph_close(cmount, fd));
struct timespec times[2];
get_current_time_timespec(times);
fd = ceph_open(cmount, dir_path, O_DIRECTORY | O_RDONLY, 0);
ASSERT_LE(0, fd);
// times[0] is atime, times[1] is mtime
EXPECT_EQ(0, ceph_utimensat(cmount, fd, rel_file_path, times, 0));
ceph_close(cmount, fd);
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, file_path, &stx,
                     CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_utimensat() with CEPHFS_AT_FDCWD: set atime/mtime on a file
// relative to the current working directory and verify via statx.
//
// Fixes: the file fd was opened but never closed.
TEST(LibCephFS, UtimensatATFDCWD) {
pid_t mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char dir_name[128];
char dir_path[256];
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
char file_path[512];
char rel_file_path[512] = "elif";
sprintf(file_path, "%s/elif", dir_path);
int fd = ceph_open(cmount, file_path, O_CREAT|O_RDWR, 0666);
ASSERT_LE(0, fd);
// release the handle; only the path is needed from here on
ASSERT_EQ(0, ceph_close(cmount, fd));
struct timespec times[2];
get_current_time_timespec(times);
ASSERT_EQ(0, ceph_chdir(cmount, dir_path));
// times[0] is atime, times[1] is mtime
EXPECT_EQ(0, ceph_utimensat(cmount, CEPHFS_AT_FDCWD, rel_file_path, times, 0));
struct ceph_statx stx;
ASSERT_EQ(ceph_statx(cmount, file_path, &stx,
                     CEPH_STATX_MTIME|CEPH_STATX_ATIME, 0), 0);
ASSERT_EQ(utime_t(stx.stx_atime), utime_t(times[0]));
ASSERT_EQ(utime_t(stx.stx_mtime), utime_t(times[1]));
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ceph_shutdown(cmount);
}
// ceph_ll_lookup_inode() over the MDS-private inode range: private
// inodes must be unreachable (ESTALE); only the root, the global
// snaprealm and (optionally) lost+found are resolvable.
TEST(LibCephFS, LookupMdsPrivateInos) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
Inode *inode;
for (int ino = 0; ino < MDS_INO_SYSTEM_BASE; ino++) {
if (MDS_IS_PRIVATE_INO(ino)) {
// MDS-internal inodes must never resolve for clients
ASSERT_EQ(-CEPHFS_ESTALE, ceph_ll_lookup_inode(cmount, ino, &inode));
} else if (ino == CEPH_INO_ROOT || ino == CEPH_INO_GLOBAL_SNAPREALM) {
ASSERT_EQ(0, ceph_ll_lookup_inode(cmount, ino, &inode));
ceph_ll_put(cmount, inode);
} else if (ino == CEPH_INO_LOST_AND_FOUND) {
// the ino 3 will only exists after the recovery tool ran, so
// it may return -CEPHFS_ESTALE with a fresh fs cluster
int r = ceph_ll_lookup_inode(cmount, ino, &inode);
if (r == 0) {
ceph_ll_put(cmount, inode);
} else {
ASSERT_TRUE(r == -CEPHFS_ESTALE);
}
} else {
// currently the ino 0 and 4~99 is not used yet.
ASSERT_EQ(-CEPHFS_ESTALE, ceph_ll_lookup_inode(cmount, ino, &inode));
}
}
ceph_shutdown(cmount);
}
// Changing the mount timeout once the filesystem is already mounted
// must be rejected with EINVAL.
TEST(LibCephFS, SetMountTimeoutPostMount) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(0, ceph_create(&mnt, nullptr));
  ASSERT_EQ(0, ceph_conf_read_file(mnt, nullptr));
  ASSERT_EQ(0, ceph_conf_parse_env(mnt, nullptr));
  ASSERT_EQ(0, ceph_mount(mnt, nullptr));
  // too late: the client is mounted, the timeout is now immutable
  ASSERT_EQ(-CEPHFS_EINVAL, ceph_set_mount_timeout(mnt, 5));
  ceph_shutdown(mnt);
}
// Setting the mount timeout before mounting must succeed and must not
// prevent a subsequent mount from completing.
TEST(LibCephFS, SetMountTimeout) {
  struct ceph_mount_info *mnt = nullptr;
  ASSERT_EQ(0, ceph_create(&mnt, nullptr));
  ASSERT_EQ(0, ceph_conf_read_file(mnt, nullptr));
  ASSERT_EQ(0, ceph_conf_parse_env(mnt, nullptr));
  // configure the timeout while still unmounted — this is the valid window
  ASSERT_EQ(0, ceph_set_mount_timeout(mnt, 5));
  ASSERT_EQ(0, ceph_mount(mnt, nullptr));
  ceph_shutdown(mnt);
}
// fscrypt vxattrs: ceph.fscrypt.auth / ceph.fscrypt.file must be
// settable via fsetxattr and must survive an unmount/remount cycle.
//
// Fixes: the test file was never removed, leaving
// test_fscrypt_<pid> behind after the test run.
TEST(LibCephFS, FsCrypt) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char test_xattr_file[NAME_MAX];
sprintf(test_xattr_file, "test_fscrypt_%d", getpid());
int fd = ceph_open(cmount, test_xattr_file, O_RDWR|O_CREAT, 0666);
ASSERT_GT(fd, 0);
ASSERT_EQ(0, ceph_fsetxattr(cmount, fd, "ceph.fscrypt.auth", "foo", 3, CEPH_XATTR_CREATE));
ASSERT_EQ(0, ceph_fsetxattr(cmount, fd, "ceph.fscrypt.file", "foo", 3, CEPH_XATTR_CREATE));
char buf[64];
ASSERT_EQ(3, ceph_fgetxattr(cmount, fd, "ceph.fscrypt.auth", buf, sizeof(buf)));
ASSERT_EQ(3, ceph_fgetxattr(cmount, fd, "ceph.fscrypt.file", buf, sizeof(buf)));
ASSERT_EQ(0, ceph_close(cmount, fd));
// remount and make sure the xattrs were persisted
ASSERT_EQ(0, ceph_unmount(cmount));
ASSERT_EQ(0, ceph_mount(cmount, NULL));
fd = ceph_open(cmount, test_xattr_file, O_RDWR, 0666);
ASSERT_GT(fd, 0);
ASSERT_EQ(3, ceph_fgetxattr(cmount, fd, "ceph.fscrypt.auth", buf, sizeof(buf)));
ASSERT_EQ(3, ceph_fgetxattr(cmount, fd, "ceph.fscrypt.file", buf, sizeof(buf)));
ASSERT_EQ(0, ceph_close(cmount, fd));
// clean up the test file before tearing down
ASSERT_EQ(0, ceph_unlink(cmount, test_xattr_file));
ASSERT_EQ(0, ceph_unmount(cmount));
ceph_shutdown(cmount);
}
// .snap directory attributes: mode/uid/gid/atime mirror the parent
// directory, while mtime/ctime/version come from the closest snaprealm
// ancestor (the root here). Also verified after a chown of the parent.
//
// Fixes: the statx want-masks listed CEPH_STATX_MODE twice and omitted
// CEPH_STATX_UID and CEPH_STATX_CTIME even though stx_uid and stx_ctime
// are asserted below.
TEST(LibCephFS, SnapdirAttrs) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char dir_name[128];
char dir_path[256];
char snap_dir_path[512];
pid_t mypid = getpid();
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
sprintf(snap_dir_path, "%s/.snap", dir_path);
Inode *dir, *root;
struct ceph_statx stx_dir;
struct ceph_statx stx_snap_dir;
struct ceph_statx stx_root_snap_dir;
UserPerm *perms = ceph_mount_perms(cmount);
ASSERT_EQ(ceph_ll_lookup_root(cmount, &root), 0);
ASSERT_EQ(ceph_ll_mkdir(cmount, root, dir_name, 0755, &dir, &stx_dir, 0, 0, perms), 0);
ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir,
CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_ATIME|CEPH_STATX_MODE|CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir,
CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_ATIME|CEPH_STATX_MODE|CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, "/.snap", &stx_root_snap_dir,
CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_ATIME|CEPH_STATX_MODE|CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_VERSION, 0), 0);
// these should match the parent directories attrs
ASSERT_EQ(stx_dir.stx_mode, stx_snap_dir.stx_mode);
ASSERT_EQ(stx_dir.stx_uid, stx_snap_dir.stx_uid);
ASSERT_EQ(stx_dir.stx_gid, stx_snap_dir.stx_gid);
ASSERT_EQ(utime_t(stx_dir.stx_atime), utime_t(stx_snap_dir.stx_atime));
// these should match the closest snaprealm ancestor (root in this
// case) attrs
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);
// chown -- for this we need to be "root"
UserPerm *rootcred = ceph_userperm_new(0, 0, 0, NULL);
ASSERT_TRUE(rootcred);
stx_dir.stx_uid++;
stx_dir.stx_gid++;
ASSERT_EQ(ceph_ll_setattr(cmount, dir, &stx_dir, CEPH_SETATTR_UID|CEPH_SETATTR_GID, rootcred), 0);
memset(&stx_dir, 0, sizeof(stx_dir));
memset(&stx_snap_dir, 0, sizeof(stx_snap_dir));
// re-stat and verify the snapdir tracked the ownership change
ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir,
CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_ATIME|CEPH_STATX_MODE|CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir,
CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_ATIME|CEPH_STATX_MODE|CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(stx_dir.stx_mode, stx_snap_dir.stx_mode);
ASSERT_EQ(stx_dir.stx_uid, stx_snap_dir.stx_uid);
ASSERT_EQ(stx_dir.stx_gid, stx_snap_dir.stx_gid);
ASSERT_EQ(utime_t(stx_dir.stx_atime), utime_t(stx_snap_dir.stx_atime));
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);
ASSERT_EQ(ceph_ll_rmdir(cmount, root, dir_name, rootcred), 0);
ASSERT_EQ(0, ceph_unmount(cmount));
ceph_shutdown(cmount);
}
// Creating a snapshot under a directory's .snap must bump that
// snapdir's mtime/ctime/version past the (previously matching) root
// snaprealm values.
TEST(LibCephFS, SnapdirAttrsOnSnapCreate) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char dir_name[128];
char dir_path[256];
char snap_dir_path[512];
pid_t mypid = getpid();
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
sprintf(snap_dir_path, "%s/.snap", dir_path);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
struct ceph_statx stx_dir;
struct ceph_statx stx_snap_dir;
struct ceph_statx stx_root_snap_dir;
ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, "/.snap", &stx_root_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
// before any snapshot, the snapdir mirrors the root snaprealm
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime))
;
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);
// creating a snapshot == mkdir inside .snap
char snap_path[1024];
sprintf(snap_path, "%s/snap_a", snap_dir_path);
ASSERT_EQ(ceph_mkdir(cmount, snap_path, 0777), 0);
struct ceph_statx stx_snap_dir_1;
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_1, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
// the snapdir attributes must now have advanced past the root's
ASSERT_LT(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir_1.stx_mtime));
ASSERT_LT(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir_1.stx_ctime));
ASSERT_LT(stx_root_snap_dir.stx_version, stx_snap_dir_1.stx_version);
ASSERT_EQ(0, ceph_rmdir(cmount, snap_path));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ASSERT_EQ(0, ceph_unmount(cmount));
ceph_shutdown(cmount);
}
// Deleting a snapshot must advance the snapdir's mtime/ctime/version
// again, beyond the values observed right after snapshot creation.
//
// Fixes: the initial ctime assertion compared the root snapdir ctime
// against the directory snapdir *mtime* (copy/paste error; cf. the
// matching assertions in SnapdirAttrsOnSnapCreate/Rename).
TEST(LibCephFS, SnapdirAttrsOnSnapDelete) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char dir_name[128];
char dir_path[256];
char snap_dir_path[512];
pid_t mypid = getpid();
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
sprintf(snap_dir_path, "%s/.snap", dir_path);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
struct ceph_statx stx_dir;
struct ceph_statx stx_snap_dir;
struct ceph_statx stx_root_snap_dir;
ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, "/.snap", &stx_root_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
// before any snapshot, the snapdir mirrors the root snaprealm
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);
char snap_path[1024];
sprintf(snap_path, "%s/snap_a", snap_dir_path);
ASSERT_EQ(ceph_mkdir(cmount, snap_path, 0777), 0);
struct ceph_statx stx_snap_dir_1;
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_1, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
// snapshot creation bumps the snapdir attrs past the root's
ASSERT_LT(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir_1.stx_mtime));
ASSERT_LT(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir_1.stx_ctime));
ASSERT_LT(stx_root_snap_dir.stx_version, stx_snap_dir_1.stx_version);
ASSERT_EQ(0, ceph_rmdir(cmount, snap_path));
struct ceph_statx stx_snap_dir_2;
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_2, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
// Flaky assertion on Windows, potentially due to timestamp precision.
#ifndef _WIN32
ASSERT_LT(utime_t(stx_snap_dir_1.stx_mtime), utime_t(stx_snap_dir_2.stx_mtime));
ASSERT_LT(utime_t(stx_snap_dir_1.stx_ctime), utime_t(stx_snap_dir_2.stx_ctime));
#endif
ASSERT_LT(stx_snap_dir_1.stx_version, stx_snap_dir_2.stx_version);
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ASSERT_EQ(0, ceph_unmount(cmount));
ceph_shutdown(cmount);
}
// Renaming a snapshot must advance the snapdir's mtime/ctime/version
// beyond the values observed right after snapshot creation.
TEST(LibCephFS, SnapdirAttrsOnSnapRename) {
struct ceph_mount_info *cmount;
ASSERT_EQ(ceph_create(&cmount, NULL), 0);
ASSERT_EQ(ceph_conf_read_file(cmount, NULL), 0);
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(ceph_mount(cmount, NULL), 0);
char dir_name[128];
char dir_path[256];
char snap_dir_path[512];
pid_t mypid = getpid();
sprintf(dir_name, "dir_%d", mypid);
sprintf(dir_path, "/%s", dir_name);
sprintf(snap_dir_path, "%s/.snap", dir_path);
ASSERT_EQ(ceph_mkdir(cmount, dir_path, 0777), 0);
struct ceph_statx stx_dir;
struct ceph_statx stx_snap_dir;
struct ceph_statx stx_root_snap_dir;
ASSERT_EQ(ceph_statx(cmount, dir_path, &stx_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
ASSERT_EQ(ceph_statx(cmount, "/.snap", &stx_root_snap_dir, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
// before any snapshot, the snapdir mirrors the root snaprealm
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir.stx_mtime));
ASSERT_EQ(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir.stx_ctime));
ASSERT_EQ(stx_root_snap_dir.stx_version, stx_snap_dir.stx_version);
char snap_path[1024];
sprintf(snap_path, "%s/snap_a", snap_dir_path);
ASSERT_EQ(ceph_mkdir(cmount, snap_path, 0777), 0);
struct ceph_statx stx_snap_dir_1;
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_1, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
// snapshot creation bumps the snapdir attrs past the root's
ASSERT_LT(utime_t(stx_root_snap_dir.stx_mtime), utime_t(stx_snap_dir_1.stx_mtime));
ASSERT_LT(utime_t(stx_root_snap_dir.stx_ctime), utime_t(stx_snap_dir_1.stx_ctime));
ASSERT_LT(stx_root_snap_dir.stx_version, stx_snap_dir_1.stx_version);
// rename the snapshot and confirm the attrs advance again
char snap_path_r[1024];
sprintf(snap_path_r, "%s/snap_b", snap_dir_path);
ASSERT_EQ(ceph_rename(cmount, snap_path, snap_path_r), 0);
struct ceph_statx stx_snap_dir_2;
ASSERT_EQ(ceph_statx(cmount, snap_dir_path, &stx_snap_dir_2, CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION, 0), 0);
// Flaky assertion on Windows, potentially due to timestamp precision.
#ifndef _WIN32
ASSERT_LT(utime_t(stx_snap_dir_1.stx_mtime), utime_t(stx_snap_dir_2.stx_mtime));
ASSERT_LT(utime_t(stx_snap_dir_1.stx_ctime), utime_t(stx_snap_dir_2.stx_ctime));
#endif
ASSERT_LT(stx_snap_dir_1.stx_version, stx_snap_dir_2.stx_version);
ASSERT_EQ(0, ceph_rmdir(cmount, snap_path_r));
ASSERT_EQ(0, ceph_rmdir(cmount, dir_path));
ASSERT_EQ(0, ceph_unmount(cmount));
ceph_shutdown(cmount);
}
| 127,923 | 32.824432 | 131 |
cc
|
null |
ceph-main/src/test/libcephfs/vxattr.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/compat.h"
#include "gtest/gtest.h"
#include "include/cephfs/libcephfs.h"
#include "include/fs_types.h"
#include "mds/mdstypes.h"
#include "include/stat.h"
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <string.h>
#ifndef _WIN32
#include <sys/resource.h>
#endif
#include "common/Clock.h"
#include "common/ceph_json.h"
#ifdef __linux__
#include <limits.h>
#include <sys/xattr.h>
#endif
#include <fmt/format.h>
#include <map>
#include <vector>
#include <thread>
#include <regex>
#include <string>
using namespace std;
// ceph.dir.layout.json on "/" must report the default layout, i.e.
// inheritance "@default".
TEST(LibCephFS, LayoutVerifyDefaultLayout) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));
{
char value[1024] = "";
int r = 0;
// check for default layout
r = ceph_getxattr(cmount, "/", "ceph.dir.layout.json", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
// returned JSON must fit (with room for the NUL) in our buffer
ASSERT_LT(r, sizeof value);
std::clog << "layout:" << value << std::endl;
ASSERT_STRNE((char*)NULL, strstr(value, "\"inheritance\": \"@default\""));
}
// clean up the directory tree
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
// Set a layout via ceph.dir.layout.json on test/d0 (reusing the pool
// reported by the root's layout), then verify the values round-trip
// with inheritance "@set" and that a subdirectory reports
// "@inherited".
TEST(LibCephFS, LayoutSetAndVerifyNewAndInheritedLayout) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));
std::string pool_name_set;
{
char value[1024] = "";
int r = 0;
// read the root layout to learn a valid data pool name
r = ceph_getxattr(cmount, "/", "ceph.dir.layout.json", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
JSONParser json_parser;
ASSERT_EQ(json_parser.parse(value, r), 1);
ASSERT_EQ(json_parser.is_object(), 1);
std::string pool_name;
JSONDecoder::decode_json("pool_name", pool_name, &json_parser, true);
pool_name_set = pool_name;
// set a new layout
std::string new_layout;
new_layout += "{";
new_layout += "\"stripe_unit\": 65536, ";
new_layout += "\"stripe_count\": 1, ";
new_layout += "\"object_size\": 65536, ";
new_layout += "\"pool_name\": \"" + pool_name + "\"";
new_layout += "}";
ASSERT_EQ(0, ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.json", (void*)new_layout.c_str(), new_layout.length(), XATTR_CREATE));
}
{
char value[1024] = "";
int r = 0;
// read back the layout that was just set
r = ceph_getxattr(cmount, "test/d0", "ceph.dir.layout.json", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
std::clog << "layout:" << value << std::endl;
JSONParser json_parser;
ASSERT_EQ(json_parser.parse(value, r), 1);
ASSERT_EQ(json_parser.is_object(), 1);
int64_t object_size;
int64_t stripe_unit;
int64_t stripe_count;
std::string pool_name;
std::string inheritance;
JSONDecoder::decode_json("pool_name", pool_name, &json_parser, true);
JSONDecoder::decode_json("object_size", object_size, &json_parser, true);
JSONDecoder::decode_json("stripe_unit", stripe_unit, &json_parser, true);
JSONDecoder::decode_json("stripe_count", stripe_count, &json_parser, true);
JSONDecoder::decode_json("inheritance", inheritance, &json_parser, true);
// now verify the layout
ASSERT_EQ(pool_name.compare(pool_name_set), 0);
ASSERT_EQ(object_size, 65536);
ASSERT_EQ(stripe_unit, 65536);
ASSERT_EQ(stripe_count, 1);
ASSERT_EQ(inheritance.compare("@set"), 0);
}
{
char value[1024] = "";
int r = 0;
JSONParser json_parser;
std::string inheritance;
// now check that the subdir layout is inherited
r = ceph_getxattr(cmount, "test/d0/subdir", "ceph.dir.layout.json", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
std::clog << "layout:" << value << std::endl;
ASSERT_EQ(json_parser.parse(value, r), 1);
ASSERT_EQ(json_parser.is_object(), 1);
JSONDecoder::decode_json("inheritance", inheritance, &json_parser, true);
ASSERT_EQ(inheritance.compare("@inherited"), 0);
}
// clean up the directory tree
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
// The "ceph.dir.layout.json" vxattr must reject malformed JSON with EINVAL.
TEST(LibCephFS, LayoutSetBadJSON) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));
{
// a layout body missing its opening brace: not parseable as JSON
const std::string bad_layout =
"\"stripe_unit\": 65536, "
"\"stripe_count\": 1, "
"\"object_size\": 65536, "
"\"pool_name\": \"cephfs.a.data\", "
"}";
// setting the malformed document must fail with EINVAL
ASSERT_EQ(-CEPHFS_EINVAL, ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.json", (void*)bad_layout.c_str(), bad_layout.size(), XATTR_CREATE));
}
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
// Writing a pool name that does not exist through the layout vxattr must
// be rejected with EINVAL.
TEST(LibCephFS, LayoutSetBadPoolName) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));
// try setting a pool name that no pool on the cluster carries
static const char bad_pool[] = "UglyPoolName";
ASSERT_EQ(-CEPHFS_EINVAL, ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.pool_name", (void*)bad_pool, sizeof(bad_pool) - 1, XATTR_CREATE));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
// Writing a pool id that does not exist through the layout vxattr must be
// rejected with EINVAL.
TEST(LibCephFS, LayoutSetBadPoolId) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));
// pool id 300 is assumed not to exist on the test cluster
static const char bad_id[] = "300";
ASSERT_EQ(-CEPHFS_EINVAL, ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.pool_id", (void*)bad_id, sizeof(bad_id) - 1, XATTR_CREATE));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
// A layout vxattr component that does not name a real layout field must be
// rejected with ENODATA.
TEST(LibCephFS, LayoutSetInvalidFieldName) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d0/subdir", 0777));
// "bad_field" is not a layout field; the write must fail with ENODATA
static const char payload[] = "300";
ASSERT_EQ(-CEPHFS_ENODATA, ceph_setxattr(cmount, "test/d0", "ceph.dir.layout.bad_field", (void*)payload, sizeof(payload) - 1, XATTR_CREATE));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0/subdir"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d0"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
// Verify the "ceph.dir.pin" vxattr: a new directory reads back "-1"
// (the unset value), and a value written with setxattr round-trips.
TEST(LibCephFS, GetAndSetDirPin) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d1", 0777));
{
// default: pin is "-1" on a fresh directory
char value[1024] = "";
int r = ceph_getxattr(cmount, "test/d1", "ceph.dir.pin", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
ASSERT_STREQ("-1", value);
}
{
// write "1" (presumably an MDS rank -- the test only checks the string
// round-trips) and read it back
char value[1024] = "";
int r = -1;
ASSERT_EQ(0, ceph_setxattr(cmount, "test/d1", "ceph.dir.pin", (void*)"1", 1, XATTR_CREATE));
r = ceph_getxattr(cmount, "test/d1", "ceph.dir.pin", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
ASSERT_STREQ("1", value);
}
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d1"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
// Verify the "ceph.dir.pin.distributed" vxattr: defaults to "0" and a
// value written with setxattr round-trips unchanged.
TEST(LibCephFS, GetAndSetDirDistribution) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d2", 0777));
{
// default: distributed pinning is off ("0")
char value[1024] = "";
int r = ceph_getxattr(cmount, "test/d2", "ceph.dir.pin.distributed", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
ASSERT_STREQ("0", value);
}
{
// enable it and confirm the stored value reads back as "1"
char value[1024] = "";
int r = -1;
ASSERT_EQ(0, ceph_setxattr(cmount, "test/d2", "ceph.dir.pin.distributed", (void*)"1", 1, XATTR_CREATE));
r = ceph_getxattr(cmount, "test/d2", "ceph.dir.pin.distributed", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
ASSERT_STREQ("1", value);
}
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d2"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
// Verify the "ceph.dir.pin.random" vxattr: defaults to "0" and a
// probability value written with setxattr round-trips unchanged.
TEST(LibCephFS, GetAndSetDirRandom) {
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
ASSERT_EQ(0, ceph_mkdirs(cmount, "test/d3", 0777));
{
// default: random pinning probability reads back as "0"
char value[1024] = "";
int r = ceph_getxattr(cmount, "test/d3", "ceph.dir.pin.random", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
ASSERT_STREQ("0", value);
}
{
// format a small probability the same way the original did (ostream
// default formatting of a double)
double val = (double)1.0/(double)128.0;
std::stringstream ss;
ss << val;
// materialize the stream contents once: the original rebuilt the
// temporary string three times and recomputed its length with
// strlen(ss.str().c_str())
const std::string expected = ss.str();
ASSERT_EQ(0, ceph_setxattr(cmount, "test/d3", "ceph.dir.pin.random", (void*)expected.c_str(), expected.size(), XATTR_CREATE));
char value[1024] = "";
int r = -1;
r = ceph_getxattr(cmount, "test/d3", "ceph.dir.pin.random", (void*)value, sizeof(value));
ASSERT_GT(r, 0);
ASSERT_LT(r, sizeof value);
ASSERT_STREQ(expected.c_str(), value);
}
ASSERT_EQ(0, ceph_rmdir(cmount, "test/d3"));
ASSERT_EQ(0, ceph_rmdir(cmount, "test"));
ceph_shutdown(cmount);
}
| 11,147 | 27.880829 | 141 |
cc
|
null |
ceph-main/src/test/libcephsqlite/main.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 2.1, as published by
* the Free Software Foundation. See file COPYING.
*
*/
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <string_view>
#include <stdlib.h>
#include <string.h>
#include <sqlite3.h>
#include <fmt/format.h>
#include "gtest/gtest.h"
#include "include/uuid.h"
#include "include/rados/librados.hpp"
#include "include/libcephsqlite.h"
#include "SimpleRADOSStriper.h"
#include "common/ceph_argparse.h"
#include "common/ceph_crypto.h"
#include "common/ceph_time.h"
#include "common/common_init.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_client
#undef dout_prefix
#define dout_prefix *_dout << "unittest_libcephsqlite: "
/* Run sqlite call S, storing its result in the local `rc`.  If the result
 * is not the expected `code`, log file/line plus the sqlite error strings,
 * finalize the in-flight statement, and jump to the caller's `out:` cleanup
 * label.  Every expansion site must have locals named `rc`, `stmt` and `db`
 * plus an `out:` label in scope. */
#define sqlcatchcode(S, code) \
do {\
rc = S;\
if (rc != code) {\
std::cout << "[" << __FILE__ << ":" << __LINE__ << "]"\
<< " sqlite3 error: " << rc << " `" << sqlite3_errstr(rc)\
<< "': " << sqlite3_errmsg(db) << std::endl;\
sqlite3_finalize(stmt);\
stmt = NULL;\
goto out;\
}\
} while (0)
/* Shorthand for the common case where S must return SQLITE_OK. */
#define sqlcatch(S) sqlcatchcode(S, SQLITE_OK)
/* Global ceph context used by the test fixture when connecting to RADOS. */
static boost::intrusive_ptr<CephContext> cct;
// Fixture for the libcephsqlite "ceph" VFS tests.  Each test opens a
// uniquely named database object in the "cephsqlite" pool and gets a TEMP
// `perf` table holding ceph_perf() JSON snapshots plus a `p` view that
// explodes those snapshots with json_tree(), so individual perf counters
// can be compared across snapshots by row id.
class CephSQLiteTest : public ::testing::Test {
public:
inline static const std::string pool = "cephsqlite";
// Create the shared RADOS pool once for the whole suite (tolerating
// a pre-existing pool).
static void SetUpTestSuite() {
librados::Rados cluster;
ASSERT_LE(0, cluster.init_with_context(cct.get()));
ASSERT_LE(0, cluster.connect());
if (int rc = cluster.pool_create(pool.c_str()); rc < 0 && rc != -EEXIST) {
ASSERT_EQ(0, rc);
}
cluster.shutdown();
sleep(5); // give the freshly created pool time to become usable
}
// Per-test setup: fresh uuid (so each test uses a distinct database
// object), fresh cluster handle, then open the database.
void SetUp() override {
uuid.generate_random();
ASSERT_LE(0, cluster.init_with_context(cct.get()));
ASSERT_LE(0, cluster.connect());
ASSERT_LE(0, cluster.wait_for_latest_osdmap());
ASSERT_EQ(0, db_open());
}
void TearDown() override {
ASSERT_EQ(SQLITE_OK, sqlite3_close(db));
db = nullptr;
cluster.shutdown();
/* Leave database behind for inspection. */
}
protected:
// Open the per-test database through the "ceph" VFS and install the
// perf-snapshot scaffolding described on the class. Returns 0 on success.
int db_open()
{
static const char SQL[] =
"PRAGMA journal_mode = PERSIST;"
"PRAGMA page_size = 65536;"
"PRAGMA cache_size = 32768;"
"PRAGMA temp_store = memory;"
"CREATE TEMPORARY TABLE perf (i INTEGER PRIMARY KEY, v TEXT);"
"CREATE TEMPORARY VIEW p AS"
" SELECT perf.i, J.*"
" FROM perf, json_tree(perf.v) AS J;"
"INSERT INTO perf (v)"
" VALUES (ceph_perf());"
;
sqlite3_stmt *stmt = NULL;
const char *current = SQL;
int rc;
auto&& name = get_uri();
sqlcatch(sqlite3_open_v2(name.c_str(), &db, SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE|SQLITE_OPEN_URI, "ceph"));
std::cout << "using database: " << name << std::endl;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_exec(db, current, NULL, NULL, NULL));
rc = 0;
out:
sqlite3_finalize(stmt);
return rc;
}
// "file:<pool>:/<name>?vfs=ceph" URI for this test's database object.
virtual std::string get_uri() const {
auto uri = fmt::format("file:{}:/{}?vfs=ceph", pool, get_name());
return uri;
}
// Database object name, unique per test via the random uuid.
virtual std::string get_name() const {
auto name = fmt::format("{}.db", uuid.to_string());
return name;
}
sqlite3* db = nullptr; // handle opened in SetUp, closed in TearDown
uuid_d uuid; // per-test unique database identity
librados::Rados cluster; // per-test cluster connection
};
// Smoke test: a bare CREATE TABLE through the ceph VFS succeeds.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, Create) {
static const char SQL[] =
"CREATE TABLE foo (a INT);"
;
sqlite3_stmt *stmt = NULL;
const char *current = SQL;
int rc;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Bulk-insert one million rows with a 4k page size and confirm the
// page_size PRAGMA reports 4096 afterwards.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, InsertBulk4096) {
static const char SQL[] =
"PRAGMA page_size = 4096;"
"CREATE TABLE foo (a INT);"
"WITH RECURSIVE c(x) AS"
" ("
" VALUES(1)"
" UNION ALL"
" SELECT x+1"
" FROM c"
" )"
"INSERT INTO foo (a)"
" SELECT RANDOM()"
" FROM c"
" LIMIT 1000000;"
"PRAGMA page_size;"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
std::cout << SQL << std::endl;
// PRAGMA page_size = 4096 (no result row)
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// CREATE TABLE
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// recursive-CTE bulk INSERT
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// PRAGMA page_size query: must report the value configured above
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_EQ(sqlite3_column_int64(stmt, 0), 4096);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Bulk-insert one million random rows via a recursive CTE, executed in one
// sqlite3_exec() call at the default page size.
TEST_F(CephSQLiteTest, InsertBulk) {
static const char SQL[] =
"CREATE TABLE foo (a INT);"
"WITH RECURSIVE c(x) AS"
" ("
" VALUES(1)"
" UNION ALL"
" SELECT x+1"
" FROM c"
" )"
"INSERT INTO foo (a)"
" SELECT RANDOM()"
" FROM c"
" LIMIT 1000000;"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
std::cout << SQL << std::endl;
// sqlite3_exec runs every statement in SQL; stmt only exists for the
// sqlcatch error path and the unconditional finalize below
sqlcatch(sqlite3_exec(db, current, NULL, NULL, NULL));
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Insert 1..1000000, double every row in one UPDATE, and verify the table
// sum exactly doubles.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, UpdateBulk) {
static const char SQL[] =
"CREATE TABLE foo (a INT);"
"WITH RECURSIVE c(x) AS"
" ("
" VALUES(1)"
" UNION ALL"
" SELECT x+1"
" FROM c"
" )"
"INSERT INTO foo (a)"
" SELECT x"
" FROM c"
" LIMIT 1000000;"
"SELECT SUM(a) FROM foo;"
"UPDATE foo"
" SET a = a+a;"
"SELECT SUM(a) FROM foo;"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
uint64_t sum, sum2;
std::cout << SQL << std::endl;
// CREATE TABLE
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// bulk INSERT of 1..1000000
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// sum before the update
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
sum = sqlite3_column_int64(stmt, 0);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// UPDATE doubles every row
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// sum after the update must be exactly twice the original
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
sum2 = sqlite3_column_int64(stmt, 0);
ASSERT_EQ(sum*2, sum2);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Measure autocommit INSERT throughput: step the same prepared INSERT 100
// times and print transactions/second.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, InsertRate) {
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
static const char SQL[] =
"CREATE TABLE foo (a INT);"
"INSERT INTO foo (a) VALUES (RANDOM());"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
time t1, t2;
int count = 100;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
t1 = clock::now();
// NOTE(review): re-stepping a completed statement relies on sqlite's
// automatic reset of a DONE statement on the next sqlite3_step call.
for (int i = 0; i < count; ++i) {
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
}
t2 = clock::now();
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
{
auto diff = std::chrono::duration<double>(t2-t1);
std::cout << "transactions per second: " << count/diff.count() << std::endl;
}
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Fill a table, delete ~75% of it, VACUUM, and confirm via the striper
// that the backing object size dropped to less than half.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, DatabaseShrink) {
static const char SQL[] =
"CREATE TABLE foo (a INT);"
"WITH RECURSIVE c(x) AS"
" ("
" VALUES(1)"
" UNION ALL"
" SELECT x+1"
" FROM c"
" )"
"INSERT INTO foo (a)"
" SELECT x"
" FROM c"
" LIMIT 1000000;"
"DELETE FROM foo"
" WHERE RANDOM()%4 < 3;"
"VACUUM;"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
librados::IoCtx ioctx;
std::unique_ptr<SimpleRADOSStriper> rs;
uint64_t size1, size2;
std::cout << SQL << std::endl;
// a second striper handle on the same object, used only to stat its size
ASSERT_EQ(0, cluster.ioctx_create(pool.c_str(), ioctx));
rs = std::make_unique<SimpleRADOSStriper>(ioctx, get_name());
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// size after the bulk delete but before VACUUM
ASSERT_EQ(0, rs->lock(1000));
ASSERT_EQ(0, rs->stat(&size1));
ASSERT_EQ(0, rs->unlock());
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// VACUUM must have returned more than half the space
ASSERT_EQ(0, rs->lock(1000));
ASSERT_EQ(0, rs->stat(&size2));
ASSERT_EQ(0, rs->unlock());
ASSERT_LT(size2, size1/2);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Like InsertRate but with locking_mode=EXCLUSIVE, so no lock churn per
// autocommit transaction.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, InsertExclusiveRate) {
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
static const char SQL[] =
"PRAGMA locking_mode=EXCLUSIVE;"
"CREATE TABLE foo (a INT);"
"INSERT INTO foo (a) VALUES (RANDOM());"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
time t1, t2;
int count = 100;
std::cout << SQL << std::endl;
// the locking_mode PRAGMA reports the new mode as a result row
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
t1 = clock::now();
for (int i = 0; i < count; ++i) {
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
}
t2 = clock::now();
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
{
auto diff = std::chrono::duration<double>(t2-t1);
std::cout << "transactions per second: " << count/diff.count() << std::endl;
}
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Like InsertExclusiveRate but with journal_mode=WAL on top of the
// exclusive locking mode.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, InsertExclusiveWALRate) {
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
static const char SQL[] =
"PRAGMA locking_mode=EXCLUSIVE;"
"PRAGMA journal_mode=WAL;"
"CREATE TABLE foo (a INT);"
"INSERT INTO foo (a) VALUES (RANDOM());"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
time t1, t2;
int count = 100;
std::cout << SQL << std::endl;
// both PRAGMAs report their new value as a result row
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
t1 = clock::now();
for (int i = 0; i < count; ++i) {
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
}
t2 = clock::now();
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
{
auto diff = std::chrono::duration<double>(t2-t1);
std::cout << "transactions per second: " << count/diff.count() << std::endl;
}
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// In exclusive+WAL mode, a whole transaction of 10 inserts should cost
// exactly one VFS sync; verified by diffing the opf_sync perf counter
// between two ceph_perf() snapshots in the temp `perf` table.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, WALTransactionSync) {
static const char SQL[] =
"PRAGMA locking_mode=EXCLUSIVE;"
"PRAGMA journal_mode=WAL;"
"CREATE TABLE foo (a INT);" /* sets up the -wal journal */
"INSERT INTO perf (v)"
" VALUES (ceph_perf());"
"BEGIN TRANSACTION;"
"INSERT INTO foo (a) VALUES (RANDOM());"
"END TRANSACTION;"
"INSERT INTO perf (v)"
" VALUES (ceph_perf());"
"SELECT a.atom-b.atom"
" FROM p AS a, p AS b"
" WHERE a.i = ? AND"
" b.i = ? AND"
" a.fullkey = '$.libcephsqlite_vfs.opf_sync.avgcount' AND"
" b.fullkey = '$.libcephsqlite_vfs.opf_sync.avgcount';"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
uint64_t id;
std::cout << SQL << std::endl;
// PRAGMA locking_mode and journal_mode each yield a result row
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// first perf snapshot (before the transaction)
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// ten inserts within the one open transaction
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
for (int i = 0; i < 10; i++) {
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
}
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// second perf snapshot; its rowid identifies both snapshots (id, id-1)
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
id = sqlite3_last_insert_rowid(db);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatch(sqlite3_bind_int64(stmt, 1, id));
sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_EQ(sqlite3_column_int64(stmt, 0), 1); // exactly one sync for the txn
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// In the default PERSIST journal mode a committed transaction costs three
// syncs (journal, database, journal header); verified via perf snapshots.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, PersistTransactionSync) {
static const char SQL[] =
"BEGIN TRANSACTION;"
"CREATE TABLE foo (a INT);"
"INSERT INTO foo (a) VALUES (RANDOM());"
"END TRANSACTION;"
"INSERT INTO perf (v)"
" VALUES (ceph_perf());"
"SELECT a.atom-b.atom"
" FROM p AS a, p AS b"
" WHERE a.i = ? AND"
" b.i = ? AND"
" a.fullkey = '$.libcephsqlite_vfs.opf_sync.avgcount' AND"
" b.fullkey = '$.libcephsqlite_vfs.opf_sync.avgcount';"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
uint64_t id;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// perf snapshot after the commit; db_open() inserted the baseline row
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
id = sqlite3_last_insert_rowid(db);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatch(sqlite3_bind_int64(stmt, 1, id));
sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_EQ(sqlite3_column_int64(stmt, 0), 3); /* journal, db, journal header (PERSIST) */
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// With locking_mode=EXCLUSIVE, a single insert drives three VFS lock
// transitions but only one real lock on the RADOS striper.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, InsertExclusiveLock) {
static const char SQL[] =
"PRAGMA locking_mode=EXCLUSIVE;"
"CREATE TABLE foo (a INT);"
"INSERT INTO foo (a) VALUES (RANDOM());"
"INSERT INTO perf (v)"
" VALUES (ceph_perf());"
"SELECT a.atom, b.atom, a.atom-b.atom"
" FROM p AS a, p AS b"
" WHERE a.i = ? AND"
" b.i = ? AND"
" a.fullkey = '$.libcephsqlite_vfs.opf_lock.avgcount' AND"
" b.fullkey = '$.libcephsqlite_vfs.opf_lock.avgcount';"
"SELECT a.atom, b.atom, a.atom-b.atom"
" FROM p AS a, p AS b"
" WHERE a.i = ? AND"
" b.i = ? AND"
" a.fullkey = '$.libcephsqlite_striper.lock' AND"
" b.fullkey = '$.libcephsqlite_striper.lock';"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
uint64_t id;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// perf snapshot after the insert; (id, id-1) select the two snapshots
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
id = sqlite3_last_insert_rowid(db);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatch(sqlite3_bind_int64(stmt, 1, id));
sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_GT(sqlite3_column_int64(stmt, 0), 0);
ASSERT_GT(sqlite3_column_int64(stmt, 1), 0);
ASSERT_EQ(sqlite3_column_int64(stmt, 2), 3); /* NONE -> SHARED; SHARED -> RESERVED; RESERVED -> EXCLUSIVE */
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatch(sqlite3_bind_int64(stmt, 1, id));
sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_GT(sqlite3_column_int64(stmt, 0), 0);
ASSERT_GT(sqlite3_column_int64(stmt, 1), 0);
ASSERT_EQ(sqlite3_column_int64(stmt, 2), 1); /* one actual lock on the striper */
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// One committed transaction should update the striper's size metadata
// exactly twice (journal write and db write, not the header clear).
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, TransactionSizeUpdate) {
static const char SQL[] =
"BEGIN TRANSACTION;"
"CREATE TABLE foo (a INT);"
"INSERT INTO foo (a) VALUES (RANDOM());"
"END TRANSACTION;"
"INSERT INTO perf (v)"
" VALUES (ceph_perf());"
"SELECT a.atom, b.atom, a.atom-b.atom"
" FROM p AS a, p AS b"
" WHERE a.i = ? AND"
" b.i = ? AND"
" a.fullkey = '$.libcephsqlite_striper.update_size' AND"
" b.fullkey = '$.libcephsqlite_striper.update_size';"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
uint64_t id;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// perf snapshot after the commit; (id, id-1) select the two snapshots
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
id = sqlite3_last_insert_rowid(db);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatch(sqlite3_bind_int64(stmt, 1, id));
sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_GT(sqlite3_column_int64(stmt, 0), 0);
ASSERT_GT(sqlite3_column_int64(stmt, 1), 0);
ASSERT_EQ(sqlite3_column_int64(stmt, 2), 2); /* once for journal write and db write (but not journal header clear!) */
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Writing ~1GB of blobs should grow the striper allocation in a small
// number of steps (max_growth chunks), not once per write.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, AllocatedGrowth) {
static const char SQL[] =
"CREATE TABLE foo (a BLOB);"
"WITH RECURSIVE c(x) AS"
" ("
" VALUES(1)"
" UNION ALL"
" SELECT x+1"
" FROM c"
" )"
"INSERT INTO foo (a)"
" SELECT RANDOMBLOB(1<<20)"
" FROM c"
" LIMIT 1024;"
"INSERT INTO perf (v)"
" VALUES (ceph_perf());"
"SELECT a.atom, b.atom, a.atom-b.atom"
" FROM p AS a, p AS b"
" WHERE a.i = ? AND"
" b.i = ? AND"
" a.fullkey = '$.libcephsqlite_striper.update_allocated' AND"
" b.fullkey = '$.libcephsqlite_striper.update_allocated';"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
uint64_t id;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// perf snapshot after the bulk blob insert
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
id = sqlite3_last_insert_rowid(db);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatch(sqlite3_bind_int64(stmt, 1, id));
sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_GT(sqlite3_column_int64(stmt, 2), 8); /* max_growth = 128MB, 1024MB of data */
ASSERT_LT(sqlite3_column_int64(stmt, 2), 12);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Bulk-delete roughly half of a million-row table; just exercises the
// delete path, no counter assertions.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, DeleteBulk) {
static const char SQL[] =
"CREATE TABLE foo (a INT);"
"WITH RECURSIVE c(x) AS"
" ("
" VALUES(1)"
" UNION ALL"
" SELECT x+1"
" FROM c"
" )"
"INSERT INTO foo (a)"
" SELECT x"
" FROM c"
" LIMIT 1000000;"
"DELETE FROM foo"
" WHERE RANDOM()%2 == 0;"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Drop a ~1GB table and VACUUM; the striper shrink counters must show a
// shrink happened and that more than 512MB was returned.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, DropMassive) {
static const char SQL[] =
"CREATE TABLE foo (a BLOB);"
"WITH RECURSIVE c(x) AS"
" ("
" VALUES(1)"
" UNION ALL"
" SELECT x+1"
" FROM c"
" )"
"INSERT INTO foo (a)"
" SELECT RANDOMBLOB(1<<20)"
" FROM c"
" LIMIT 1024;"
"DROP TABLE foo;"
"VACUUM;"
"INSERT INTO perf (v)"
" VALUES (ceph_perf());"
"SELECT a.atom, b.atom"
" FROM p AS a, p AS b"
" WHERE a.i = ? AND"
" b.i = ? AND"
" a.fullkey = '$.libcephsqlite_striper.shrink' AND"
" b.fullkey = '$.libcephsqlite_striper.shrink';"
"SELECT a.atom-b.atom"
" FROM p AS a, p AS b"
" WHERE a.i = ? AND"
" b.i = ? AND"
" a.fullkey = '$.libcephsqlite_striper.shrink_bytes' AND"
" b.fullkey = '$.libcephsqlite_striper.shrink_bytes';"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
uint64_t id;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// perf snapshot after the VACUUM; (id, id-1) select the two snapshots
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
id = sqlite3_last_insert_rowid(db);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// shrink event count must have increased
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatch(sqlite3_bind_int64(stmt, 1, id));
sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_GT(sqlite3_column_int64(stmt, 0), sqlite3_column_int64(stmt, 1));
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// more than half of the ~1GB must have been reclaimed
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatch(sqlite3_bind_int64(stmt, 1, id));
sqlcatch(sqlite3_bind_int64(stmt, 2, id-1));
sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
ASSERT_LT(512*(1<<20), sqlite3_column_int64(stmt, 0));
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Copy 1024 x 1MB random blobs from a TEMP (local) table into a ceph-backed
// table and verify every blob round-trips by comparing SHA1 digests.
// Fix: restore `&current` (was mangled to the HTML entity residue `¤t`).
TEST_F(CephSQLiteTest, InsertMassiveVerify) {
static const char SQL[] =
"CREATE TABLE foo (a BLOB);"
"CREATE TEMPORARY TABLE bar (a BLOB);"
"WITH RECURSIVE c(x) AS"
" ("
" VALUES(1)"
" UNION ALL"
" SELECT x+1"
" FROM c"
" )"
"INSERT INTO bar (a)"
" SELECT RANDOMBLOB(1<<20)"
" FROM c"
" LIMIT 1024;"
"SELECT a FROM bar;"
"INSERT INTO foo (a)"
" SELECT a FROM bar;"
"SELECT a FROM foo;"
;
int rc;
const char *current = SQL;
sqlite3_stmt *stmt = NULL;
std::vector<std::string> hashes1, hashes2;
std::cout << SQL << std::endl;
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// digest every source blob in the TEMP table
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
const void* blob = sqlite3_column_blob(stmt, 0);
ceph::bufferlist bl;
bl.append(std::string_view((const char*)blob, (size_t)sqlite3_column_bytes(stmt, 0)));
auto digest = ceph::crypto::digest<ceph::crypto::SHA1>(bl);
hashes1.emplace_back(digest.to_str());
}
sqlcatchcode(rc, SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// copy everything into the ceph-backed table
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
sqlcatchcode(sqlite3_step(stmt), SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// digest what comes back out of the ceph-backed table
sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
const void* blob = sqlite3_column_blob(stmt, 0);
ceph::bufferlist bl;
bl.append(std::string_view((const char*)blob, (size_t)sqlite3_column_bytes(stmt, 0)));
auto digest = ceph::crypto::digest<ceph::crypto::SHA1>(bl);
hashes2.emplace_back(digest.to_str());
}
sqlcatchcode(rc, SQLITE_DONE);
sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
// same blobs, same order
ASSERT_EQ(hashes1, hashes2);
rc = 0;
out:
sqlite3_finalize(stmt);
ASSERT_EQ(0, rc);
}
// Verify that the ceph_perf() SQL function emits well-formed JSON.
// Restores `&current` from the mojibake `¤t` left by HTML-entity damage.
TEST_F(CephSQLiteTest, PerfValid) {
  static const char SQL[] =
    "SELECT json_valid(ceph_perf());"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_EQ(sqlite3_column_int64(stmt, 0), 1);  // json_valid() returns 1 for valid JSON
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Verify that the ceph_status() SQL function emits well-formed JSON.
// Restores `&current` from the mojibake `¤t` left by HTML-entity damage.
TEST_F(CephSQLiteTest, StatusValid) {
  static const char SQL[] =
    "SELECT json_valid(ceph_status());"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  ASSERT_EQ(sqlite3_column_int64(stmt, 0), 1);  // json_valid() returns 1 for valid JSON
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Verify SQLite's notion of "now" (strftime('%s','now')) agrees with the
// local system clock to within a few seconds.
// Fixes: restores `&current` from mojibake, and replaces ASSERT_LT(abs(now-t), 5)
// with explicit bounds — the int64 difference would otherwise be implicitly
// narrowed through the int overload of ::abs().
TEST_F(CephSQLiteTest, CurrentTime) {
  static const char SQL[] =
    "SELECT strftime('%s', 'now');"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  {
    time_t now = time(0);
    auto t = sqlite3_column_int64(stmt, 0);
    // |now - t| < 5, expressed without abs() so the comparison stays 64-bit.
    ASSERT_GT(t, now - 5);
    ASSERT_LT(t, now + 5);
  }
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Spot-check individual fields of ceph_status(): the client address string
// and the (positive) global id. Restores `&current` from mojibake `¤t`.
TEST_F(CephSQLiteTest, StatusFields) {
  static const char SQL[] =
    "SELECT json_extract(ceph_status(), '$.addr');"
    "SELECT json_extract(ceph_status(), '$.id');"
    ;
  int rc;
  const char *current = SQL;
  sqlite3_stmt *stmt = NULL;
  std::cout << SQL << std::endl;
  // $.addr: just print it; the exact value depends on the environment.
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  {
    auto addr = sqlite3_column_text(stmt, 0);
    std::cout << addr << std::endl;
  }
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  // $.id: a registered client must have a positive global id.
  sqlcatch(sqlite3_prepare_v2(db, current, -1, &stmt, &current));
  sqlcatchcode(sqlite3_step(stmt), SQLITE_ROW);
  {
    auto id = sqlite3_column_int64(stmt, 0);
    std::cout << id << std::endl;
    ASSERT_GT(id, 0);
  }
  sqlcatch(sqlite3_finalize(stmt); stmt = NULL);
  rc = 0;
out:
  sqlite3_finalize(stmt);
  ASSERT_EQ(0, rc);
}
// Test driver entry point: performs the standard ceph client bootstrap
// (argument parsing, CephContext creation, config load), wires the
// cephsqlite VFS into every future SQLite connection, then runs gtest.
// NOTE(review): `cct` appears to be a file-scope global defined above this
// chunk; confirm its declaration site before refactoring.
int main(int argc, char **argv) {
  auto args = argv_to_vec(argc, argv);
  std::string conf_file_list;
  std::string cluster;
  // Standard ceph config bootstrap: CLI -> config files -> env -> argv.
  CephInitParameters iparams = ceph_argparse_early_args(args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list);
  cct = boost::intrusive_ptr<CephContext>(common_preinit(iparams, CODE_ENVIRONMENT_UTILITY, 0), false);
  cct->_conf.parse_config_files(conf_file_list.empty() ? nullptr : conf_file_list.c_str(), &std::cerr, 0);
  cct->_conf.parse_env(cct->get_module_type()); // environment variables override
  cct->_conf.parse_argv(args);
  cct->_conf.apply_changes(nullptr);
  common_init_finish(cct.get());
  ldout(cct, 1) << "sqlite3 version: " << sqlite3_libversion() << dendl;
  // URI filenames must be enabled before any connection is opened.
  if (int rc = sqlite3_config(SQLITE_CONFIG_URI, 1); rc) {
    lderr(cct) << "sqlite3 config failed: " << rc << dendl;
    exit(EXIT_FAILURE);
  }
  // Register the cephsqlite VFS as an auto-extension for all connections.
  sqlite3_auto_extension((void (*)())sqlite3_cephsqlite_init);
  sqlite3* db = nullptr;
  // Open and immediately close a throwaway in-memory db so the
  // auto-extension runs once and registration errors surface early.
  if (int rc = sqlite3_open_v2(":memory:", &db, SQLITE_OPEN_READWRITE, nullptr); rc == SQLITE_OK) {
    sqlite3_close(db);
  } else {
    lderr(cct) << "could not open sqlite3: " << rc << dendl;
    exit(EXIT_FAILURE);
  }
  // Hand the configured CephContext to libcephsqlite.
  if (int rc = cephsqlite_setcct(cct.get(), nullptr); rc < 0) {
    lderr(cct) << "could not set cct: " << rc << dendl;
    exit(EXIT_FAILURE);
  }
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 33,382 | 28.542478 | 120 |
cc
|
null |
ceph-main/src/test/librados/TestCase.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <fmt/format.h>
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "include/scope_guard.h"
#include "crimson_utils.h"
// ---- RadosTestNS: one replicated pool shared by the whole test case ----
// Static pool state shared across all tests of the fixture.
std::string RadosTestNS::pool_name;
rados_t RadosTestNS::s_cluster = NULL;

// Create one replicated pool (named after the test case) for the fixture.
void RadosTestNS::SetUpTestCase()
{
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool(pool_name, &s_cluster));
}

// Destroy the shared pool (and shut down the cluster handle).
void RadosTestNS::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool(pool_name, &s_cluster));
}

// Per-test: open an ioctx on the shared pool; replicated pools must not
// require alignment.
void RadosTestNS::SetUp()
{
  cluster = RadosTestNS::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(ioctx, &req));
  ASSERT_FALSE(req);
}

// Per-test: optionally purge every object, then drop the ioctx.
void RadosTestNS::TearDown()
{
  if (cleanup)
    cleanup_all_objects(ioctx);
  rados_ioctx_destroy(ioctx);
}

// Remove every object in every namespace of the pool. Mutates the ioctx's
// snap/namespace/locator state, so callers must not reuse it expecting the
// previous settings.
void RadosTestNS::cleanup_all_objects(rados_ioctx_t ioctx)
{
  // remove all objects to avoid polluting other tests
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  rados_ioctx_set_namespace(ioctx, LIBRADOS_ALL_NSPACES);
  rados_list_ctx_t list_ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &list_ctx));
  auto sg = make_scope_guard([&] { rados_nobjects_list_close(list_ctx); });
  int r;
  const char *entry = NULL;
  const char *key = NULL;
  const char *nspace = NULL;
  while ((r = rados_nobjects_list_next(list_ctx, &entry, &key, &nspace)) != -ENOENT) {
    ASSERT_EQ(0, r);
    // Each object must be removed under its own locator key and namespace.
    rados_ioctx_locator_set_key(ioctx, key);
    rados_ioctx_set_namespace(ioctx, nspace);
    ASSERT_EQ(0, rados_remove(ioctx, entry));
  }
}
// ---- RadosTestECNS: erasure-coded variant of RadosTestNS ----
// EC pools are unsupported on crimson, hence the SKIP_IF_CRIMSON guards.
std::string RadosTestECNS::pool_name;
rados_t RadosTestECNS::s_cluster = NULL;

// Create one erasure-coded pool shared by the whole test case.
void RadosTestECNS::SetUpTestCase()
{
  SKIP_IF_CRIMSON();
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_ec_pool(pool_name, &s_cluster));
}

void RadosTestECNS::TearDownTestCase()
{
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, destroy_one_ec_pool(pool_name, &s_cluster));
}

// Per-test: open an ioctx; EC pools must report a nonzero required alignment.
void RadosTestECNS::SetUp()
{
  SKIP_IF_CRIMSON();
  cluster = RadosTestECNS::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(ioctx, &req));
  ASSERT_TRUE(req);
  ASSERT_EQ(0, rados_ioctx_pool_required_alignment2(ioctx, &alignment));
  ASSERT_NE(0U, alignment);
}

void RadosTestECNS::TearDown()
{
  SKIP_IF_CRIMSON();
  if (cleanup)
    cleanup_all_objects(ioctx);
  rados_ioctx_destroy(ioctx);
}
// ---- RadosTest: shared pool, but each test runs in its own namespace ----
std::string RadosTest::pool_name;
rados_t RadosTest::s_cluster = NULL;

void RadosTest::SetUpTestCase()
{
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool(pool_name, &s_cluster));
}

void RadosTest::TearDownTestCase()
{
  ASSERT_EQ(0, destroy_one_pool(pool_name, &s_cluster));
}

// Per-test: open an ioctx and switch it to a unique namespace so tests on
// the shared pool cannot see each other's objects.
void RadosTest::SetUp()
{
  cluster = RadosTest::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  nspace = get_temp_pool_name();  // reused here as a unique namespace name
  rados_ioctx_set_namespace(ioctx, nspace.c_str());
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(ioctx, &req));
  ASSERT_FALSE(req);
}

// Per-test: optionally purge both the default and the per-test namespace.
void RadosTest::TearDown()
{
  if (cleanup) {
    cleanup_default_namespace(ioctx);
    cleanup_namespace(ioctx, nspace);
  }
  rados_ioctx_destroy(ioctx);
}

void RadosTest::cleanup_default_namespace(rados_ioctx_t ioctx)
{
  // remove all objects from the default namespace to avoid polluting
  // other tests
  cleanup_namespace(ioctx, "");
}

// Remove every object in namespace `ns`. Leaves the ioctx pointed at `ns`
// with whatever locator key was last set.
void RadosTest::cleanup_namespace(rados_ioctx_t ioctx, std::string ns)
{
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  rados_ioctx_set_namespace(ioctx, ns.c_str());
  rados_list_ctx_t list_ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &list_ctx));
  auto sg = make_scope_guard([&] { rados_nobjects_list_close(list_ctx); });
  int r;
  const char *entry = NULL;
  const char *key = NULL;
  while ((r = rados_nobjects_list_next(list_ctx, &entry, &key, NULL)) != -ENOENT) {
    ASSERT_EQ(0, r);
    rados_ioctx_locator_set_key(ioctx, key);
    ASSERT_EQ(0, rados_remove(ioctx, entry));
  }
}
// ---- RadosTestEC: erasure-coded pool + per-test namespace ----
std::string RadosTestEC::pool_name;
rados_t RadosTestEC::s_cluster = NULL;

void RadosTestEC::SetUpTestCase()
{
  SKIP_IF_CRIMSON();
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_case()->name());
  pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_ec_pool(pool_name, &s_cluster));
}

void RadosTestEC::TearDownTestCase()
{
  SKIP_IF_CRIMSON();
  ASSERT_EQ(0, destroy_one_ec_pool(pool_name, &s_cluster));
}

// Per-test: unique namespace plus the EC alignment checks.
void RadosTestEC::SetUp()
{
  SKIP_IF_CRIMSON();
  cluster = RadosTestEC::s_cluster;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  nspace = get_temp_pool_name();  // reused here as a unique namespace name
  rados_ioctx_set_namespace(ioctx, nspace.c_str());
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(ioctx, &req));
  ASSERT_TRUE(req);
  ASSERT_EQ(0, rados_ioctx_pool_required_alignment2(ioctx, &alignment));
  ASSERT_NE(0U, alignment);
}

void RadosTestEC::TearDown()
{
  SKIP_IF_CRIMSON();
  if (cleanup) {
    cleanup_default_namespace(ioctx);
    cleanup_namespace(ioctx, nspace);
  }
  rados_ioctx_destroy(ioctx);
}
| 5,553 | 26.22549 | 106 |
cc
|
null |
ceph-main/src/test/librados/TestCase.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RADOS_TESTCASE_H
#define CEPH_TEST_RADOS_TESTCASE_H
#include "include/rados/librados.h"
#include "gtest/gtest.h"
#include <string>
/**
 * These test cases create a temporary pool that lives as long as the
 * test case. We initially use the default namespace and assume the
 * test will use whatever namespaces it wants. After each test all
 * objects are removed.
 *
 * Since pool creation and deletion is slow, this allows many tests to
 * run faster.
 */
class RadosTestNS : public ::testing::Test {
public:
  // c: when true, TearDown() removes every object from the shared pool.
  RadosTestNS(bool c=false) : cleanup(c) {}
  ~RadosTestNS() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static void cleanup_all_objects(rados_ioctx_t ioctx);
  static rados_t s_cluster;      // cluster handle shared by the test case
  static std::string pool_name;  // replicated pool shared by the test case
  void SetUp() override;
  void TearDown() override;
  rados_t cluster = nullptr;
  rados_ioctx_t ioctx = nullptr;
  bool cleanup;                  // purge objects in TearDown()?
};
// Convenience fixture: always cleans up objects after each test.
struct RadosTestNSCleanup : public RadosTestNS {
  RadosTestNSCleanup() : RadosTestNS(true) {}
};
// Erasure-coded variant of RadosTestNS; records the pool's required
// write alignment in `alignment`.
class RadosTestECNS : public RadosTestNS {
public:
  RadosTestECNS(bool c=false) : cleanup(c) {}
  ~RadosTestECNS() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static rados_t s_cluster;
  static std::string pool_name;
  void SetUp() override;
  void TearDown() override;
  rados_t cluster = nullptr;
  rados_ioctx_t ioctx = nullptr;
  uint64_t alignment = 0;        // EC pool required write alignment
  // NOTE(review): this member (like cluster/ioctx above) shadows the one
  // declared in RadosTestNS; this class's overrides use the derived copy.
  bool cleanup;
};
struct RadosTestECNSCleanup : public RadosTestECNS {
  RadosTestECNSCleanup() : RadosTestECNS(true) {}
};
/**
 * These test cases create a temporary pool that lives as long as the
 * test case. Each test within a test case gets a new ioctx set to a
 * unique namespace within the pool.
 *
 * Since pool creation and deletion is slow, this allows many tests to
 * run faster.
 */
class RadosTest : public ::testing::Test {
public:
  RadosTest(bool c=false) : cleanup(c) {}
  ~RadosTest() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static void cleanup_default_namespace(rados_ioctx_t ioctx);
  static void cleanup_namespace(rados_ioctx_t ioctx, std::string ns);
  static rados_t s_cluster;
  static std::string pool_name;
  void SetUp() override;
  void TearDown() override;
  rados_t cluster = nullptr;
  rados_ioctx_t ioctx = nullptr;
  std::string nspace;            // unique per-test namespace
  bool cleanup;
};
// Erasure-coded variant of RadosTest.
class RadosTestEC : public RadosTest {
public:
  RadosTestEC(bool c=false) : cleanup(c) {}
  ~RadosTestEC() override {}
protected:
  static void SetUpTestCase();
  static void TearDownTestCase();
  static rados_t s_cluster;
  static std::string pool_name;
  void SetUp() override;
  void TearDown() override;
  rados_t cluster = nullptr;
  rados_ioctx_t ioctx = nullptr;
  // NOTE(review): shadows RadosTest::cleanup; this class's overrides use
  // the derived copy.
  bool cleanup;
  std::string nspace;
  uint64_t alignment = 0;
};
/**
 * Test case without creating a temporary pool in advance.
 * This is necessary for scenarios such that we need to
 * manually create a pool, start some long-running tasks and
 * then the related pool is suddenly gone.
 */
class RadosTestNP: public ::testing::Test {
public:
  RadosTestNP() {}
  ~RadosTestNP() override {}
};
#endif
| 3,255 | 25.048 | 71 |
h
|
null |
ceph-main/src/test/librados/aio.cc
|
#include <errno.h>
#include <fcntl.h>
#include <string>
#include <sstream>
#include <utility>
#include <boost/scoped_ptr.hpp>
#include <fmt/format.h>
#include "include/err.h"
#include "include/rados/librados.h"
#include "include/types.h"
#include "include/stringify.h"
#include "include/scope_guard.h"
#include "common/errno.h"
#include "gtest/gtest.h"
#include "test.h"
#include "crimson_utils.h"
using std::ostringstream;
// Per-test helper owning a dedicated pool plus a cluster handle and ioctx.
// Construct, call init(), and use m_ioctx; the destructor tears everything
// down only if init() fully succeeded (m_init).
class AioTestData
{
public:
  AioTestData()
    : m_cluster(NULL),
      m_ioctx(NULL),
      m_init(false)
  {
  }

  ~AioTestData()
  {
    if (m_init) {
      rados_ioctx_destroy(m_ioctx);
      destroy_one_pool(m_pool_name, &m_cluster);
    }
  }

  // Create a uniquely named pool and open an ioctx on it.
  // Returns "" on success, otherwise a human-readable error string.
  std::string init()
  {
    int ret;
    auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_pool_name = get_temp_pool_name(pool_prefix);
    std::string err = create_one_pool(m_pool_name, &m_cluster);
    if (!err.empty()) {
      ostringstream oss;
      oss << "create_one_pool(" << m_pool_name << ") failed: error " << err;
      return oss.str();
    }
    ret = rados_ioctx_create(m_cluster, m_pool_name.c_str(), &m_ioctx);
    if (ret) {
      // Roll back the pool so a failed init leaves nothing behind.
      destroy_one_pool(m_pool_name, &m_cluster);
      ostringstream oss;
      oss << "rados_ioctx_create failed: error " << ret;
      return oss.str();
    }
    m_init = true;
    return "";
  }

  rados_t m_cluster;        // owned cluster handle (valid after init())
  rados_ioctx_t m_ioctx;    // owned ioctx on m_pool_name
  std::string m_pool_name;  // temporary pool created by init()
  bool m_init;              // true once init() fully succeeded
};
// Oversized (UINT_MAX-byte) async writes must be rejected up front with
// -E2BIG rather than being submitted.
TEST(LibRadosAio, TooBig) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  // The length is bogus on purpose; buf's real size is irrelevant here.
  ASSERT_EQ(-E2BIG, rados_aio_write(test_data.m_ioctx, "foo",
                                    my_completion, buf, UINT_MAX, 0));
  ASSERT_EQ(-E2BIG, rados_aio_write_full(test_data.m_ioctx, "foo",
                                         my_completion, buf, UINT_MAX));
  ASSERT_EQ(-E2BIG, rados_aio_append(test_data.m_ioctx, "foo",
                                     my_completion, buf, UINT_MAX));
  rados_aio_release(my_completion);
}
// Basic async write to the default namespace, then a second write to the
// same object name inside a non-default namespace.
TEST(LibRadosAio, SimpleWrite) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  auto sg = make_scope_guard([&] { rados_aio_release(my_completion); });
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // bounds the wait so a hang fails the test
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Repeat against an explicit namespace on the same ioctx.
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  auto sg2 = make_scope_guard([&] { rados_aio_release(my_completion2); });
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion2, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
}
// Historical "wait for safe" test; since safe and complete were unified,
// waiting for completion is the equivalent check.
TEST(LibRadosAio, WaitForSafe) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  TestAlarm alarm;
  ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
}
// Async write then async read back; the read buffer is deliberately larger
// (256 vs 128) to confirm the read returns exactly the object length.
TEST(LibRadosAio, RoundTrip) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[256];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
                              my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  // Read returns the number of bytes actually read: the object's 128.
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Same as RoundTrip but with a read buffer of exactly the object size.
TEST(LibRadosAio, RoundTrip2) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
                              my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Round trip via write/read op objects: exercises alloc-hint and fadvise
// flags, plus an in-op CRC32C checksum that must match a locally computed
// crc over the same bytes.
TEST(LibRadosAio, RoundTrip3) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  rados_write_op_t op1 = rados_create_write_op();
  rados_write_op_write(op1, buf, sizeof(buf), 0);
  rados_write_op_set_alloc_hint2(op1, 0, 0, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ASSERT_EQ(0, rados_aio_write_op_operate(op1, test_data.m_ioctx, my_completion,
                                          "foo", NULL, 0));
  rados_release_write_op(op1);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  rados_read_op_t op2 = rados_create_read_op();
  rados_read_op_read(op2, 0, sizeof(buf2), buf2, NULL, NULL);
  rados_read_op_set_flags(op2, LIBRADOS_OP_FLAG_FADVISE_NOCACHE |
                          LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  // Checksum the whole object server-side with a -1 seed; the result is
  // a count (checksum[0]) followed by the crc value(s).
  ceph_le32 init_value(-1);
  ceph_le32 checksum[2];
  rados_read_op_checksum(op2, LIBRADOS_CHECKSUM_TYPE_CRC32C,
                         reinterpret_cast<char *>(&init_value),
                         sizeof(init_value), 0, 0, 0,
                         reinterpret_cast<char *>(&checksum),
                         sizeof(checksum), NULL);
  ASSERT_EQ(0, rados_aio_read_op_operate(op2, test_data.m_ioctx, my_completion2,
                                         "foo", 0));
  rados_release_read_op(op2);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion2);
  // Cross-check: one checksum entry, equal to the local crc32c of buf.
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(1U, checksum[0]);
  ASSERT_EQ(bl.crc32c(-1), checksum[1]);
}
// Two async appends followed by one read; the object must contain both
// payloads back-to-back in append order.
TEST(LibRadosAio, RoundTripAppend) {
  AioTestData test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
                                my_completion, buf, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
                                my_completion2, buf2, sizeof(buf2)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  // Read back the concatenation of both appends.
  char buf3[sizeof(buf) + sizeof(buf2)];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
                              my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(buf3), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(buf3 + sizeof(buf), buf2, sizeof(buf2)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// Async removal of an existing object; a subsequent synchronous read must
// fail with -ENOENT.
TEST(LibRadosAio, RemoveTest) {
  char buf[128];
  char buf2[sizeof(buf)];
  rados_completion_t my_completion;
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(test_data.m_ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_aio_remove(test_data.m_ioctx, "foo", my_completion));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ(-ENOENT, rados_read(test_data.m_ioctx, "foo", buf2, sizeof(buf2), 0));
  rados_aio_release(my_completion);
}
// Async xattr round trip: getxattr on a missing attr yields -ENODATA,
// then setxattr + getxattr must return the exact stored bytes.
TEST(LibRadosAio, XattrsRoundTrip) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  // append
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(test_data.m_ioctx, "foo", buf, sizeof(buf)));
  // async getxattr before the attr exists: expect -ENODATA
  rados_completion_t my_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_getxattr(test_data.m_ioctx, "foo", my_completion, attr1, buf, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(-ENODATA, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
  // async setxattr
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_setxattr(test_data.m_ioctx, "foo", my_completion2, attr1, attr1_buf, sizeof(attr1_buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  rados_aio_release(my_completion2);
  // async getxattr: return value is the attr length; buf receives the data
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_getxattr(test_data.m_ioctx, "foo", my_completion3, attr1, buf, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(attr1_buf), rados_aio_get_return_value(my_completion3));
  rados_aio_release(my_completion3);
  // check content of attribute
  ASSERT_EQ(0, memcmp(attr1_buf, buf, sizeof(attr1_buf)));
}
// Async rmxattr: removing an attr makes getxattr return -ENODATA, and
// rmxattr on a deleted object fails with -ENOENT.
TEST(LibRadosAio, RmXattr) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  // append
  memset(buf, 0xaa, sizeof(buf));
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_append(test_data.m_ioctx, "foo", buf, sizeof(buf)));
  // async setxattr
  rados_completion_t my_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_setxattr(test_data.m_ioctx, "foo", my_completion, attr1, attr1_buf, sizeof(attr1_buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
  // async rmxattr
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_rmxattr(test_data.m_ioctx, "foo", my_completion2, attr1));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  rados_aio_release(my_completion2);
  // async getxattr after deletion: the attr must be gone
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_getxattr(test_data.m_ioctx, "foo", my_completion3, attr1, buf, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(-ENODATA, rados_aio_get_return_value(my_completion3));
  rados_aio_release(my_completion3);
  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  ASSERT_EQ(0, rados_write(test_data.m_ioctx, "foo_rmxattr", buf2, sizeof(buf2), 0));
  // async setxattr
  rados_completion_t my_completion4;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion4));
  ASSERT_EQ(0, rados_aio_setxattr(test_data.m_ioctx, "foo_rmxattr", my_completion4, attr2, attr2_buf, sizeof(attr2_buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion4));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion4));
  rados_aio_release(my_completion4);
  // remove object
  ASSERT_EQ(0, rados_remove(test_data.m_ioctx, "foo_rmxattr"));
  // async rmxattr on non existing object: expect -ENOENT
  rados_completion_t my_completion5;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion5));
  ASSERT_EQ(0, rados_aio_rmxattr(test_data.m_ioctx, "foo_rmxattr", my_completion5, attr2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion5));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(my_completion5));
  rados_aio_release(my_completion5);
}
// Iterate the xattrs fetched by rados_aio_getxattrs and verify exactly the
// two attributes that were stored come back with their exact payloads.
// Fixes: the completion was created but never released (leak); the
// unexpected-attr branch now fails with a message instead of
// ASSERT_EQ(0, 1); and the test now asserts both attrs were actually seen.
TEST(LibRadosAio, XattrIter) {
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  // Create an object with 2 attributes
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(test_data.m_ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(test_data.m_ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_setxattr(test_data.m_ioctx, "foo", attr2, attr2_buf, sizeof(attr2_buf)));
  // call async version of getxattrs and wait for completion
  rados_completion_t my_completion;
  ASSERT_EQ(0, rados_aio_create_completion2((void*)&test_data,
                                            nullptr, &my_completion));
  rados_xattrs_iter_t iter;
  ASSERT_EQ(0, rados_aio_getxattrs(test_data.m_ioctx, "foo", my_completion, &iter));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);  // previously leaked
  // loop over attributes; NULL name marks the end of iteration
  int num_seen = 0;
  while (true) {
    const char *name;
    const char *val;
    size_t len;
    ASSERT_EQ(0, rados_getxattrs_next(iter, &name, &val, &len));
    if (name == NULL) {
      break;
    }
    ASSERT_LT(num_seen, 2);
    if ((strcmp(name, attr1) == 0) && (val != NULL) && (memcmp(val, attr1_buf, len) == 0)) {
      num_seen++;
      continue;
    } else if ((strcmp(name, attr2) == 0) && (val != NULL) && (memcmp(val, attr2_buf, len) == 0)) {
      num_seen++;
      continue;
    } else {
      FAIL() << "unexpected xattr: " << name;
    }
  }
  // Both stored attributes must have been returned by the iterator.
  ASSERT_EQ(2, num_seen);
  rados_getxattrs_end(iter);
}
// Exercise rados_aio_is_complete via a deliberate busy-wait poll on an
// async read, then verify the data round-tripped.
TEST(LibRadosAio, IsComplete) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
                              my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;  // the alarm still bounds the spin if the op hangs
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_complete.
    while (true) {
      int is_complete = rados_aio_is_complete(my_completion2);
      if (is_complete)
        break;
    }
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Exercise rados_aio_is_safe via a deliberate busy-wait poll on an async
// write, then read back and verify.
TEST(LibRadosAio, IsSafe) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;  // the alarm still bounds the spin if the op hangs
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_safe.
    while (true) {
      int is_safe = rados_aio_is_safe(my_completion);
      if (is_safe)
        break;
    }
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
                              my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// An async read of a nonexistent object completes with -ENOENT as the
// completion's return value.
TEST(LibRadosAio, ReturnValue) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0, sizeof(buf));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "nonexistent",
                              my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
}
// rados_aio_flush must block until the outstanding write completes, so the
// return value is valid immediately afterwards without an explicit wait.
TEST(LibRadosAio, Flush) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_aio_flush(test_data.m_ioctx));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
                              my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Async flush: once the flush completion fires, every previously submitted
// aio on the ioctx (the write here) must also be complete.
TEST(LibRadosAio, FlushAsync) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion));
  rados_completion_t flush_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &flush_completion));
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_aio_flush_async(test_data.m_ioctx, flush_completion));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(flush_completion));
  }
  // The flush completing implies the earlier write completed too.
  ASSERT_EQ(1, rados_aio_is_complete(my_completion));
  ASSERT_EQ(1, rados_aio_is_complete(flush_completion));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
                                            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
                              my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(flush_completion);
}
// write_full must truncate the object: after writing 128 bytes and
// then write_full-ing 64, a read of the combined size returns only the
// 64 write_full bytes.
TEST(LibRadosAio, RoundTripWriteFull) {
  AioTestData test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_write_full(test_data.m_ioctx, "foo",
			       my_completion2, buf2, sizeof(buf2)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  char buf3[sizeof(buf) + sizeof(buf2)];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  // Only buf2's bytes remain: write_full replaced the whole object.
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf2)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// writesame replicates a 32-byte pattern across a 128-byte extent;
// verify each 32-byte window of the read-back object matches the
// pattern buffer.
TEST(LibRadosAio, RoundTripWriteSame) {
  AioTestData test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char full[128];
  memset(full, 0xcc, sizeof(full));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, full, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  /* write the same buf four times */
  char buf[32];
  size_t ws_write_len = sizeof(full);  // 128 bytes = 4 x 32-byte pattern
  memset(buf, 0xdd, sizeof(buf));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_writesame(test_data.m_ioctx, "foo",
				   my_completion2, buf, sizeof(buf),
				   ws_write_len, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, full, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(full), rados_aio_get_return_value(my_completion3));
  // Check every pattern-sized window of the object.
  for (char *cmp = full; cmp < full + sizeof(full); cmp += sizeof(buf)) {
    ASSERT_EQ(0, memcmp(cmp, buf, sizeof(buf)));
  }
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// Async stat after a 128-byte write must report psize == 128.
TEST(LibRadosAio, SimpleStat) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// rados_aio_write_op_operate's second-resolution mtime parameter must
// be stored exactly (and with zero nanoseconds when read via stat2).
TEST(LibRadosAio, OperateMtime)
{
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  time_t set_mtime = 1457129052;  // arbitrary fixed timestamp
  {
    rados_write_op_t op = rados_create_write_op();
    rados_write_op_create(op, LIBRADOS_CREATE_IDEMPOTENT, nullptr);
    rados_completion_t completion;
    ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &completion));
    ASSERT_EQ(0, rados_aio_write_op_operate(op, test_data.m_ioctx, completion,
                                            "foo", &set_mtime, 0));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
    }
    ASSERT_EQ(0, rados_aio_get_return_value(completion));
    rados_aio_release(completion);
    rados_release_write_op(op);
  }
  {
    uint64_t size;
    timespec mtime;
    ASSERT_EQ(0, rados_stat2(test_data.m_ioctx, "foo", &size, &mtime));
    EXPECT_EQ(0, size);  // create-only op writes no data
    EXPECT_EQ(set_mtime, mtime.tv_sec);
    EXPECT_EQ(0, mtime.tv_nsec);  // second-granularity API: nsec must be 0
  }
}
// rados_aio_write_op_operate2 takes a full timespec; both seconds and
// nanoseconds must round-trip through stat2.
TEST(LibRadosAio, Operate2Mtime)
{
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  timespec set_mtime{1457129052, 123456789};  // nonzero nsec exercises ns precision
  {
    rados_write_op_t op = rados_create_write_op();
    rados_write_op_create(op, LIBRADOS_CREATE_IDEMPOTENT, nullptr);
    rados_completion_t completion;
    ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &completion));
    ASSERT_EQ(0, rados_aio_write_op_operate2(op, test_data.m_ioctx, completion,
                                             "foo", &set_mtime, 0));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
    }
    ASSERT_EQ(0, rados_aio_get_return_value(completion));
    rados_aio_release(completion);
    rados_release_write_op(op);
  }
  {
    uint64_t size;
    timespec mtime;
    ASSERT_EQ(0, rados_stat2(test_data.m_ioctx, "foo", &size, &mtime));
    EXPECT_EQ(0, size);  // create-only op writes no data
    EXPECT_EQ(set_mtime.tv_sec, mtime.tv_sec);
    EXPECT_EQ(set_mtime.tv_nsec, mtime.tv_nsec);
  }
}
// Objects named "foo" in different namespaces are independent: stat in
// the default namespace sees the 128-byte object, stat in "nspace"
// sees the 64-byte one.
// Fix: the original leaked the first completion by overwriting
// my_completion with a second rados_aio_create_completion2 without
// releasing it first.
TEST(LibRadosAio, SimpleStatNS) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  char buf2[64];
  memset(buf2, 0xbb, sizeof(buf2));
  // Release the first completion before reusing the handle variable,
  // otherwise it is leaked.
  rados_aio_release(my_completion);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  // Stat in the default namespace: sees the 128-byte object.
  rados_ioctx_set_namespace(test_data.m_ioctx, "");
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  // Stat in "nspace": sees the 64-byte object.
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion3, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(sizeof(buf2), psize);
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// stat succeeds after a write, then after an async remove a second
// stat must fail with -ENOENT.
TEST(LibRadosAio, StatRemove) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_remove(test_data.m_ioctx, "foo", my_completion3));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion3));
  // Object is gone now: stat must report -ENOENT.
  uint64_t psize2;
  time_t pmtime2;
  rados_completion_t my_completion4;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion4));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion4, &psize2, &pmtime2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion4));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(my_completion4));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
  rados_aio_release(my_completion4);
}
// Async class-method execution: write an object, then rados_aio_exec
// the "hello/say_hello" cls method and check its output.
// Fix: the original read the write's return value inside the TestAlarm
// scope WITHOUT first calling rados_aio_wait_for_complete, racing with
// the in-flight write (the EC variant of this test waits correctly).
TEST(LibRadosAio, ExecuteClass) {
  AioTestData test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    // Wait for the write before inspecting its result.
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  char out[128];
  ASSERT_EQ(0, rados_aio_exec(test_data.m_ioctx, "foo", my_completion2,
			      "hello", "say_hello", NULL, 0, out, sizeof(out)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  // say_hello returns the 13-byte greeting as both rc and payload.
  ASSERT_EQ(13, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, strncmp("Hello, world!", out, 13));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
using std::string;
using std::map;
using std::set;
// Two non-overlapping writes (128 bytes at offset 0, 64 at offset 128)
// on a replicated pool both succeed; a large read returns exactly the
// 192 written bytes in order.
TEST(LibRadosAio, MultiWrite) {
  AioTestData test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  // Second write starts where the first ended (offset sizeof(buf)).
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion2, buf2, sizeof(buf2), sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  // Oversized read buffer: the short return value proves object length.
  char buf3[(sizeof(buf) + sizeof(buf2)) * 3];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)(sizeof(buf) + sizeof(buf2)), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(buf3 + sizeof(buf), buf2, sizeof(buf2)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// Async unlock must actually release an exclusive lock: after
// rados_aio_unlock completes, re-acquiring the same lock succeeds.
TEST(LibRadosAio, AioUnlock) {
  AioTestData test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_lock_exclusive(test_data.m_ioctx, "foo", "TestLock", "Cookie", "", NULL, 0));
  rados_completion_t my_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_unlock(test_data.m_ioctx, "foo", "TestLock", "Cookie", my_completion));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Lock can be re-taken only if the async unlock really released it.
  ASSERT_EQ(0, rados_lock_exclusive(test_data.m_ioctx, "foo", "TestLock", "Cookie", "", NULL, 0));
}
// EC test cases
// Fixture for the erasure-coded (EC) pool AIO tests: owns a cluster
// handle and an ioctx on a freshly created, uniquely named EC pool,
// and tears both down on destruction.
class AioTestDataEC
{
public:
  AioTestDataEC()
    : m_cluster(NULL),
      m_ioctx(NULL),
      m_init(false)
  {
  }
  ~AioTestDataEC()
  {
    // Clean up only if init() fully succeeded; partial-failure paths
    // in init() already released what they acquired.
    if (m_init) {
      rados_ioctx_destroy(m_ioctx);
      destroy_one_ec_pool(m_pool_name, &m_cluster);
    }
  }
  // Create the EC pool and ioctx.
  // Returns "" on success, otherwise a human-readable error string.
  std::string init()
  {
    int ret;
    // Prefix the pool name with the current test name for uniqueness.
    auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_pool_name = get_temp_pool_name(pool_prefix);
    std::string err = create_one_ec_pool(m_pool_name, &m_cluster);
    if (!err.empty()) {
      ostringstream oss;
      oss << "create_one_ec_pool(" << m_pool_name << ") failed: error " << err;
      return oss.str();
    }
    ret = rados_ioctx_create(m_cluster, m_pool_name.c_str(), &m_ioctx);
    if (ret) {
      // Don't leak the pool if the ioctx could not be created.
      destroy_one_ec_pool(m_pool_name, &m_cluster);
      ostringstream oss;
      oss << "rados_ioctx_create failed: error " << ret;
      return oss.str();
    }
    m_init = true;
    return "";
  }
  rados_t m_cluster;        // cluster handle, valid after init()
  rados_ioctx_t m_ioctx;    // ioctx on the EC pool, valid after init()
  std::string m_pool_name;  // name of the temporary EC pool
  bool m_init;              // true only after a fully successful init()
};
// Basic async write on an EC pool, in both the default namespace and a
// custom one; scope guards release the completions on any exit path.
TEST(LibRadosAioEC, SimpleWrite) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  auto sg = make_scope_guard([&] { rados_aio_release(my_completion); });
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // Repeat in a non-default namespace.
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  auto sg2 = make_scope_guard([&] { rados_aio_release(my_completion2); });
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion2, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
}
// Minimal EC write + wait_for_complete round trip.
TEST(LibRadosAioEC, WaitForComplete) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  TestAlarm alarm;  // scoped to the rest of the test body
  ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
}
// EC write/read round trip; the read buffer is larger than the object
// so the return value proves the object's actual length.
TEST(LibRadosAioEC, RoundTrip) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[256];  // intentionally larger than the 128-byte object
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// EC write/read round trip with an exactly-sized read buffer.
TEST(LibRadosAioEC, RoundTrip2) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// EC pools require aligned appends: an aligned full-block append and a
// following half-block append succeed (the half-block starts aligned),
// but a second half-block append starts unaligned and must fail with
// EOPNOTSUPP.  A read-back then returns exactly bsize + hbsize bytes.
// Fix: the original issued the read with length bsize * 3 into a
// buffer of only bsize + hbsize bytes — a latent buffer overflow if
// the object were ever larger.  Read exactly the buffer's size; the
// returned length is unchanged.  Also dropped the redundant C-style
// casts on the new[] results.
TEST(LibRadosAioEC, RoundTripAppend) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion, my_completion2, my_completion3, my_completion4;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  // Discover the pool's required append alignment.
  int req;
  ASSERT_EQ(0, rados_ioctx_pool_requires_alignment2(test_data.m_ioctx, &req));
  ASSERT_NE(0, req);
  uint64_t alignment;
  ASSERT_EQ(0, rados_ioctx_pool_required_alignment2(test_data.m_ioctx, &alignment));
  ASSERT_NE(0U, alignment);
  int bsize = alignment;
  char *buf = new char[bsize];
  memset(buf, 0xcc, bsize);
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
			       my_completion, buf, bsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // First half-block append starts on an alignment boundary: succeeds.
  int hbsize = bsize / 2;
  char *buf2 = new char[hbsize];
  memset(buf2, 0xdd, hbsize);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
			       my_completion2, buf2, hbsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  // Second half-block append starts mid-block: must be rejected.
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_append(test_data.m_ioctx, "foo",
			       my_completion3, buf2, hbsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  EXPECT_EQ(-EOPNOTSUPP, rados_aio_get_return_value(my_completion3));
  // Read back exactly the buffer's capacity (see fix note above).
  int tbsize = bsize + hbsize;
  char *buf3 = new char[tbsize];
  memset(buf3, 0, tbsize);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion4));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion4, buf3, tbsize, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion4));
  }
  ASSERT_EQ(tbsize, rados_aio_get_return_value(my_completion4));
  ASSERT_EQ(0, memcmp(buf3, buf, bsize));
  ASSERT_EQ(0, memcmp(buf3 + bsize, buf2, hbsize));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
  rados_aio_release(my_completion4);
  delete[] buf;
  delete[] buf2;
  delete[] buf3;
}
// Exercise rados_aio_is_complete by busy-polling a read until it
// reports completion instead of calling wait_for_complete.
TEST(LibRadosAioEC, IsComplete) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_complete.
    while (true) {
      int is_complete = rados_aio_is_complete(my_completion2);
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Exercise rados_aio_is_safe by busy-polling a write until it reports
// durability, then verify the data round-trips.
TEST(LibRadosAioEC, IsSafe) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_safe.
    while (true) {
      int is_safe = rados_aio_is_safe(my_completion);
      if (is_safe)
	break;
    }
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// EC twin of LibRadosAio.ReturnValue: reading a nonexistent object
// completes with -ENOENT.
TEST(LibRadosAioEC, ReturnValue) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0, sizeof(buf));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "nonexistent",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(my_completion));
  rados_aio_release(my_completion);
}
// EC twin of LibRadosAio.Flush: rados_aio_flush blocks until all
// pending AIO completes, so the write's result is readable right after.
TEST(LibRadosAioEC, Flush) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_aio_flush(test_data.m_ioctx));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// EC twin of LibRadosAio.FlushAsync: waiting on the flush completion
// implies the earlier write has also completed.
TEST(LibRadosAioEC, FlushAsync) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  rados_completion_t flush_completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr, &flush_completion));
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_aio_flush_async(test_data.m_ioctx, flush_completion));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(flush_completion));
  }
  ASSERT_EQ(1, rados_aio_is_complete(my_completion));
  ASSERT_EQ(1, rados_aio_is_complete(flush_completion));
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[128];
  memset(buf2, 0, sizeof(buf2));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion2, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(flush_completion);
}
// EC twin of LibRadosAio.RoundTripWriteFull: write_full truncates the
// object, so a later read returns only the write_full payload.
TEST(LibRadosAioEC, RoundTripWriteFull) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_write_full(test_data.m_ioctx, "foo",
			       my_completion2, buf2, sizeof(buf2)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  char buf3[sizeof(buf) + sizeof(buf2)];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
			      my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  // Only the 64 write_full bytes remain.
  ASSERT_EQ((int)sizeof(buf2), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf2)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// EC twin of LibRadosAio.SimpleStat: async stat reports the written
// object size.
TEST(LibRadosAioEC, SimpleStat) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// EC twin of LibRadosAio.SimpleStatNS: "foo" in the default namespace
// and in "nspace" are distinct objects with distinct sizes.
// Fix: the original leaked the first completion by overwriting
// my_completion with a second rados_aio_create_completion2 without
// releasing it first.
TEST(LibRadosAioEC, SimpleStatNS) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  char buf2[64];
  memset(buf2, 0xbb, sizeof(buf2));
  // Release the first completion before reusing the handle variable,
  // otherwise it is leaked.
  rados_aio_release(my_completion);
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf2, sizeof(buf2), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  // Default namespace: 128-byte object.
  rados_ioctx_set_namespace(test_data.m_ioctx, "");
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  // "nspace": 64-byte object.
  rados_ioctx_set_namespace(test_data.m_ioctx, "nspace");
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion3, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(sizeof(buf2), psize);
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
// EC twin of LibRadosAio.StatRemove: stat succeeds after the write and
// returns -ENOENT after an async remove.
TEST(LibRadosAioEC, StatRemove) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  uint64_t psize;
  time_t pmtime;
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion2, &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(sizeof(buf), psize);
  rados_completion_t my_completion3;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_remove(test_data.m_ioctx, "foo", my_completion3));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion3));
  // Object removed: stat must now fail.
  uint64_t psize2;
  time_t pmtime2;
  rados_completion_t my_completion4;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion4));
  ASSERT_EQ(0, rados_aio_stat(test_data.m_ioctx, "foo",
			      my_completion4, &psize2, &pmtime2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion4));
  }
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(my_completion4));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
  rados_aio_release(my_completion4);
}
// EC twin of LibRadosAio.ExecuteClass: run the "hello/say_hello" cls
// method asynchronously and verify its 13-byte greeting.
TEST(LibRadosAioEC, ExecuteClass) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
			       my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  rados_completion_t my_completion2;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
	      nullptr, &my_completion2));
  char out[128];
  ASSERT_EQ(0, rados_aio_exec(test_data.m_ioctx, "foo", my_completion2,
			      "hello", "say_hello", NULL, 0, out, sizeof(out)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  // say_hello returns the greeting length as the op's return value.
  ASSERT_EQ(13, rados_aio_get_return_value(my_completion2));
  ASSERT_EQ(0, strncmp("Hello, world!", out, 13));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
}
// Two sequential writes to one object on an EC pool: the second write lands at
// a non-zero offset and is expected to fail with -EOPNOTSUPP (the assertion
// below pins that EC pools reject this kind of overwrite).  The final read
// confirms the object still holds only the first 128-byte write.
TEST(LibRadosAioEC, MultiWrite) {
  SKIP_IF_CRIMSON();
  AioTestDataEC test_data;
  rados_completion_t my_completion, my_completion2, my_completion3;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
            nullptr, &my_completion));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion, buf, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion));
  }
  ASSERT_EQ(0, rados_aio_get_return_value(my_completion));
  // second write at offset sizeof(buf) — expected to be rejected on EC
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
            nullptr, &my_completion2));
  ASSERT_EQ(0, rados_aio_write(test_data.m_ioctx, "foo",
                               my_completion2, buf2, sizeof(buf2), sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion2));
  }
  ASSERT_EQ(-EOPNOTSUPP, rados_aio_get_return_value(my_completion2));
  // read back more than was written; only the first write should be present
  char buf3[(sizeof(buf) + sizeof(buf2)) * 3];
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr,
            nullptr, &my_completion3));
  ASSERT_EQ(0, rados_aio_read(test_data.m_ioctx, "foo",
                              my_completion3, buf3, sizeof(buf3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rados_aio_wait_for_complete(my_completion3));
  }
  ASSERT_EQ((int)sizeof(buf), rados_aio_get_return_value(my_completion3));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  rados_aio_release(my_completion);
  rados_aio_release(my_completion2);
  rados_aio_release(my_completion3);
}
| 58,346 | 32.824348 | 122 |
cc
|
null |
ceph-main/src/test/librados/aio_cxx.cc
|
#include <errno.h>
#include <fcntl.h>
#include <sstream>
#include <string>
#include <utility>
#include <boost/scoped_ptr.hpp>
#include <fmt/format.h>
#include "gtest/gtest.h"
#include "common/errno.h"
#include "include/err.h"
#include "include/rados/librados.hpp"
#include "include/types.h"
#include "include/stringify.h"
#include "include/scope_guard.h"
#include "common/ceph_mutex.h"
#include <fmt/format.h>
#include "test_cxx.h"
#include "crimson_utils.h"
using namespace std;
using namespace librados;
// Shared fixture for the C++ AIO tests: init() creates a uniquely named
// temporary pool plus an IoCtx on it, and the destructor tears both down.
// Each test gets a per-test object name in m_oid.
class AioTestDataPP
{
public:
  AioTestDataPP()
    : m_init(false),
      m_oid("foo")
  {
  }

  ~AioTestDataPP()
  {
    // only clean up if init() actually created the pool
    if (m_init) {
      m_ioctx.close();
      destroy_one_pool_pp(m_pool_name, m_cluster);
    }
  }

  std::string init()
  {
    return init({});
  }

  // Create the temp pool and ioctx, applying the given config overrides.
  // Returns "" on success, otherwise a human-readable error description.
  std::string init(const std::map<std::string, std::string> &config)
  {
    int ret;
    // derive pool/oid names from the running test's name for uniqueness
    auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_pool_name = get_temp_pool_name(pool_prefix);
    std::string err = create_one_pool_pp(m_pool_name, m_cluster, config);
    if (!err.empty()) {
      ostringstream oss;
      oss << "create_one_pool(" << m_pool_name << ") failed: error " << err;
      return oss.str();
    }
    ret = m_cluster.ioctx_create(m_pool_name.c_str(), m_ioctx);
    if (ret) {
      // undo pool creation so a failed init leaves nothing behind
      destroy_one_pool_pp(m_pool_name, m_cluster);
      ostringstream oss;
      oss << "rados_ioctx_create failed: error " << ret;
      return oss.str();
    }
    m_oid = fmt::format("oid_{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_init = true;
    return "";
  }

  Rados m_cluster;
  IoCtx m_ioctx;
  std::string m_pool_name;
  bool m_init;         // true once init() succeeded (enables cleanup)
  std::string m_oid;   // per-test object name
};
TEST(LibRadosAio, TooBigPP) {
  // Oversized I/O lengths must be rejected up front with -E2BIG, before any
  // request is queued.
  AioTestDataPP fixture;
  ASSERT_EQ("", fixture.init());
  std::unique_ptr<AioCompletion> comp{Rados::aio_create_completion()};
  bufferlist empty_bl;
  ASSERT_EQ(-E2BIG, fixture.m_ioctx.aio_write(fixture.m_oid, comp.get(),
                                              empty_bl, UINT_MAX, 0));
  ASSERT_EQ(-E2BIG, fixture.m_ioctx.aio_append(fixture.m_oid, comp.get(),
                                               empty_bl, UINT_MAX));
  // aio_write_full takes no explicit length, so it cannot overflow bl.length()
}
// Fill a pool that carries a 4 KiB max_bytes quota: writes issued with
// OPERATION_FULL_TRY must eventually fail with -EDQUOT, and once the osdmap
// marks the pool full, a write *without* FULL_TRY must block (never complete).
TEST(LibRadosAio, PoolQuotaPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  string p = get_temp_pool_name(pool_prefix);
  ASSERT_EQ(0, test_data.m_cluster.pool_create(p.c_str()));
  IoCtx ioctx;
  ASSERT_EQ(0, test_data.m_cluster.ioctx_create(p.c_str(), ioctx));
  ioctx.application_enable("rados", true);
  // impose a 4 KiB byte quota on the fresh pool via a mon command
  bufferlist inbl;
  ASSERT_EQ(0, test_data.m_cluster.mon_command(
      "{\"prefix\": \"osd pool set-quota\", \"pool\": \"" + p +
      "\", \"field\": \"max_bytes\", \"val\": \"4096\"}",
      inbl, NULL, NULL));
  bufferlist bl;
  bufferptr z(4096);
  bl.append(z);
  int n;
  for (n = 0; n < 1024; ++n) {
    // keep writing 4 KiB objects until the quota kicks in with -EDQUOT
    ObjectWriteOperation op;
    op.write_full(bl);
    auto completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_EQ(0, ioctx.aio_operate(test_data.m_oid + stringify(n),
                                   completion.get(), &op,
                                   librados::OPERATION_FULL_TRY));
    completion->wait_for_complete();
    int r = completion->get_return_value();
    if (r == -EDQUOT)
      break;
    ASSERT_EQ(0, r);
    sleep(1);   // give the mon/osd time to notice the quota being exceeded
  }
  ASSERT_LT(n, 1024);
  // make sure we have latest map that marked the pool full
  test_data.m_cluster.wait_for_latest_osdmap();
  // make sure we block without FULL_TRY
  {
    ObjectWriteOperation op;
    op.write_full(bl);
    auto completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_EQ(0, ioctx.aio_operate("bar", completion.get(), &op, 0));
    sleep(5);
    // still pending: the op is intentionally left outstanding
    ASSERT_FALSE(completion->is_complete());
  }
  ioctx.close();
  ASSERT_EQ(0, test_data.m_cluster.pool_delete(p.c_str()));
}
// Basic async write smoke test, run twice: once in the default namespace and
// once after switching the ioctx to namespace "nspace".
TEST(LibRadosAio, SimpleWritePP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  {
    // default namespace
    AioTestDataPP test_data;
    ASSERT_EQ("", test_data.init());
    auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_TRUE(my_completion);
    ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                             bl1, sizeof(buf), 0));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    ASSERT_EQ(0, my_completion->get_return_value());
  }
  {
    // explicit namespace
    AioTestDataPP test_data;
    ASSERT_EQ("", test_data.init());
    test_data.m_ioctx.set_namespace("nspace");
    auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                             bl1, sizeof(buf), 0));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    ASSERT_EQ(0, my_completion->get_return_value());
  }
}
TEST(LibRadosAio, WaitForSafePP) {
  // A write completion must report success once wait_for_complete() returns.
  AioTestDataPP fixture;
  ASSERT_EQ("", fixture.init());
  std::unique_ptr<AioCompletion> comp{Rados::aio_create_completion()};
  ASSERT_TRUE(comp);
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, fixture.m_ioctx.aio_write(fixture.m_oid, comp.get(),
                                         wbl, sizeof(payload), 0));
  TestAlarm alarm;
  ASSERT_EQ(0, comp->wait_for_complete());
  ASSERT_EQ(0, comp->get_return_value());
}
// Async write of 128 bytes followed by an async read; the read must return the
// full length and the exact bytes written.
TEST(LibRadosAio, RoundTripPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
                                          &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  // aio_read's return value is the number of bytes read
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Second write/read round-trip.  NOTE(review): this body is currently
// identical to RoundTripPP above — presumably kept as a separate test slot;
// confirm whether it was meant to vary (e.g. offset or size).
TEST(LibRadosAio, RoundTripPP2) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
                                          &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
//using ObjectWriteOperation/ObjectReadOperation with iohint
// Round-trip via compound operations: write with FADVISE_DONTNEED, then a read
// op that also computes a CRC32C checksum server-side; the decoded checksum
// must match bufferlist::crc32c of the data read back.
TEST(LibRadosAio, RoundTripPP3)
{
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  auto my_completion1 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  op.write(0, bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", my_completion1.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion1->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion1->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bl.clear();
  ObjectReadOperation op1;
  op1.read(0, sizeof(buf), &bl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  // checksum the whole object with initial CRC value -1
  bufferlist init_value_bl;
  encode(static_cast<int32_t>(-1), init_value_bl);
  bufferlist csum_bl;
  op1.checksum(LIBRADOS_CHECKSUM_TYPE_CRC32C, init_value_bl,
               0, 0, 0, &csum_bl, nullptr);
  ioctx.aio_operate("test_obj", my_completion2.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));
  // checksum reply encodes a count (1 chunk) followed by the crc32c value
  ASSERT_EQ(8U, csum_bl.length());
  auto csum_bl_it = csum_bl.cbegin();
  uint32_t csum_count;
  uint32_t csum;
  decode(csum_count, csum_bl_it);
  ASSERT_EQ(1U, csum_count);
  decode(csum, csum_bl_it);
  ASSERT_EQ(bl.crc32c(-1), csum);
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Write 128 bytes, then read them back with aio_sparse_read; the returned
// extent map plus data must reconstruct the original buffer
// (checked by assert_eq_sparse).
TEST(LibRadosAio, RoundTripSparseReadPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  std::map<uint64_t, uint64_t> extents;   // offset -> length of each extent
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_sparse_read(test_data.m_oid, my_completion2.get(),
                                                 &extents, &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  assert_eq_sparse(bl1, extents, bl2);
}
TEST(LibRadosAioPP, ReadIntoBufferlist) {

  // here we test reading into a non-empty bufferlist referencing existing
  // buffers: aio_read must fill the caller-supplied static buffer (buf2)
  // in place rather than allocating a new one.

  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());

  // seed the destination bufferlist with a static buffer full of 0xbb
  bufferlist bl2;
  char buf2[sizeof(buf)];
  memset(buf2, 0xbb, sizeof(buf2));
  bl2.append(buffer::create_static(sizeof(buf2), buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
                                          &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  // the read landed directly in buf2 via the static bufferptr
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
}
// Async xattr round-trip: getxattr on a missing attribute must fail with
// -ENODATA; after an async setxattr the same attribute must read back with
// exactly the bytes that were written.
// Fix: the original constructed AioTestDataPP test_data2/test_data3 and
// init()'ed them (each creates and later destroys an entire pool) without ever
// using them — pure overhead from a copy-paste; removed.
TEST(LibRadosAioPP, XattrsRoundTripPP) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, test_data.m_ioctx.append(test_data.m_oid, bl1, sizeof(buf)));
  // async getxattr before the attribute exists -> -ENODATA
  bufferlist bl2;
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_getxattr(test_data.m_oid, my_completion.get(), attr1, bl2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(-ENODATA, my_completion->get_return_value());
  // async setxattr
  bufferlist bl3;
  bl3.append(attr1_buf, sizeof(attr1_buf));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_setxattr(test_data.m_oid, my_completion2.get(), attr1, bl3));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // async getxattr: return value is the attribute's length
  bufferlist bl4;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_getxattr(test_data.m_oid, my_completion3.get(), attr1, bl4));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(attr1_buf), my_completion3->get_return_value());
  // check content of attribute
  ASSERT_EQ(0, memcmp(bl4.c_str(), attr1_buf, sizeof(attr1_buf)));
}
// Exercise aio_rmxattr: after removing an existing xattr a getxattr must fail
// with -ENODATA, and rmxattr on an already-removed object must fail with
// -ENOENT.
// Fixes: (1) the "removed object" section memset buf2 to 0xbb but then
// appended `buf` (the 0xaa buffer from the first section) — a copy-paste bug;
// it now writes buf2 as intended.  (2) the original created unused
// AioTestDataPP test_data2..test_data5 fixtures, each of which builds and
// destroys a whole pool for nothing — removed.
TEST(LibRadosAioPP, RmXattrPP) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, test_data.m_ioctx.append(test_data.m_oid, bl1, sizeof(buf)));
  // async setxattr
  bufferlist bl2;
  bl2.append(attr1_buf, sizeof(attr1_buf));
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_setxattr(test_data.m_oid, my_completion.get(), attr1, bl2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // async rmxattr
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_rmxattr(test_data.m_oid, my_completion2.get(), attr1));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // async getxattr: the attribute is gone now
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bufferlist bl3;
  ASSERT_EQ(0, test_data.m_ioctx.aio_getxattr(test_data.m_oid, my_completion3.get(), attr1, bl3));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ(-ENODATA, my_completion3->get_return_value());
  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  bufferlist bl21;
  bl21.append(buf2, sizeof(buf2));   // was append(buf, ...) — wrong buffer
  ASSERT_EQ(0, test_data.m_ioctx.write("foo_rmxattr", bl21, sizeof(buf2), 0));
  bufferlist bl22;
  bl22.append(attr2_buf, sizeof(attr2_buf));
  // async setxattr
  auto my_completion4 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_setxattr("foo_rmxattr", my_completion4.get(), attr2, bl22));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion4->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion4->get_return_value());
  // remove object
  ASSERT_EQ(0, test_data.m_ioctx.remove("foo_rmxattr"));
  // async rmxattr on non existing object
  auto my_completion5 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_rmxattr("foo_rmxattr", my_completion5.get(), attr2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion5->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, my_completion5->get_return_value());
}
// Set two xattrs synchronously, then list them with the async aio_getxattrs
// and verify exactly those two names/values come back (any other name fails
// the test).
TEST(LibRadosIoPP, XattrListPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  // create an object with 2 attributes
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;   // deterministic non-text payload
  }
  memset(buf, 0xaa, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.append(test_data.m_oid, bl1, sizeof(buf)));
  bufferlist bl2;
  bl2.append(attr1_buf, sizeof(attr1_buf));
  ASSERT_EQ(0, test_data.m_ioctx.setxattr(test_data.m_oid, attr1, bl2));
  bufferlist bl3;
  bl3.append(attr2_buf, sizeof(attr2_buf));
  ASSERT_EQ(0, test_data.m_ioctx.setxattr(test_data.m_oid, attr2, bl3));
  // call async version of getxattrs
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  std::map<std::string, bufferlist> attrset;
  ASSERT_EQ(0, test_data.m_ioctx.aio_getxattrs(test_data.m_oid, my_completion.get(), attrset));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  for (std::map<std::string, bufferlist>::iterator i = attrset.begin();
       i != attrset.end(); ++i) {
    if (i->first == string(attr1)) {
      ASSERT_EQ(0, memcmp(i->second.c_str(), attr1_buf, sizeof(attr1_buf)));
    }
    else if (i->first == string(attr2)) {
      ASSERT_EQ(0, memcmp(i->second.c_str(), attr2_buf, sizeof(attr2_buf)));
    }
    else {
      ASSERT_EQ(0, 1);   // unexpected attribute name -> fail
    }
  }
}
// Same write/read round-trip as RoundTripPP, but the read is awaited by
// polling AioCompletion::is_complete() instead of wait_for_complete(), to
// exercise the polling API.
TEST(LibRadosAio, IsCompletePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
                                          &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;

    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test is_complete.
    while (true) {
      int is_complete = my_completion2->is_complete();
      if (is_complete)
        break;
    }
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Mirror of IsCompletePP with the polling applied to the *write* side: the
// write is busy-polled via is_complete(), then a normal awaited read verifies
// the data.
TEST(LibRadosAio, IsSafePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;

    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test rados_aio_is_safe.
    while (true) {
      int is_complete = my_completion->is_complete();
      if (is_complete)
        break;
    }
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bufferlist bl2;
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
                                          &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
TEST(LibRadosAio, ReturnValuePP) {
  // Reading an object that does not exist must surface -ENOENT through the
  // completion's return value.
  AioTestDataPP fixture;
  ASSERT_EQ("", fixture.init());
  std::unique_ptr<AioCompletion> comp{Rados::aio_create_completion()};
  ASSERT_TRUE(comp);
  bufferlist out;
  ASSERT_EQ(0, fixture.m_ioctx.aio_read("nonexistent", comp.get(),
                                        &out, 128, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, comp->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, comp->get_return_value());
}
// Write asynchronously and then call the blocking aio_flush(); after the flush
// returns, the write completion's return value is read without an explicit
// wait (aio_flush is expected to have drained the pending write), and a read
// verifies the data landed.
TEST(LibRadosAio, FlushPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  ASSERT_EQ(0, test_data.m_ioctx.aio_flush());
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
                                          &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Like FlushPP but using the asynchronous flush: once the flush completion
// fires, the earlier write completion must also report complete; a read then
// verifies the data.
TEST(LibRadosAio, FlushAsyncPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init())
;
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  auto flush_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  ASSERT_EQ(0, test_data.m_ioctx.aio_flush_async(flush_completion.get()))
;
  {
    TestAlarm alarm;
    ASSERT_EQ(0, flush_completion->wait_for_complete());
  }
  // flush completion implies the write has completed as well
  ASSERT_EQ(1, my_completion->is_complete());
  ASSERT_EQ(1, flush_completion->is_complete());
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
                                          &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Write 128 bytes, then aio_write_full with a smaller 64-byte buffer:
// write_full replaces the whole object, so a subsequent 128-byte read must
// return only the 64 new bytes.
TEST(LibRadosAio, RoundTripWriteFullPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // overwrite the object with a smaller buffer via write_full
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write_full(test_data.m_oid, my_completion2.get(), bl2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
                                          &bl3, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  // object was truncated to the write_full payload
  ASSERT_EQ((int)sizeof(buf2), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf2), bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
}
//using ObjectWriteOperation/ObjectReadOperation with iohint
// write_full round-trip through compound operations with fadvise flags.
// Fix: the original did `bl.append(buf)` where buf is a char[128] filled with
// 0xcc and *not* NUL-terminated; that decays to the const char*/string_view
// overload, which strlen-scans past the end of the array (undefined read,
// arbitrary write length, and the later 128-byte memcmp could fail).  Append
// the explicit length instead, as every sibling test does.
TEST(LibRadosAio, RoundTripWriteFullPP2)
{
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  auto my_completion1 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));   // was bl.append(buf): strlen on non-NUL data
  op.write_full(bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", my_completion1.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion1->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion1->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bl.clear();
  ObjectReadOperation op1;
  op1.read(0, sizeof(buf), &bl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ioctx.aio_operate("test_obj", my_completion2.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// aio_writesame: replicate a 32-byte pattern across a 128-byte range, then
// read the range back and verify every 32-byte chunk matches the pattern.
TEST(LibRadosAio, RoundTripWriteSamePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char full[128];
  memset(full, 0xcc, sizeof(full));
  bufferlist bl1;
  bl1.append(full, sizeof(full));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());

  /* write the same buf four times */
  char buf[32];
  size_t ws_write_len = sizeof(full);   // 128 / 32 = 4 repetitions
  memset(buf, 0xdd, sizeof(buf));
  bufferlist bl2;
  bl2.append(buf, sizeof(buf));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_writesame(test_data.m_oid, my_completion2.get(), bl2,
                                               ws_write_len, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
                                          &bl3, sizeof(full), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(full), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(full), bl3.length());
  // every 32-byte chunk of the object must equal the pattern
  for (char *cmp = bl3.c_str(); cmp < bl3.c_str() + bl3.length();
       cmp += sizeof(buf)) {
    ASSERT_EQ(0, memcmp(cmp, buf, sizeof(buf)));
  }
}
// writesame via ObjectWriteOperation with fadvise flags: replicate a 128-byte
// pattern over a 512-byte range, then read the range with an ObjectReadOperation
// and check each repetition.
TEST(LibRadosAio, RoundTripWriteSamePP2)
{
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  auto wr_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  op.writesame(0, sizeof(buf) * 4, bl);   // 4 repetitions of the pattern
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", wr_cmpl.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, wr_cmpl->wait_for_complete());
  }
  EXPECT_EQ(0, wr_cmpl->get_return_value());
  boost::scoped_ptr<AioCompletion>
                        rd_cmpl(cluster.aio_create_completion(0, 0));
  char *cmp;
  char full[sizeof(buf) * 4];
  memset(full, 0, sizeof(full));
  bufferlist fl;
  fl.append(full, sizeof(full));
  ObjectReadOperation op1;
  op1.read(0, sizeof(full), &fl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", rd_cmpl.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rd_cmpl->wait_for_complete());
  }
  EXPECT_EQ(0, rd_cmpl->get_return_value());
  // each 128-byte chunk of the object must equal the pattern
  for (cmp = fl.c_str(); cmp < fl.c_str() + fl.length(); cmp += sizeof(buf)) {
    ASSERT_EQ(0, memcmp(cmp, buf, sizeof(buf)));
  }
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
TEST(LibRadosAio, SimpleStatPPNS) {
  // aio_stat on the default namespace must report the size written by the
  // preceding aio_write.
  AioTestDataPP fixture;
  ASSERT_EQ("", fixture.init());
  std::unique_ptr<AioCompletion> write_comp{Rados::aio_create_completion()};
  ASSERT_TRUE(write_comp);
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, fixture.m_ioctx.aio_write(fixture.m_oid, write_comp.get(),
                                         wbl, sizeof(payload), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, write_comp->wait_for_complete());
  }
  ASSERT_EQ(0, write_comp->get_return_value());
  uint64_t obj_size;
  time_t obj_mtime;
  std::unique_ptr<AioCompletion> stat_comp{Rados::aio_create_completion()};
  ASSERT_TRUE(stat_comp);
  ASSERT_EQ(0, fixture.m_ioctx.aio_stat(fixture.m_oid, stat_comp.get(),
                                        &obj_size, &obj_mtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, stat_comp->wait_for_complete());
  }
  ASSERT_EQ(0, stat_comp->get_return_value());
  ASSERT_EQ(sizeof(payload), obj_size);
}
// aio_stat after an async write: the reported size must equal the number of
// bytes written.  (Same shape as SimpleStatPPNS, default namespace.)
TEST(LibRadosAio, SimpleStatPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
                                           bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
                                          &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
}
TEST(LibRadosAio, OperateMtime)
{
  // An explicit whole-second mtime supplied via ObjectWriteOperation::mtime()
  // must be stored with the object and visible through stat2(), with a zero
  // nanosecond component.
  AioTestDataPP fixture;
  ASSERT_EQ("", fixture.init());
  time_t set_mtime = 1457129052;
  {
    auto comp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    librados::ObjectWriteOperation op;
    op.mtime(&set_mtime);
    op.create(false);
    ASSERT_EQ(0, fixture.m_ioctx.aio_operate(fixture.m_oid, comp.get(), &op));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, comp->wait_for_complete());
    }
    ASSERT_EQ(0, comp->get_return_value());
  }
  {
    uint64_t obj_size;
    timespec obj_mtime;
    ASSERT_EQ(0, fixture.m_ioctx.stat2(fixture.m_oid, &obj_size, &obj_mtime));
    EXPECT_EQ(0, obj_size);
    EXPECT_EQ(set_mtime, obj_mtime.tv_sec);
    EXPECT_EQ(0, obj_mtime.tv_nsec);
  }
}
// Like OperateMtime, but with mtime2(): a full timespec (seconds plus
// nanoseconds) must round-trip through create + stat2 unchanged.
TEST(LibRadosAio, OperateMtime2)
{
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  timespec set_mtime{1457129052, 123456789};
  {
    auto c = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
    librados::ObjectWriteOperation op;
    op.mtime2(&set_mtime);
    op.create(false);
    ASSERT_EQ(0, test_data.m_ioctx.aio_operate(test_data.m_oid, c.get(), &op));
    {
      TestAlarm alarm;
      ASSERT_EQ(0, c->wait_for_complete());
    }
    ASSERT_EQ(0, c->get_return_value());
  }
  {
    uint64_t size;
    timespec mtime;
    ASSERT_EQ(0, test_data.m_ioctx.stat2(test_data.m_oid, &size, &mtime));
    EXPECT_EQ(0, size);   // create(false) makes an empty object
    EXPECT_EQ(set_mtime.tv_sec, mtime.tv_sec);
    EXPECT_EQ(set_mtime.tv_nsec, mtime.tv_nsec);
  }
}
// Write an object, stat it (size must match), aio_remove() it, then stat
// again and expect -ENOENT.
TEST(LibRadosAio, StatRemovePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // First stat: object exists with the written size.
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
  // Remove the object asynchronously.
  uint64_t psize2;
  time_t pmtime2;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_remove(test_data.m_oid, my_completion3.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion3->get_return_value());
  // Second stat: the submission succeeds but the op must fail with -ENOENT.
  auto my_completion4 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion4);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion4.get(),
					  &psize2, &pmtime2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion4->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, my_completion4->get_return_value());
}
// Exercise aio_exec(): invoke the "say_hello" method of the "hello" object
// class on a freshly written object and check its output.
TEST(LibRadosAio, ExecuteClassPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  bufferlist in, out;  // empty input; class method writes its reply to `out`
  ASSERT_EQ(0, test_data.m_ioctx.aio_exec(test_data.m_oid, my_completion2.get(),
					  "hello", "say_hello", in, &out));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // The hello example class replies with this fixed greeting.
  ASSERT_EQ(std::string("Hello, world!"), std::string(out.c_str(), out.length()));
}
using std::string;
using std::map;
using std::set;
// End-to-end coverage of asynchronous omap operations: set keys and header,
// compare (omap_cmp), enumerate via get_keys2/get_vals2/get_vals_by_keys/
// get_header, remove individual keys, and clear the whole map (which must
// also clear the header).
TEST(LibRadosAio, OmapPP) {
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  string header_str = "baz";
  bufferptr bp(header_str.c_str(), header_str.size() + 1);  // +1 keeps the NUL
  bufferlist header_to_set;
  header_to_set.push_back(bp);
  map<string, bufferlist> to_set;
  // Populate three keys ("foo", "foo2", "qfoo3") and the omap header.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    to_set["foo"] = header_to_set;
    to_set["foo2"] = header_to_set;
    to_set["qfoo3"] = header_to_set;
    op.omap_set(to_set);
    op.omap_set_header(header_to_set);
    ioctx.aio_operate("test_obj", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  // omap_cmp with a mismatching value must fail with -ECANCELED, reported
  // both through the op's out-param and the completion return value.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    map<string, pair<bufferlist, int> > assertions;
    bufferlist val;
    val.append(string("bar"));  // stored value is "baz\0", so this mismatches
    assertions["foo"] = pair<bufferlist, int>(val, CEPH_OSD_CMPXATTR_OP_EQ);
    int r;
    op.omap_cmp(assertions, &r);
    ioctx.aio_operate("test_obj", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(-ECANCELED, my_completion->get_return_value());
    ASSERT_EQ(-ECANCELED, r);
  }
  // Enumerate keys/values several different ways in one compound read op:
  // limited key listing, listing after a start key, lookup by explicit keys,
  // header fetch, and a filtered (prefix) listing.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    set<string> set_got;
    map<string, bufferlist> map_got;
    set<string> to_get;
    map<string, bufferlist> got3;
    map<string, bufferlist> got4;
    bufferlist header;
    op.omap_get_keys2("", 1, &set_got, nullptr, 0);          // first key only
    op.omap_get_vals2("foo", 1, &map_got, nullptr, 0);       // one key after "foo"
    to_get.insert("foo");
    to_get.insert("qfoo3");
    op.omap_get_vals_by_keys(to_get, &got3, 0);
    op.omap_get_header(&header, 0);
    op.omap_get_vals2("foo2", "q", 1, &got4, nullptr, 0);    // after "foo2", prefix "q"
    ioctx.aio_operate("test_obj", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
    ASSERT_EQ(header.length(), header_to_set.length());
    ASSERT_EQ(set_got.size(), (unsigned)1);
    ASSERT_EQ(*set_got.begin(), "foo");
    ASSERT_EQ(map_got.size(), (unsigned)1);
    ASSERT_EQ(map_got.begin()->first, "foo2");
    ASSERT_EQ(got3.size(), (unsigned)2);
    ASSERT_EQ(got3.begin()->first, "foo");
    ASSERT_EQ(got3.rbegin()->first, "qfoo3");
    ASSERT_EQ(got4.size(), (unsigned)1);
    ASSERT_EQ(got4.begin()->first, "qfoo3");
  }
  // Remove a single key.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    set<string> to_remove;
    to_remove.insert("foo2");
    op.omap_rm_keys(to_remove);
    ioctx.aio_operate("test_obj", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  // Two of the three keys must remain.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    set<string> set_got;
    op.omap_get_keys2("", -1, &set_got, nullptr, 0);  // -1 => no limit
    ioctx.aio_operate("test_obj", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
    ASSERT_EQ(set_got.size(), (unsigned)2);
  }
  // Clear all remaining keys.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    op.omap_clear();
    ioctx.aio_operate("test_obj", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  // No keys should survive the clear.
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    set<string> set_got;
    op.omap_get_keys2("", -1, &set_got, nullptr, 0);
    ioctx.aio_operate("test_obj", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
    ASSERT_EQ(set_got.size(), (unsigned)0);
  }
  // omap_clear clears header *and* keys
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    bufferlist bl;
    bl.append("some data");
    map<string,bufferlist> to_set;
    to_set["foo"] = bl;
    to_set["foo2"] = bl;
    to_set["qfoo3"] = bl;
    op.omap_set(to_set);
    op.omap_set_header(bl);
    ioctx.aio_operate("foo3", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectWriteOperation op;
    op.omap_clear();
    ioctx.aio_operate("foo3", my_completion.get(), &op);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
  }
  // Both the key set and the header must now be empty on "foo3".
  {
    boost::scoped_ptr<AioCompletion> my_completion(cluster.aio_create_completion(0, 0));
    ObjectReadOperation op;
    set<string> set_got;
    bufferlist hdr;
    op.omap_get_keys2("", -1, &set_got, nullptr, 0);
    op.omap_get_header(&hdr, NULL);
    ioctx.aio_operate("foo3", my_completion.get(), &op, 0);
    {
      TestAlarm alarm;
      ASSERT_EQ(0, my_completion->wait_for_complete());
    }
    EXPECT_EQ(0, my_completion->get_return_value());
    ASSERT_EQ(set_got.size(), (unsigned)0);
    ASSERT_EQ(hdr.length(), 0u);
  }
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Issue two writes at consecutive offsets (128 bytes, then 64 bytes at
// offset 128), then read back more than was written and verify a short
// read returning exactly the 192 written bytes.
TEST(LibRadosAio, MultiWritePP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // Second write: 64 bytes appended immediately after the first write.
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion2.get(),
					   bl2, sizeof(buf2), sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // Request more bytes than exist; the read must return only what was written.
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, (sizeof(buf) + sizeof(buf2) * 3), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)(sizeof(buf) + sizeof(buf2)), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf) + sizeof(buf2), bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(bl3.c_str() + sizeof(buf), buf2, sizeof(buf2)));
}
// Take an exclusive lock, release it via aio_unlock(), and prove the
// release worked by re-acquiring the same lock.
TEST(LibRadosAio, AioUnlockPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, test_data.m_ioctx.lock_exclusive(test_data.m_oid, "TestLock", "Cookie", "", NULL, 0));
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);  // guard against allocation failure, like sibling tests
  ASSERT_EQ(0, test_data.m_ioctx.aio_unlock(test_data.m_oid, "TestLock", "Cookie", my_completion.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // Re-acquiring the exclusive lock succeeds only if aio_unlock released it.
  // (Removed an unused `bufferlist bl2` local that served no purpose.)
  ASSERT_EQ(0, test_data.m_ioctx.lock_exclusive(test_data.m_oid, "TestLock", "Cookie", "", NULL, 0));
}
// Per-test fixture for the erasure-coded (EC) aio tests: init() creates a
// uniquely named temporary EC pool plus an IoCtx on it, and the destructor
// tears both down.  Mirrors AioTestDataPP but targets an EC pool.
class AioTestDataECPP
{
public:
  AioTestDataECPP()
    : m_init(false),
      m_oid("foo")
  {}
  ~AioTestDataECPP()
  {
    // Clean up only if init() fully succeeded; otherwise there is nothing
    // (or only partially created state already rolled back) to destroy.
    if (m_init) {
      m_ioctx.close();
      destroy_one_ec_pool_pp(m_pool_name, m_cluster);
    }
  }
  // Create the temporary EC pool and open an IoCtx on it.
  // Returns "" on success, or a human-readable error description.
  std::string init()
  {
    int ret;
    auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_pool_name = get_temp_pool_name(pool_prefix);
    std::string err = create_one_ec_pool_pp(m_pool_name, m_cluster);
    if (!err.empty()) {
      ostringstream oss;
      oss << "create_one_ec_pool(" << m_pool_name << ") failed: error " << err;
      return oss.str();
    }
    ret = m_cluster.ioctx_create(m_pool_name.c_str(), m_ioctx);
    if (ret) {
      // Roll back the pool we just created before reporting the failure.
      destroy_one_ec_pool_pp(m_pool_name, m_cluster);
      ostringstream oss;
      oss << "rados_ioctx_create failed: error " << ret;
      return oss.str();
    }
    // Derive a per-test object name so concurrent tests do not collide.
    m_oid = fmt::format("oid_{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
    m_init = true;
    return "";
  }
  Rados m_cluster;           // cluster connection handle
  IoCtx m_ioctx;             // ioctx bound to the temporary EC pool
  std::string m_pool_name;   // name of the temporary EC pool
  bool m_init;               // true once init() succeeded (guards cleanup)
  std::string m_oid;         // per-test object name
};
// EC test cases
// Basic EC-pool aio write: once into the default namespace, once into an
// explicit namespace ("nspace"), each against a fresh fixture.
TEST(LibRadosAioEC, SimpleWritePP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // Write into the default namespace.
  {
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  }
  // Repeat the same write after switching the ioctx to namespace "nspace".
  {
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  test_data.m_ioctx.set_namespace("nspace");
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  }
}
TEST(LibRadosAioEC, WaitForSafePP) {
  // A single EC write must complete successfully under the test alarm.
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto comp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(comp);
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, comp.get(),
                                           wbl, sizeof(payload), 0));
  TestAlarm alarm;
  ASSERT_EQ(0, comp->wait_for_complete());
  ASSERT_EQ(0, comp->get_return_value());
}
TEST(LibRadosAioEC, RoundTripPP) {
  // Write 128 bytes to an EC pool and read them back, verifying both the
  // returned length and the payload contents.
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto wcomp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(wcomp);
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist outbl;
  outbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, wcomp.get(),
                                           outbl, sizeof(payload), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, wcomp->wait_for_complete());
  }
  ASSERT_EQ(0, wcomp->get_return_value());
  auto rcomp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(rcomp);
  bufferlist inbl;
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, rcomp.get(),
                                          &inbl, sizeof(payload), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rcomp->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(payload), rcomp->get_return_value());
  ASSERT_EQ(sizeof(payload), inbl.length());
  ASSERT_EQ(0, memcmp(payload, inbl.c_str(), sizeof(payload)));
}
// Second write/read round trip on an EC pool.
// NOTE(review): this body is currently identical to RoundTripPP; the C
// variants differ in which wait primitive they use — confirm whether this
// PP variant was meant to diverge as well.
TEST(LibRadosAioEC, RoundTripPP2) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
//using ObjectWriteOperation/ObjectReadOperation with iohint
// Round trip via ObjectWriteOperation/ObjectReadOperation carrying fadvise
// I/O hints (DONTNEED on write, DONTNEED|RANDOM on read).
TEST(LibRadosAioEC, RoundTripPP3)
{
  SKIP_IF_CRIMSON();
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  auto my_completion1 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  // Pass the explicit length: buf is filled with 0xcc and has no NUL
  // terminator, so the append(const char*) overload previously used here
  // would run strlen() off the end of the array (undefined behavior).
  bl.append(buf, sizeof(buf));
  op.write(0, bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ioctx.aio_operate("test_obj", my_completion1.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion1->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion1->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bl.clear();
  ObjectReadOperation op1;
  op1.read(0, sizeof(buf), &bl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ioctx.aio_operate("test_obj", my_completion2.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// Two successive aio_append() calls of 128 bytes each, then a single read
// verifying both chunks landed back-to-back.
TEST(LibRadosAio, RoundTripAppendPP) {
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion.get(),
					    bl1, sizeof(buf)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // Second append with a distinct fill pattern so ordering is verifiable.
  char buf2[128];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion2.get(),
					    bl2, sizeof(buf2)));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // Read both chunks back in one go and verify content and order.
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, 2 * sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)(sizeof(buf) * 2), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf) * 2, bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(bl3.c_str() + sizeof(buf), buf2, sizeof(buf2)));
}
TEST(LibRadosAioPP, RemoveTestPP) {
  // Append data synchronously, remove the object via aio_remove(), and
  // confirm a subsequent synchronous read fails with -ENOENT.
  char payload[128];
  memset(payload, 0xaa, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  AioTestDataPP test_data;
  ASSERT_EQ("", test_data.init());
  ASSERT_EQ(0, test_data.m_ioctx.append(test_data.m_oid, wbl, sizeof(payload)));
  auto rm_done = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_EQ(0, test_data.m_ioctx.aio_remove(test_data.m_oid, rm_done.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, rm_done->wait_for_complete());
  }
  ASSERT_EQ(0, rm_done->get_return_value());
  bufferlist rbl;
  ASSERT_EQ(-ENOENT, test_data.m_ioctx.read(test_data.m_oid, rbl, sizeof(payload), 0));
}
// Write 128 bytes to an EC pool, then read them back with
// aio_sparse_read() and verify the extent map matches the data written.
TEST(LibRadosAioEC, RoundTripSparseReadPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // extents maps offset -> length for each populated region.
  map<uint64_t, uint64_t> extents;
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_sparse_read(test_data.m_oid, my_completion2.get(),
					      &extents, &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // Helper cross-checks the extent map and data against the written bl1.
  assert_eq_sparse(bl1, extents, bl2);
}
// EC pools require stripe-aligned appends.  Append one full alignment-sized
// chunk, then a half chunk (accepted), then another half chunk which must be
// rejected with -EOPNOTSUPP; finally read everything back and verify.
TEST(LibRadosAioEC, RoundTripAppendPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  bool req;
  ASSERT_EQ(0, test_data.m_ioctx.pool_requires_alignment2(&req));
  ASSERT_TRUE(req);
  uint64_t alignment;
  ASSERT_EQ(0, test_data.m_ioctx.pool_required_alignment2(&alignment));
  ASSERT_NE((unsigned)0, alignment);
  const int bsize = alignment;
  // RAII buffers: the previous raw new[]/delete[] pair leaked whenever an
  // intervening ASSERT_* returned early from the test body.
  std::string buf(bsize, '\xcc');
  bufferlist bl1;
  bl1.append(buf.data(), bsize);
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion.get(),
					    bl1, bsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  const int hbsize = bsize / 2;
  std::string buf2(hbsize, '\xdd');
  bufferlist bl2;
  bl2.append(buf2.data(), hbsize);
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion2.get(),
					    bl2, hbsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // The object length is now unaligned, so a further append must fail.
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_append(test_data.m_oid, my_completion3.get(),
					    bl2, hbsize));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  EXPECT_EQ(-EOPNOTSUPP, my_completion3->get_return_value());
  // Read back more than was written; exactly bsize + hbsize bytes exist.
  bufferlist bl3;
  auto my_completion4 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion4);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion4.get(),
					  &bl3, bsize * 3, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion4->wait_for_complete());
  }
  const int tbsize = bsize + hbsize;
  ASSERT_EQ(tbsize, my_completion4->get_return_value());
  ASSERT_EQ((unsigned)tbsize, bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf.data(), bsize));
  ASSERT_EQ(0, memcmp(bl3.c_str() + bsize, buf2.data(), hbsize));
}
// Exercise AioCompletion::is_complete() by busy-polling a read instead of
// calling wait_for_complete().
TEST(LibRadosAioEC, IsCompletePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to test is_complete.
    while (true) {
      int is_complete = my_completion2->is_complete();
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Busy-poll a *write* completion via is_complete(), then read the data back
// normally.  (Historically this polled is_safe; that notion was folded into
// is_complete, hence the test name.)
TEST(LibRadosAioEC, IsSafePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    // Busy-wait until the AIO completes.
    // Normally we wouldn't do this, but we want to poll completion of a
    // write via is_complete (the modern equivalent of rados_aio_is_safe).
    while (true) {
      int is_complete = my_completion->is_complete();
      if (is_complete)
	break;
    }
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bufferlist bl2;
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
TEST(LibRadosAioEC, ReturnValuePP) {
  // Reading a nonexistent object must surface -ENOENT through the
  // completion's return value, even though the submission succeeds.
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto comp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(comp);
  bufferlist rbl;
  ASSERT_EQ(0, test_data.m_ioctx.aio_read("nonexistent", comp.get(),
                                          &rbl, 128, 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, comp->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, comp->get_return_value());
}
// Verify the synchronous aio_flush(): after it returns, the earlier write's
// completion can be queried without an explicit wait_for_complete().
TEST(LibRadosAioEC, FlushPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  // aio_flush blocks until all pending aio on this ioctx has completed,
  // so reading the return value immediately afterwards is safe.
  ASSERT_EQ(0, test_data.m_ioctx.aio_flush());
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// Verify aio_flush_async(): when the flush completion fires, all previously
// submitted aio (the write) must be complete as well.
TEST(LibRadosAioEC, FlushAsyncPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  auto flush_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xee, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  ASSERT_EQ(0, test_data.m_ioctx.aio_flush_async(flush_completion.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, flush_completion->wait_for_complete());
  }
  // The flush completing implies the earlier write has completed too.
  ASSERT_EQ(1, my_completion->is_complete());
  ASSERT_EQ(1, flush_completion->is_complete());
  ASSERT_EQ(0, my_completion->get_return_value());
  bufferlist bl2;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion2.get(),
					  &bl2, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf), my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), bl2.length());
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// aio_write_full() must truncate-and-replace: after writing 128 bytes and
// then write_full()ing 64 bytes, a read returns only the 64 new bytes.
TEST(LibRadosAioEC, RoundTripWriteFullPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // Replace the whole object with a smaller payload.
  char buf2[64];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_write_full(test_data.m_oid, my_completion2.get(), bl2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  // Reading the original size must now yield only the new, shorter content.
  bufferlist bl3;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
					  &bl3, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ((int)sizeof(buf2), my_completion3->get_return_value());
  ASSERT_EQ(sizeof(buf2), bl3.length());
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
}
//using ObjectWriteOperation/ObjectReadOperation with iohint
// write_full round trip via ObjectWriteOperation/ObjectReadOperation with
// fadvise I/O hints (NOCACHE on write, NOCACHE|RANDOM on read).
TEST(LibRadosAioEC, RoundTripWriteFullPP2)
{
  SKIP_IF_CRIMSON();
  Rados cluster;
  auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
  std::string pool_name = get_temp_pool_name(pool_prefix);
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);
  auto my_completion1 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ObjectWriteOperation op;
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  // Pass the explicit length: buf is filled with 0xcc and has no NUL
  // terminator, so the append(const char*) overload previously used here
  // would run strlen() off the end of the array (undefined behavior).
  bl.append(buf, sizeof(buf));
  op.write_full(bl);
  op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
  ioctx.aio_operate("test_obj", my_completion1.get(), &op);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion1->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion1->get_return_value());
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  bl.clear();
  ObjectReadOperation op1;
  op1.read(0, sizeof(buf), &bl, NULL);
  op1.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_NOCACHE|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ioctx.aio_operate("test_obj", my_completion2.get(), &op1, 0);
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  EXPECT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));
  ioctx.remove("test_obj");
  destroy_one_pool_pp(pool_name, cluster);
}
// EC-pool variant of SimpleStatPP: write an object, aio_stat() it, and
// confirm the reported size matches what was written.
TEST(LibRadosAioEC, SimpleStatPP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
}
// Write-then-stat on an EC pool.
// NOTE(review): despite the "NS" suffix this test never calls
// set_namespace(), so it currently duplicates SimpleStatPP — confirm
// whether a namespace switch was intended here.
TEST(LibRadosAioEC, SimpleStatPPNS) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
}
// EC-pool variant of StatRemovePP: write, stat (size must match), remove,
// then stat again expecting -ENOENT.
TEST(LibRadosAioEC, StatRemovePP) {
  SKIP_IF_CRIMSON();
  AioTestDataECPP test_data;
  ASSERT_EQ("", test_data.init());
  auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion);
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
					   bl1, sizeof(buf), 0));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion->get_return_value());
  // First stat: object exists with the written size.
  uint64_t psize;
  time_t pmtime;
  auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion2);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion2.get(),
					  &psize, &pmtime));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion2->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion2->get_return_value());
  ASSERT_EQ(sizeof(buf), psize);
  // Remove the object asynchronously.
  uint64_t psize2;
  time_t pmtime2;
  auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion3);
  ASSERT_EQ(0, test_data.m_ioctx.aio_remove(test_data.m_oid, my_completion3.get()));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion3->wait_for_complete());
  }
  ASSERT_EQ(0, my_completion3->get_return_value());
  // Second stat: submission succeeds but the op must fail with -ENOENT.
  auto my_completion4 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
  ASSERT_TRUE(my_completion4);
  ASSERT_EQ(0, test_data.m_ioctx.aio_stat(test_data.m_oid, my_completion4.get(),
					  &psize2, &pmtime2));
  {
    TestAlarm alarm;
    ASSERT_EQ(0, my_completion4->wait_for_complete());
  }
  ASSERT_EQ(-ENOENT, my_completion4->get_return_value());
}
// Execute an object-class method ("hello"/"say_hello") via aio_exec on an EC
// pool object and verify the class's output buffer.
TEST(LibRadosAioEC, ExecuteClassPP) {
SKIP_IF_CRIMSON();
AioTestDataECPP test_data;
ASSERT_EQ("", test_data.init());
auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion);
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
bl1, sizeof(buf), 0));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion->wait_for_complete());
}
ASSERT_EQ(0, my_completion->get_return_value());
auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion2);
bufferlist in, out;
// "hello" is one of the stock test object classes loaded on the OSDs
ASSERT_EQ(0, test_data.m_ioctx.aio_exec(test_data.m_oid, my_completion2.get(),
"hello", "say_hello", in, &out));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion2->wait_for_complete());
}
ASSERT_EQ(0, my_completion2->get_return_value());
ASSERT_EQ(std::string("Hello, world!"), std::string(out.c_str(), out.length()));
}
// Omap is not supported on erasure-coded pools: an ObjectWriteOperation that
// sets omap keys/header must complete with -EOPNOTSUPP.
TEST(LibRadosAioEC, OmapPP) {
SKIP_IF_CRIMSON();
Rados cluster;
auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
std::string pool_name = get_temp_pool_name(pool_prefix);
ASSERT_EQ("", create_one_ec_pool_pp(pool_name, cluster));
IoCtx ioctx;
cluster.ioctx_create(pool_name.c_str(), ioctx);
string header_str = "baz";
bufferptr bp(header_str.c_str(), header_str.size() + 1);
bufferlist header_to_set;
header_to_set.push_back(bp);
map<string, bufferlist> to_set;
{
auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ObjectWriteOperation op;
to_set["foo"] = header_to_set;
to_set["foo2"] = header_to_set;
to_set["qfoo3"] = header_to_set;
op.omap_set(to_set);
op.omap_set_header(header_to_set);
ioctx.aio_operate("test_obj", my_completion.get(), &op);
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion->wait_for_complete());
}
// EC pools reject omap operations
EXPECT_EQ(-EOPNOTSUPP, my_completion->get_return_value());
}
ioctx.remove("test_obj");
destroy_one_ec_pool_pp(pool_name, cluster);
}
// EC pools do not allow arbitrary-offset overwrites: the second write (at a
// non-append offset) must fail with -EOPNOTSUPP, and a subsequent read must
// return only the data from the first write.
TEST(LibRadosAioEC, MultiWritePP) {
SKIP_IF_CRIMSON();
AioTestDataECPP test_data;
ASSERT_EQ("", test_data.init());
auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion);
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
bl1, sizeof(buf), 0));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion->wait_for_complete());
}
ASSERT_EQ(0, my_completion->get_return_value());
char buf2[64];
memset(buf2, 0xdd, sizeof(buf2));
bufferlist bl2;
bl2.append(buf2, sizeof(buf2));
auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion2);
ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion2.get(),
bl2, sizeof(buf2), sizeof(buf)));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion2->wait_for_complete());
}
ASSERT_EQ(-EOPNOTSUPP, my_completion2->get_return_value());
bufferlist bl3;
auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion3);
// ask for more than exists; read should return only the first write's bytes
ASSERT_EQ(0, test_data.m_ioctx.aio_read(test_data.m_oid, my_completion3.get(),
&bl3, (sizeof(buf) + sizeof(buf2) * 3), 0));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion3->wait_for_complete());
}
ASSERT_EQ((int)sizeof(buf), my_completion3->get_return_value());
ASSERT_EQ(sizeof(buf), bl3.length());
ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
}
// Race an aio_remove against an aio_write on the same object with
// objecter_retry_writes_after_first_reply enabled: the remove (submitted
// first, on a nonexistent object) gets -ENOENT, the write succeeds, and the
// object exists afterwards.
TEST(LibRadosAio, RacingRemovePP) {
AioTestDataPP test_data;
ASSERT_EQ("", test_data.init({{"objecter_retry_writes_after_first_reply", "true"}}));
auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion);
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion2);
// submission order matters: remove first, then the write it races with
ASSERT_EQ(0, test_data.m_ioctx.aio_remove(test_data.m_oid, my_completion2.get()))
;
ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
bl, sizeof(buf), 0));
{
TestAlarm alarm;
my_completion2->wait_for_complete();
my_completion->wait_for_complete();
}
ASSERT_EQ(-ENOENT, my_completion2->get_return_value());
ASSERT_EQ(0, my_completion->get_return_value());
ASSERT_EQ(0, test_data.m_ioctx.stat(test_data.m_oid, nullptr, nullptr));
}
// aio_cmpext: a matching extent comparison returns 0; a mismatching one
// returns -MAX_ERRNO - offset (offset 0 here, hence exactly -MAX_ERRNO).
TEST(LibRadosAio, RoundTripCmpExtPP) {
AioTestDataPP test_data;
ASSERT_EQ("", test_data.init());
auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion);
char full[128];
memset(full, 0xcc, sizeof(full));
bufferlist bl1;
bl1.append(full, sizeof(full));
ASSERT_EQ(0, test_data.m_ioctx.aio_write(test_data.m_oid, my_completion.get(),
bl1, sizeof(full), 0));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion->wait_for_complete());
}
ASSERT_EQ(0, my_completion->get_return_value());
/* compare with match */
bufferlist cbl;
cbl.append(full, sizeof(full));
auto my_completion2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_EQ(0, test_data.m_ioctx.aio_cmpext(test_data.m_oid, my_completion2.get(), 0, cbl));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion2->wait_for_complete());
}
ASSERT_EQ(0, my_completion2->get_return_value());
/* compare with mismatch */
memset(full, 0xdd, sizeof(full));
cbl.clear();
cbl.append(full, sizeof(full));
auto my_completion3 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_EQ(0, test_data.m_ioctx.aio_cmpext(test_data.m_oid, my_completion3.get(), 0, cbl));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion3->wait_for_complete());
}
ASSERT_EQ(-MAX_ERRNO, my_completion3->get_return_value());
}
// Exercise cmpext both as a step in a write operation and as a step in a
// read operation, covering the matching and mismatching case of each.
TEST(LibRadosAio, RoundTripCmpExtPP2)
{
int ret;
char buf[128];
char miscmp_buf[128];
bufferlist cbl;
Rados cluster;
auto pool_prefix = fmt::format("{}_", ::testing::UnitTest::GetInstance()->current_test_info()->name());
std::string pool_name = get_temp_pool_name(pool_prefix);
ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
IoCtx ioctx;
cluster.ioctx_create(pool_name.c_str(), ioctx);
// initial write of a known pattern that the cmpexts below compare against
auto wr_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ObjectWriteOperation wr_op;
memset(buf, 0xcc, sizeof(buf));
memset(miscmp_buf, 0xdd, sizeof(miscmp_buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
wr_op.write_full(bl);
wr_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ioctx.aio_operate("test_obj", wr_cmpl.get(), &wr_op);
{
TestAlarm alarm;
ASSERT_EQ(0, wr_cmpl->wait_for_complete());
}
EXPECT_EQ(0, wr_cmpl->get_return_value());
/* cmpext as write op. first match then mismatch */
// NOTE: librados operation objects accumulate ops and are not cleared by
// aio_operate(), so each submission below builds a fresh operation.
// (The previous version reused wr_op/rd_op, silently replaying write_full
// and every earlier cmpext on each subsequent submission.)
auto wr_cmpext_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
cbl.append(buf, sizeof(buf));
ret = 0;
ObjectWriteOperation wr_match_op;
wr_match_op.cmpext(0, cbl, &ret);
wr_match_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ioctx.aio_operate("test_obj", wr_cmpext_cmpl.get(), &wr_match_op);
{
TestAlarm alarm;
ASSERT_EQ(0, wr_cmpext_cmpl->wait_for_complete());
}
EXPECT_EQ(0, wr_cmpext_cmpl->get_return_value());
EXPECT_EQ(0, ret);
auto wr_cmpext_cmpl2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
cbl.clear();
cbl.append(miscmp_buf, sizeof(miscmp_buf));
ret = 0;
ObjectWriteOperation wr_miscmp_op;
wr_miscmp_op.cmpext(0, cbl, &ret);
wr_miscmp_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ioctx.aio_operate("test_obj", wr_cmpext_cmpl2.get(), &wr_miscmp_op);
{
TestAlarm alarm;
ASSERT_EQ(0, wr_cmpext_cmpl2->wait_for_complete());
}
// -MAX_ERRNO - offset encodes the first mismatching byte; offset 0 here
EXPECT_EQ(-MAX_ERRNO, wr_cmpext_cmpl2->get_return_value());
EXPECT_EQ(-MAX_ERRNO, ret);
/* cmpext as read op */
auto rd_cmpext_cmpl = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ObjectReadOperation rd_match_op;
cbl.clear();
cbl.append(buf, sizeof(buf));
ret = 0;
rd_match_op.cmpext(0, cbl, &ret);
rd_match_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ioctx.aio_operate("test_obj", rd_cmpext_cmpl.get(), &rd_match_op, 0);
{
TestAlarm alarm;
ASSERT_EQ(0, rd_cmpext_cmpl->wait_for_complete());
}
EXPECT_EQ(0, rd_cmpext_cmpl->get_return_value());
EXPECT_EQ(0, ret);
auto rd_cmpext_cmpl2 = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ObjectReadOperation rd_miscmp_op;
cbl.clear();
cbl.append(miscmp_buf, sizeof(miscmp_buf));
ret = 0;
rd_miscmp_op.cmpext(0, cbl, &ret);
rd_miscmp_op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ioctx.aio_operate("test_obj", rd_cmpext_cmpl2.get(), &rd_miscmp_op, 0);
{
TestAlarm alarm;
ASSERT_EQ(0, rd_cmpext_cmpl2->wait_for_complete());
}
EXPECT_EQ(-MAX_ERRNO, rd_cmpext_cmpl2->get_return_value());
EXPECT_EQ(-MAX_ERRNO, ret);
ioctx.remove("test_obj");
destroy_one_pool_pp(pool_name, cluster);
}
// Shared state for the PoolEIOFlag test below: the completion callback and
// the test body communicate through these globals, all guarded by my_lock.
ceph::mutex my_lock = ceph::make_mutex("my_lock");
set<unsigned> inflight;   // write indices submitted but not yet completed
unsigned max_success = 0; // highest write index that returned success
unsigned min_failed = 0;  // lowest write index that failed (0 = none yet)
struct io_info {
  unsigned i;             // sequence number of this write
  AioCompletion *c;       // completion the callback waits on and inspects
};
// Completion callback for PoolEIOFlag: records, under my_lock, the highest
// write index that succeeded and the lowest that failed, so the test can
// check that the pool-EIO cutover point is clean.
// `arg` is the heap-allocated io_info created by the submitter (the old
// comment claiming it was an AioCompletion* was wrong); we release the
// completion and free the info here so nothing leaks.
void pool_io_callback(completion_t cb, void *arg /* Actually io_info* */)
{
io_info *info = (io_info *)arg;
unsigned long i = info->i;
{
TestAlarm alarm;
ASSERT_EQ(0, info->c->wait_for_complete());
}
int r = info->c->get_return_value();
//cout << "finish " << i << " r = " << r << std::endl;
// Releasing a completion from within its own callback is allowed by
// librados; previously both the AioCompletion and the io_info leaked for
// every write issued by the test.
info->c->release();
delete info;
std::scoped_lock l(my_lock);
inflight.erase(i);
if (r == 0) {
if (i > max_success) {
max_success = i;
}
} else {
if (!min_failed || i < min_failed) {
min_failed = i;
}
}
}
// Flip the pool's "eio" flag while async writes are in flight and verify the
// failure boundary is clean: every write numbered <= max_success succeeded,
// every write >= min_failed failed, and the two meet with no gap.
TEST(LibRadosAio, PoolEIOFlag) {
AioTestDataPP test_data;
ASSERT_EQ("", test_data.init());
bufferlist bl;
bl.append("some data");
std::thread *t = nullptr;
unsigned max = 100;
unsigned timeout = max * 10;
unsigned long i = 1;
my_lock.lock();
for (; min_failed == 0 && i <= timeout; ++i) {
io_info *info = new io_info;
info->i = i;
info->c = Rados::aio_create_completion();
info->c->set_complete_callback((void*)info, pool_io_callback);
inflight.insert(i);
my_lock.unlock();
int r = test_data.m_ioctx.aio_write(test_data.m_oid, info->c, bl, bl.length(), 0);
//cout << "start " << i << " r = " << r << std::endl;
if (i == max / 2) {
cout << "setting pool EIO" << std::endl;
t = new std::thread(
[&] {
bufferlist empty;
ASSERT_EQ(0, test_data.m_cluster.mon_command(
fmt::format(R"({{
"prefix": "osd pool set",
"pool": "{}",
"var": "eio",
"val": "true"
}})", test_data.m_pool_name),
empty, nullptr, nullptr));
});
}
std::this_thread::sleep_for(10ms);
my_lock.lock();
if (r < 0) {
inflight.erase(i);
break;
}
}
// The EIO thread is only spawned once i reaches max/2; if the loop exits
// earlier (submission failure), t is still null, so guard the join/delete
// instead of dereferencing a null pointer as the previous code did.
if (t) {
t->join();
delete t;
}
// wait for ios to finish
for (; !inflight.empty(); ++i) {
cout << "waiting for " << inflight.size() << std::endl;
my_lock.unlock();
sleep(1);
my_lock.lock();
}
cout << "max_success " << max_success << ", min_failed " << min_failed << std::endl;
ASSERT_TRUE(max_success + 1 == min_failed);
my_lock.unlock();
}
// This test case reproduces https://tracker.ceph.com/issues/57152
TEST(LibRadosAio, MultiReads) {
// here we test multithreaded aio reads
AioTestDataPP test_data;
ASSERT_EQ("", test_data.init());
auto my_completion = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(my_completion);
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, test_data.m_ioctx.aio_write("foo", my_completion.get(),
bl1, sizeof(buf), 0));
{
TestAlarm alarm;
ASSERT_EQ(0, my_completion->wait_for_complete());
}
ASSERT_EQ(0, my_completion->get_return_value());
// Don't use std::vector to store bufferlists (e.g for parallelizing aio_reads),
// as they are being moved whenever the vector resizes
// and will cause invalidated references.
std::deque<std::pair<bufferlist, std::unique_ptr<AioCompletion>>> reads;
// issue 100 concurrent reads of the same object; each pending read holds a
// pointer to its bufferlist, which is why the container must not relocate
for (int i = 0; i < 100; i++) {
// std::deque is appropriate here as emplace_back() is obliged to
// preserve the referenced inserted element. (Unlike insert() or erase())
auto& [bl, aiocp] = reads.emplace_back();
aiocp = std::unique_ptr<AioCompletion>{Rados::aio_create_completion()};
ASSERT_TRUE(aiocp);
ASSERT_EQ(0, test_data.m_ioctx.aio_read("foo", aiocp.get(),
&bl, sizeof(buf), 0));
}
// drain all reads and verify each returned the full written pattern
for (auto& [bl, aiocp] : reads) {
{
TestAlarm alarm;
ASSERT_EQ(0, aiocp->wait_for_complete());
}
ASSERT_EQ((int)sizeof(buf), aiocp->get_return_value());
ASSERT_EQ(0, memcmp(buf, bl.c_str(), sizeof(buf)));
}
}
| 82,588 | 32.463938 | 107 |
cc
|
null |
ceph-main/src/test/librados/asio.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "librados/librados_asio.h"
#include <gtest/gtest.h>
#include "common/ceph_argparse.h"
#include "common/debug.h"
#include "common/errno.h"
#include "global/global_init.h"
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
#include <spawn/spawn.hpp>
#include <boost/asio/use_future.hpp>
#define dout_subsys ceph_subsys_rados
#define dout_context g_ceph_context
using namespace std;
// test fixture for global setup/teardown
// Connects once per test binary, creates (or reuses) the test pool, and
// seeds an object named "exist" containing "hello" that the read tests use.
class AsioRados : public ::testing::Test {
  static constexpr auto poolname = "ceph_test_rados_api_asio";
 protected:
  static librados::Rados rados;
  static librados::IoCtx io;
  // writes to snapio fail immediately with -EROFS. this is used to test errors
  // that come from inside the initiating function, rather than passed to the
  // AioCompletion callback
  static librados::IoCtx snapio;
 public:
  static void SetUpTestCase() {
    ASSERT_EQ(0, rados.init_with_context(g_ceph_context));
    ASSERT_EQ(0, rados.connect());
    // open/create test pool
    int r = rados.ioctx_create(poolname, io);
    if (r == -ENOENT) {
      r = rados.pool_create(poolname);
      if (r == -EEXIST) {
        r = 0;
      } else if (r == 0) {
        r = rados.ioctx_create(poolname, io);
      }
    }
    ASSERT_EQ(0, r);
    ASSERT_EQ(0, rados.ioctx_create(poolname, snapio));
    // reading from a snapshot context makes all writes on snapio fail -EROFS
    snapio.snap_set_read(1);
    // initialize the "exist" object
    bufferlist bl;
    bl.append("hello");
    ASSERT_EQ(0, io.write_full("exist", bl));
  }
  static void TearDownTestCase() {
    rados.shutdown();
  }
};
librados::Rados AsioRados::rados;
librados::IoCtx AsioRados::io;
librados::IoCtx AsioRados::snapio;
// async_read with plain callbacks: an existing object yields its contents,
// a missing object yields ENOENT.
TEST_F(AsioRados, AsyncReadCallback)
{
  boost::asio::io_service service;
  auto on_success = [&] (boost::system::error_code ec, bufferlist bl) {
    EXPECT_FALSE(ec);
    EXPECT_EQ("hello", bl.to_str());
  };
  auto on_failure = [&] (boost::system::error_code ec, bufferlist bl) {
    EXPECT_EQ(boost::system::errc::no_such_file_or_directory, ec);
  };
  librados::async_read(service, io, "exist", 256, 0, on_success);
  librados::async_read(service, io, "noexist", 256, 0, on_failure);
  service.run();
}
// async_read with std::future completion tokens: success resolves to the
// object's contents, failure surfaces as a thrown system_error on get().
TEST_F(AsioRados, AsyncReadFuture)
{
  boost::asio::io_service service;
  std::future<bufferlist> f1 = librados::async_read(service, io, "exist", 256,
                                                    0, boost::asio::use_future);
  std::future<bufferlist> f2 = librados::async_read(service, io, "noexist", 256,
                                                    0, boost::asio::use_future);
  service.run();
  EXPECT_NO_THROW({
    auto bl = f1.get();
    EXPECT_EQ("hello", bl.to_str());
  });
  EXPECT_THROW(f2.get(), boost::system::system_error);
}
// async_read with stackful coroutines (spawn/yield_context): the error code
// is delivered through yield[ec] instead of an exception.
TEST_F(AsioRados, AsyncReadYield)
{
  boost::asio::io_service service;
  auto success_cr = [&] (spawn::yield_context yield) {
    boost::system::error_code ec;
    auto bl = librados::async_read(service, io, "exist", 256, 0, yield[ec]);
    EXPECT_FALSE(ec);
    EXPECT_EQ("hello", bl.to_str());
  };
  spawn::spawn(service, success_cr);
  auto failure_cr = [&] (spawn::yield_context yield) {
    boost::system::error_code ec;
    auto bl = librados::async_read(service, io, "noexist", 256, 0, yield[ec]);
    EXPECT_EQ(boost::system::errc::no_such_file_or_directory, ec);
  };
  spawn::spawn(service, failure_cr);
  service.run();
}
// async_write with plain callbacks: writing via the normal ioctx succeeds;
// writing via snapio (snapshot read context) fails with EROFS.
TEST_F(AsioRados, AsyncWriteCallback)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  auto success_cb = [&] (boost::system::error_code ec) {
    EXPECT_FALSE(ec);
  };
  librados::async_write(service, io, "exist", bl, bl.length(), 0,
                        success_cb);
  auto failure_cb = [&] (boost::system::error_code ec) {
    EXPECT_EQ(boost::system::errc::read_only_file_system, ec);
  };
  librados::async_write(service, snapio, "exist", bl, bl.length(), 0,
                        failure_cb);
  service.run();
}
// async_write with std::future tokens: the snapio write's EROFS surfaces as
// a system_error thrown from future::get().
TEST_F(AsioRados, AsyncWriteFuture)
{
  boost::asio::io_service service;
  bufferlist payload;
  payload.append("hello");
  auto ok_future = librados::async_write(service, io, "exist", payload,
                                         payload.length(), 0,
                                         boost::asio::use_future);
  auto err_future = librados::async_write(service, snapio, "exist", payload,
                                          payload.length(), 0,
                                          boost::asio::use_future);
  service.run();
  EXPECT_NO_THROW(ok_future.get());
  EXPECT_THROW(err_future.get(), boost::system::system_error);
}
// async_write with stackful coroutines: the normal ioctx write succeeds, the
// snapio write fails with EROFS delivered through yield[ec].
TEST_F(AsioRados, AsyncWriteYield)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  auto success_cr = [&] (spawn::yield_context yield) {
    boost::system::error_code ec;
    librados::async_write(service, io, "exist", bl, bl.length(), 0,
                          yield[ec]);
    EXPECT_FALSE(ec);
    // (a previous EXPECT_EQ("hello", bl.to_str()) was dropped here: it only
    // re-checked the local input buffer — copy-paste from the read test —
    // and could never detect a write failure)
  };
  spawn::spawn(service, success_cr);
  auto failure_cr = [&] (spawn::yield_context yield) {
    boost::system::error_code ec;
    librados::async_write(service, snapio, "exist", bl, bl.length(), 0,
                          yield[ec]);
    EXPECT_EQ(boost::system::errc::read_only_file_system, ec);
  };
  spawn::spawn(service, failure_cr);
  service.run();
}
// async_operate with an ObjectReadOperation and plain callbacks.
TEST_F(AsioRados, AsyncReadOperationCallback)
{
  boost::asio::io_service service;
  {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    auto success_cb = [&] (boost::system::error_code ec, bufferlist bl) {
      EXPECT_FALSE(ec);
      EXPECT_EQ("hello", bl.to_str());
    };
    librados::async_operate(service, io, "exist", &op, 0, success_cb);
  }
  {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    auto failure_cb = [&] (boost::system::error_code ec, bufferlist bl) {
      EXPECT_EQ(boost::system::errc::no_such_file_or_directory, ec);
    };
    librados::async_operate(service, io, "noexist", &op, 0, failure_cb);
  }
  service.run();
}
// async_operate with an ObjectReadOperation and std::future tokens.
TEST_F(AsioRados, AsyncReadOperationFuture)
{
  boost::asio::io_service service;
  std::future<bufferlist> f1;
  {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    f1 = librados::async_operate(service, io, "exist", &op, 0,
                                 boost::asio::use_future);
  }
  std::future<bufferlist> f2;
  {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    f2 = librados::async_operate(service, io, "noexist", &op, 0,
                                 boost::asio::use_future);
  }
  service.run();
  EXPECT_NO_THROW({
    auto bl = f1.get();
    EXPECT_EQ("hello", bl.to_str());
  });
  EXPECT_THROW(f2.get(), boost::system::system_error);
}
// async_operate with an ObjectReadOperation inside stackful coroutines.
TEST_F(AsioRados, AsyncReadOperationYield)
{
  boost::asio::io_service service;
  auto success_cr = [&] (spawn::yield_context yield) {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    boost::system::error_code ec;
    auto bl = librados::async_operate(service, io, "exist", &op, 0,
                                      yield[ec]);
    EXPECT_FALSE(ec);
    EXPECT_EQ("hello", bl.to_str());
  };
  spawn::spawn(service, success_cr);
  auto failure_cr = [&] (spawn::yield_context yield) {
    librados::ObjectReadOperation op;
    op.read(0, 0, nullptr, nullptr);
    boost::system::error_code ec;
    auto bl = librados::async_operate(service, io, "noexist", &op, 0,
                                      yield[ec]);
    EXPECT_EQ(boost::system::errc::no_such_file_or_directory, ec);
  };
  spawn::spawn(service, failure_cr);
  service.run();
}
// async_operate with an ObjectWriteOperation and plain callbacks; the snapio
// submission fails with EROFS.
TEST_F(AsioRados, AsyncWriteOperationCallback)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    auto success_cb = [&] (boost::system::error_code ec) {
      EXPECT_FALSE(ec);
    };
    librados::async_operate(service, io, "exist", &op, 0, success_cb);
  }
  {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    auto failure_cb = [&] (boost::system::error_code ec) {
      EXPECT_EQ(boost::system::errc::read_only_file_system, ec);
    };
    librados::async_operate(service, snapio, "exist", &op, 0, failure_cb);
  }
  service.run();
}
// async_operate with an ObjectWriteOperation and std::future tokens.
TEST_F(AsioRados, AsyncWriteOperationFuture)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  std::future<void> f1;
  {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    f1 = librados::async_operate(service, io, "exist", &op, 0,
                                 boost::asio::use_future);
  }
  std::future<void> f2;
  {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    f2 = librados::async_operate(service, snapio, "exist", &op, 0,
                                 boost::asio::use_future);
  }
  service.run();
  EXPECT_NO_THROW(f1.get());
  EXPECT_THROW(f2.get(), boost::system::system_error);
}
// async_operate with an ObjectWriteOperation inside stackful coroutines.
TEST_F(AsioRados, AsyncWriteOperationYield)
{
  boost::asio::io_service service;
  bufferlist bl;
  bl.append("hello");
  auto success_cr = [&] (spawn::yield_context yield) {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    boost::system::error_code ec;
    librados::async_operate(service, io, "exist", &op, 0, yield[ec]);
    EXPECT_FALSE(ec);
  };
  spawn::spawn(service, success_cr);
  auto failure_cr = [&] (spawn::yield_context yield) {
    librados::ObjectWriteOperation op;
    op.write_full(bl);
    boost::system::error_code ec;
    librados::async_operate(service, snapio, "exist", &op, 0, yield[ec]);
    EXPECT_EQ(boost::system::errc::read_only_file_system, ec);
  };
  spawn::spawn(service, failure_cr);
  service.run();
}
// Standalone gtest entry point: build a CephContext from argv/environment so
// the AsioRados fixture can connect with the usual client configuration.
int main(int argc, char **argv)
{
  auto args = argv_to_vec(argc, argv);
  env_to_vec(args);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY, 0);
  common_init_finish(cct.get());
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 10,242 | 26.683784 | 80 |
cc
|
null |
ceph-main/src/test/librados/c_read_operations.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// Tests for the C API coverage of atomic read operations
#include <cstring> // For memcpy
#include <errno.h>
#include <string>
#include "include/buffer.h"
#include "include/denc.h"
#include "include/err.h"
#include "include/rados/librados.h"
#include "include/rbd/features.h" // For RBD_FEATURES_ALL
#include "include/scope_guard.h"
#include "test/librados/TestCase.h"
#include "test/librados/test.h"
// Fixture constants shared by every CReadOpsTest case below.
const char *data = "testdata";
const char *obj = "testobj";
// constexpr (rather than `const size_t len = strlen(data)`, which is not a
// constant expression) so that the `char buf[len]` arrays in the tests are
// real fixed-size arrays instead of GNU variable-length-array extensions.
constexpr size_t len = sizeof("testdata") - 1;
// Fixture with helpers for exercising the C read-op API: object setup and
// teardown, xattr comparison, and omap/xattr iterator verification.
class CReadOpsTest : public RadosTest {
protected:
  // Write the fixed test payload into the test object.
  void write_object() {
    // Create an object and write to it
    ASSERT_EQ(0, rados_write(ioctx, obj, data, len, 0));
  }
  // Remove the test object; pairs with write_object().
  void remove_object() {
    ASSERT_EQ(0, rados_remove(ioctx, obj));
  }
  // Run a single cmpxattr read op against `obj` and return the op's result
  // (1 on comparison success, -ECANCELED on mismatch).
  int cmp_xattr(const char *xattr, const char *value, size_t value_len,
		uint8_t cmp_op)
  {
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_cmpxattr(op, xattr, cmp_op, value, value_len);
    int r = rados_read_op_operate(op, ioctx, obj, 0);
    rados_release_read_op(op);
    return r;
  }
  // Fetch `obj`'s omap via all three accessors (get_vals2, get_keys2,
  // get_vals_by_keys) in one read op and verify each iterator matches the
  // expected keys/vals.
  void fetch_and_verify_omap_vals(char const* const* keys,
				  char const* const* vals,
				  const size_t *lens,
				  size_t len)
  {
    rados_omap_iter_t iter_vals, iter_keys, iter_vals_by_key;
    int r_vals, r_keys, r_vals_by_key;
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_omap_get_vals2(op, NULL, NULL, 100, &iter_vals, NULL, &r_vals);
    rados_read_op_omap_get_keys2(op, NULL, 100, &iter_keys, NULL, &r_keys);
    rados_read_op_omap_get_vals_by_keys(op, keys, len,
					&iter_vals_by_key, &r_vals_by_key);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    rados_release_read_op(op);
    ASSERT_EQ(0, r_vals);
    ASSERT_EQ(0, r_keys);
    ASSERT_EQ(0, r_vals_by_key);
    // NOTE(review): runtime-sized arrays here are GNU VLA extensions, not
    // standard C++ — consider std::vector if portability matters.
    const char *zeros[len];
    size_t zero_lens[len];
    memset(zeros, 0, sizeof(zeros));
    memset(zero_lens, 0, sizeof(zero_lens));
    compare_omap_vals(keys, vals, lens, len, iter_vals);
    // the keys iterator carries no values, so compare against NULL/0
    compare_omap_vals(keys, zeros, zero_lens, len, iter_keys);
    compare_omap_vals(keys, vals, lens, len, iter_vals_by_key);
  }
  // Walk `iter` and assert it yields exactly keys/vals/lens in order, then
  // confirm the iterator is exhausted.
  void compare_omap_vals(char const* const* keys,
			 char const* const* vals,
			 const size_t *lens,
			 size_t len,
			 rados_omap_iter_t iter)
  {
    size_t i = 0;
    char *key = NULL;
    char *val = NULL;
    size_t val_len = 0;
    ASSERT_EQ(len, rados_omap_iter_size(iter));
    while (i < len) {
      ASSERT_EQ(0, rados_omap_get_next(iter, &key, &val, &val_len));
      if (val_len == 0 && key == NULL && val == NULL)
	break;
      if (key)
	EXPECT_EQ(std::string(keys[i]), std::string(key));
      else
	EXPECT_EQ(keys[i], key);
      ASSERT_EQ(0, memcmp(vals[i], val, val_len));
      ASSERT_EQ(lens[i], val_len);
      ++i;
    }
    ASSERT_EQ(i, len);
    // one more get_next must signal end-of-iteration with all-NULL outputs
    ASSERT_EQ(0, rados_omap_get_next(iter, &key, &val, &val_len));
    ASSERT_EQ((char*)NULL, key);
    ASSERT_EQ((char*)NULL, val);
    ASSERT_EQ(0u, val_len);
    rados_omap_get_end(iter);
  }
  // these two used to test omap funcs that accept length for both keys and vals
  void fetch_and_verify_omap_vals2(char const* const* keys,
				   char const* const* vals,
				   const size_t *keylens,
				   const size_t *vallens,
				   size_t len)
  {
    rados_omap_iter_t iter_vals_by_key;
    int r_vals_by_key;
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_omap_get_vals_by_keys2(op, keys, len, keylens,
					 &iter_vals_by_key, &r_vals_by_key);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    rados_release_read_op(op);
    ASSERT_EQ(0, r_vals_by_key);
    compare_omap_vals2(keys, vals, keylens, vallens, len, iter_vals_by_key);
  }
  // Length-aware variant of compare_omap_vals (keys may contain NULs).
  void compare_omap_vals2(char const* const* keys,
			  char const* const* vals,
			  const size_t *keylens,
			  const size_t *vallens,
			  size_t len,
			  rados_omap_iter_t iter)
  {
    size_t i = 0;
    char *key = NULL;
    char *val = NULL;
    size_t key_len = 0;
    size_t val_len = 0;
    ASSERT_EQ(len, rados_omap_iter_size(iter));
    while (i < len) {
      ASSERT_EQ(0, rados_omap_get_next2(iter, &key, &val, &key_len, &val_len));
      if (key_len == 0 && val_len == 0 && key == NULL && val == NULL)
	break;
      if (key)
	EXPECT_EQ(std::string(keys[i], keylens[i]), std::string(key, key_len));
      else
	EXPECT_EQ(keys[i], key);
      ASSERT_EQ(val_len, vallens[i]);
      ASSERT_EQ(key_len, keylens[i]);
      ASSERT_EQ(0, memcmp(vals[i], val, val_len));
      ++i;
    }
    ASSERT_EQ(i, len);
    ASSERT_EQ(0, rados_omap_get_next2(iter, &key, &val, &key_len, &val_len));
    ASSERT_EQ((char*)NULL, key);
    ASSERT_EQ((char*)NULL, val);
    ASSERT_EQ(0u, key_len);
    ASSERT_EQ(0u, val_len);
    rados_omap_get_end(iter);
  }
  // Walk a getxattrs iterator and assert it yields exactly keys/vals/lens in
  // order, then confirm it is exhausted.
  void compare_xattrs(char const* const* keys,
		      char const* const* vals,
		      const size_t *lens,
		      size_t len,
		      rados_xattrs_iter_t iter)
  {
    size_t i = 0;
    char *key = NULL;
    char *val = NULL;
    size_t val_len = 0;
    while (i < len) {
      ASSERT_EQ(0, rados_getxattrs_next(iter, (const char**) &key,
					(const char**) &val, &val_len));
      if (key == NULL)
	break;
      EXPECT_EQ(std::string(keys[i]), std::string(key));
      if (val != NULL) {
	EXPECT_EQ(0, memcmp(vals[i], val, val_len));
      }
      EXPECT_EQ(lens[i], val_len);
      ++i;
    }
    ASSERT_EQ(i, len);
    ASSERT_EQ(0, rados_getxattrs_next(iter, (const char**)&key,
				      (const char**)&val, &val_len));
    ASSERT_EQ((char*)NULL, key);
    ASSERT_EQ((char*)NULL, val);
    ASSERT_EQ(0u, val_len);
    rados_getxattrs_end(iter);
  }
};
// A read op can be created and released without ever being submitted.
TEST_F(CReadOpsTest, NewDelete) {
  rados_read_op_t read_op = rados_create_read_op();
  ASSERT_TRUE(read_op);
  rados_release_read_op(read_op);
}
// With LIBRADOS_OP_FLAG_FAILOK, a failing exec step (wrong class input) does
// not fail the whole op: operate returns 0 while the step's rval is -EIO.
TEST_F(CReadOpsTest, SetOpFlags) {
  write_object();
  rados_read_op_t op = rados_create_read_op();
  size_t bytes_read = 0;
  char *out = NULL;
  int rval = 0;
  rados_read_op_exec(op, "rbd", "get_id", NULL, 0, &out,
		     &bytes_read, &rval);
  rados_read_op_set_flags(op, LIBRADOS_OP_FLAG_FAILOK);
  EXPECT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(-EIO, rval);
  EXPECT_EQ(0u, bytes_read);
  EXPECT_EQ((char*)NULL, out);
  rados_release_read_op(op);
  remove_object();
}
// assert_exists fails with -ENOENT on a missing object (both synchronously
// and via aio) and succeeds once the object has been written.
TEST_F(CReadOpsTest, AssertExists) {
  rados_read_op_t op = rados_create_read_op();
  rados_read_op_assert_exists(op);
  ASSERT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  // same check through the async path
  op = rados_create_read_op();
  rados_read_op_assert_exists(op);
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion(NULL, NULL, NULL, &completion));
  auto sg = make_scope_guard([&] { rados_aio_release(completion); });
  ASSERT_EQ(0, rados_aio_read_op_operate(op, ioctx, completion, obj, 0));
  rados_aio_wait_for_complete(completion);
  ASSERT_EQ(-ENOENT, rados_aio_get_return_value(completion));
  rados_release_read_op(op);
  write_object();
  op = rados_create_read_op();
  rados_read_op_assert_exists(op);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  remove_object();
}
// assert_version: -EOVERFLOW when the asserted version is newer than the
// object's, -ERANGE when older, success on an exact match.
TEST_F(CReadOpsTest, AssertVersion) {
  write_object();
  // Write to the object a second time to guarantee that its
  // version number is greater than 0
  write_object();
  uint64_t v = rados_get_last_version(ioctx);
  rados_read_op_t op = rados_create_read_op();
  rados_read_op_assert_version(op, v+1);
  ASSERT_EQ(-EOVERFLOW, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  op = rados_create_read_op();
  rados_read_op_assert_version(op, v-1);
  ASSERT_EQ(-ERANGE, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  op = rados_create_read_op();
  rados_read_op_assert_version(op, v);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  remove_object();
}
// cmpxattr semantics: comparisons are <stored xattr> OP <supplied value>;
// success returns 1 (not 0), mismatch returns -ECANCELED. Shorter supplied
// values and embedded NUL bytes must compare correctly too.
TEST_F(CReadOpsTest, CmpXattr) {
  write_object();
  char buf[len];
  memset(buf, 0xcc, sizeof(buf));
  const char *xattr = "test";
  rados_setxattr(ioctx, obj, xattr, buf, sizeof(buf));
  // equal value
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_LTE));
  // < value
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf) - 1, LIBRADOS_CMPXATTR_OP_LTE));
  // > value
  memset(buf, 0xcd, sizeof(buf));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, sizeof(buf), LIBRADOS_CMPXATTR_OP_LTE));
  // check that null bytes are compared correctly
  rados_setxattr(ioctx, obj, xattr, "\0\0", 2);
  buf[0] = '\0';
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_LTE));
  buf[1] = '\0';
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_EQ));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_NE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_GT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_GTE));
  EXPECT_EQ(-ECANCELED, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_LT));
  EXPECT_EQ(1, cmp_xattr(xattr, buf, 2, LIBRADOS_CMPXATTR_OP_LTE));
  remove_object();
}
// rados_read_op_read must return identical data for every combination of the
// optional bytes_read/rval out-parameters, and with fadvise flags set.
TEST_F(CReadOpsTest, Read) {
  write_object();
  char buf[len];
  // check that using read_ops returns the same data with
  // or without bytes_read and rval out params
  {
    // neither out param
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_read(op, 0, len, buf, NULL, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    // rval only
    rados_read_op_t op = rados_create_read_op();
    int rval;
    rados_read_op_read(op, 0, len, buf, NULL, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    // bytes_read only
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    rados_read_op_read(op, 0, len, buf, &bytes_read, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    // both out params
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    int rval;
    rados_read_op_read(op, 0, len, buf, &bytes_read, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  {
    // both out params plus fadvise DONTNEED
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    int rval;
    rados_read_op_read(op, 0, len, buf, &bytes_read, &rval);
    rados_read_op_set_flags(op, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }
  remove_object();
}
// Exercise rados_read_op_checksum with different checksum algorithms,
// output-buffer configurations, and chunk sizes.  For CRC32C the result
// buffer is laid out as: [0] = number of checksums, [1..] = per-chunk
// checksum values; expected values are recomputed locally with
// ceph_crc32c for comparison.
TEST_F(CReadOpsTest, Checksum) {
  write_object();

  {
    // XXHASH64 over the whole object, result discarded (NULL out buffer)
    rados_read_op_t op = rados_create_read_op();
    ceph_le64 init_value(-1);
    rados_read_op_checksum(op, LIBRADOS_CHECKSUM_TYPE_XXHASH64,
			   reinterpret_cast<char *>(&init_value),
			   sizeof(init_value), 0, len, 0, NULL, 0, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    rados_release_read_op(op);
  }

  {
    // CRC32C over the whole object: expect one checksum matching a
    // locally computed ceph_crc32c over the same data
    ceph_le32 init_value(-1);
    ceph_le32 crc[2];
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_checksum(op, LIBRADOS_CHECKSUM_TYPE_CRC32C,
			   reinterpret_cast<char *>(&init_value),
			   sizeof(init_value), 0, len, 0,
			   reinterpret_cast<char *>(&crc), sizeof(crc),
			   nullptr);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(1U, crc[0]);  // one checksum returned
    uint32_t expected_crc = ceph_crc32c(
      -1, reinterpret_cast<const uint8_t*>(data), static_cast<uint32_t>(len));
    ASSERT_EQ(expected_crc, crc[1]);
    rados_release_read_op(op);
  }

  {
    // XXHASH32 with no output buffer but an rval out-param
    ceph_le32 init_value(-1);
    int rval;
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_checksum(op, LIBRADOS_CHECKSUM_TYPE_XXHASH32,
			   reinterpret_cast<char *>(&init_value),
			   sizeof(init_value), 0, len, 0, nullptr, 0, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, rval);
    rados_release_read_op(op);
  }

  {
    // CRC32C with chunk_size 4: expect two per-chunk checksums, each
    // matching a locally computed CRC over the corresponding 4 bytes
    ceph_le32 init_value(-1);
    ceph_le32 crc[3];
    int rval;
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_checksum(op, LIBRADOS_CHECKSUM_TYPE_CRC32C,
			   reinterpret_cast<char *>(&init_value),
			   sizeof(init_value), 0, len, 4,
			   reinterpret_cast<char *>(&crc), sizeof(crc), &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(2U, crc[0]);  // two chunks => two checksums
    uint32_t expected_crc[2];
    expected_crc[0] = ceph_crc32c(
      -1, reinterpret_cast<const uint8_t*>(data), 4U);
    expected_crc[1] = ceph_crc32c(
      -1, reinterpret_cast<const uint8_t*>(data + 4), 4U);
    ASSERT_EQ(expected_crc[0], crc[1]);
    ASSERT_EQ(expected_crc[1], crc[2]);
    ASSERT_EQ(0, rval);
    rados_release_read_op(op);
  }

  remove_object();
}
// A read issued with LIBRADOS_OPERATION_ORDER_READS_WRITES (and an
// fadvise hint) must still return the full, correct object contents.
TEST_F(CReadOpsTest, RWOrderedRead) {
  write_object();

  char out_buf[len];
  size_t nread = 0;
  int read_rc = 0;

  rados_read_op_t rop = rados_create_read_op();
  rados_read_op_read(rop, 0, len, out_buf, &nread, &read_rc);
  rados_read_op_set_flags(rop, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ASSERT_EQ(0, rados_read_op_operate(rop, ioctx, obj,
				     LIBRADOS_OPERATION_ORDER_READS_WRITES));
  ASSERT_EQ(len, nread);
  ASSERT_EQ(0, read_rc);
  ASSERT_EQ(0, memcmp(data, out_buf, len));
  rados_release_read_op(rop);

  remove_object();
}
// Request twice the object's size (len * 2) and verify a short read:
// operate still succeeds, bytes_read reports only len, and the data that
// was returned matches the object contents.  Repeated for each
// combination of the optional bytes_read / rval out-params.
TEST_F(CReadOpsTest, ShortRead) {
  write_object();

  char buf[len * 2];
  // check that using read_ops returns the same data with
  // or without bytes_read and rval out params
  {
    // neither out-param
    rados_read_op_t op = rados_create_read_op();
    rados_read_op_read(op, 0, len * 2, buf, NULL, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }

  {
    // rval only
    rados_read_op_t op = rados_create_read_op();
    int rval;
    rados_read_op_read(op, 0, len * 2, buf, NULL, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }

  {
    // bytes_read only: must report the actual (short) length
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    rados_read_op_read(op, 0, len * 2, buf, &bytes_read, NULL);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }

  {
    // both out-params
    rados_read_op_t op = rados_create_read_op();
    size_t bytes_read = 0;
    int rval;
    rados_read_op_read(op, 0, len * 2, buf, &bytes_read, &rval);
    ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
    ASSERT_EQ(len, bytes_read);
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(data, buf, len));
    rados_release_read_op(op);
  }

  remove_object();
}
// Invoke the rbd object class method "get_all_features" via a read_op and
// verify the class-allocated output buffer decodes to RBD_FEATURES_ALL.
TEST_F(CReadOpsTest, Exec) {
  // create object so we don't get -ENOENT
  write_object();

  rados_read_op_t rop = rados_create_read_op();
  ASSERT_TRUE(rop);
  size_t outlen = 0;
  char *outbuf = NULL;
  int exec_rc = 0;
  rados_read_op_exec(rop, "rbd", "get_all_features", NULL, 0, &outbuf,
		     &outlen, &exec_rc);
  ASSERT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  EXPECT_EQ(0, exec_rc);
  EXPECT_TRUE(outbuf);

  uint64_t features;
  EXPECT_EQ(sizeof(features), outlen);
  // decode through a bufferlist so we also verify the returned buffer
  // really holds as many bytes as it claims
  bufferlist bl;
  bl.append(outbuf, outlen);
  auto it = bl.cbegin();
  ceph::decode(features, it);
  ASSERT_EQ(RBD_FEATURES_ALL, features);
  rados_buffer_free(outbuf);

  remove_object();
}
// Same class-method call as Exec, but with a caller-supplied output
// buffer (rados_read_op_exec_user_buf).  Covers the success path, a
// too-short buffer (-ERANGE with bytes_read reset to 0), and a call where
// the input and output buffers alias and no out-params are requested.
TEST_F(CReadOpsTest, ExecUserBuf) {
  // create object so we don't get -ENOENT
  write_object();

  rados_read_op_t op = rados_create_read_op();
  size_t bytes_read = 0;
  uint64_t features;
  char out[sizeof(features)];
  int rval = 0;
  rados_read_op_exec_user_buf(op, "rbd", "get_all_features", NULL, 0, out,
			      sizeof(out), &bytes_read, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  EXPECT_EQ(0, rval);
  EXPECT_EQ(sizeof(features), bytes_read);

  // buffer too short
  bytes_read = 1024;
  op = rados_create_read_op();
  rados_read_op_exec_user_buf(op, "rbd", "get_all_features", NULL, 0, out,
			      sizeof(features) - 1, &bytes_read, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  EXPECT_EQ(0u, bytes_read);  // reset on failure, not left at 1024
  EXPECT_EQ(-ERANGE, rval);

  // input buffer and no rval or bytes_read
  op = rados_create_read_op();
  rados_read_op_exec_user_buf(op, "rbd", "get_all_features", out, sizeof(out),
			      out, sizeof(out), NULL, NULL);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  remove_object();
}
// rados_read_op_stat: on a missing object operate fails with -ENOENT and
// the per-op rval is -EIO while the size out-param is left untouched.
// After a write with an explicit mtime, size and mtime must round-trip;
// NULL out-params are also accepted.
TEST_F(CReadOpsTest, Stat) {
  rados_read_op_t op = rados_create_read_op();
  uint64_t size = 1;
  int rval = 0;
  rados_read_op_stat(op, &size, NULL, &rval);
  EXPECT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(-EIO, rval);
  EXPECT_EQ(1u, size);  // untouched on failure
  rados_release_read_op(op);

  // write the object with a fixed mtime so we can verify it back
  time_t ts = 1457129052;
  rados_write_op_t wop = rados_create_write_op();
  rados_write_op_write(wop, data, len, 0);
  ASSERT_EQ(0, rados_write_op_operate(wop, ioctx, obj, &ts, 0));
  rados_release_write_op(wop);

  time_t ts2;
  op = rados_create_read_op();
  rados_read_op_stat(op, &size, &ts2, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(0, rval);
  EXPECT_EQ(len, size);
  EXPECT_EQ(ts2, ts);
  rados_release_read_op(op);

  // all out-params may be NULL
  op = rados_create_read_op();
  rados_read_op_stat(op, NULL, NULL, NULL);
  EXPECT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);

  // stat after removal fails again with -ENOENT
  remove_object();

  op = rados_create_read_op();
  rados_read_op_stat(op, NULL, NULL, NULL);
  EXPECT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
}
// Same coverage as Stat, but via rados_read_op_stat2 /
// rados_write_op_operate2, which use struct timespec for
// nanosecond-precision mtimes; both tv_sec and tv_nsec must round-trip.
TEST_F(CReadOpsTest, Stat2) {
  rados_read_op_t op = rados_create_read_op();

  uint64_t size = 1;
  int rval = 0;
  rados_read_op_stat2(op, &size, NULL, &rval);
  EXPECT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(-EIO, rval);
  EXPECT_EQ(1u, size);  // untouched on failure
  rados_release_read_op(op);

  // write with an explicit high-resolution mtime
  struct timespec ts;
  ts.tv_sec = 1457129052;
  ts.tv_nsec = 123456789;
  rados_write_op_t wop = rados_create_write_op();
  rados_write_op_write(wop, data, len, 0);
  ASSERT_EQ(0, rados_write_op_operate2(wop, ioctx, obj, &ts, 0));
  rados_release_write_op(wop);

  struct timespec ts2 = {};
  op = rados_create_read_op();
  rados_read_op_stat2(op, &size, &ts2, &rval);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  EXPECT_EQ(0, rval);
  EXPECT_EQ(len, size);
  EXPECT_EQ(ts2.tv_sec, ts.tv_sec);
  EXPECT_EQ(ts2.tv_nsec, ts.tv_nsec);
  rados_release_read_op(op);

  // all out-params may be NULL
  op = rados_create_read_op();
  rados_read_op_stat2(op, NULL, NULL, NULL);
  EXPECT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);

  // stat2 after removal fails again with -ENOENT
  remove_object();

  op = rados_create_read_op();
  rados_read_op_stat2(op, NULL, NULL, NULL);
  EXPECT_EQ(-ENOENT, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
}
// Omap read/write lifecycle: set four key/value pairs (including empty
// and NUL-containing values, with explicit lengths), read them back with
// get_vals2/get_keys2, check omap_cmp guards (both satisfied and
// deliberately failing), remove keys, and finally clear everything.
TEST_F(CReadOpsTest, Omap) {
  char *keys[] = {(char*)"bar",
		  (char*)"foo",
		  (char*)"test1",
		  (char*)"test2"};
  char *vals[] = {(char*)"",
		  (char*)"\0",
		  (char*)"abc",
		  (char*)"va\0lue"};
  size_t lens[] = {0, 1, 3, 6};

  // check for -ENOENT before the object exists and when it exists
  // with no omap entries
  rados_omap_iter_t iter_vals;
  rados_read_op_t rop = rados_create_read_op();
  rados_read_op_omap_get_vals2(rop, "", "", 10, &iter_vals, NULL, NULL);
  ASSERT_EQ(-ENOENT, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  compare_omap_vals(NULL, NULL, NULL, 0, iter_vals);

  write_object();

  fetch_and_verify_omap_vals(NULL, NULL, NULL, 0);

  // write and check for the k/v pairs
  rados_write_op_t op = rados_create_write_op();
  rados_write_op_omap_set(op, keys, vals, lens, 4);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);

  fetch_and_verify_omap_vals(keys, vals, lens, 4);

  // filtered fetch: get_vals2 with prefix "test" and get_keys2 starting
  // after "test", each limited to one entry; both should return "test1"
  // (the keys iterator carries empty values)
  rados_omap_iter_t iter_keys;
  int r_vals = -1, r_keys = -1;
  rop = rados_create_read_op();
  rados_read_op_omap_get_vals2(rop, "", "test", 1, &iter_vals, NULL, &r_vals);
  rados_read_op_omap_get_keys2(rop, "test", 1, &iter_keys, NULL, &r_keys);
  ASSERT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  EXPECT_EQ(0, r_vals);
  EXPECT_EQ(0, r_keys);
  EXPECT_EQ(1u, rados_omap_iter_size(iter_vals));
  EXPECT_EQ(1u, rados_omap_iter_size(iter_keys));

  compare_omap_vals(&keys[2], &vals[2], &lens[2], 1, iter_vals);
  compare_omap_vals(&keys[2], &vals[0], &lens[0], 1, iter_keys);

  // check omap_cmp finds all expected values
  rop = rados_create_read_op();
  int rvals[4];
  for (int i = 0; i < 4; ++i)
    rados_read_op_omap_cmp(rop, keys[i], LIBRADOS_CMPXATTR_OP_EQ,
			   vals[i], lens[i], &rvals[i]);
  EXPECT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  for (int i = 0; i < 4; ++i)
    EXPECT_EQ(0, rvals[i]);

  // try to remove keys with a guard that should fail
  op = rados_create_write_op();
  rados_write_op_omap_cmp(op, keys[2], LIBRADOS_CMPXATTR_OP_LT,
			  vals[2], lens[2], &r_vals);
  rados_write_op_omap_rm_keys(op, keys, 2);
  EXPECT_EQ(-ECANCELED, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);

  // see http://tracker.ceph.com/issues/19518
  //ASSERT_EQ(-ECANCELED, r_vals);

  // verifying the keys are still there, and then remove them
  op = rados_create_write_op();
  rados_write_op_omap_cmp(op, keys[0], LIBRADOS_CMPXATTR_OP_EQ,
			  vals[0], lens[0], NULL);
  rados_write_op_omap_cmp(op, keys[1], LIBRADOS_CMPXATTR_OP_EQ,
			  vals[1], lens[1], NULL);
  rados_write_op_omap_rm_keys(op, keys, 2);
  EXPECT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);

  fetch_and_verify_omap_vals(&keys[2], &vals[2], &lens[2], 2);

  // clear the rest and check there are none left
  op = rados_create_write_op();
  rados_write_op_omap_clear(op);
  EXPECT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);

  fetch_and_verify_omap_vals(NULL, NULL, NULL, 0);

  remove_object();
}
// Same omap lifecycle as the Omap test but with embedded NUL bytes in
// both keys and values, using the "2"-suffixed APIs (omap_set2,
// omap_cmp2, omap_rm_keys2) that take explicit key and value lengths
// instead of relying on NUL termination.
TEST_F(CReadOpsTest, OmapNuls) {
  char *keys[] = {(char*)"1\0bar",
		  (char*)"2baar\0",
		  (char*)"3baa\0rr"};
  char *vals[] = {(char*)"_\0var",
		  (char*)"_vaar\0",
		  (char*)"__vaa\0rr"};
  size_t nklens[] = {5, 6, 7};  // key lengths including embedded NULs
  size_t nvlens[] = {5, 6, 8};  // value lengths including embedded NULs
  const int paircount = 3;

  // check for -ENOENT before the object exists and when it exists
  // with no omap entries
  rados_omap_iter_t iter_vals;
  rados_read_op_t rop = rados_create_read_op();
  rados_read_op_omap_get_vals2(rop, "", "", 10, &iter_vals, NULL, NULL);
  ASSERT_EQ(-ENOENT, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  compare_omap_vals(NULL, NULL, NULL, 0, iter_vals);

  write_object();

  fetch_and_verify_omap_vals(NULL, NULL, NULL, 0);

  // write and check for the k/v pairs
  rados_write_op_t op = rados_create_write_op();
  rados_write_op_omap_set2(op, keys, vals, nklens, nvlens, paircount);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);

  fetch_and_verify_omap_vals2(keys, vals, nklens, nvlens, paircount);

  // check omap_cmp finds all expected values
  rop = rados_create_read_op();
  int rvals[4];
  for (int i = 0; i < paircount; ++i)
    rados_read_op_omap_cmp2(rop, keys[i], LIBRADOS_CMPXATTR_OP_EQ,
			    vals[i], nklens[i], nvlens[i], &rvals[i]);
  EXPECT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  rados_release_read_op(rop);
  for (int i = 0; i < paircount; ++i)
    EXPECT_EQ(0, rvals[i]);

  // try to remove keys with a guard that should fail
  int r_vals = -1;
  op = rados_create_write_op();
  rados_write_op_omap_cmp2(op, keys[2], LIBRADOS_CMPXATTR_OP_LT,
			   vals[2], nklens[2], nvlens[2], &r_vals);
  rados_write_op_omap_rm_keys(op, keys, 2);
  EXPECT_EQ(-ECANCELED, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);

  // verifying the keys are still there, and then remove them
  op = rados_create_write_op();
  rados_write_op_omap_cmp2(op, keys[0], LIBRADOS_CMPXATTR_OP_EQ,
			   vals[0], nklens[0], nvlens[0], NULL);
  rados_write_op_omap_cmp2(op, keys[1], LIBRADOS_CMPXATTR_OP_EQ,
			   vals[1], nklens[1], nvlens[1], NULL);
  rados_write_op_omap_rm_keys2(op, keys, nklens, 2);
  EXPECT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);

  fetch_and_verify_omap_vals2(&keys[2], &vals[2], &nklens[2], &nvlens[2], 1);

  // clear the rest and check there are none left
  op = rados_create_write_op();
  rados_write_op_omap_clear(op);
  EXPECT_EQ(0, rados_write_op_operate(op, ioctx, obj, NULL, 0));
  rados_release_write_op(op);

  fetch_and_verify_omap_vals(NULL, NULL, NULL, 0);

  remove_object();
}
// rados_read_op_getxattrs: an object with no xattrs yields an empty
// iterator; after setting four xattrs (including empty and
// NUL-containing values) they must all be returned.
TEST_F(CReadOpsTest, GetXattrs) {
  write_object();

  char *keys[] = {(char*)"bar",
		  (char*)"foo",
		  (char*)"test1",
		  (char*)"test2"};
  char *vals[] = {(char*)"",
		  (char*)"\0",
		  (char*)"abc",
		  (char*)"va\0lue"};
  size_t lens[] = {0, 1, 3, 6};

  // no xattrs set yet: iterator must come back empty
  int getxattrs_rc = 1;
  rados_read_op_t rop = rados_create_read_op();
  rados_xattrs_iter_t xattr_iter;
  rados_read_op_getxattrs(rop, &xattr_iter, &getxattrs_rc);
  EXPECT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  EXPECT_EQ(0, getxattrs_rc);
  rados_release_read_op(rop);
  compare_xattrs(keys, vals, lens, 0, xattr_iter);

  // set all four xattrs, then expect them all back
  for (size_t i = 0; i < 4; ++i)
    rados_setxattr(ioctx, obj, keys[i], vals[i], lens[i]);

  getxattrs_rc = 1;
  rop = rados_create_read_op();
  rados_read_op_getxattrs(rop, &xattr_iter, &getxattrs_rc);
  EXPECT_EQ(0, rados_read_op_operate(rop, ioctx, obj, 0));
  EXPECT_EQ(0, getxattrs_rc);
  rados_release_read_op(rop);
  compare_xattrs(keys, vals, lens, 4, xattr_iter);

  remove_object();
}
// rados_read_op_cmpext: a matching compare lets the subsequent read in
// the same op succeed; a mismatching compare fails the whole op with an
// offset-encoded error (-MAX_ERRNO - mismatch_offset; offset 0 here).
TEST_F(CReadOpsTest, CmpExt) {
  char buf[len];
  size_t bytes_read = 0;
  int cmpext_val = 0;
  int read_val = 0;

  write_object();

  // cmpext with match should ensure that the following read is successful
  rados_read_op_t op = rados_create_read_op();
  ASSERT_TRUE(op);
  // @obj, @data and @len correspond to object initialised by write_object()
  rados_read_op_cmpext(op, data, len, 0, &cmpext_val);
  rados_read_op_read(op, 0, len, buf, &bytes_read, &read_val);
  ASSERT_EQ(0, rados_read_op_operate(op, ioctx, obj, 0));
  ASSERT_EQ(len, bytes_read);
  ASSERT_EQ(0, memcmp(data, buf, len));
  ASSERT_EQ(cmpext_val, 0);
  rados_release_read_op(op);

  // cmpext with mismatch should fail and fill mismatch_buf accordingly
  memset(buf, 0, sizeof(buf));
  bytes_read = 0;
  cmpext_val = 0;
  read_val = 0;
  op = rados_create_read_op();
  ASSERT_TRUE(op);
  // @obj, @data and @len correspond to object initialised by write_object()
  rados_read_op_cmpext(op, "mismatch", strlen("mismatch"), 0, &cmpext_val);
  rados_read_op_read(op, 0, len, buf, &bytes_read, &read_val);
  // mismatch at offset 0 => -MAX_ERRNO - 0
  ASSERT_EQ(-MAX_ERRNO, rados_read_op_operate(op, ioctx, obj, 0));
  rados_release_read_op(op);
  ASSERT_EQ(-MAX_ERRNO, cmpext_val);

  remove_object();
}
| 29,256 | 31.652902 | 90 |
cc
|
null |
ceph-main/src/test/librados/c_write_operations.cc
|
// Tests for the C API coverage of atomic write operations
#include <errno.h>
#include "gtest/gtest.h"
#include "include/err.h"
#include "include/rados/librados.h"
#include "test/librados/test.h"
// A freshly created write op handle must be non-null and must be
// releasable without ever being operated on.
TEST(LibradosCWriteOps, NewDelete) {
  rados_write_op_t wop = rados_create_write_op();
  ASSERT_TRUE(wop);
  rados_release_write_op(wop);
}
// rados_write_op_assert_exists on a nonexistent object must fail with
// -2 (ENOENT), both for the synchronous operate path and for the async
// path where the error is surfaced via the completion's return value.
TEST(LibRadosCWriteOps, assertExists) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);

  // synchronous path
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_assert_exists(op);

  // -2, ENOENT
  ASSERT_EQ(-2, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  // asynchronous path: same failure delivered through the completion
  rados_write_op_t op2 = rados_create_write_op();
  ASSERT_TRUE(op2);
  rados_write_op_assert_exists(op2);

  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion(NULL, NULL, NULL, &completion));
  ASSERT_EQ(0, rados_aio_write_op_operate(op2, ioctx, completion, "test", NULL, 0));
  rados_aio_wait_for_complete(completion);
  ASSERT_EQ(-2, rados_aio_get_return_value(completion));
  rados_aio_release(completion);

  rados_ioctx_destroy(ioctx);
  // NOTE(review): op2 is released after the ioctx is destroyed; looks
  // harmless since the op is no longer in flight, but confirm ordering
  // requirements against the librados C API docs.
  rados_release_write_op(op2);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// rados_write_op_assert_version: asserting a version greater than the
// object's current version fails with -EOVERFLOW, a smaller one with
// -ERANGE, and the exact version succeeds.
TEST(LibRadosCWriteOps, WriteOpAssertVersion) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);

  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  // Write to the object a second time to guarantee that its
  // version number is greater than 0
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_write_full(op, "hi", 2);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  // current version as observed by this client
  uint64_t v = rados_get_last_version(ioctx);

  // asserting a future version fails with -EOVERFLOW
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_assert_version(op, v+1);
  ASSERT_EQ(-EOVERFLOW, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  // asserting a past version fails with -ERANGE
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_assert_version(op, v-1);
  ASSERT_EQ(-ERANGE, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  // asserting the exact version succeeds
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_assert_version(op, v);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Xattr guards in write ops: a satisfied cmpxattr lets the op proceed
// (here: remove the xattr); an unsatisfied one — the xattr was just
// removed — cancels the whole op with -ECANCELED.
TEST(LibRadosCWriteOps, Xattrs) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);

  // Create an object with an xattr
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  rados_write_op_setxattr(op, "key", "value", 5);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  // Check that xattr exists, if it does, delete it.
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_IDEMPOTENT, NULL);
  rados_write_op_cmpxattr(op, "key", LIBRADOS_CMPXATTR_OP_EQ, "value", 5);
  rados_write_op_rmxattr(op, "key");
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  // Check the xattr exits, if it does, add it again (will fail) with -125
  // (ECANCELED)
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_cmpxattr(op, "key", LIBRADOS_CMPXATTR_OP_EQ, "value", 5);
  rados_write_op_setxattr(op, "key", "value", 5);
  ASSERT_EQ(-ECANCELED, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Basic mutations through write ops: write + write_full (write_full
// truncates, so the object ends up 2 bytes), iohint flags, truncate +
// append (1 kept byte + 2 appended = 3), and zero + remove.
TEST(LibRadosCWriteOps, Write) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);

  // Create an object, write and write full to it
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  rados_write_op_write(op, "four", 4, 0);
  rados_write_op_write_full(op, "hi", 2);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  char hi[4];
  // write_full replaced the 4-byte write, so only 2 bytes remain
  ASSERT_EQ(2, rados_read(ioctx, "test", hi, 4, 0));
  rados_release_write_op(op);

  //create write op with iohint
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_write_full(op, "ceph", 4);
  rados_write_op_set_flags(op, LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  ASSERT_EQ(4, rados_read(ioctx, "test", hi, 4, 0));
  rados_release_write_op(op);

  // Truncate and append
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_truncate(op, 1);
  rados_write_op_append(op, "hi", 2);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  // 1 byte kept by truncate + 2 appended = 3 bytes total
  ASSERT_EQ(3, rados_read(ioctx, "test", hi, 4, 0));
  rados_release_write_op(op);

  // zero and remove
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_zero(op, 0, 3);
  rados_write_op_remove(op);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  // ENOENT
  ASSERT_EQ(-2, rados_read(ioctx, "test", hi, 4, 0));
  rados_release_write_op(op);

  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Run the "hello" object class method via a write op and verify both its
// rval and the payload it writes into the object.
TEST(LibRadosCWriteOps, Exec) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);

  int exec_rc = 1;
  rados_write_op_t wop = rados_create_write_op();
  rados_write_op_exec(wop, "hello", "record_hello", "test", 4, &exec_rc);
  ASSERT_EQ(0, rados_write_op_operate(wop, ioctx, "test", NULL, 0));
  rados_release_write_op(wop);
  ASSERT_EQ(0, exec_rc);

  // the class method should have left "Hello, test!" in the object
  char readbuf[100];
  ASSERT_EQ(12, rados_read(ioctx, "test", readbuf, 100, 0));
  readbuf[12] = '\0';
  ASSERT_EQ(0, strcmp("Hello, test!", readbuf));

  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// rados_write_op_writesame: replicate a 4-byte pattern over a 16-byte
// range and verify the object contains the repeated pattern.
TEST(LibRadosCWriteOps, WriteSame) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);

  // Create an object, write to it using writesame
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  // data_len = 4, write_len = 16 => pattern repeated 4 times
  rados_write_op_writesame(op, "four", 4, 4 * 4, 0);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  char hi[4 * 4];
  ASSERT_EQ(sizeof(hi), static_cast<std::size_t>(
	      rados_read(ioctx, "test", hi,sizeof(hi), 0)));
  rados_release_write_op(op);
  ASSERT_EQ(0, memcmp("fourfourfourfour", hi, sizeof(hi)));

  // cleanup
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_remove(op);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);

  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// rados_write_op_cmpext: a matching compare allows the guarded write; a
// mismatch fails the op with an offset-encoded error (-MAX_ERRNO -
// mismatch_offset) when LIBRADOS_OPERATION_RETURNVEC is set.
//
// Fix: the original leaked write-op handles — the op used for the offset
// error check, one op per iteration of the 1000-iteration mismatch loop,
// and the final cleanup op were never passed to rados_release_write_op().
TEST(LibRadosCWriteOps, CmpExt) {
  rados_t cluster;
  rados_ioctx_t ioctx;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  rados_ioctx_create(cluster, pool_name.c_str(), &ioctx);

  // create an object, write to it using writesame
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_create(op, LIBRADOS_CREATE_EXCLUSIVE, NULL);
  rados_write_op_write(op, "four", 4, 0);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);
  char hi[4];
  ASSERT_EQ(sizeof(hi), static_cast<std::size_t>(rados_read(ioctx, "test", hi, sizeof(hi), 0)));
  ASSERT_EQ(0, memcmp("four", hi, sizeof(hi)));

  // compare and overwrite on (expected) match
  int val = 0;
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_cmpext(op, "four", 4, 0, &val);
  rados_write_op_write(op, "five", 4, 0);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  ASSERT_EQ(0, val);
  rados_release_write_op(op);
  ASSERT_EQ(sizeof(hi), static_cast<std::size_t>(rados_read(ioctx, "test", hi, sizeof(hi), 0)));
  ASSERT_EQ(0, memcmp("five", hi, sizeof(hi)));

  // Check offset return error value
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_cmpext(op, "four", 4, 0, &val);
  rados_write_op_write(op, "six ", 4, 0);
  ASSERT_EQ(-MAX_ERRNO - 1, rados_write_op_operate(op, ioctx, "test", NULL,
                                                   LIBRADOS_OPERATION_RETURNVEC));
  ASSERT_EQ(-MAX_ERRNO - 1, val);
  rados_release_write_op(op);  // fix: this handle was leaked

  // compare and bail before write due to mismatch
  // do it 1000 times to make sure we are hitting
  // some socket injection
  for (auto i = 0; i < 1000; ++i) {
    val = 0;
    op = rados_create_write_op();
    ASSERT_TRUE(op);
    rados_write_op_cmpext(op, "four", 4, 0, &val);
    rados_write_op_write(op, "six ", 4, 0);
    std::string const s = "test_" + std::to_string(i);
    ASSERT_EQ(-MAX_ERRNO , rados_write_op_operate(op, ioctx, s.c_str(), NULL,
                                                  LIBRADOS_OPERATION_RETURNVEC));
    ASSERT_EQ(-MAX_ERRNO , val);
    rados_release_write_op(op);  // fix: one handle per iteration was leaked
  }

  // cleanup
  op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_remove(op);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "test", NULL, 0));
  rados_release_write_op(op);  // fix: cleanup handle was leaked
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
| 10,088 | 33.316327 | 96 |
cc
|
null |
ceph-main/src/test/librados/cls.cc
|
#include <errno.h>
#include <map>
#include <sstream>
#include <string>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "test/librados/test_cxx.h"
using namespace librados;
using std::map;
using std::ostringstream;
using std::string;
// Calling into the object-class machinery with a nonexistent class, or a
// nonexistent method on a real class, must fail with -EOPNOTSUPP.
TEST(LibRadosCls, DNE) {
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);

  // create an empty object to run class methods against
  const string oid = "foo";
  bufferlist bl;
  ASSERT_EQ(0, ioctx.write(oid, bl, bl.length(), 0));

  // call a bogus class
  ASSERT_EQ(-EOPNOTSUPP, ioctx.exec(oid, "doesnotexistasdfasdf", "method", bl, bl));
  // call a bogus method on existent class
  ASSERT_EQ(-EOPNOTSUPP, ioctx.exec(oid, "lock", "doesnotexistasdfasdfasdf", bl, bl));

  ioctx.close();
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
| 907 | 23.540541 | 86 |
cc
|
null |
ceph-main/src/test/librados/cls_remote_reads.cc
|
#include <set>
#include <string>
#include "common/ceph_json.h"
#include "gtest/gtest.h"
#include "test/librados/test_cxx.h"
#include "crimson_utils.h"
using namespace librados;
// Gather data from several source objects into a target object via the
// test_remote_reads class: build a JSON request naming the sources and
// the per-source read method, run "test_gather" on the target, then
// check the target's size.
//
// Fix: the JSONFormatter was previously heap-allocated with `new` and
// never deleted (memory leak); a stack instance behaves identically.
TEST(ClsTestRemoteReads, TestGather) {
  SKIP_IF_CRIMSON();
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));
  IoCtx ioctx;
  cluster.ioctx_create(pool_name.c_str(), ioctx);

  bufferlist in, out;
  int object_size = 4096;
  char buf[object_size];
  memset(buf, 1, sizeof(buf));

  // create source objects from which data are gathered
  // (note: `in` is appended to without clearing, so each successive
  // source object is one object_size larger than the previous)
  in.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write_full("src_object.1", in));
  in.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write_full("src_object.2", in));
  in.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write_full("src_object.3", in));

  // construct JSON request passed to "test_gather" method, and in turn, to "test_read" method
  JSONFormatter formatter(true);
  formatter.open_object_section("foo");
  std::set<std::string> src_objects;
  src_objects.insert("src_object.1");
  src_objects.insert("src_object.2");
  src_objects.insert("src_object.3");
  encode_json("src_objects", src_objects, &formatter);
  encode_json("cls", "test_remote_reads", &formatter);
  encode_json("method", "test_read", &formatter);
  encode_json("pool", pool_name, &formatter);
  formatter.close_section();
  in.clear();
  formatter.flush(in);

  // create target object by combining data gathered from source objects using "test_read" method
  ASSERT_EQ(0, ioctx.exec("tgt_object", "test_remote_reads", "test_gather", in, out));

  // read target object and check its size
  ASSERT_EQ(3*object_size, ioctx.read("tgt_object", out, 0, 0));

  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
| 1,812 | 31.375 | 97 |
cc
|
null |
ceph-main/src/test/librados/cmd.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "test/librados/test.h"
#include "gtest/gtest.h"
#include <errno.h>
#include <condition_variable>
#include <map>
#include <sstream>
#include <string>
using std::cout;
using std::list;
using std::map;
using std::ostringstream;
using std::string;
// rados_mon_command: get_command_descriptions succeeds and returns a
// non-empty payload; a battery of malformed commands (non-JSON, empty,
// missing/blank/garbage "prefix", unknown prefix) all fail with -EINVAL;
// a valid quorum_status succeeds.
TEST(LibRadosCmd, MonDescribe) {
  rados_t cluster;
  ASSERT_EQ("", connect_cluster(&cluster));

  char *buf, *st;
  size_t buflen, stlen;
  char *cmd[2];

  cmd[1] = NULL;

  // valid: command descriptions come back as a non-empty buffer
  cmd[0] = (char *)"{\"prefix\":\"get_command_descriptions\"}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  ASSERT_LT(0u, buflen);
  rados_buffer_free(buf);
  rados_buffer_free(st);

  // bare word instead of a JSON object
  cmd[0] = (char *)"get_command_descriptions";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  // non-JSON garbage (with and without inbuf)
  cmd[0] = (char *)"asdfqwer";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "{}", 2, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  cmd[0] = (char *)"";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "{}", 2, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  // JSON object without a "prefix" key
  cmd[0] = (char *)"{}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  cmd[0] = (char *)"{\"abc\":\"something\"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  // empty, blank, garbage and over-long prefixes
  cmd[0] = (char *)"{\"prefix\":\"\"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  cmd[0] = (char *)"{\"prefix\":\"    \"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  cmd[0] = (char *)"{\"prefix\":\";;;,,,;;,,\"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  cmd[0] = (char *)"{\"prefix\":\"extra command\"}";
  ASSERT_EQ(-EINVAL, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  rados_buffer_free(buf);
  rados_buffer_free(st);

  // valid prefix succeeds with a non-empty payload
  cmd[0] = (char *)"{\"prefix\":\"quorum_status\"}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  ASSERT_LT(0u, buflen);
  //ASSERT_LT(0u, stlen);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  rados_shutdown(cluster);
}
// rados_osd_command against osd.0: malformed commands are rejected with
// -22 (EINVAL); a well-formed JSON "version" command returns a non-empty
// payload.  -ENXIO is tolerated throughout in case the OSD is
// down/thrashing underneath the test.
TEST(LibRadosCmd, OSDCmd) {
  rados_t cluster;
  ASSERT_EQ("", connect_cluster(&cluster));

  char *outbuf, *outs;
  size_t outbuflen, outslen;
  int rc;
  char *cmdv[2];
  cmdv[1] = NULL;

  // non-JSON garbage
  cmdv[0] = (char *)"asdfasdf";
  rc = rados_osd_command(cluster, 0, (const char **)cmdv, 1, "", 0,
			 &outbuf, &outbuflen, &outs, &outslen);
  rados_buffer_free(outbuf);
  rados_buffer_free(outs);
  ASSERT_TRUE(rc == -22 || rc == -ENXIO);

  // bare word, not a JSON object
  cmdv[0] = (char *)"version";
  rc = rados_osd_command(cluster, 0, (const char **)cmdv, 1, "", 0,
			 &outbuf, &outbuflen, &outs, &outslen);
  rados_buffer_free(outbuf);
  rados_buffer_free(outs);
  ASSERT_TRUE(rc == -22 || rc == -ENXIO);

  // valid command: either success with output, or ENXIO with none
  cmdv[0] = (char *)"{\"prefix\":\"version\"}";
  rc = rados_osd_command(cluster, 0, (const char **)cmdv, 1, "", 0,
			 &outbuf, &outbuflen, &outs, &outslen);
  ASSERT_TRUE((rc == 0 && outbuflen > 0) || (rc == -ENXIO && outbuflen == 0));
  rados_buffer_free(outbuf);
  rados_buffer_free(outs);
  rados_shutdown(cluster);
}
// Verify PG-targeted commands: malformed input is rejected with EINVAL,
// and a JSON "pg query" against an existing PG returns output.
TEST(LibRadosCmd, PGCmd) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));

  char *buf, *st;
  size_t buflen, stlen;
  char *cmd[2];
  cmd[1] = NULL;

  int64_t poolid = rados_pool_lookup(cluster, pool_name.c_str());
  ASSERT_LT(0, poolid);
  string pgid = stringify(poolid) + ".0";

  // A non-JSON command must be rejected with EINVAL.
  cmd[0] = (char *)"asdfasdf";
  // note: tolerate NXIO here in case the cluster is thrashing out underneath us.
  int r = rados_pg_command(cluster, pgid.c_str(), (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen);
  ASSERT_TRUE(r == -EINVAL || r == -ENXIO);
  rados_buffer_free(buf);
  rados_buffer_free(st);

  // make sure the pg exists on the osd before we query it; the stat calls
  // are expected to fail (objects don't exist) but force PG instantiation.
  rados_ioctx_t io;
  rados_ioctx_create(cluster, pool_name.c_str(), &io);
  for (int i=0; i<100; i++) {
    string oid = "obj" + stringify(i);
    ASSERT_EQ(-ENOENT, rados_stat(io, oid.c_str(), NULL, NULL));
  }
  rados_ioctx_destroy(io);

  string qstr = "{\"prefix\":\"pg\", \"cmd\":\"query\", \"pgid\":\"" + pgid + "\"}";
  cmd[0] = (char *)qstr.c_str();
  // note: tolerate ENOENT/ENXIO here if the osd is thrashing out underneath us
  r = rados_pg_command(cluster, pgid.c_str(), (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen);
  ASSERT_TRUE(r == 0 || r == -ENOENT || r == -ENXIO);
  // NOTE(review): buflen may plausibly be 0 when r != 0 above — confirm
  // whether this assertion should be conditional on r == 0.
  ASSERT_LT(0u, buflen);
  rados_buffer_free(buf);
  rados_buffer_free(st);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Thread-safe accumulator for monitor log lines delivered by log_cb().
struct Log {
  std::list<std::string> log;        // received lines, oldest first
  std::condition_variable cond;      // signalled whenever a line arrives
  std::mutex lock;                   // guards 'log'

  // Return true if any received line contains 'str' as a substring.
  bool contains(const std::string& str) {
    std::lock_guard<std::mutex> guard(lock);
    for (const auto& entry : log) {
      if (entry.find(str) != std::string::npos) {
        return true;
      }
    }
    return false;
  }
};
// librados monitor-log callback: appends the raw log line to the Log
// object passed via 'arg' and wakes any waiter. The remaining parameters
// (who/timestamps/seq/level/msg) are accepted but unused here.
void log_cb(void *arg,
            const char *line,
            const char *who, uint64_t stampsec, uint64_t stamp_nsec,
            uint64_t seq, const char *level,
            const char *msg) {
  auto *log = static_cast<Log *>(arg);
  std::lock_guard<std::mutex> guard(log->lock);
  log->log.emplace_back(line);
  log->cond.notify_all();
  std::cout << "got: " << line << std::endl;
}
// Verify rados_monitor_log() subscription levels: lines at or above the
// subscribed level are delivered, others are not, and passing a NULL
// callback unsubscribes. Uses polling with sleep() since delivery is async.
TEST(LibRadosCmd, WatchLog) {
  rados_t cluster;
  ASSERT_EQ("", connect_cluster(&cluster));
  char *buf, *st;
  char *cmd[2];
  cmd[1] = NULL;
  size_t buflen, stlen;
  Log l;

  // subscribed at "info": the injected log line must arrive
  ASSERT_EQ(0, rados_monitor_log(cluster, "info", log_cb, &l));
  cmd[0] = (char *)"{\"prefix\":\"log\", \"logtext\":[\"onexx\"]}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  for (int i=0; !l.contains("onexx"); i++) {
    ASSERT_TRUE(i<100);   // give up after ~100s
    sleep(1);
  }
  ASSERT_TRUE(l.contains("onexx"));

  // re-subscribed at "err": an info-level line must NOT arrive
  cmd[0] = (char *)"{\"prefix\":\"log\", \"logtext\":[\"twoxx\"]}";
  ASSERT_EQ(0, rados_monitor_log(cluster, "err", log_cb, &l));
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  sleep(2);
  ASSERT_FALSE(l.contains("twoxx"));

  // back at "info": delivery resumes
  ASSERT_EQ(0, rados_monitor_log(cluster, "info", log_cb, &l));
  cmd[0] = (char *)"{\"prefix\":\"log\", \"logtext\":[\"threexx\"]}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  for (int i=0; !l.contains("threexx"); i++) {
    ASSERT_TRUE(i<100);
    sleep(1);
  }

  // NULL callback unsubscribes: nothing further should be delivered
  ASSERT_EQ(0, rados_monitor_log(cluster, "info", NULL, NULL));
  cmd[0] = (char *)"{\"prefix\":\"log\", \"logtext\":[\"fourxx\"]}";
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf, &buflen, &st, &stlen));
  sleep(2);
  ASSERT_FALSE(l.contains("fourxx"));
  rados_shutdown(cluster);
}
| 7,556 | 31.856522 | 109 |
cc
|
null |
ceph-main/src/test/librados/cmd_cxx.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <condition_variable>
#include <map>
#include <sstream>
#include <string>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "test/librados/test_cxx.h"
using namespace librados;
using std::map;
using std::ostringstream;
using std::string;
// C++ API: get_command_descriptions must succeed and return a non-empty body.
TEST(LibRadosCmd, MonDescribePP) {
  Rados cluster;
  ASSERT_EQ("", connect_cluster_pp(cluster));
  bufferlist inbl, outbl;
  string outs;
  ASSERT_EQ(0, cluster.mon_command("{\"prefix\": \"get_command_descriptions\"}",
				   inbl, &outbl, &outs));
  ASSERT_LT(0u, outbl.length());
  ASSERT_LE(0u, outs.length());
  cluster.shutdown();
}
// C++ API equivalent of OSDCmd: malformed commands are rejected with
// EINVAL; a valid JSON "version" command returns output.
TEST(LibRadosCmd, OSDCmdPP) {
  Rados cluster;
  ASSERT_EQ("", connect_cluster_pp(cluster));
  int r;
  bufferlist inbl, outbl;
  string outs;
  string cmd;

  // note: tolerate NXIO here in case the cluster is thrashing out underneath us.
  // Non-JSON commands must be rejected with EINVAL.
  cmd = "asdfasdf";
  r = cluster.osd_command(0, cmd, inbl, &outbl, &outs);
  ASSERT_TRUE(r == -EINVAL || r == -ENXIO);
  cmd = "version";
  r = cluster.osd_command(0, cmd, inbl, &outbl, &outs);
  ASSERT_TRUE(r == -EINVAL || r == -ENXIO);

  // A valid JSON command succeeds and produces non-empty output.
  cmd = "{\"prefix\":\"version\"}";
  r = cluster.osd_command(0, cmd, inbl, &outbl, &outs);
  ASSERT_TRUE((r == 0 && outbl.length() > 0) || (r == -ENXIO && outbl.length() == 0));
  cluster.shutdown();
}
// C++ API equivalent of PGCmd: malformed commands rejected with EINVAL;
// a JSON "pg query" against an existing PG returns output.
TEST(LibRadosCmd, PGCmdPP) {
  Rados cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster));

  int r;
  bufferlist inbl, outbl;
  string outs;
  string cmd;

  int64_t poolid = cluster.pool_lookup(pool_name.c_str());
  ASSERT_LT(0, poolid);
  string pgid = stringify(poolid) + ".0";

  // A non-JSON command must be rejected with EINVAL.
  cmd = "asdfasdf";
  // note: tolerate NXIO here in case the cluster is thrashing out underneath us.
  r = cluster.pg_command(pgid.c_str(), cmd, inbl, &outbl, &outs);
  ASSERT_TRUE(r == -EINVAL || r == -ENXIO);

  // make sure the pg exists on the osd before we query it; the stat calls
  // are expected to fail (objects don't exist) but force PG instantiation.
  IoCtx io;
  cluster.ioctx_create(pool_name.c_str(), io);
  for (int i=0; i<100; i++) {
    string oid = "obj" + stringify(i);
    ASSERT_EQ(-ENOENT, io.stat(oid, NULL, NULL));
  }
  io.close();

  cmd = "{\"prefix\":\"pg\", \"cmd\":\"query\", \"pgid\":\"" + pgid + "\"}";
  // note: tolerate ENOENT/ENXIO here if the osd is thrashing out underneath us
  r = cluster.pg_command(pgid.c_str(), cmd, inbl, &outbl, &outs);
  ASSERT_TRUE(r == 0 || r == -ENOENT || r == -ENXIO);
  ASSERT_LT(0u, outbl.length());
  ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster));
}
| 2,618 | 27.16129 | 86 |
cc
|
null |
ceph-main/src/test/librados/completion_speed.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "common/ceph_context.h"
#include "common/Finisher.h"
#include "librados/AioCompletionImpl.h"
// Total number of AIO completions to drive through the Finisher.
constexpr int max_completions = 10'000'000;
// Completions fired so far; only incremented from the finisher thread.
int completed = 0;
// Context for the Finisher; presumably get() takes a ref matching the
// cct->put() in main() — TODO confirm CephContext refcount semantics.
auto cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get();
Finisher f(cct);
// Completion callback: frees the finished AioCompletion (passed as 'arg')
// and, until max_completions have fired, queues a fresh completion so the
// chain keeps itself going.
void completion_cb(librados::completion_t cb, void* arg) {
  auto c = static_cast<librados::AioCompletion*>(arg);
  delete c;
  if (++completed < max_completions) {
    auto aio = librados::Rados::aio_create_completion();
    aio->set_complete_callback(static_cast<void*>(aio), &completion_cb);
    f.queue(new librados::C_AioComplete(aio->pc));
  }
}
// Benchmark driver: seed the chain with one AioCompletion, then let
// completion_cb keep requeueing until max_completions have fired.
int main(void) {
  auto aio = librados::Rados::aio_create_completion();
  aio->set_complete_callback(static_cast<void*>(aio), &completion_cb);
  f.queue(new librados::C_AioComplete(aio->pc));
  f.start();
  while (completed < max_completions)
    f.wait_for_empty();
  f.stop();
  assert(completed == max_completions);
  cct->put();  // drop the ref taken via get() at startup
}
| 1,075 | 26.589744 | 72 |
cc
|
null |
ceph-main/src/test/librados/crimson_utils.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdlib>
// True when the test run targets a crimson cluster, signalled by the
// CRIMSON_COMPAT environment variable being set (to any value).
static inline bool is_crimson_cluster() {
  const char* flag = getenv("CRIMSON_COMPAT");
  return flag != nullptr;
}
// Skip the current gtest test when running against a crimson cluster,
// because the exercised feature is not supported there yet.
#define SKIP_IF_CRIMSON() \
if (is_crimson_cluster()) { \
GTEST_SKIP() << "Not supported by crimson yet. Skipped"; \
}
| 371 | 22.25 | 70 |
h
|
null |
ceph-main/src/test/librados/io.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include <climits>
#include "include/rados/librados.h"
#include "include/encoding.h"
#include "include/err.h"
#include "include/scope_guard.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include <errno.h>
#include "gtest/gtest.h"
#include "crimson_utils.h"
using std::string;
typedef RadosTest LibRadosIo;
typedef RadosTestEC LibRadosIoEC;
// Write the same object name in the default and in an explicit namespace.
TEST_F(LibRadosIo, SimpleWrite) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "nspace");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
}
// Oversized I/O requests (UINT_MAX length) must be rejected with E2BIG.
TEST_F(LibRadosIo, TooBig) {
  char buf[1] = { 0 };
  ASSERT_EQ(-E2BIG, rados_write(ioctx, "A", buf, UINT_MAX, 0));
  ASSERT_EQ(-E2BIG, rados_append(ioctx, "A", buf, UINT_MAX));
  ASSERT_EQ(-E2BIG, rados_write_full(ioctx, "A", buf, UINT_MAX));
  ASSERT_EQ(-E2BIG, rados_writesame(ioctx, "A", buf, sizeof(buf), UINT_MAX, 0));
}
// Regression test: after rados_read() returns ETIMEDOUT, librados must not
// keep writing into the caller's buffer. A second client is configured with
// a tiny op timeout plus an injected messenger delay to force the timeout.
TEST_F(LibRadosIo, ReadTimeout) {
  char buf[128];
  memset(buf, 'a', sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));

  {
    // set up a second client
    rados_t cluster;
    rados_ioctx_t ioctx;
    ASSERT_EQ(0, rados_create(&cluster, "admin"));
    ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
    ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
    ASSERT_EQ(0, rados_conf_set(cluster, "rados_osd_op_timeout", "1")); // use any small value that will result in a timeout
    ASSERT_EQ(0, rados_conf_set(cluster, "ms_inject_internal_delays", "2")); // create a 2 second delay
    ASSERT_EQ(0, rados_connect(cluster));
    ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
    rados_ioctx_set_namespace(ioctx, nspace.c_str());

    // then we show that the buffer is unchanged after rados_read returned
    // with a timeout
    for (int i=0; i<5; i++) {
      char buf2[sizeof(buf)];
      memset(buf2, 0, sizeof(buf2));
      int err = rados_read(ioctx, "foo", buf2, sizeof(buf2), 0);
      if (err == -ETIMEDOUT) {
        int startIndex = 0;
        // find the index until which librados already read the object before
        // the timeout occurred
        for (unsigned b=0; b<sizeof(buf); b++) {
          if (buf2[b] != buf[b]) {
            startIndex = b;
            break;
          }
        }

        // wait some time to give librados a chance to do something
        sleep(1);

        // then check if the buffer was changed after the call
        if (buf2[startIndex] == 'a') {
          // report the value observed in the caller's buffer (buf2), which
          // should have stayed untouched after the timed-out read returned
          printf("byte at index %d was changed after the timeout to %d\n",
                 startIndex, (int)buf2[startIndex]);
          ASSERT_TRUE(0);
          break;
        }
      } else {
        printf("no timeout :/\n");
      }
    }
    rados_ioctx_destroy(ioctx);
    rados_shutdown(cluster);
  }
}
// Write then read back 128 bytes, at offset 0 and at a non-zero offset.
TEST_F(LibRadosIo, RoundTrip) {
  char buf[128];
  char buf2[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));

  // repeat with a write/read at a non-zero offset
  uint64_t off = 19;
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "bar", buf, sizeof(buf), off));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "bar", buf2, sizeof(buf2), off));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
}
// Server-side crc32c of the object must match a locally computed crc32c.
TEST_F(LibRadosIo, Checksum) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));

  uint32_t expected_crc = ceph_crc32c(-1, reinterpret_cast<const uint8_t*>(buf),
                                      sizeof(buf));
  ceph_le32 init_value(-1);
  ceph_le32 crc[2];  // [0] = number of checksums returned, [1] = crc32c value
  ASSERT_EQ(0, rados_checksum(ioctx, "foo", LIBRADOS_CHECKSUM_TYPE_CRC32C,
			      reinterpret_cast<char*>(&init_value),
			      sizeof(init_value), sizeof(buf), 0, 0,
			      reinterpret_cast<char*>(&crc), sizeof(crc)));
  ASSERT_EQ(1U, crc[0]);
  ASSERT_EQ(expected_crc, crc[1]);
}
// A shorter overwrite at offset 0 replaces only its prefix; the tail of
// the first write must survive.
TEST_F(LibRadosIo, OverlappingWriteRoundTrip) {
  char buf[128];
  char buf2[64];
  char buf3[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
  memset(buf3, 0xdd, sizeof(buf3));
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf2)));
  ASSERT_EQ(0, memcmp(buf3 + sizeof(buf2), buf, sizeof(buf) - sizeof(buf2)));
}
// rados_write_full() truncates the object to the new (shorter) payload.
TEST_F(LibRadosIo, WriteFullRoundTrip) {
  char buf[128];
  char buf2[64];
  char buf3[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_write_full(ioctx, "foo", buf2, sizeof(buf2)));
  memset(buf3, 0x00, sizeof(buf3));
  // reads back only the 64 bytes of the write_full payload
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf2, buf3, sizeof(buf2)));
}
// Two appends concatenate: the object contains both payloads in order.
TEST_F(LibRadosIo, AppendRoundTrip) {
  char buf[64];
  char buf2[64];
  char buf3[sizeof(buf) + sizeof(buf2)];
  memset(buf, 0xde, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  memset(buf2, 0xad, sizeof(buf2));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf2, sizeof(buf2)));
  memset(buf3, 0, sizeof(buf3));
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(buf3 + sizeof(buf), buf2, sizeof(buf2)));
}
// A zero-length zero op at offset 0 must be accepted (no-op write).
TEST_F(LibRadosIo, ZeroLenZero) {
  rados_write_op_t op = rados_create_write_op();
  ASSERT_TRUE(op);
  rados_write_op_zero(op, 0, 0);
  ASSERT_EQ(0, rados_write_op_operate(op, ioctx, "foo", NULL, 0));
  rados_release_write_op(op);
}
// Truncating to half the size leaves only the first half readable.
TEST_F(LibRadosIo, TruncTest) {
  char buf[128];
  char buf2[sizeof(buf)];
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_trunc(ioctx, "foo", sizeof(buf) / 2));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ((int)(sizeof(buf)/2), rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)/2));
}
// Reading a removed object must fail with ENOENT.
TEST_F(LibRadosIo, RemoveTest) {
  char buf[128];
  char buf2[sizeof(buf)];
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ(-ENOENT, rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
}
// Xattr round trip: absent xattr yields ENODATA; after setxattr the
// value reads back verbatim.
TEST_F(LibRadosIo, XattrsRoundTrip) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(-ENODATA, rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ((int)sizeof(attr1_buf),
	    rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(attr1_buf, buf, sizeof(attr1_buf)));
}
// rmxattr removes the attribute (subsequent get yields ENODATA), and
// rmxattr on a removed object fails with ENOENT.
TEST_F(LibRadosIo, RmXattr) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0,
      rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_rmxattr(ioctx, "foo", attr1));
  ASSERT_EQ(-ENODATA, rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));

  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  ASSERT_EQ(0, rados_write(ioctx, "foo_rmxattr", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0,
      rados_setxattr(ioctx, "foo_rmxattr", attr2, attr2_buf, sizeof(attr2_buf)));
  ASSERT_EQ(0, rados_remove(ioctx, "foo_rmxattr"));
  ASSERT_EQ(-ENOENT, rados_rmxattr(ioctx, "foo_rmxattr", attr2));
}
// The xattr iterator must enumerate exactly the two attributes that were
// set, with their exact values, and signal the end with a NULL name.
TEST_F(LibRadosIo, XattrIter) {
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  // fill with a repeating 0..254 byte pattern (note: modulus is 0xff, not 0x100)
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr2, attr2_buf, sizeof(attr2_buf)));
  rados_xattrs_iter_t iter;
  ASSERT_EQ(0, rados_getxattrs(ioctx, "foo", &iter));
  int num_seen = 0;
  while (true) {
    const char *name;
    const char *val;
    size_t len;
    ASSERT_EQ(0, rados_getxattrs_next(iter, &name, &val, &len));
    if (name == NULL) {
      break;  // end of iteration
    }
    ASSERT_LT(num_seen, 2);
    if ((strcmp(name, attr1) == 0) && (val != NULL) && (memcmp(val, attr1_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else if ((strcmp(name, attr2) == 0) && (val != NULL) && (memcmp(val, attr2_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else {
      // unexpected attribute name or value: fail
      ASSERT_EQ(0, 1);
    }
  }
  rados_getxattrs_end(iter);
}
// EC pool: write the same object name in the default and an explicit namespace.
TEST_F(LibRadosIoEC, SimpleWrite) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  rados_ioctx_set_namespace(ioctx, "nspace");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
}
// EC pool: plain write/read round trip works, but a write at a non-zero
// offset is rejected with EOPNOTSUPP.
TEST_F(LibRadosIoEC, RoundTrip) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));

  uint64_t off = 19;
  ASSERT_EQ(-EOPNOTSUPP, rados_write(ioctx, "bar", buf, sizeof(buf), off));
}
// EC pool: an overwrite at offset 0 must fail with EOPNOTSUPP and leave
// the previously written data intact.
TEST_F(LibRadosIoEC, OverlappingWriteRoundTrip) {
  SKIP_IF_CRIMSON();
  int bsize = alignment;
  int dbsize = bsize * 2;
  // no cast needed: new char[] already yields char*
  char *buf = new char[dbsize];
  char *buf2 = new char[bsize];
  char *buf3 = new char[dbsize];
  auto cleanup = [&] {
    delete[] buf;
    delete[] buf2;
    delete[] buf3;
  };
  scope_guard<decltype(cleanup)> sg(std::move(cleanup));
  memset(buf, 0xcc, dbsize);
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, dbsize, 0));
  memset(buf2, 0xdd, bsize);
  ASSERT_EQ(-EOPNOTSUPP, rados_write(ioctx, "foo", buf2, bsize, 0));
  memset(buf3, 0xdd, dbsize);
  ASSERT_EQ(dbsize, rados_read(ioctx, "foo", buf3, dbsize, 0));
  // Read the same as first write
  ASSERT_EQ(0, memcmp(buf3, buf, dbsize));
}
// EC pool: write_full replaces the whole object with the shorter payload.
TEST_F(LibRadosIoEC, WriteFullRoundTrip) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[64];
  char buf3[128];
  memset(buf, 0xcc, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  ASSERT_EQ(0, rados_write_full(ioctx, "foo", buf2, sizeof(buf2)));
  memset(buf3, 0xee, sizeof(buf3));
  ASSERT_EQ((int)sizeof(buf2), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf2)));
}
// EC pool: stripe-aligned appends concatenate normally; appending an
// unaligned-length chunk succeeds once but the follow-up append — which
// would start at the now-unaligned tail — is rejected with EOPNOTSUPP.
// NOTE(review): exact EC append alignment rules live in the OSD; this
// test pins observed behavior.
TEST_F(LibRadosIoEC, AppendRoundTrip) {
  SKIP_IF_CRIMSON();
  // no cast needed: new char[] already yields char*
  char *buf = new char[alignment];
  char *buf2 = new char[alignment];
  char *buf3 = new char[alignment * 2];
  int uasize = alignment/2;
  char *unalignedbuf = new char[uasize];
  auto cleanup = [&] {
    delete[] buf;
    delete[] buf2;
    delete[] buf3;
    delete[] unalignedbuf;
  };
  scope_guard<decltype(cleanup)> sg(std::move(cleanup));
  memset(buf, 0xde, alignment);
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, alignment));
  memset(buf2, 0xad, alignment);
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf2, alignment));
  memset(buf3, 0, alignment*2);
  ASSERT_EQ((int)alignment*2, rados_read(ioctx, "foo", buf3, alignment*2, 0));
  ASSERT_EQ(0, memcmp(buf3, buf, alignment));
  ASSERT_EQ(0, memcmp(buf3 + alignment, buf2, alignment));
  memset(unalignedbuf, 0, uasize);
  ASSERT_EQ(0, rados_append(ioctx, "foo", unalignedbuf, uasize));
  ASSERT_EQ(-EOPNOTSUPP, rados_append(ioctx, "foo", unalignedbuf, uasize));
}
// EC pool: truncate is not supported (EOPNOTSUPP) and the object is unchanged.
TEST_F(LibRadosIoEC, TruncTest) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[sizeof(buf)];
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(-EOPNOTSUPP, rados_trunc(ioctx, "foo", sizeof(buf) / 2));
  memset(buf2, 0, sizeof(buf2));
  // Same size
  ASSERT_EQ((int)sizeof(buf), rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
  // No change
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
}
// EC pool: reading a removed object must fail with ENOENT.
TEST_F(LibRadosIoEC, RemoveTest) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char buf2[sizeof(buf)];
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
  memset(buf2, 0, sizeof(buf2));
  ASSERT_EQ(-ENOENT, rados_read(ioctx, "foo", buf2, sizeof(buf2), 0));
}
// EC pool: xattr round trip — ENODATA when absent, exact value after set.
TEST_F(LibRadosIoEC, XattrsRoundTrip) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(-ENODATA, rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ((int)sizeof(attr1_buf),
	    rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(attr1_buf, buf, sizeof(attr1_buf)));
}
// EC pool: rmxattr removes the attribute; rmxattr on a removed object
// fails with ENOENT.
TEST_F(LibRadosIoEC, RmXattr) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0,
      rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_rmxattr(ioctx, "foo", attr1));
  ASSERT_EQ(-ENODATA, rados_getxattr(ioctx, "foo", attr1, buf, sizeof(buf)));

  // Test rmxattr on a removed object
  char buf2[128];
  char attr2[] = "attr2";
  char attr2_buf[] = "foo bar baz";
  memset(buf2, 0xbb, sizeof(buf2));
  ASSERT_EQ(0, rados_write(ioctx, "foo_rmxattr", buf2, sizeof(buf2), 0));
  ASSERT_EQ(0,
      rados_setxattr(ioctx, "foo_rmxattr", attr2, attr2_buf, sizeof(attr2_buf)));
  ASSERT_EQ(0, rados_remove(ioctx, "foo_rmxattr"));
  ASSERT_EQ(-ENOENT, rados_rmxattr(ioctx, "foo_rmxattr", attr2));
}
// EC pool: the xattr iterator enumerates exactly the two attributes set,
// with exact values, ending with a NULL name.
TEST_F(LibRadosIoEC, XattrIter) {
  SKIP_IF_CRIMSON();
  char buf[128];
  char attr1[] = "attr1";
  char attr1_buf[] = "foo bar baz";
  char attr2[] = "attr2";
  char attr2_buf[256];
  // fill with a repeating 0..254 byte pattern (note: modulus is 0xff, not 0x100)
  for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
    attr2_buf[j] = j % 0xff;
  }
  memset(buf, 0xaa, sizeof(buf));
  ASSERT_EQ(0, rados_append(ioctx, "foo", buf, sizeof(buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr1, attr1_buf, sizeof(attr1_buf)));
  ASSERT_EQ(0, rados_setxattr(ioctx, "foo", attr2, attr2_buf, sizeof(attr2_buf)));
  rados_xattrs_iter_t iter;
  ASSERT_EQ(0, rados_getxattrs(ioctx, "foo", &iter));
  int num_seen = 0;
  while (true) {
    const char *name;
    const char *val;
    size_t len;
    ASSERT_EQ(0, rados_getxattrs_next(iter, &name, &val, &len));
    if (name == NULL) {
      break;  // end of iteration
    }
    ASSERT_LT(num_seen, 2);
    if ((strcmp(name, attr1) == 0) && (val != NULL) && (memcmp(val, attr1_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else if ((strcmp(name, attr2) == 0) && (val != NULL) && (memcmp(val, attr2_buf, len) == 0)) {
      num_seen++;
      continue;
    }
    else {
      // unexpected attribute name or value: fail
      ASSERT_EQ(0, 1);
    }
  }
  rados_getxattrs_end(iter);
}
| 15,525 | 32.606061 | 124 |
cc
|
null |
ceph-main/src/test/librados/io_cxx.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include <climits>
#include <errno.h>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "include/encoding.h"
#include "include/err.h"
#include "include/scope_guard.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace librados;
using std::string;
typedef RadosTestPP LibRadosIoPP;
typedef RadosTestECPP LibRadosIoECPP;
// Oversized I/O requests must be rejected client-side with E2BIG.
// NOTE(review): uses a fresh, unconnected local IoCtx (shadowing the
// fixture's) — presumably the length check fails before any cluster I/O;
// confirm this is intentional.
TEST_F(LibRadosIoPP, TooBigPP) {
  IoCtx ioctx;
  bufferlist bl;
  ASSERT_EQ(-E2BIG, ioctx.write("foo", bl, UINT_MAX, 0));
  ASSERT_EQ(-E2BIG, ioctx.append("foo", bl, UINT_MAX));
  // ioctx.write_full no way to overflow bl.length()
  ASSERT_EQ(-E2BIG, ioctx.writesame("foo", bl, UINT_MAX, 0));
}
// Write the same object name in the default and an explicit namespace.
TEST_F(LibRadosIoPP, SimpleWritePP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  ioctx.set_namespace("nspace");
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
}
// Exercise ObjectReadOperation::read() with every combination of optional
// out-params (out bufferlist, rval pointer, operate() result bufferlist),
// including len=0 (whole object), stacked reads, and reuse of a
// preallocated buffer with a cached crc.
TEST_F(LibRadosIoPP, ReadOpPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  // result delivered only via operate()'s out bufferlist
  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, 0, NULL, NULL); //len=0 mean read the whole object data.
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  // per-read out bufferlist AND operate() out bufferlist both filled
  {
    bufferlist read_bl, op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  // per-read rval pointer is set to 0 on success
  {
    bufferlist op_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist read_bl, op_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  // two stacked reads in one op: operate()'s bufferlist concatenates both
  {
    bufferlist read_bl1, read_bl2, op_bl;
    int rval1 = 1000, rval2 = 1002;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl1, &rval1);
    op.read(0, sizeof(buf), &read_bl2, &rval2);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), read_bl1.length());
    ASSERT_EQ(sizeof(buf), read_bl2.length());
    ASSERT_EQ(sizeof(buf) * 2, op_bl.length());
    ASSERT_EQ(0, rval1);
    ASSERT_EQ(0, rval2);
    ASSERT_EQ(0, memcmp(read_bl1.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl2.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(op_bl.c_str() + sizeof(buf), buf, sizeof(buf)));
  }

  {
    bufferlist op_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
  }

  // same combinations with operate()'s out bufferlist set to NULL
  {
    bufferlist read_bl;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  {
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(0, rval);
  }

  {
    bufferlist read_bl;
    int rval = 1000;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl.length());
    ASSERT_EQ(0, rval);
    ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
  }

  {
    bufferlist read_bl1, read_bl2;
    int rval1 = 1000, rval2 = 1002;
    ObjectReadOperation op;
    op.read(0, sizeof(buf), &read_bl1, &rval1);
    op.read(0, sizeof(buf), &read_bl2, &rval2);
    ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
    ASSERT_EQ(sizeof(buf), read_bl1.length());
    ASSERT_EQ(sizeof(buf), read_bl2.length());
    ASSERT_EQ(0, rval1);
    ASSERT_EQ(0, rval2);
    ASSERT_EQ(0, memcmp(read_bl1.c_str(), buf, sizeof(buf)));
    ASSERT_EQ(0, memcmp(read_bl2.c_str(), buf, sizeof(buf)));
  }

  // read into a preallocated buffer with a cached crc
  {
    bufferlist op_bl;
    op_bl.append(std::string(sizeof(buf), 'x'));
    ASSERT_NE(op_bl.crc32c(0), bl.crc32c(0));  // cache 'x' crc
    ObjectReadOperation op;
    op.read(0, sizeof(buf), NULL, NULL);
    ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
    ASSERT_EQ(sizeof(buf), op_bl.length());
    ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
    // the read must have invalidated/refreshed the cached crc
    ASSERT_EQ(op_bl.crc32c(0), bl.crc32c(0));
  }
}
// Exercise ObjectReadOperation::sparse_read(), including the truncate
// size/seq overload reading back only the first half.
TEST_F(LibRadosIoPP, SparseReadOpPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  {
    std::map<uint64_t, uint64_t> extents;
    bufferlist read_bl;
    int rval = -1;
    ObjectReadOperation op;
    op.sparse_read(0, sizeof(buf), &extents, &read_bl, &rval);
    ASSERT_EQ(0, ioctx.operate("foo", &op, nullptr));
    ASSERT_EQ(0, rval);
    assert_eq_sparse(bl, extents, read_bl);
  }
  {
    // with truncate size sizeof(buf)/2 and truncate seq 1, only the
    // first half is expected back
    bufferlist bl;
    bl.append(buf, sizeof(buf) / 2);

    std::map<uint64_t, uint64_t> extents;
    bufferlist read_bl;
    int rval = -1;
    ObjectReadOperation op;
    op.sparse_read(0, sizeof(buf), &extents, &read_bl, &rval, sizeof(buf) / 2, 1);
    ASSERT_EQ(0, ioctx.operate("foo", &op, nullptr));
    ASSERT_EQ(0, rval);
    assert_eq_sparse(bl, extents, read_bl);
  }
}
// Write 128 bytes and read them back, verifying contents.
// (Dropped an unused local `Rados cluster;` that served no purpose.)
TEST_F(LibRadosIoPP, RoundTripPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  bufferlist cl;
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", cl, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(buf, cl.c_str(), sizeof(buf)));
}
// Round trip via ObjectWriteOperation/ObjectReadOperation with fadvise
// flags set; data must read back unchanged.
TEST_F(LibRadosIoPP, RoundTripPP2)
{
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write;
  write.write(0, bl);
  write.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
  ASSERT_EQ(0, ioctx.operate("foo", &write));

  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  read.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_NOCACHE|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// Server-side crc32c of the object must match the locally computed crc32c.
// (Dropped an unused local `Rados cluster;` that served no purpose.)
TEST_F(LibRadosIoPP, Checksum) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  bufferlist init_value_bl;
  encode(static_cast<uint32_t>(-1), init_value_bl);
  bufferlist csum_bl;
  ASSERT_EQ(0, ioctx.checksum("foo", LIBRADOS_CHECKSUM_TYPE_CRC32C,
			      init_value_bl, sizeof(buf), 0, 0, &csum_bl));
  auto csum_bl_it = csum_bl.cbegin();
  uint32_t csum_count;
  decode(csum_count, csum_bl_it);  // first element: number of checksums
  ASSERT_EQ(1U, csum_count);
  uint32_t csum;
  decode(csum, csum_bl_it);
  ASSERT_EQ(bl.crc32c(-1), csum);
}
// here we test reading into a non-empty bufferlist referencing existing
// buffers
// (Dropped an unused local `Rados cluster;` that served no purpose.)
TEST_F(LibRadosIoPP, ReadIntoBufferlist) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));

  bufferlist bl2;
  char buf2[sizeof(buf)];
  memset(buf2, 0xbb, sizeof(buf2));
  bl2.append(buffer::create_static(sizeof(buf2), buf2));
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl2, sizeof(buf), 0));
  // the read must land both in the static buffer and in the bufferlist view
  ASSERT_EQ(0, memcmp(buf, buf2, sizeof(buf)));
  ASSERT_EQ(0, memcmp(buf, bl2.c_str(), sizeof(buf)));
}
// A shorter overwrite at offset 0 replaces only its prefix; the tail of
// the first write must survive.
TEST_F(LibRadosIoPP, OverlappingWriteRoundTripPP) {
  char buf[128];
  char buf2[64];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
  bufferlist bl3;
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
  ASSERT_EQ(0, memcmp(bl3.c_str() + sizeof(buf2), buf, sizeof(buf) - sizeof(buf2)));
}
// write_full truncates the object to the new (shorter) payload.
TEST_F(LibRadosIoPP, WriteFullRoundTripPP) {
  char buf[128];
  char buf2[64];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.write_full("foo", bl2));
  bufferlist bl3;
  // reads back only the 64 bytes of the write_full payload
  ASSERT_EQ((int)sizeof(buf2), ioctx.read("foo", bl3, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
}
// write_full round trip via Object{Write,Read}Operation with fadvise flags.
TEST_F(LibRadosIoPP, WriteFullRoundTripPP2)
{
  bufferlist bl;
  bl.append("ceph");
  ObjectWriteOperation write;
  write.write_full(bl);
  write.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
  ASSERT_EQ(0, ioctx.operate("foo", &write));

  ObjectReadOperation read;
  read.read(0, bl.length(), NULL, NULL);
  read.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
  ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
  ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// Two appends concatenate: the object contains both payloads in order.
TEST_F(LibRadosIoPP, AppendRoundTripPP) {
  char buf[64];
  char buf2[64];
  memset(buf, 0xde, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
  memset(buf2, 0xad, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ASSERT_EQ(0, ioctx.append("foo", bl2, sizeof(buf2)));
  bufferlist bl3;
  ASSERT_EQ((int)(sizeof(buf) + sizeof(buf2)),
	    ioctx.read("foo", bl3, (sizeof(buf) + sizeof(buf2)), 0));
  const char *bl3_str = bl3.c_str();
  ASSERT_EQ(0, memcmp(bl3_str, buf, sizeof(buf)));
  ASSERT_EQ(0, memcmp(bl3_str + sizeof(buf), buf2, sizeof(buf2)));
}
TEST_F(LibRadosIoPP, TruncTestPP) {
  // Append a full buffer, truncate the object to half its size, and verify
  // that a full-length read returns only the surviving first half.
  char data[128];
  memset(data, 0xaa, sizeof(data));
  bufferlist wr_bl;
  wr_bl.append(data, sizeof(data));
  ASSERT_EQ(0, ioctx.append("foo", wr_bl, sizeof(data)));
  const size_t half = sizeof(data) / 2;
  ASSERT_EQ(0, ioctx.trunc("foo", half));
  bufferlist rd_bl;
  ASSERT_EQ((int)half, ioctx.read("foo", rd_bl, sizeof(data), 0));
  ASSERT_EQ(0, memcmp(rd_bl.c_str(), data, half));
}
TEST_F(LibRadosIoPP, RemoveTestPP) {
  // After remove(), reads on the object must fail with -ENOENT.
  char payload[128];
  memset(payload, 0xaa, sizeof(payload));
  bufferlist write_bl;
  write_bl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.append("foo", write_bl, sizeof(payload)));
  ASSERT_EQ(0, ioctx.remove("foo"));
  bufferlist read_bl;
  ASSERT_EQ(-ENOENT, ioctx.read("foo", read_bl, sizeof(payload), 0));
}
// xattr round trip: a missing attribute reads as -ENODATA; after setxattr
// the value reads back byte-for-byte.
TEST_F(LibRadosIoPP, XattrsRoundTripPP) {
char buf[128];
char attr1[] = "attr1";
char attr1_buf[] = "foo bar baz";
memset(buf, 0xaa, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
bufferlist bl2;
// Attribute not set yet.
ASSERT_EQ(-ENODATA, ioctx.getxattr("foo", attr1, bl2));
bufferlist bl3;
bl3.append(attr1_buf, sizeof(attr1_buf));
ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl3));
bufferlist bl4;
ASSERT_EQ((int)sizeof(attr1_buf),
          ioctx.getxattr("foo", attr1, bl4));
ASSERT_EQ(0, memcmp(bl4.c_str(), attr1_buf, sizeof(attr1_buf)));
}
// Set and remove an xattr on a live object, then verify that rmxattr on a
// removed object returns -ENOENT.
TEST_F(LibRadosIoPP, RmXattrPP) {
char buf[128];
char attr1[] = "attr1";
char attr1_buf[] = "foo bar baz";
memset(buf, 0xaa, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
bufferlist bl2;
bl2.append(attr1_buf, sizeof(attr1_buf));
ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl2));
ASSERT_EQ(0, ioctx.rmxattr("foo", attr1));
bufferlist bl3;
// After rmxattr the attribute is gone.
ASSERT_EQ(-ENODATA, ioctx.getxattr("foo", attr1, bl3));
// Test rmxattr on a removed object
char buf2[128];
char attr2[] = "attr2";
char attr2_buf[] = "foo bar baz";
memset(buf2, 0xbb, sizeof(buf2));
bufferlist bl21;
// Fix: append buf2 (just filled with 0xbb) — the original appended buf,
// leaving the buf2 fill dead and writing the wrong payload.
bl21.append(buf2, sizeof(buf2));
ASSERT_EQ(0, ioctx.write("foo_rmxattr", bl21, sizeof(buf2), 0));
bufferlist bl22;
bl22.append(attr2_buf, sizeof(attr2_buf));
ASSERT_EQ(0, ioctx.setxattr("foo_rmxattr", attr2, bl22));
ASSERT_EQ(0, ioctx.remove("foo_rmxattr"));
ASSERT_EQ(-ENOENT, ioctx.rmxattr("foo_rmxattr", attr2));
}
// getxattrs must enumerate exactly the two attributes that were set, each
// with its exact value.
TEST_F(LibRadosIoPP, XattrListPP) {
char buf[128];
char attr1[] = "attr1";
char attr1_buf[] = "foo bar baz";
char attr2[] = "attr2";
char attr2_buf[256];
for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
attr2_buf[j] = j % 0xff;
}
memset(buf, 0xaa, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
bufferlist bl2;
bl2.append(attr1_buf, sizeof(attr1_buf));
ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl2));
bufferlist bl3;
bl3.append(attr2_buf, sizeof(attr2_buf));
ASSERT_EQ(0, ioctx.setxattr("foo", attr2, bl3));
std::map<std::string, bufferlist> attrset;
ASSERT_EQ(0, ioctx.getxattrs("foo", attrset));
for (std::map<std::string, bufferlist>::iterator i = attrset.begin();
i != attrset.end(); ++i) {
if (i->first == string(attr1)) {
ASSERT_EQ(0, memcmp(i->second.c_str(), attr1_buf, sizeof(attr1_buf)));
}
else if (i->first == string(attr2)) {
ASSERT_EQ(0, memcmp(i->second.c_str(), attr2_buf, sizeof(attr2_buf)));
}
else {
// Unexpected attribute name: force a failure.
ASSERT_EQ(0, 1);
}
}
}
// EC pool: basic write succeeds in the default namespace and in an
// explicitly-set namespace.
TEST_F(LibRadosIoECPP, SimpleWritePP) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
ioctx.set_namespace("nspace");
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
}
// EC pool: exercise ObjectReadOperation::read with every combination of
// out_bl / per-op read_bl / per-op rval destinations, including multiple
// reads in one op and a preallocated out buffer with a cached crc.
TEST_F(LibRadosIoECPP, ReadOpPP) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
// Result delivered only via the operate() out bufferlist.
{
bufferlist op_bl;
ObjectReadOperation op;
op.read(0, sizeof(buf), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
ASSERT_EQ(sizeof(buf), op_bl.length());
ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
}
{
bufferlist op_bl;
ObjectReadOperation op;
op.read(0, 0, NULL, NULL); //len=0 mean read the whole object data
ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
ASSERT_EQ(sizeof(buf), op_bl.length());
ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
}
// Result delivered via both the per-op bufferlist and the out bufferlist.
{
bufferlist read_bl, op_bl;
ObjectReadOperation op;
op.read(0, sizeof(buf), &read_bl, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
ASSERT_EQ(sizeof(buf), read_bl.length());
ASSERT_EQ(sizeof(buf), op_bl.length());
ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
}
// Per-op return value is written back (1000 sentinel must become 0).
{
bufferlist op_bl;
int rval = 1000;
ObjectReadOperation op;
op.read(0, sizeof(buf), NULL, &rval);
ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
ASSERT_EQ(sizeof(buf), op_bl.length());
ASSERT_EQ(0, rval);
ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
}
{
bufferlist read_bl, op_bl;
int rval = 1000;
ObjectReadOperation op;
op.read(0, sizeof(buf), &read_bl, &rval);
ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
ASSERT_EQ(sizeof(buf), read_bl.length());
ASSERT_EQ(sizeof(buf), op_bl.length());
ASSERT_EQ(0, rval);
ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
}
// Two reads in one op: out bufferlist holds both results concatenated.
{
bufferlist read_bl1, read_bl2, op_bl;
int rval1 = 1000, rval2 = 1002;
ObjectReadOperation op;
op.read(0, sizeof(buf), &read_bl1, &rval1);
op.read(0, sizeof(buf), &read_bl2, &rval2);
ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
ASSERT_EQ(sizeof(buf), read_bl1.length());
ASSERT_EQ(sizeof(buf), read_bl2.length());
ASSERT_EQ(sizeof(buf) * 2, op_bl.length());
ASSERT_EQ(0, rval1);
ASSERT_EQ(0, rval2);
ASSERT_EQ(0, memcmp(read_bl1.c_str(), buf, sizeof(buf)));
ASSERT_EQ(0, memcmp(read_bl2.c_str(), buf, sizeof(buf)));
ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
ASSERT_EQ(0, memcmp(op_bl.c_str() + sizeof(buf), buf, sizeof(buf)));
}
{
bufferlist op_bl;
ObjectReadOperation op;
op.read(0, sizeof(buf), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
ASSERT_EQ(sizeof(buf), op_bl.length());
ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
}
// Same combinations with a NULL out bufferlist passed to operate().
{
bufferlist read_bl;
ObjectReadOperation op;
op.read(0, sizeof(buf), &read_bl, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
ASSERT_EQ(sizeof(buf), read_bl.length());
ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
}
{
int rval = 1000;
ObjectReadOperation op;
op.read(0, sizeof(buf), NULL, &rval);
ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
ASSERT_EQ(0, rval);
}
{
bufferlist read_bl;
int rval = 1000;
ObjectReadOperation op;
op.read(0, sizeof(buf), &read_bl, &rval);
ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
ASSERT_EQ(sizeof(buf), read_bl.length());
ASSERT_EQ(0, rval);
ASSERT_EQ(0, memcmp(read_bl.c_str(), buf, sizeof(buf)));
}
{
bufferlist read_bl1, read_bl2;
int rval1 = 1000, rval2 = 1002;
ObjectReadOperation op;
op.read(0, sizeof(buf), &read_bl1, &rval1);
op.read(0, sizeof(buf), &read_bl2, &rval2);
ASSERT_EQ(0, ioctx.operate("foo", &op, NULL));
ASSERT_EQ(sizeof(buf), read_bl1.length());
ASSERT_EQ(sizeof(buf), read_bl2.length());
ASSERT_EQ(0, rval1);
ASSERT_EQ(0, rval2);
ASSERT_EQ(0, memcmp(read_bl1.c_str(), buf, sizeof(buf)));
ASSERT_EQ(0, memcmp(read_bl2.c_str(), buf, sizeof(buf)));
}
// read into a preallocated buffer with a cached crc
{
bufferlist op_bl;
op_bl.append(std::string(sizeof(buf), 'x'));
ASSERT_NE(op_bl.crc32c(0), bl.crc32c(0)); // cache 'x' crc
ObjectReadOperation op;
op.read(0, sizeof(buf), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &op, &op_bl));
ASSERT_EQ(sizeof(buf), op_bl.length());
ASSERT_EQ(0, memcmp(op_bl.c_str(), buf, sizeof(buf)));
// The cached crc must have been invalidated by the read.
ASSERT_EQ(op_bl.crc32c(0), bl.crc32c(0));
}
}
// EC pool: sparse_read over a fully-written object; extents and data are
// checked against the written bufferlist by assert_eq_sparse.
TEST_F(LibRadosIoECPP, SparseReadOpPP) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
{
std::map<uint64_t, uint64_t> extents;
bufferlist read_bl;
int rval = -1;
ObjectReadOperation op;
op.sparse_read(0, sizeof(buf), &extents, &read_bl, &rval);
ASSERT_EQ(0, ioctx.operate("foo", &op, nullptr));
ASSERT_EQ(0, rval);
assert_eq_sparse(bl, extents, read_bl);
}
}
// EC pool: write/read round trip; an over-long read returns only the
// object's actual size.
TEST_F(LibRadosIoECPP, RoundTripPP) {
SKIP_IF_CRIMSON();
char buf[128];
// (dropped an unused local `Rados cluster;` — the fixture's ioctx is used)
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
bufferlist cl;
// Ask for 3x the size; only sizeof(buf) bytes exist.
ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", cl, sizeof(buf) * 3, 0));
ASSERT_EQ(0, memcmp(buf, cl.c_str(), sizeof(buf)));
}
// EC pool: write/read round trip via operations with fadvise flags.
TEST_F(LibRadosIoECPP, RoundTripPP2)
{
SKIP_IF_CRIMSON();
bufferlist bl;
bl.append("ceph");
ObjectWriteOperation write;
write.write(0, bl);
write.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ASSERT_EQ(0, ioctx.operate("foo", &write));
ObjectReadOperation read;
read.read(0, bl.length(), NULL, NULL);
read.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// EC pool: overlapping (non-append) overwrites are rejected with
// -EOPNOTSUPP and must not modify the object.
TEST_F(LibRadosIoECPP, OverlappingWriteRoundTripPP) {
SKIP_IF_CRIMSON();
int bsize = alignment;
int dbsize = bsize * 2;
// new char[] already yields char* — the original C-style casts were redundant.
char *buf = new char[dbsize];
char *buf2 = new char[bsize];
auto cleanup = [&] {
delete[] buf;
delete[] buf2;
};
scope_guard<decltype(cleanup)> sg(std::move(cleanup));
memset(buf, 0xcc, dbsize);
bufferlist bl1;
bl1.append(buf, dbsize);
ASSERT_EQ(0, ioctx.write("foo", bl1, dbsize, 0));
memset(buf2, 0xdd, bsize);
bufferlist bl2;
bl2.append(buf2, bsize);
// Partial overwrite of an EC object is not supported.
ASSERT_EQ(-EOPNOTSUPP, ioctx.write("foo", bl2, bsize, 0));
bufferlist bl3;
ASSERT_EQ(dbsize, ioctx.read("foo", bl3, dbsize, 0));
// Read the same as first write
ASSERT_EQ(0, memcmp(bl3.c_str(), buf, dbsize));
}
// EC pool: write_full replaces the object; the subsequent read returns
// only the new (shorter) content.
TEST_F(LibRadosIoECPP, WriteFullRoundTripPP) {
SKIP_IF_CRIMSON();
char buf[128];
char buf2[64];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
memset(buf2, 0xdd, sizeof(buf2));
bufferlist bl2;
bl2.append(buf2, sizeof(buf2));
ASSERT_EQ(0, ioctx.write_full("foo", bl2));
bufferlist bl3;
ASSERT_EQ((int)sizeof(buf2), ioctx.read("foo", bl3, sizeof(buf), 0));
ASSERT_EQ(0, memcmp(bl3.c_str(), buf2, sizeof(buf2)));
}
// EC pool: write_full round trip via operations with fadvise flags.
TEST_F(LibRadosIoECPP, WriteFullRoundTripPP2)
{
SKIP_IF_CRIMSON();
bufferlist bl;
bl.append("ceph");
ObjectWriteOperation write;
write.write_full(bl);
write.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ASSERT_EQ(0, ioctx.operate("foo", &write));
ObjectReadOperation read;
read.read(0, bl.length(), NULL, NULL);
read.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_DONTNEED|LIBRADOS_OP_FLAG_FADVISE_RANDOM);
ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// EC pool: two alignment-sized appends concatenate; buffers are
// alignment-sized because EC appends must be stripe-aligned.
TEST_F(LibRadosIoECPP, AppendRoundTripPP) {
SKIP_IF_CRIMSON();
// new char[] already yields char* — the original C-style casts were redundant.
char *buf = new char[alignment];
char *buf2 = new char[alignment];
auto cleanup = [&] {
delete[] buf;
delete[] buf2;
};
scope_guard<decltype(cleanup)> sg(std::move(cleanup));
memset(buf, 0xde, alignment);
bufferlist bl1;
bl1.append(buf, alignment);
ASSERT_EQ(0, ioctx.append("foo", bl1, alignment));
memset(buf2, 0xad, alignment);
bufferlist bl2;
bl2.append(buf2, alignment);
ASSERT_EQ(0, ioctx.append("foo", bl2, alignment));
bufferlist bl3;
ASSERT_EQ((int)(alignment * 2),
          ioctx.read("foo", bl3, (alignment * 4), 0));
const char *bl3_str = bl3.c_str();
ASSERT_EQ(0, memcmp(bl3_str, buf, alignment));
ASSERT_EQ(0, memcmp(bl3_str + alignment, buf2, alignment));
}
// EC pool: trunc is unsupported (-EOPNOTSUPP) and must leave the object
// unchanged.
TEST_F(LibRadosIoECPP, TruncTestPP) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xaa, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.append("foo", bl, sizeof(buf)));
ASSERT_EQ(-EOPNOTSUPP, ioctx.trunc("foo", sizeof(buf) / 2));
bufferlist bl2;
// Same size
ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl2, sizeof(buf), 0));
// No change
ASSERT_EQ(0, memcmp(bl2.c_str(), buf, sizeof(buf)));
}
TEST_F(LibRadosIoECPP, RemoveTestPP) {
  // EC pool: after remove(), reads fail with -ENOENT.
  SKIP_IF_CRIMSON();
  char payload[128];
  memset(payload, 0xaa, sizeof(payload));
  bufferlist write_bl;
  write_bl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.append("foo", write_bl, sizeof(payload)));
  ASSERT_EQ(0, ioctx.remove("foo"));
  bufferlist read_bl;
  ASSERT_EQ(-ENOENT, ioctx.read("foo", read_bl, sizeof(payload), 0));
}
// EC pool: xattr round trip — missing attr reads -ENODATA, set value
// reads back byte-for-byte.
TEST_F(LibRadosIoECPP, XattrsRoundTripPP) {
SKIP_IF_CRIMSON();
char buf[128];
char attr1[] = "attr1";
char attr1_buf[] = "foo bar baz";
memset(buf, 0xaa, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
bufferlist bl2;
ASSERT_EQ(-ENODATA, ioctx.getxattr("foo", attr1, bl2));
bufferlist bl3;
bl3.append(attr1_buf, sizeof(attr1_buf));
ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl3));
bufferlist bl4;
ASSERT_EQ((int)sizeof(attr1_buf),
          ioctx.getxattr("foo", attr1, bl4));
ASSERT_EQ(0, memcmp(bl4.c_str(), attr1_buf, sizeof(attr1_buf)));
}
// EC pool: set and remove an xattr, then verify rmxattr on a removed
// object returns -ENOENT.
TEST_F(LibRadosIoECPP, RmXattrPP) {
SKIP_IF_CRIMSON();
char buf[128];
char attr1[] = "attr1";
char attr1_buf[] = "foo bar baz";
memset(buf, 0xaa, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
bufferlist bl2;
bl2.append(attr1_buf, sizeof(attr1_buf));
ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl2));
ASSERT_EQ(0, ioctx.rmxattr("foo", attr1));
bufferlist bl3;
ASSERT_EQ(-ENODATA, ioctx.getxattr("foo", attr1, bl3));
// Test rmxattr on a removed object
char buf2[128];
char attr2[] = "attr2";
char attr2_buf[] = "foo bar baz";
memset(buf2, 0xbb, sizeof(buf2));
bufferlist bl21;
// Fix: append buf2 (just filled with 0xbb) — the original appended buf,
// leaving the buf2 fill dead and writing the wrong payload.
bl21.append(buf2, sizeof(buf2));
ASSERT_EQ(0, ioctx.write("foo_rmxattr", bl21, sizeof(buf2), 0));
bufferlist bl22;
bl22.append(attr2_buf, sizeof(attr2_buf));
ASSERT_EQ(0, ioctx.setxattr("foo_rmxattr", attr2, bl22));
ASSERT_EQ(0, ioctx.remove("foo_rmxattr"));
ASSERT_EQ(-ENOENT, ioctx.rmxattr("foo_rmxattr", attr2));
}
// EC pool: getxattrs must enumerate exactly the two attributes set, each
// with its exact value.
TEST_F(LibRadosIoECPP, XattrListPP) {
SKIP_IF_CRIMSON();
char buf[128];
char attr1[] = "attr1";
char attr1_buf[] = "foo bar baz";
char attr2[] = "attr2";
char attr2_buf[256];
for (size_t j = 0; j < sizeof(attr2_buf); ++j) {
attr2_buf[j] = j % 0xff;
}
memset(buf, 0xaa, sizeof(buf));
bufferlist bl1;
bl1.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.append("foo", bl1, sizeof(buf)));
bufferlist bl2;
bl2.append(attr1_buf, sizeof(attr1_buf));
ASSERT_EQ(0, ioctx.setxattr("foo", attr1, bl2));
bufferlist bl3;
bl3.append(attr2_buf, sizeof(attr2_buf));
ASSERT_EQ(0, ioctx.setxattr("foo", attr2, bl3));
std::map<std::string, bufferlist> attrset;
ASSERT_EQ(0, ioctx.getxattrs("foo", attrset));
for (std::map<std::string, bufferlist>::iterator i = attrset.begin();
i != attrset.end(); ++i) {
if (i->first == string(attr1)) {
ASSERT_EQ(0, memcmp(i->second.c_str(), attr1_buf, sizeof(attr1_buf)));
}
else if (i->first == string(attr2)) {
ASSERT_EQ(0, memcmp(i->second.c_str(), attr2_buf, sizeof(attr2_buf)));
}
else {
// Unexpected attribute name: force a failure.
ASSERT_EQ(0, 1);
}
}
}
// cmpext guard succeeds when the on-disk extent matches, so the chained
// write goes through and the object holds the new data.
TEST_F(LibRadosIoPP, CmpExtPP) {
bufferlist bl;
bl.append("ceph");
ObjectWriteOperation write1;
write1.write(0, bl);
ASSERT_EQ(0, ioctx.operate("foo", &write1));
bufferlist new_bl;
new_bl.append("CEPH");
ObjectWriteOperation write2;
// Compare-and-write: guard against the value written above.
write2.cmpext(0, bl, nullptr);
write2.write(0, new_bl);
ASSERT_EQ(0, ioctx.operate("foo", &write2));
ObjectReadOperation read;
read.read(0, bl.length(), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
ASSERT_EQ(0, memcmp(bl.c_str(), "CEPH", 4));
}
// cmpext against a nonexistent object: comparing with zeros succeeds
// (the missing extent reads as zeros), so the write proceeds.
TEST_F(LibRadosIoPP, CmpExtDNEPP) {
bufferlist bl;
bl.append(std::string(4, '\0'));
bufferlist new_bl;
new_bl.append("CEPH");
ObjectWriteOperation write;
write.cmpext(0, bl, nullptr);
write.write(0, new_bl);
ASSERT_EQ(0, ioctx.operate("foo", &write));
ObjectReadOperation read;
read.read(0, bl.length(), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
ASSERT_EQ(0, memcmp(bl.c_str(), "CEPH", 4));
}
// cmpext mismatch: the guarded write fails (mismatch offset encoded below
// -MAX_ERRNO) and the object keeps its original content.
TEST_F(LibRadosIoPP, CmpExtMismatchPP) {
bufferlist bl;
bl.append("ceph");
ObjectWriteOperation write1;
write1.write(0, bl);
ASSERT_EQ(0, ioctx.operate("foo", &write1));
bufferlist new_bl;
new_bl.append("CEPH");
ObjectWriteOperation write2;
// Guard compares "CEPH" against stored "ceph" — mismatch at offset 0.
write2.cmpext(0, new_bl, nullptr);
write2.write(0, new_bl);
ASSERT_EQ(-MAX_ERRNO, ioctx.operate("foo", &write2));
ObjectReadOperation read;
read.read(0, bl.length(), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
// EC pool: cmpext guard succeeds; note write_full is used since EC pools
// reject partial overwrites.
TEST_F(LibRadosIoECPP, CmpExtPP) {
SKIP_IF_CRIMSON();
bufferlist bl;
bl.append("ceph");
ObjectWriteOperation write1;
write1.write(0, bl);
ASSERT_EQ(0, ioctx.operate("foo", &write1));
bufferlist new_bl;
new_bl.append("CEPH");
ObjectWriteOperation write2;
write2.cmpext(0, bl, nullptr);
write2.write_full(new_bl);
ASSERT_EQ(0, ioctx.operate("foo", &write2));
ObjectReadOperation read;
read.read(0, bl.length(), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
ASSERT_EQ(0, memcmp(bl.c_str(), "CEPH", 4));
}
// EC pool: cmpext of zeros against a nonexistent object succeeds, so the
// write_full proceeds.
TEST_F(LibRadosIoECPP, CmpExtDNEPP) {
SKIP_IF_CRIMSON();
bufferlist bl;
bl.append(std::string(4, '\0'));
bufferlist new_bl;
new_bl.append("CEPH");
ObjectWriteOperation write;
write.cmpext(0, bl, nullptr);
write.write_full(new_bl);
ASSERT_EQ(0, ioctx.operate("foo", &write));
ObjectReadOperation read;
read.read(0, bl.length(), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
ASSERT_EQ(0, memcmp(bl.c_str(), "CEPH", 4));
}
// EC pool: cmpext mismatch fails the guarded write_full and leaves the
// object unchanged.
TEST_F(LibRadosIoECPP, CmpExtMismatchPP) {
SKIP_IF_CRIMSON();
bufferlist bl;
bl.append("ceph");
ObjectWriteOperation write1;
write1.write(0, bl);
ASSERT_EQ(0, ioctx.operate("foo", &write1));
bufferlist new_bl;
new_bl.append("CEPH");
ObjectWriteOperation write2;
// Guard compares "CEPH" against stored "ceph" — mismatch at offset 0.
write2.cmpext(0, new_bl, nullptr);
write2.write_full(new_bl);
ASSERT_EQ(-MAX_ERRNO, ioctx.operate("foo", &write2));
ObjectReadOperation read;
read.read(0, bl.length(), NULL, NULL);
ASSERT_EQ(0, ioctx.operate("foo", &read, &bl));
ASSERT_EQ(0, memcmp(bl.c_str(), "ceph", 4));
}
| 30,243 | 29.642351 | 88 |
cc
|
null |
ceph-main/src/test/librados/librados.cc
|
//#include "common/config.h"
#include "include/rados/librados.h"
#include "gtest/gtest.h"
// Smoke test: a cluster handle can be created with a client id and shut
// down without ever connecting.
TEST(Librados, CreateShutdown) {
rados_t cluster;
int err;
err = rados_create(&cluster, "someid");
EXPECT_EQ(err, 0);
rados_shutdown(cluster);
}
| 248 | 16.785714 | 41 |
cc
|
null |
ceph-main/src/test/librados/librados_config.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "include/rados/librados.h"
#include <sstream>
#include <string>
#include <string.h>
#include <errno.h>
using std::string;
// rados_conf_set followed by rados_conf_get round-trips a config value.
TEST(LibRadosConfig, SimpleSet) {
rados_t cl;
int ret = rados_create(&cl, NULL);
ASSERT_EQ(ret, 0);
ret = rados_conf_set(cl, "log_max_new", "21");
ASSERT_EQ(ret, 0);
char buf[128];
memset(buf, 0, sizeof(buf));
ret = rados_conf_get(cl, "log_max_new", buf, sizeof(buf));
ASSERT_EQ(ret, 0);
ASSERT_EQ(string("21"), string(buf));
rados_shutdown(cl);
}
// rados_conf_parse_argv applies --key/--value pairs from an argv array;
// both options must be retrievable afterwards.
TEST(LibRadosConfig, ArgV) {
rados_t cl;
int ret = rados_create(&cl, NULL);
ASSERT_EQ(ret, 0);
const char *argv[] = { "foo", "--log_max_new", "2",
"--key", "my-key", NULL };
// Exclude the trailing NULL sentinel from the count.
size_t argc = (sizeof(argv) / sizeof(argv[0])) - 1;
rados_conf_parse_argv(cl, argc, argv);
char buf[128];
memset(buf, 0, sizeof(buf));
ret = rados_conf_get(cl, "key", buf, sizeof(buf));
ASSERT_EQ(ret, 0);
ASSERT_EQ(string("my-key"), string(buf));
memset(buf, 0, sizeof(buf));
ret = rados_conf_get(cl, "log_max_new", buf, sizeof(buf));
ASSERT_EQ(ret, 0);
ASSERT_EQ(string("2"), string(buf));
rados_shutdown(cl);
}
// Debug levels: "N" normalizes to "N/N", "N/M" is kept as-is, non-numeric
// values are -EINVAL, and unknown option names are -ENOENT.
TEST(LibRadosConfig, DebugLevels) {
rados_t cl;
int ret = rados_create(&cl, NULL);
ASSERT_EQ(ret, 0);
ret = rados_conf_set(cl, "debug_rados", "3");
ASSERT_EQ(ret, 0);
char buf[128];
memset(buf, 0, sizeof(buf));
ret = rados_conf_get(cl, "debug_rados", buf, sizeof(buf));
ASSERT_EQ(ret, 0);
// A single level expands to "level/gather-level".
ASSERT_EQ(0, strcmp("3/3", buf));
ret = rados_conf_set(cl, "debug_rados", "7/8");
ASSERT_EQ(ret, 0);
memset(buf, 0, sizeof(buf));
ret = rados_conf_get(cl, "debug_rados", buf, sizeof(buf));
ASSERT_EQ(ret, 0);
ASSERT_EQ(0, strcmp("7/8", buf));
ret = rados_conf_set(cl, "debug_rados", "foo");
ASSERT_EQ(ret, -EINVAL);
ret = rados_conf_set(cl, "debug_asdkfasdjfajksdf", "foo");
ASSERT_EQ(ret, -ENOENT);
ret = rados_conf_get(cl, "debug_radfjadfsdados", buf, sizeof(buf));
ASSERT_EQ(ret, -ENOENT);
rados_shutdown(cl);
}
| 2,427 | 23.525253 | 70 |
cc
|
null |
ceph-main/src/test/librados/list.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "test/librados/test.h"
#include "test/librados/test_common.h"
#include "test/librados/TestCase.h"
#include "global/global_context.h"
#include "include/types.h"
#include "common/hobject.h"
#include "gtest/gtest.h"
#include <errno.h>
#include <string>
#include <stdexcept>
#include "crimson_utils.h"
using namespace std;
using namespace librados;
typedef RadosTestNSCleanup LibRadosList;
typedef RadosTestECNSCleanup LibRadosListEC;
typedef RadosTestNP LibRadosListNP;
// Listing a pool containing one object must return exactly "foo".
TEST_F(LibRadosList, ListObjects) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
rados_list_ctx_t ctx;
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
const char *entry;
bool foundit = false;
while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) != -ENOENT) {
foundit = true;
ASSERT_EQ(std::string(entry), "foo");
}
ASSERT_TRUE(foundit);
rados_nobjects_list_close(ctx);
}
// Object names containing embedded NULs must round-trip through listing;
// uses the _next2 variant which returns the entry length explicitly.
TEST_F(LibRadosList, ListObjectsZeroInName) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
ASSERT_EQ(0, rados_write(ioctx, "foo\0bar", buf, sizeof(buf), 0));
rados_list_ctx_t ctx;
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
const char *entry;
size_t entry_size;
bool foundit = false;
while (rados_nobjects_list_next2(ctx, &entry, NULL, NULL,
&entry_size, NULL, NULL) != -ENOENT) {
foundit = true;
// Compare with explicit length so the embedded NUL is included.
ASSERT_EQ(std::string(entry, entry_size), "foo\0bar");
}
ASSERT_TRUE(foundit);
rados_nobjects_list_close(ctx);
}
// Drain the listing context and verify every entry appears in `myset`
// exactly once (entries are erased as found; the set must end empty).
// When check_nspace == all_nspaces, entries are keyed "nspace:name";
// otherwise each entry's namespace must equal check_nspace.
static void check_list(
std::set<std::string>& myset,
rados_list_ctx_t& ctx,
const std::string &check_nspace)
{
const char *entry, *nspace;
cout << "myset " << myset << std::endl;
// we should see every item exactly once.
int ret;
while ((ret = rados_nobjects_list_next(ctx, &entry, NULL, &nspace)) == 0) {
std::string test_name;
if (check_nspace == all_nspaces) {
test_name = std::string(nspace) + ":" + std::string(entry);
} else {
ASSERT_TRUE(std::string(nspace) == check_nspace);
test_name = std::string(entry);
}
cout << test_name << std::endl;
ASSERT_TRUE(myset.end() != myset.find(test_name));
myset.erase(test_name);
}
// Listing terminates with -ENOENT and must have consumed the whole set.
ASSERT_EQ(-ENOENT, ret);
ASSERT_TRUE(myset.empty());
}
// Namespaced listing: objects created in "", "ns1" and "ns2" are listed
// per-namespace and all together via LIBRADOS_ALL_NSPACES; also checks
// rados_ioctx_get_namespace's -ERANGE behavior on a too-small buffer.
TEST_F(LibRadosList, ListObjectsNS) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
// Create :foo1, :foo2, :foo3, n1:foo1, ns1:foo4, ns1:foo5, ns2:foo6, n2:foo7
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_write(ioctx, "foo1", buf, sizeof(buf), 0));
rados_ioctx_set_namespace(ioctx, "ns1");
ASSERT_EQ(0, rados_write(ioctx, "foo1", buf, sizeof(buf), 0));
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_write(ioctx, "foo2", buf, sizeof(buf), 0));
ASSERT_EQ(0, rados_write(ioctx, "foo3", buf, sizeof(buf), 0));
rados_ioctx_set_namespace(ioctx, "ns1");
ASSERT_EQ(0, rados_write(ioctx, "foo4", buf, sizeof(buf), 0));
ASSERT_EQ(0, rados_write(ioctx, "foo5", buf, sizeof(buf), 0));
rados_ioctx_set_namespace(ioctx, "ns2");
ASSERT_EQ(0, rados_write(ioctx, "foo6", buf, sizeof(buf), 0));
ASSERT_EQ(0, rados_write(ioctx, "foo7", buf, sizeof(buf), 0));
char nspace[4];
// Buffer of 3 cannot hold "ns2" plus NUL.
ASSERT_EQ(-ERANGE, rados_ioctx_get_namespace(ioctx, nspace, 3));
ASSERT_EQ(static_cast<int>(strlen("ns2")),
rados_ioctx_get_namespace(ioctx, nspace, sizeof(nspace)));
ASSERT_EQ(0, strcmp("ns2", nspace));
std::set<std::string> def, ns1, ns2, all;
def.insert(std::string("foo1"));
def.insert(std::string("foo2"));
def.insert(std::string("foo3"));
ns1.insert(std::string("foo1"));
ns1.insert(std::string("foo4"));
ns1.insert(std::string("foo5"));
ns2.insert(std::string("foo6"));
ns2.insert(std::string("foo7"));
all.insert(std::string(":foo1"));
all.insert(std::string(":foo2"));
all.insert(std::string(":foo3"));
all.insert(std::string("ns1:foo1"));
all.insert(std::string("ns1:foo4"));
all.insert(std::string("ns1:foo5"));
all.insert(std::string("ns2:foo6"));
all.insert(std::string("ns2:foo7"));
rados_list_ctx_t ctx;
// Check default namespace ""
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
check_list(def, ctx, "");
rados_nobjects_list_close(ctx);
// Check namespace "ns1"
rados_ioctx_set_namespace(ioctx, "ns1");
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
check_list(ns1, ctx, "ns1");
rados_nobjects_list_close(ctx);
// Check namespace "ns2"
rados_ioctx_set_namespace(ioctx, "ns2");
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
check_list(ns2, ctx, "ns2");
rados_nobjects_list_close(ctx);
// Check ALL namespaces
rados_ioctx_set_namespace(ioctx, LIBRADOS_ALL_NSPACES);
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
check_list(all, ctx, all_nspaces);
rados_nobjects_list_close(ctx);
}
// Record each object's PG hash position during a full listing, then seek
// back to each position (in reverse) and verify the next entry returned
// belongs to that position's object set.
TEST_F(LibRadosList, ListObjectsStart) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
for (int i=0; i<16; ++i) {
string n = stringify(i);
ASSERT_EQ(0, rados_write(ioctx, n.c_str(), buf, sizeof(buf), 0));
}
rados_list_ctx_t ctx;
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
std::map<int, std::set<std::string> > pg_to_obj;
const char *entry;
while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) == 0) {
uint32_t pos = rados_nobjects_list_get_pg_hash_position(ctx);
std::cout << entry << " " << pos << std::endl;
pg_to_obj[pos].insert(entry);
}
rados_nobjects_list_close(ctx);
std::map<int, std::set<std::string> >::reverse_iterator p =
pg_to_obj.rbegin();
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
while (p != pg_to_obj.rend()) {
// Seek must land exactly on the requested hash position.
ASSERT_EQ((uint32_t)p->first, rados_nobjects_list_seek(ctx, p->first));
ASSERT_EQ(0, rados_nobjects_list_next(ctx, &entry, NULL, NULL));
std::cout << "have " << entry << " expect one of " << p->second << std::endl;
ASSERT_TRUE(p->second.count(entry));
++p;
}
rados_nobjects_list_close(ctx);
}
// this function replicates
// librados::operator<<(std::ostream& os, const librados::ObjectCursor& oc)
// because we don't want to use librados in librados client.
// A null cursor prints as a default-constructed hobject_t.
std::ostream& operator<<(std::ostream&os, const rados_object_list_cursor& oc)
{
if (oc) {
// The cursor is an opaque handle (void*) wrapping an hobject_t;
// use static_cast instead of the original C-style cast.
os << *static_cast<hobject_t *>(oc);
} else {
os << hobject_t{};
}
return os;
}
// Cursor-based listing: capture a cursor at every entry, verify seeking
// to a cursor repositions the iterator on that exact entry, then replay
// all cursors in reverse order and check each yields its recorded object.
TEST_F(LibRadosList, ListObjectsCursor) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
const int max_objs = 16;
for (int i=0; i<max_objs; ++i) {
string n = stringify(i);
ASSERT_EQ(0, rados_write(ioctx, n.c_str(), buf, sizeof(buf), 0));
}
{
rados_list_ctx_t ctx;
const char *entry;
rados_object_list_cursor cursor;
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
ASSERT_EQ(rados_nobjects_list_get_cursor(ctx, &cursor), 0);
rados_object_list_cursor first_cursor = cursor;
cout << "x cursor=" << cursor << std::endl;
while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) == 0) {
string oid = entry;
ASSERT_EQ(rados_nobjects_list_get_cursor(ctx, &cursor), 0);
cout << "> oid=" << oid << " cursor=" << cursor << std::endl;
}
// Seeking back to the initial cursor restarts the listing.
rados_nobjects_list_seek_cursor(ctx, first_cursor);
ASSERT_EQ(rados_nobjects_list_next(ctx, &entry, NULL, NULL), 0);
cout << "FIRST> seek to " << first_cursor << " oid=" << string(entry) << std::endl;
}
rados_list_ctx_t ctx;
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
std::map<rados_object_list_cursor, string> cursor_to_obj;
int count = 0;
const char *entry;
while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) == 0) {
rados_object_list_cursor cursor;
ASSERT_EQ(rados_nobjects_list_get_cursor(ctx, &cursor), 0);
string oid = entry;
cout << ": oid=" << oid << " cursor=" << cursor << std::endl;
cursor_to_obj[cursor] = oid;
// Seek to the cursor just obtained; the next entry must be the same oid.
rados_nobjects_list_seek_cursor(ctx, cursor);
cout << ": seek to " << cursor << std::endl;
ASSERT_EQ(rados_nobjects_list_next(ctx, &entry, NULL, NULL), 0);
cout << "> " << cursor << " -> " << entry << std::endl;
ASSERT_EQ(string(entry), oid);
ASSERT_LT(count, max_objs); /* avoid infinite loops due to bad seek */
++count;
}
ASSERT_EQ(count, max_objs);
auto p = cursor_to_obj.rbegin();
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
while (p != cursor_to_obj.rend()) {
cout << ": seek to " << p->first << std::endl;
rados_object_list_cursor cursor;
rados_object_list_cursor oid(p->first);
rados_nobjects_list_seek_cursor(ctx, oid);
ASSERT_EQ(rados_nobjects_list_get_cursor(ctx, &cursor), 0);
cout << ": cursor()=" << cursor << " expected=" << oid << std::endl;
// ASSERT_EQ(ObjectCursor(oid), ObjectCursor(cursor));
ASSERT_EQ(rados_nobjects_list_next(ctx, &entry, NULL, NULL), 0);
cout << "> " << cursor << " -> " << entry << std::endl;
cout << ": entry=" << entry << " expected=" << p->second << std::endl;
ASSERT_EQ(p->second, string(entry));
++p;
rados_object_list_cursor_free(ctx, cursor);
}
}
// EC pool: listing a pool containing one object must return exactly "foo".
TEST_F(LibRadosListEC, ListObjects) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
rados_list_ctx_t ctx;
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
const char *entry;
bool foundit = false;
while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) != -ENOENT) {
foundit = true;
ASSERT_EQ(std::string(entry), "foo");
}
ASSERT_TRUE(foundit);
rados_nobjects_list_close(ctx);
}
// EC pool: namespaced listing across "", "ns1", "ns2" and all namespaces.
TEST_F(LibRadosListEC, ListObjectsNS) {
SKIP_IF_CRIMSON();
char buf[128];
memset(buf, 0xcc, sizeof(buf));
// Create :foo1, :foo2, :foo3, n1:foo1, ns1:foo4, ns1:foo5, ns2:foo6, n2:foo7
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_write(ioctx, "foo1", buf, sizeof(buf), 0));
rados_ioctx_set_namespace(ioctx, "ns1");
ASSERT_EQ(0, rados_write(ioctx, "foo1", buf, sizeof(buf), 0));
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_write(ioctx, "foo2", buf, sizeof(buf), 0));
ASSERT_EQ(0, rados_write(ioctx, "foo3", buf, sizeof(buf), 0));
rados_ioctx_set_namespace(ioctx, "ns1");
ASSERT_EQ(0, rados_write(ioctx, "foo4", buf, sizeof(buf), 0));
ASSERT_EQ(0, rados_write(ioctx, "foo5", buf, sizeof(buf), 0));
rados_ioctx_set_namespace(ioctx, "ns2");
ASSERT_EQ(0, rados_write(ioctx, "foo6", buf, sizeof(buf), 0));
ASSERT_EQ(0, rados_write(ioctx, "foo7", buf, sizeof(buf), 0));
std::set<std::string> def, ns1, ns2, all;
def.insert(std::string("foo1"));
def.insert(std::string("foo2"));
def.insert(std::string("foo3"));
ns1.insert(std::string("foo1"));
ns1.insert(std::string("foo4"));
ns1.insert(std::string("foo5"));
ns2.insert(std::string("foo6"));
ns2.insert(std::string("foo7"));
all.insert(std::string(":foo1"));
all.insert(std::string(":foo2"));
all.insert(std::string(":foo3"));
all.insert(std::string("ns1:foo1"));
all.insert(std::string("ns1:foo4"));
all.insert(std::string("ns1:foo5"));
all.insert(std::string("ns2:foo6"));
all.insert(std::string("ns2:foo7"));
rados_list_ctx_t ctx;
// Check default namespace ""
rados_ioctx_set_namespace(ioctx, "");
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
check_list(def, ctx, "");
rados_nobjects_list_close(ctx);
// Check namespace "ns1"
rados_ioctx_set_namespace(ioctx, "ns1");
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
check_list(ns1, ctx, "ns1");
rados_nobjects_list_close(ctx);
// Check namespace "ns2"
rados_ioctx_set_namespace(ioctx, "ns2");
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
check_list(ns2, ctx, "ns2");
rados_nobjects_list_close(ctx);
// Check all namespaces
rados_ioctx_set_namespace(ioctx, LIBRADOS_ALL_NSPACES);
ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
check_list(all, ctx, all_nspaces);
rados_nobjects_list_close(ctx);
}
// Write 16 objects, record each listed entry's PG hash position, then
// re-open the listing and seek to each recorded position (highest first),
// verifying the first entry returned after each seek belongs to that PG.
TEST_F(LibRadosListEC, ListObjectsStart) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  for (int i=0; i<16; ++i) {
    string n = stringify(i);
    ASSERT_EQ(0, rados_write(ioctx, n.c_str(), buf, sizeof(buf), 0));
  }
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  // Map each PG hash position to the set of oids observed there.
  std::map<int, std::set<std::string> > pg_to_obj;
  const char *entry;
  while (rados_nobjects_list_next(ctx, &entry, NULL, NULL) == 0) {
    uint32_t pos = rados_nobjects_list_get_pg_hash_position(ctx);
    std::cout << entry << " " << pos << std::endl;
    pg_to_obj[pos].insert(entry);
  }
  rados_nobjects_list_close(ctx);
  // Walk positions in reverse so each iteration is a real backward seek.
  std::map<int, std::set<std::string> >::reverse_iterator p =
    pg_to_obj.rbegin();
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  while (p != pg_to_obj.rend()) {
    // seek returns the position it landed on; it must match the request.
    ASSERT_EQ((uint32_t)p->first, rados_nobjects_list_seek(ctx, p->first));
    ASSERT_EQ(0, rados_nobjects_list_next(ctx, &entry, NULL, NULL));
    std::cout << "have " << entry << " expect one of " << p->second << std::endl;
    ASSERT_TRUE(p->second.count(entry));
    ++p;
  }
  rados_nobjects_list_close(ctx);
}
// Deleting a pool out from under an open ioctx must make the object
// lister fail with -ENOENT instead of returning entries.
TEST_F(LibRadosListNP, ListObjectsError) {
  std::string pool_name;
  rados_t cluster;
  rados_ioctx_t ioctx;
  pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  //ASSERT_EQ(0, rados_pool_delete(cluster, pool_name.c_str()));
  {
    // Remove the pool via the mon command interface while the ioctx is
    // still open (deliberately bypassing rados_pool_delete above).
    // Renamed from buf/st: the originals shadowed the outer buf[128].
    char *outbuf, *outs;
    size_t outbuflen, outslen;
    string c = "{\"prefix\":\"osd pool rm\",\"pool\": \"" + pool_name +
      "\",\"pool2\":\"" + pool_name +
      "\",\"yes_i_really_really_mean_it_not_faking\": true}";
    const char *cmd[2] = { c.c_str(), 0 };
    ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &outbuf, &outbuflen, &outs, &outslen));
    // The out buffers are allocated by librados and were previously
    // leaked; release them with rados_buffer_free.
    rados_buffer_free(outbuf);
    rados_buffer_free(outs);
    ASSERT_EQ(0, rados_wait_for_latest_osdmap(cluster));
  }
  rados_list_ctx_t ctx;
  ASSERT_EQ(0, rados_nobjects_list_open(ioctx, &ctx));
  const char *entry;
  // The pool is gone, so advancing the listing must report -ENOENT.
  ASSERT_EQ(-ENOENT, rados_nobjects_list_next(ctx, &entry, NULL, NULL));
  rados_nobjects_list_close(ctx);
  rados_ioctx_destroy(ioctx);
  rados_shutdown(cluster);
}
// ---------------------------------------------
// Enumerate 16 objects with the cursor-based rados_object_list API in
// pages of up to 12, verifying every object is seen exactly once.
TEST_F(LibRadosList, EnumerateObjects) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  const uint32_t n_objects = 16;
  for (unsigned i=0; i<n_objects; ++i) {
    ASSERT_EQ(0, rados_write(ioctx, stringify(i).c_str(), buf, sizeof(buf), 0));
  }
  // Ensure a non-power-of-two PG count to avoid only
  // touching the easy path.
  if (!is_crimson_cluster()) {
    ASSERT_TRUE(set_pg_num(&s_cluster, pool_name, 11).empty());
    ASSERT_TRUE(set_pgp_num(&s_cluster, pool_name, 11).empty());
  }
  std::set<std::string> saw_obj;
  rados_object_list_cursor c = rados_object_list_begin(ioctx);
  rados_object_list_cursor end = rados_object_list_end(ioctx);
  while(!rados_object_list_is_end(ioctx, c))
  {
    rados_object_list_item results[12];
    memset(results, 0, sizeof(rados_object_list_item) * 12);
    // A fresh end cursor per iteration; freed right after the call.
    rados_object_list_cursor temp_end = rados_object_list_end(ioctx);
    // Advances c to the continuation point for the next page.
    int r = rados_object_list(ioctx, c, temp_end,
      12, NULL, 0, results, &c);
    rados_object_list_cursor_free(ioctx, temp_end);
    ASSERT_GE(r, 0);
    for (int i = 0; i < r; ++i) {
      std::string oid(results[i].oid, results[i].oid_length);
      if (saw_obj.count(oid)) {
        std::cerr << "duplicate obj " << oid << std::endl;
      }
      // No duplicates allowed when no split is in progress.
      ASSERT_FALSE(saw_obj.count(oid));
      saw_obj.insert(oid);
    }
    rados_object_list_free(12, results);
  }
  rados_object_list_cursor_free(ioctx, c);
  rados_object_list_cursor_free(ioctx, end);
  for (unsigned i=0; i<n_objects; ++i) {
    if (!saw_obj.count(stringify(i))) {
      std::cerr << "missing object " << i << std::endl;
    }
    ASSERT_TRUE(saw_obj.count(stringify(i)));
  }
  ASSERT_EQ(n_objects, saw_obj.size());
}
// Slice the full enumeration range into an odd number (5) of shards with
// rados_object_list_slice and verify that enumerating all shards yields
// each of the 16 objects exactly once.
TEST_F(LibRadosList, EnumerateObjectsSplit) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  const uint32_t n_objects = 16;
  for (unsigned i=0; i<n_objects; ++i) {
    ASSERT_EQ(0, rados_write(ioctx, stringify(i).c_str(), buf, sizeof(buf), 0));
  }
  // Ensure a non-power-of-two PG count to avoid only
  // touching the easy path.
  if (!is_crimson_cluster()) {
    if (auto error = set_pg_num(&s_cluster, pool_name, 11); !error.empty()) {
      GTEST_FAIL() << error;
    }
    if (auto error = set_pgp_num(&s_cluster, pool_name, 11); !error.empty()) {
      GTEST_FAIL() << error;
    }
  }
  rados_object_list_cursor begin = rados_object_list_begin(ioctx);
  rados_object_list_cursor end = rados_object_list_end(ioctx);
  // Step through an odd number of shards
  unsigned m = 5;
  std::set<std::string> saw_obj;
  for (unsigned n = 0; n < m; ++n) {
    // Stray double semicolons removed from the two initializers below.
    // NOTE(review): these cursors are immediately overwritten by
    // rados_object_list_slice; if slice does not free the incoming
    // cursors, the begin()/end() allocations here leak -- TODO confirm.
    rados_object_list_cursor shard_start = rados_object_list_begin(ioctx);
    rados_object_list_cursor shard_end = rados_object_list_end(ioctx);
    rados_object_list_slice(
      ioctx,
      begin,
      end,
      n,
      m,
      &shard_start,
      &shard_end);
    std::cout << "split " << n << "/" << m << " -> "
              << *(hobject_t*)shard_start << " "
              << *(hobject_t*)shard_end << std::endl;
    rados_object_list_cursor c = shard_start;
    //while(c < shard_end)
    while(rados_object_list_cursor_cmp(ioctx, c, shard_end) == -1)
    {
      rados_object_list_item results[12];
      memset(results, 0, sizeof(rados_object_list_item) * 12);
      // rados_object_list advances c to the next page's start cursor.
      int r = rados_object_list(ioctx,
        c, shard_end,
        12, NULL, 0, results, &c);
      ASSERT_GE(r, 0);
      for (int i = 0; i < r; ++i) {
        std::string oid(results[i].oid, results[i].oid_length);
        if (saw_obj.count(oid)) {
          std::cerr << "duplicate obj " << oid << std::endl;
        }
        ASSERT_FALSE(saw_obj.count(oid));
        saw_obj.insert(oid);
      }
      rados_object_list_free(12, results);
    }
    rados_object_list_cursor_free(ioctx, shard_start);
    rados_object_list_cursor_free(ioctx, shard_end);
  }
  rados_object_list_cursor_free(ioctx, begin);
  rados_object_list_cursor_free(ioctx, end);
  for (unsigned i=0; i<n_objects; ++i) {
    if (!saw_obj.count(stringify(i))) {
      std::cerr << "missing object " << i << std::endl;
    }
    ASSERT_TRUE(saw_obj.count(stringify(i)));
  }
  ASSERT_EQ(n_objects, saw_obj.size());
}
| 18,654 | 32.552158 | 103 |
cc
|
null |
ceph-main/src/test/librados/list_cxx.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <string>
#include <stdexcept>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "include/types.h"
#include "common/hobject.h"
#include "test/librados/test_cxx.h"
#include "test/librados/test_common.h"
#include "test/librados/testcase_cxx.h"
#include "global/global_context.h"
#include "crimson_utils.h"
using namespace librados;
// Test fixtures: replicated-pool and EC-pool variants that clean up all
// namespaces used during the test.
typedef RadosTestPPNSCleanup LibRadosListPP;
typedef RadosTestECPPNSCleanup LibRadosListECPP;
// Writing a single object and listing the pool must yield exactly "foo".
TEST_F(LibRadosListPP, ListObjectsPP) {
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist data;
  data.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", data, sizeof(payload), 0));
  // Iterate the listing; every entry must be "foo" and at least one
  // entry must be returned.
  bool saw_foo = false;
  for (NObjectIterator it = ioctx.nobjects_begin();
       it != ioctx.nobjects_end(); ++it) {
    saw_foo = true;
    ASSERT_EQ(it->get_oid(), "foo");
  }
  ASSERT_TRUE(saw_foo);
}
// List the pool, then seek(0) on the same iterator and list again;
// both passes must see the single object "foo".
TEST_F(LibRadosListPP, ListObjectsTwicePP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  bool foundit = false;
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
  // Incrementing an end iterator must keep it at end.
  ++iter;
  ASSERT_TRUE(iter == ioctx.nobjects_end());
  foundit = false;
  // Rewind to hash position 0 and re-list.
  iter.seek(0);
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
}
// Copies of a listing iterator must be independent of the original and
// stay valid after the original goes out of scope.
TEST_F(LibRadosListPP, ListObjectsCopyIterPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  // make sure this is still valid after the original iterators are gone
  NObjectIterator iter3;
  {
    NObjectIterator iter(ioctx.nobjects_begin());
    NObjectIterator iter2(iter);
    iter3 = iter2;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
    ASSERT_TRUE(iter == ioctx.nobjects_end());
    // Incrementing past end must be a no-op.
    ++iter;
    ASSERT_TRUE(iter == ioctx.nobjects_end());
    // Advancing the original must not have moved the copies.
    ASSERT_EQ(iter2->get_oid(), "foo");
    ASSERT_EQ(iter3->get_oid(), "foo");
    ++iter2;
    ASSERT_TRUE(iter2 == ioctx.nobjects_end());
  }
  ASSERT_EQ(iter3->get_oid(), "foo");
  // Deliberate self-assignment: must leave the iterator unchanged.
  iter3 = iter3;
  ASSERT_EQ(iter3->get_oid(), "foo");
  ++iter3;
  ASSERT_TRUE(iter3 == ioctx.nobjects_end());
}
// All ways of obtaining an end iterator must compare equal, and an
// exhausted iterator (and its copies) must equal end.
TEST_F(LibRadosListPP, ListObjectsEndIter) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  NObjectIterator iter_end(ioctx.nobjects_end());
  NObjectIterator iter_end2 = ioctx.nobjects_end();
  ASSERT_TRUE(iter_end == iter_end2);
  ASSERT_TRUE(iter_end == ioctx.nobjects_end());
  ASSERT_TRUE(iter_end2 == ioctx.nobjects_end());
  ASSERT_EQ(iter->get_oid(), "foo");
  ++iter;
  ASSERT_TRUE(iter == ioctx.nobjects_end());
  ASSERT_TRUE(iter == iter_end);
  ASSERT_TRUE(iter == iter_end2);
  // A copy of an exhausted iterator is also at end.
  NObjectIterator iter2 = iter;
  ASSERT_TRUE(iter2 == ioctx.nobjects_end());
  ASSERT_TRUE(iter2 == iter_end);
  ASSERT_TRUE(iter2 == iter_end2);
}
// List ioctx and check the result against myset (consumed: entries are
// erased as they are seen). When check_nspace is all_nspaces, entries are
// compared as "nspace:oid"; otherwise each entry's namespace must equal
// check_nspace and only the oid is compared.
static void check_listpp(std::set<std::string>& myset, IoCtx& ioctx, const std::string &check_nspace)
{
  NObjectIterator iter(ioctx.nobjects_begin());
  std::set<std::string> orig_set(myset);
  /**
   * During splitting, we might see duplicate items.
   * We assert that every object returned is in myset and that
   * we don't hit ENOENT until we have hit every item in myset
   * at least once.
   */
  while (iter != ioctx.nobjects_end()) {
    std::string test_name;
    if (check_nspace == all_nspaces) {
      test_name = iter->get_nspace() + ":" + iter->get_oid();
    } else {
      ASSERT_TRUE(iter->get_nspace() == check_nspace);
      test_name = iter->get_oid();
    }
    // Every listed entry must be expected; erase it so the final check
    // proves every expected entry appeared at least once.
    ASSERT_TRUE(orig_set.end() != orig_set.find(test_name));
    myset.erase(test_name);
    ++iter;
  }
  ASSERT_TRUE(myset.empty());
}
// Write objects across the default namespace, ns1 and ns2, then verify
// per-namespace listings and the LIBRADOS_ALL_NSPACES listing.
TEST_F(LibRadosListPP, ListObjectsPPNS) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // Create :foo1, :foo2, :foo3, n1:foo1, ns1:foo4, ns1:foo5, ns2:foo6, n2:foo7
  ioctx.set_namespace("");
  ASSERT_EQ(0, ioctx.write("foo1", bl1, sizeof(buf), 0));
  ioctx.set_namespace("ns1");
  ASSERT_EQ(0, ioctx.write("foo1", bl1, sizeof(buf), 0));
  ioctx.set_namespace("");
  ASSERT_EQ(0, ioctx.write("foo2", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo3", bl1, sizeof(buf), 0));
  ioctx.set_namespace("ns1");
  ASSERT_EQ(0, ioctx.write("foo4", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo5", bl1, sizeof(buf), 0));
  ioctx.set_namespace("ns2");
  ASSERT_EQ(0, ioctx.write("foo6", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo7", bl1, sizeof(buf), 0));
  // get_namespace must reflect the last set_namespace call.
  ASSERT_EQ(std::string("ns2"), ioctx.get_namespace());
  // Expected contents per namespace; "all" uses "nspace:oid" keys.
  std::set<std::string> def, ns1, ns2, all;
  def.insert(std::string("foo1"));
  def.insert(std::string("foo2"));
  def.insert(std::string("foo3"));
  ns1.insert(std::string("foo1"));
  ns1.insert(std::string("foo4"));
  ns1.insert(std::string("foo5"));
  ns2.insert(std::string("foo6"));
  ns2.insert(std::string("foo7"));
  all.insert(std::string(":foo1"));
  all.insert(std::string(":foo2"));
  all.insert(std::string(":foo3"));
  all.insert(std::string("ns1:foo1"));
  all.insert(std::string("ns1:foo4"));
  all.insert(std::string("ns1:foo5"));
  all.insert(std::string("ns2:foo6"));
  all.insert(std::string("ns2:foo7"));
  ioctx.set_namespace("");
  check_listpp(def, ioctx, "");
  ioctx.set_namespace("ns1");
  check_listpp(ns1, ioctx, "ns1");
  ioctx.set_namespace("ns2");
  check_listpp(ns2, ioctx, "ns2");
  ioctx.set_namespace(all_nspaces);
  check_listpp(all, ioctx, all_nspaces);
}
// List 256 objects and verify that the PG hash positions reported by the
// iterator form a contiguous range 0..n-1.
TEST_F(LibRadosListPP, ListObjectsManyPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  for (int i=0; i<256; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::set<std::string> saw_obj;
  std::set<int> saw_pg;
  for (; it != ioctx.nobjects_end(); ++it) {
    std::cout << it->get_oid()
              << " " << it.get_pg_hash_position() << std::endl;
    saw_obj.insert(it->get_oid());
    saw_pg.insert(it.get_pg_hash_position());
  }
  std::cout << "saw " << saw_pg.size() << " pgs " << std::endl;
  // make sure they are 0..n
  for (unsigned i = 0; i < saw_pg.size(); ++i)
    ASSERT_TRUE(saw_pg.count(i));
}
// C++ counterpart of ListObjectsStart: record PG hash positions for 16
// objects, then seek back to each position (highest first) and verify the
// entry returned belongs to that PG.
TEST_F(LibRadosListPP, ListObjectsStartPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  for (int i=0; i<16; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::map<int, std::set<std::string> > pg_to_obj;
  for (; it != ioctx.nobjects_end(); ++it) {
    std::cout << it->get_oid() << " " << it.get_pg_hash_position() << std::endl;
    pg_to_obj[it.get_pg_hash_position()].insert(it->get_oid());
  }
  std::map<int, std::set<std::string> >::reverse_iterator p =
    pg_to_obj.rbegin();
  // Restart the listing at the highest recorded position.
  it = ioctx.nobjects_begin(p->first);
  while (p != pg_to_obj.rend()) {
    // seek returns the position it landed on; it must match the request.
    ASSERT_EQ((uint32_t)p->first, it.seek(p->first));
    std::cout << "have " << it->get_oid() << " expect one of " << p->second << std::endl;
    ASSERT_TRUE(p->second.count(it->get_oid()));
    ++p;
  }
}
// Exercise iterator cursors across namespaces: every object's cursor must
// seek back to that object (with the right namespace), reading past a
// saved mid-point cursor must continue in listing order, and seeking to
// every recorded cursor must reproduce the expected oid/namespace.
TEST_F(LibRadosListPP, ListObjectsCursorNSPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  const int max_objs = 16;
  // Objects are spread over namespaces ns0..ns3, four oids each.
  map<string, string> oid_to_ns;
  for (int i=0; i<max_objs; ++i) {
    stringstream ss;
    ss << "ns" << i / 4;
    ioctx.set_namespace(ss.str());
    string oid = stringify(i);
    ASSERT_EQ(0, ioctx.write(oid, bl, bl.length(), 0));
    oid_to_ns[oid] = ss.str();
  }
  ioctx.set_namespace(all_nspaces);
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::map<librados::ObjectCursor, string> cursor_to_obj;
  int count = 0;
  librados::ObjectCursor seek_cursor;
  map<string, list<librados::ObjectCursor> > ns_to_cursors;
  // First pass: debug dump only (unused locals removed).
  for (it = ioctx.nobjects_begin(); it != ioctx.nobjects_end(); ++it) {
    cout << "> oid=" << it->get_oid() << " cursor=" << it.get_cursor() << std::endl;
  }
  vector<string> objs_order;
  for (it = ioctx.nobjects_begin(); it != ioctx.nobjects_end(); ++it, ++count) {
    librados::ObjectCursor cursor = it.get_cursor();
    string oid = it->get_oid();
    std::cout << oid << " " << it.get_pg_hash_position() << std::endl;
    cout << ": oid=" << oid << " cursor=" << it.get_cursor() << std::endl;
    cursor_to_obj[cursor] = oid;
    ASSERT_EQ(oid_to_ns[oid], it->get_nspace());
    // Seeking to the current cursor must land back on the same object.
    it.seek(cursor);
    cout << ": seek to " << cursor << " it.cursor=" << it.get_cursor() << std::endl;
    ASSERT_EQ(oid, it->get_oid());
    ASSERT_LT(count, max_objs); /* avoid infinite loops due to bad seek */
    ns_to_cursors[it->get_nspace()].push_back(cursor);
    if (count == max_objs/2) {
      seek_cursor = cursor;
    }
    objs_order.push_back(it->get_oid());
  }
  ASSERT_EQ(count, max_objs);
  /* check that reading past seek also works */
  cout << "seek_cursor=" << seek_cursor << std::endl;
  it.seek(seek_cursor);
  for (count = max_objs/2; count < max_objs; ++count, ++it) {
    ASSERT_EQ(objs_order[count], it->get_oid());
  }
  /* seek to all cursors, check that we get expected obj */
  for (auto& niter : ns_to_cursors) {
    const string& ns = niter.first;
    list<librados::ObjectCursor>& cursors = niter.second;
    for (auto& cursor : cursors) {
      cout << ": seek to " << cursor << std::endl;
      it.seek(cursor);
      ASSERT_EQ(cursor, it.get_cursor());
      string& expected_oid = cursor_to_obj[cursor];
      cout << ": it->get_cursor()=" << it.get_cursor() << " expected=" << cursor << std::endl;
      cout << ": it->get_oid()=" << it->get_oid() << " expected=" << expected_oid << std::endl;
      // Fixed debug line: it previously printed get_oid() under the
      // get_nspace() label.
      cout << ": it->get_nspace()=" << it->get_nspace() << " expected=" << ns << std::endl;
      ASSERT_EQ(expected_oid, it->get_oid());
      ASSERT_EQ(it->get_nspace(), ns);
    }
  }
}
// Record a cursor for every listed object, then seek to each recorded
// cursor (in reverse cursor order) and verify the expected object comes
// back, including via a fresh iterator constructed from the cursor.
TEST_F(LibRadosListPP, ListObjectsCursorPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  const int max_objs = 16;
  // Spread objects over namespaces ns0..ns3, four oids each.
  for (int i=0; i<max_objs; ++i) {
    stringstream ss;
    ss << "ns" << i / 4;
    ioctx.set_namespace(ss.str());
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  ioctx.set_namespace(all_nspaces);
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::map<librados::ObjectCursor, string> cursor_to_obj;
  int count = 0;
  for (; it != ioctx.nobjects_end(); ++it, ++count) {
    librados::ObjectCursor cursor = it.get_cursor();
    string oid = it->get_oid();
    std::cout << oid << " " << it.get_pg_hash_position() << std::endl;
    cout << ": oid=" << oid << " cursor=" << it.get_cursor() << std::endl;
    cursor_to_obj[cursor] = oid;
    // Seeking to the current cursor must land back on the same object.
    it.seek(cursor);
    cout << ": seek to " << cursor << std::endl;
    ASSERT_EQ(oid, it->get_oid());
    ASSERT_LT(count, max_objs); /* avoid infinite loops due to bad seek */
  }
  ASSERT_EQ(count, max_objs);
  auto p = cursor_to_obj.rbegin();
  it = ioctx.nobjects_begin();
  while (p != cursor_to_obj.rend()) {
    cout << ": seek to " << p->first << std::endl;
    it.seek(p->first);
    ASSERT_EQ(p->first, it.get_cursor());
    cout << ": it->get_cursor()=" << it.get_cursor() << " expected=" << p->first << std::endl;
    cout << ": it->get_oid()=" << it->get_oid() << " expected=" << p->second << std::endl;
    ASSERT_EQ(p->second, it->get_oid());
    // A brand-new iterator started at this cursor must agree.
    librados::NObjectIterator it2 = ioctx.nobjects_begin(it.get_cursor());
    ASSERT_EQ(it2->get_oid(), it->get_oid());
    ++p;
  }
}
// EC-pool variant: listing after a single write must yield exactly "foo".
TEST_F(LibRadosListECPP, ListObjectsPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  bool foundit = false;
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
}
// EC-pool variant: list, then seek(0) and list again; both passes must
// see the single object "foo".
TEST_F(LibRadosListECPP, ListObjectsTwicePP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  bool foundit = false;
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
  // Incrementing an end iterator must keep it at end.
  ++iter;
  ASSERT_TRUE(iter == ioctx.nobjects_end());
  foundit = false;
  iter.seek(0);
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
  }
  ASSERT_TRUE(foundit);
}
// EC-pool variant: iterator copies are independent of the original and
// stay valid after the original goes out of scope.
TEST_F(LibRadosListECPP, ListObjectsCopyIterPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  // make sure this is still valid after the original iterators are gone
  NObjectIterator iter3;
  {
    NObjectIterator iter(ioctx.nobjects_begin());
    NObjectIterator iter2(iter);
    iter3 = iter2;
    ASSERT_EQ((*iter).get_oid(), "foo");
    ++iter;
    ASSERT_TRUE(iter == ioctx.nobjects_end());
    // Incrementing past end must be a no-op.
    ++iter;
    ASSERT_TRUE(iter == ioctx.nobjects_end());
    // Advancing the original must not have moved the copies.
    ASSERT_EQ(iter2->get_oid(), "foo");
    ASSERT_EQ(iter3->get_oid(), "foo");
    ++iter2;
    ASSERT_TRUE(iter2 == ioctx.nobjects_end());
  }
  ASSERT_EQ(iter3->get_oid(), "foo");
  // Deliberate self-assignment: must leave the iterator unchanged.
  iter3 = iter3;
  ASSERT_EQ(iter3->get_oid(), "foo");
  ++iter3;
  ASSERT_TRUE(iter3 == ioctx.nobjects_end());
}
// EC-pool variant: all end iterators compare equal, and an exhausted
// iterator (and its copies) equals end.
TEST_F(LibRadosListECPP, ListObjectsEndIter) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  NObjectIterator iter(ioctx.nobjects_begin());
  NObjectIterator iter_end(ioctx.nobjects_end());
  NObjectIterator iter_end2 = ioctx.nobjects_end();
  ASSERT_TRUE(iter_end == iter_end2);
  ASSERT_TRUE(iter_end == ioctx.nobjects_end());
  ASSERT_TRUE(iter_end2 == ioctx.nobjects_end());
  ASSERT_EQ(iter->get_oid(), "foo");
  ++iter;
  ASSERT_TRUE(iter == ioctx.nobjects_end());
  ASSERT_TRUE(iter == iter_end);
  ASSERT_TRUE(iter == iter_end2);
  NObjectIterator iter2 = iter;
  ASSERT_TRUE(iter2 == ioctx.nobjects_end());
  ASSERT_TRUE(iter2 == iter_end);
  ASSERT_TRUE(iter2 == iter_end2);
}
// EC-pool variant: per-namespace listings over the default namespace,
// ns1 and ns2 (no all-namespaces check in this variant).
TEST_F(LibRadosListECPP, ListObjectsPPNS) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // Create :foo1, :foo2, :foo3, n1:foo1, ns1:foo4, ns1:foo5, ns2:foo6, n2:foo7
  ioctx.set_namespace("");
  ASSERT_EQ(0, ioctx.write("foo1", bl1, sizeof(buf), 0));
  ioctx.set_namespace("ns1");
  ASSERT_EQ(0, ioctx.write("foo1", bl1, sizeof(buf), 0));
  ioctx.set_namespace("");
  ASSERT_EQ(0, ioctx.write("foo2", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo3", bl1, sizeof(buf), 0));
  ioctx.set_namespace("ns1");
  ASSERT_EQ(0, ioctx.write("foo4", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo5", bl1, sizeof(buf), 0));
  ioctx.set_namespace("ns2");
  ASSERT_EQ(0, ioctx.write("foo6", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo7", bl1, sizeof(buf), 0));
  std::set<std::string> def, ns1, ns2;
  def.insert(std::string("foo1"));
  def.insert(std::string("foo2"));
  def.insert(std::string("foo3"));
  ns1.insert(std::string("foo1"));
  ns1.insert(std::string("foo4"));
  ns1.insert(std::string("foo5"));
  ns2.insert(std::string("foo6"));
  ns2.insert(std::string("foo7"));
  ioctx.set_namespace("");
  check_listpp(def, ioctx, "");
  ioctx.set_namespace("ns1");
  check_listpp(ns1, ioctx, "ns1");
  ioctx.set_namespace("ns2");
  check_listpp(ns2, ioctx, "ns2");
}
// EC-pool variant: list 256 objects and verify the reported PG hash
// positions form a contiguous range 0..n-1.
TEST_F(LibRadosListECPP, ListObjectsManyPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  for (int i=0; i<256; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::set<std::string> saw_obj;
  std::set<int> saw_pg;
  for (; it != ioctx.nobjects_end(); ++it) {
    std::cout << it->get_oid()
              << " " << it.get_pg_hash_position() << std::endl;
    saw_obj.insert(it->get_oid());
    saw_pg.insert(it.get_pg_hash_position());
  }
  std::cout << "saw " << saw_pg.size() << " pgs " << std::endl;
  // make sure they are 0..n
  for (unsigned i = 0; i < saw_pg.size(); ++i)
    ASSERT_TRUE(saw_pg.count(i));
}
// EC-pool variant: record PG hash positions for 16 objects, then seek
// back to each position (highest first) and verify the entry returned
// belongs to that PG.
TEST_F(LibRadosListECPP, ListObjectsStartPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  for (int i=0; i<16; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, bl.length(), 0));
  }
  librados::NObjectIterator it = ioctx.nobjects_begin();
  std::map<int, std::set<std::string> > pg_to_obj;
  for (; it != ioctx.nobjects_end(); ++it) {
    std::cout << it->get_oid() << " " << it.get_pg_hash_position() << std::endl;
    pg_to_obj[it.get_pg_hash_position()].insert(it->get_oid());
  }
  std::map<int, std::set<std::string> >::reverse_iterator p =
    pg_to_obj.rbegin();
  it = ioctx.nobjects_begin(p->first);
  while (p != pg_to_obj.rend()) {
    // seek returns the position it landed on; it must match the request.
    ASSERT_EQ((uint32_t)p->first, it.seek(p->first));
    std::cout << "have " << it->get_oid() << " expect one of " << p->second << std::endl;
    ASSERT_TRUE(p->second.count(it->get_oid()));
    ++p;
  }
}
// Listing with the "plain" xattr filter must return only the object whose
// "theattr" xattr equals the target value, and return it exactly once.
TEST_F(LibRadosListPP, ListObjectsFilterPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist obj_content;
  obj_content.append(buf, sizeof(buf));
  std::string target_str = "content";
  // Write xattr bare, no ::encod'ing
  bufferlist target_val;
  target_val.append(target_str);
  bufferlist nontarget_val;
  nontarget_val.append("rhubarb");
  ASSERT_EQ(0, ioctx.write("has_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.write("has_wrong_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.write("no_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.setxattr("has_xattr", "theattr", target_val));
  ASSERT_EQ(0, ioctx.setxattr("has_wrong_xattr", "theattr", nontarget_val));
  // Filter payload: filter name, xattr key (with leading underscore as
  // stored), and the value to match.
  bufferlist filter_bl;
  std::string filter_name = "plain";
  encode(filter_name, filter_bl);
  encode("_theattr", filter_bl);
  encode(target_str, filter_bl);
  NObjectIterator iter(ioctx.nobjects_begin(filter_bl));
  bool foundit = false;
  int k = 0;
  while (iter != ioctx.nobjects_end()) {
    foundit = true;
    // We should only see the object that matches the filter
    ASSERT_EQ((*iter).get_oid(), "has_xattr");
    // We should only see it once
    ASSERT_EQ(k, 0);
    ++iter;
    ++k;
  }
  ASSERT_TRUE(foundit);
}
// Enumerate 16 objects with the C++ cursor-based object_list API in pages
// of up to 12, verifying every object is seen exactly once.
TEST_F(LibRadosListPP, EnumerateObjectsPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  const uint32_t n_objects = 16;
  for (unsigned i=0; i<n_objects; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, sizeof(buf), 0));
  }
  std::set<std::string> saw_obj;
  ObjectCursor c = ioctx.object_list_begin();
  ObjectCursor end = ioctx.object_list_end();
  while(!ioctx.object_list_is_end(c))
  {
    std::vector<ObjectItem> result;
    // object_list advances c to the continuation point for the next page.
    int r = ioctx.object_list(c, end, 12, {}, &result, &c);
    ASSERT_GE(r, 0);
    ASSERT_EQ(r, (int)result.size());
    for (int i = 0; i < r; ++i) {
      auto oid = result[i].oid;
      if (saw_obj.count(oid)) {
        std::cerr << "duplicate obj " << oid << std::endl;
      }
      ASSERT_FALSE(saw_obj.count(oid));
      saw_obj.insert(oid);
    }
  }
  for (unsigned i=0; i<n_objects; ++i) {
    if (!saw_obj.count(stringify(i))) {
      std::cerr << "missing object " << i << std::endl;
    }
    ASSERT_TRUE(saw_obj.count(stringify(i)));
  }
  ASSERT_EQ(n_objects, saw_obj.size());
}
// Slice the full enumeration range into an odd number (5) of shards with
// object_list_slice and verify that enumerating all shards yields each of
// the 16 objects exactly once.
TEST_F(LibRadosListPP, EnumerateObjectsSplitPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  const uint32_t n_objects = 16;
  for (unsigned i=0; i<n_objects; ++i) {
    ASSERT_EQ(0, ioctx.write(stringify(i), bl, sizeof(buf), 0));
  }
  ObjectCursor begin = ioctx.object_list_begin();
  ObjectCursor end = ioctx.object_list_end();
  // Step through an odd number of shards
  unsigned m = 5;
  std::set<std::string> saw_obj;
  for (unsigned n = 0; n < m; ++n) {
    ObjectCursor shard_start;
    ObjectCursor shard_end;
    ioctx.object_list_slice(
      begin,
      end,
      n,
      m,
      &shard_start,
      &shard_end);
    ObjectCursor c(shard_start);
    while(c < shard_end)
    {
      std::vector<ObjectItem> result;
      // object_list advances c; loop ends when c reaches the shard end.
      int r = ioctx.object_list(c, shard_end, 12, {}, &result, &c);
      ASSERT_GE(r, 0);
      for (const auto & i : result) {
        const auto &oid = i.oid;
        if (saw_obj.count(oid)) {
          std::cerr << "duplicate obj " << oid << std::endl;
        }
        ASSERT_FALSE(saw_obj.count(oid));
        saw_obj.insert(oid);
      }
    }
  }
  for (unsigned i=0; i<n_objects; ++i) {
    if (!saw_obj.count(stringify(i))) {
      std::cerr << "missing object " << i << std::endl;
    }
    ASSERT_TRUE(saw_obj.count(stringify(i)));
  }
  ASSERT_EQ(n_objects, saw_obj.size());
}
// Enumeration with the "plain" xattr filter must return only the object
// whose "theattr" xattr matches, and return it exactly once.
TEST_F(LibRadosListPP, EnumerateObjectsFilterPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist obj_content;
  obj_content.append(buf, sizeof(buf));
  std::string target_str = "content";
  // Write xattr bare, no ::encod'ing
  bufferlist target_val;
  target_val.append(target_str);
  bufferlist nontarget_val;
  nontarget_val.append("rhubarb");
  ASSERT_EQ(0, ioctx.write("has_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.write("has_wrong_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.write("no_xattr", obj_content, obj_content.length(), 0));
  ASSERT_EQ(0, ioctx.setxattr("has_xattr", "theattr", target_val));
  ASSERT_EQ(0, ioctx.setxattr("has_wrong_xattr", "theattr", nontarget_val));
  // Filter payload: filter name, xattr key (with leading underscore as
  // stored), and the value to match.
  bufferlist filter_bl;
  std::string filter_name = "plain";
  encode(filter_name, filter_bl);
  encode("_theattr", filter_bl);
  encode(target_str, filter_bl);
  ObjectCursor c = ioctx.object_list_begin();
  ObjectCursor end = ioctx.object_list_end();
  bool foundit = false;
  while(!ioctx.object_list_is_end(c))
  {
    std::vector<ObjectItem> result;
    int r = ioctx.object_list(c, end, 12, filter_bl, &result, &c);
    ASSERT_GE(r, 0);
    ASSERT_EQ(r, (int)result.size());
    for (int i = 0; i < r; ++i) {
      auto oid = result[i].oid;
      // We should only see the object that matches the filter
      ASSERT_EQ(oid, "has_xattr");
      // We should only see it once
      ASSERT_FALSE(foundit);
      foundit = true;
    }
  }
  ASSERT_TRUE(foundit);
}
| 23,354 | 28.827586 | 101 |
cc
|
null |
ceph-main/src/test/librados/lock.cc
|
#include "include/rados/librados.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "cls/lock/cls_lock_client.h"
#include <algorithm>
#include <chrono>
#include <thread>
#include <errno.h>
#include "gtest/gtest.h"
#include <sys/time.h>
#include "crimson_utils.h"
using namespace std::chrono_literals;
// Test fixtures: replicated-pool and EC-pool variants.
typedef RadosTest LibRadosLock;
typedef RadosTestEC LibRadosLockEC;
// Taking an exclusive lock twice with the same cookie (no renew flag)
// must fail with -EEXIST.
TEST_F(LibRadosLock, LockExclusive) {
  ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLock1", "Cookie", "", NULL, 0));
  ASSERT_EQ(-EEXIST, rados_lock_exclusive(ioctx, "foo", "TestLock1", "Cookie", "", NULL, 0));
}
// Taking a shared lock twice with the same cookie and tag (no renew flag)
// must fail with -EEXIST.
TEST_F(LibRadosLock, LockShared) {
  ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLock2", "Cookie", "Tag", "", NULL, 0));
  ASSERT_EQ(-EEXIST, rados_lock_shared(ioctx, "foo", "TestLock2", "Cookie", "Tag", "", NULL, 0));
}
// An exclusive lock with a 1s duration must become acquirable again after
// it expires; wait_until polls (up to 1s, every 0.1s) for success.
TEST_F(LibRadosLock, LockExclusiveDur) {
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_exclusive = [this](timeval* tv) {
    return rados_lock_exclusive(ioctx, "foo", "TestLock3", "Cookie", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_exclusive(&tv));
  // Second acquisition uses no duration (nullptr) and succeeds only once
  // the first lock has expired.
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_exclusive, nullptr));
}
// A shared lock with a 1s duration must become acquirable again after it
// expires; wait_until polls (up to 1s, every 0.1s) for success.
TEST_F(LibRadosLock, LockSharedDur) {
  struct timeval tv;
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  auto lock_shared = [this](timeval* tv) {
    return rados_lock_shared(ioctx, "foo", "TestLock4", "Cookie", "Tag", "", tv, 0);
  };
  constexpr int expected = 0;
  ASSERT_EQ(expected, lock_shared(&tv));
  ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_shared, nullptr));
}
// Re-taking a held exclusive lock fails with -EEXIST unless
// LOCK_FLAG_MAY_RENEW is passed, in which case it succeeds.
TEST_F(LibRadosLock, LockMayRenew) {
  ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLock5", "Cookie", "", NULL, 0));
  ASSERT_EQ(-EEXIST, rados_lock_exclusive(ioctx, "foo", "TestLock5", "Cookie", "", NULL, 0));
  ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLock5", "Cookie", "", NULL, LOCK_FLAG_MAY_RENEW));
}
// Unlocking releases an exclusive lock so it can be taken again.
TEST_F(LibRadosLock, Unlock) {
  ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLock6", "Cookie", "", NULL, 0));
  ASSERT_EQ(0, rados_unlock(ioctx, "foo", "TestLock6", "Cookie"));
  ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLock6", "Cookie", "", NULL, 0));
}
// rados_list_lockers must report 0 lockers after unlock, fail with
// -ERANGE when the output buffers are too small, and then report the
// single shared locker (this client) with its tag and cookie.
TEST_F(LibRadosLock, ListLockers) {
  int exclusive;
  char tag[1024];
  char clients[1024];
  char cookies[1024];
  char addresses[1024];
  size_t tag_len = 1024;
  size_t clients_len = 1024;
  size_t cookies_len = 1024;
  size_t addresses_len = 1024;
  std::stringstream sstm;
  sstm << "client." << rados_get_instance_id(cluster);
  std::string me = sstm.str();
  ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLock7", "Cookie", "Tag", "", NULL, 0));
  ASSERT_EQ(0, rados_unlock(ioctx, "foo", "TestLock7", "Cookie"));
  // No lockers after the unlock.
  ASSERT_EQ(0, rados_list_lockers(ioctx, "foo", "TestLock7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLock7", "Cookie", "Tag", "", NULL, 0));
  // The previous call shrank the *_len values, so the buffers are now too
  // small and the call must fail with -ERANGE (was a magic -34).
  ASSERT_EQ(-ERANGE, rados_list_lockers(ioctx, "foo", "TestLock7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  tag_len = 1024;
  clients_len = 1024;
  cookies_len = 1024;
  addresses_len = 1024;
  ASSERT_EQ(1, rados_list_lockers(ioctx, "foo", "TestLock7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(0, exclusive);
  ASSERT_EQ(0, strcmp(tag, "Tag"));
  ASSERT_EQ(strlen("Tag") + 1, tag_len);
  ASSERT_EQ(0, strcmp(me.c_str(), clients));
  ASSERT_EQ(me.size() + 1, clients_len);
  ASSERT_EQ(0, strcmp(cookies, "Cookie"));
  ASSERT_EQ(strlen("Cookie") + 1, cookies_len);
}
// An exclusive lock must be reported by rados_list_lockers and must be
// removable by another party via rados_break_lock.
TEST_F(LibRadosLock, BreakLock) {
  int exclusive;
  char tag[1024];
  char clients[1024];
  char cookies[1024];
  char addresses[1024];
  size_t tag_len = 1024;
  size_t clients_len = 1024;
  size_t cookies_len = 1024;
  size_t addresses_len = 1024;
  std::stringstream sstm;
  sstm << "client." << rados_get_instance_id(cluster);
  std::string me = sstm.str();
  ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLock8", "Cookie", "", NULL, 0));
  ASSERT_EQ(1, rados_list_lockers(ioctx, "foo", "TestLock8", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
  ASSERT_EQ(1, exclusive);
  // Exclusive locks carry an empty tag (length 1 for the NUL).
  ASSERT_EQ(0, strcmp(tag, ""));
  ASSERT_EQ(1U, tag_len);
  ASSERT_EQ(0, strcmp(me.c_str(), clients));
  ASSERT_EQ(me.size() + 1, clients_len);
  ASSERT_EQ(0, strcmp(cookies, "Cookie"));
  ASSERT_EQ(strlen("Cookie") + 1, cookies_len);
  ASSERT_EQ(0, rados_break_lock(ioctx, "foo", "TestLock8", clients, "Cookie"));
}
// EC testing
// Taking the same exclusive lock twice must fail with -EEXIST (EC pool).
TEST_F(LibRadosLockEC, LockExclusive) {
SKIP_IF_CRIMSON();
ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLockEC1", "Cookie", "", NULL, 0));
ASSERT_EQ(-EEXIST, rados_lock_exclusive(ioctx, "foo", "TestLockEC1", "Cookie", "", NULL, 0));
}
// Re-taking a shared lock with the same cookie/tag must fail with -EEXIST
// (EC pool).
TEST_F(LibRadosLockEC, LockShared) {
SKIP_IF_CRIMSON();
ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLockEC2", "Cookie", "Tag", "", NULL, 0));
ASSERT_EQ(-EEXIST, rados_lock_shared(ioctx, "foo", "TestLockEC2", "Cookie", "Tag", "", NULL, 0));
}
// Take an exclusive lock with a 1-second expiry, then poll until the
// expired lock can be re-acquired without an explicit unlock (EC pool).
TEST_F(LibRadosLockEC, LockExclusiveDur) {
SKIP_IF_CRIMSON();
struct timeval duration{};
duration.tv_sec = 1;
auto take_lock = [this](timeval* t) {
return rados_lock_exclusive(ioctx, "foo", "TestLockEC3", "Cookie", "", t, 0);
};
constexpr int ok = 0;
ASSERT_EQ(ok, take_lock(&duration));
ASSERT_EQ(ok, wait_until(1.0s, 0.1s, ok, take_lock, nullptr));
}
// Take a shared lock with a 1-second expiry, then poll until the expired
// lock can be re-acquired without an explicit unlock (EC pool).
TEST_F(LibRadosLockEC, LockSharedDur) {
SKIP_IF_CRIMSON();
struct timeval duration{};
duration.tv_sec = 1;
auto take_lock = [this](timeval* t) {
return rados_lock_shared(ioctx, "foo", "TestLockEC4", "Cookie", "Tag", "", t, 0);
};
constexpr int ok = 0;
ASSERT_EQ(ok, take_lock(&duration));
ASSERT_EQ(ok, wait_until(1.0s, 0.1s, ok, take_lock, nullptr));
}
// LOCK_FLAG_MAY_RENEW lets the lock holder re-take its own exclusive lock;
// without the flag the second attempt fails with -EEXIST (EC pool).
TEST_F(LibRadosLockEC, LockMayRenew) {
SKIP_IF_CRIMSON();
ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLockEC5", "Cookie", "", NULL, 0));
ASSERT_EQ(-EEXIST, rados_lock_exclusive(ioctx, "foo", "TestLockEC5", "Cookie", "", NULL, 0));
ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLockEC5", "Cookie", "", NULL, LOCK_FLAG_MAY_RENEW));
}
// After rados_unlock() the same exclusive lock can be taken again (EC pool).
TEST_F(LibRadosLockEC, Unlock) {
SKIP_IF_CRIMSON();
ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLockEC6", "Cookie", "", NULL, 0));
ASSERT_EQ(0, rados_unlock(ioctx, "foo", "TestLockEC6", "Cookie"));
ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLockEC6", "Cookie", "", NULL, 0));
}
// Shared-lock enumeration on an EC pool: rados_list_lockers() returns 0
// lockers after unlock, -ERANGE when the caller's length fields are too
// small for the (re-taken) lock's data, and reports the single shared
// locker's tag, client id and cookie once the lengths are reset.
TEST_F(LibRadosLockEC, ListLockers) {
SKIP_IF_CRIMSON();
int exclusive;
char tag[1024];
char clients[1024];
char cookies[1024];
char addresses[1024];
size_t tag_len = 1024;
size_t clients_len = 1024;
size_t cookies_len = 1024;
size_t addresses_len = 1024;
std::stringstream sstm;
sstm << "client." << rados_get_instance_id(cluster);
std::string me = sstm.str();
ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLockEC7", "Cookie", "Tag", "", NULL, 0));
ASSERT_EQ(0, rados_unlock(ioctx, "foo", "TestLockEC7", "Cookie"));
// lock released above, so no lockers are reported
ASSERT_EQ(0, rados_list_lockers(ioctx, "foo", "TestLockEC7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
ASSERT_EQ(0, rados_lock_shared(ioctx, "foo", "TestLockEC7", "Cookie", "Tag", "", NULL, 0));
// the previous call presumably left the *_len fields too small for the
// now-present locker, so expect -ERANGE (was the magic number -34)
ASSERT_EQ(-ERANGE, rados_list_lockers(ioctx, "foo", "TestLockEC7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
tag_len = 1024;
clients_len = 1024;
cookies_len = 1024;
addresses_len = 1024;
ASSERT_EQ(1, rados_list_lockers(ioctx, "foo", "TestLockEC7", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
ASSERT_EQ(0, exclusive);
ASSERT_EQ(0, strcmp(tag, "Tag"));
ASSERT_EQ(strlen("Tag") + 1, tag_len);
ASSERT_EQ(0, strcmp(me.c_str(), clients));
ASSERT_EQ(me.size() + 1, clients_len);
ASSERT_EQ(0, strcmp(cookies, "Cookie"));
ASSERT_EQ(strlen("Cookie") + 1, cookies_len);
}
// Verifies rados_break_lock() on an EC pool: the exclusive lock taken by
// this client is visible via rados_list_lockers() and can be broken by
// naming that client/cookie.
TEST_F(LibRadosLockEC, BreakLock) {
SKIP_IF_CRIMSON();
int exclusive;
char tag[1024];
char clients[1024];
char cookies[1024];
char addresses[1024];
size_t tag_len = 1024;
size_t clients_len = 1024;
size_t cookies_len = 1024;
size_t addresses_len = 1024;
std::stringstream sstm;
sstm << "client." << rados_get_instance_id(cluster);
std::string me = sstm.str();
ASSERT_EQ(0, rados_lock_exclusive(ioctx, "foo", "TestLockEC8", "Cookie", "", NULL, 0));
// exactly one locker expected: this client
ASSERT_EQ(1, rados_list_lockers(ioctx, "foo", "TestLockEC8", &exclusive, tag, &tag_len, clients, &clients_len, cookies, &cookies_len, addresses, &addresses_len ));
ASSERT_EQ(1, exclusive);
ASSERT_EQ(0, strcmp(tag, ""));
ASSERT_EQ(1U, tag_len);
ASSERT_EQ(0, strcmp(me.c_str(), clients));
ASSERT_EQ(me.size() + 1, clients_len);
ASSERT_EQ(0, strcmp(cookies, "Cookie"));
ASSERT_EQ(strlen("Cookie") + 1, cookies_len);
ASSERT_EQ(0, rados_break_lock(ioctx, "foo", "TestLockEC8", clients, "Cookie"));
}
| 9,180 | 37.57563 | 167 |
cc
|
null |
ceph-main/src/test/librados/lock_cxx.cc
|
#include <algorithm>
#include <chrono>
#include <thread>
#include <errno.h>
#include <sys/time.h>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "cls/lock/cls_lock_client.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace std::chrono_literals;
using namespace librados;
typedef RadosTestPP LibRadosLockPP;
typedef RadosTestECPP LibRadosLockECPP;
// C++ API: taking the same exclusive lock twice must fail with -EEXIST.
TEST_F(LibRadosLockPP, LockExclusivePP) {
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockPP1", "Cookie", "", NULL, 0));
ASSERT_EQ(-EEXIST, ioctx.lock_exclusive("foo", "TestLockPP1", "Cookie", "", NULL, 0));
}
// C++ API: re-taking a shared lock with the same cookie/tag fails -EEXIST.
TEST_F(LibRadosLockPP, LockSharedPP) {
ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockPP2", "Cookie", "Tag", "", NULL, 0));
ASSERT_EQ(-EEXIST, ioctx.lock_shared("foo", "TestLockPP2", "Cookie", "Tag", "", NULL, 0));
}
// C++ API: take an exclusive lock with a 1-second expiry, then poll until
// the expired lock can be re-acquired without an explicit unlock.
TEST_F(LibRadosLockPP, LockExclusiveDurPP) {
struct timeval duration{};
duration.tv_sec = 1;
auto take_lock = [this](timeval* t) {
return ioctx.lock_exclusive("foo", "TestLockPP3", "Cookie", "", t, 0);
};
constexpr int ok = 0;
ASSERT_EQ(ok, take_lock(&duration));
ASSERT_EQ(ok, wait_until(1.0s, 0.1s, ok, take_lock, nullptr));
}
// C++ API: take a shared lock with a 1-second expiry, then poll until the
// expired lock can be re-acquired without an explicit unlock.
TEST_F(LibRadosLockPP, LockSharedDurPP) {
struct timeval duration{};
duration.tv_sec = 1;
auto take_lock = [this](timeval* t) {
return ioctx.lock_shared("foo", "TestLockPP4", "Cookie", "Tag", "", t, 0);
};
constexpr int ok = 0;
ASSERT_EQ(ok, take_lock(&duration));
ASSERT_EQ(ok, wait_until(1.0s, 0.1s, ok, take_lock, nullptr));
}
// C++ API: LOCK_FLAG_MAY_RENEW lets the holder re-take its own exclusive
// lock; without the flag the second attempt fails with -EEXIST.
TEST_F(LibRadosLockPP, LockMayRenewPP) {
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockPP5", "Cookie", "", NULL, 0));
ASSERT_EQ(-EEXIST, ioctx.lock_exclusive("foo", "TestLockPP5", "Cookie", "", NULL, 0));
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockPP5", "Cookie", "", NULL, LOCK_FLAG_MAY_RENEW));
}
// C++ API: after unlock() the same exclusive lock can be taken again.
TEST_F(LibRadosLockPP, UnlockPP) {
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockPP6", "Cookie", "", NULL, 0));
ASSERT_EQ(0, ioctx.unlock("foo", "TestLockPP6", "Cookie"));
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockPP6", "Cookie", "", NULL, 0));
}
// C++ API: list_lockers() reports no lockers after unlock, and exactly one
// locker (this client, cookie "Cookie") once the shared lock is re-taken.
TEST_F(LibRadosLockPP, ListLockersPP) {
std::stringstream sstm;
sstm << "client." << cluster.get_instance_id();
std::string me = sstm.str();
ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockPP7", "Cookie", "Tag", "", NULL, 0));
ASSERT_EQ(0, ioctx.unlock("foo", "TestLockPP7", "Cookie"));
{
int exclusive;
std::string tag;
std::list<librados::locker_t> lockers;
// lock released above: zero lockers expected
ASSERT_EQ(0, ioctx.list_lockers("foo", "TestLockPP7", &exclusive, &tag, &lockers));
}
ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockPP7", "Cookie", "Tag", "", NULL, 0));
{
int exclusive;
std::string tag;
std::list<librados::locker_t> lockers;
ASSERT_EQ(1, ioctx.list_lockers("foo", "TestLockPP7", &exclusive, &tag, &lockers));
std::list<librados::locker_t>::iterator it = lockers.begin();
ASSERT_FALSE(lockers.end() == it);
ASSERT_EQ(me, it->client);
ASSERT_EQ("Cookie", it->cookie);
}
}
// C++ API: the exclusive lock taken by this client is visible via
// list_lockers() and can be forcibly broken with break_lock().
TEST_F(LibRadosLockPP, BreakLockPP) {
int exclusive;
std::string tag;
std::list<librados::locker_t> lockers;
std::stringstream sstm;
sstm << "client." << cluster.get_instance_id();
std::string me = sstm.str();
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockPP8", "Cookie", "", NULL, 0));
ASSERT_EQ(1, ioctx.list_lockers("foo", "TestLockPP8", &exclusive, &tag, &lockers));
std::list<librados::locker_t>::iterator it = lockers.begin();
ASSERT_FALSE(lockers.end() == it);
ASSERT_EQ(me, it->client);
ASSERT_EQ("Cookie", it->cookie);
ASSERT_EQ(0, ioctx.break_lock("foo", "TestLockPP8", it->client, "Cookie"));
}
// EC testing
// C++ API on an EC pool: double exclusive lock must fail with -EEXIST.
TEST_F(LibRadosLockECPP, LockExclusivePP) {
SKIP_IF_CRIMSON();
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP1", "Cookie", "", NULL, 0));
ASSERT_EQ(-EEXIST, ioctx.lock_exclusive("foo", "TestLockECPP1", "Cookie", "", NULL, 0));
}
// C++ API on an EC pool: re-taking a shared lock fails with -EEXIST.
TEST_F(LibRadosLockECPP, LockSharedPP) {
SKIP_IF_CRIMSON();
ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockECPP2", "Cookie", "Tag", "", NULL, 0));
ASSERT_EQ(-EEXIST, ioctx.lock_shared("foo", "TestLockECPP2", "Cookie", "Tag", "", NULL, 0));
}
// C++ API on an EC pool: take an exclusive lock with a 1-second expiry,
// then poll until the expired lock can be re-acquired without an unlock.
TEST_F(LibRadosLockECPP, LockExclusiveDurPP) {
SKIP_IF_CRIMSON();
struct timeval duration{};
duration.tv_sec = 1;
auto take_lock = [this](timeval* t) {
return ioctx.lock_exclusive("foo", "TestLockECPP3", "Cookie", "", t, 0);
};
constexpr int ok = 0;
ASSERT_EQ(ok, take_lock(&duration));
ASSERT_EQ(ok, wait_until(1.0s, 0.1s, ok, take_lock, nullptr));
}
// C++ API on an EC pool: take a shared lock with a 1-second expiry, then
// poll until the expired lock can be re-acquired without an unlock.
TEST_F(LibRadosLockECPP, LockSharedDurPP) {
SKIP_IF_CRIMSON();
struct timeval tv;
tv.tv_sec = 1;
tv.tv_usec = 0;
auto lock_shared = [this](timeval* tv) {
return ioctx.lock_shared("foo", "TestLockECPP4", "Cookie", "Tag", "", tv, 0);
};
// constexpr for consistency with every sibling *Dur test in this file
constexpr int expected = 0;
ASSERT_EQ(expected, lock_shared(&tv));
ASSERT_EQ(expected, wait_until(1.0s, 0.1s, expected, lock_shared, nullptr));
}
// C++ API on an EC pool: LOCK_FLAG_MAY_RENEW lets the holder re-take its
// own exclusive lock; without the flag the retry fails with -EEXIST.
TEST_F(LibRadosLockECPP, LockMayRenewPP) {
SKIP_IF_CRIMSON();
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP5", "Cookie", "", NULL, 0));
ASSERT_EQ(-EEXIST, ioctx.lock_exclusive("foo", "TestLockECPP5", "Cookie", "", NULL, 0));
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP5", "Cookie", "", NULL, LOCK_FLAG_MAY_RENEW));
}
// C++ API on an EC pool: after unlock() the lock can be taken again.
TEST_F(LibRadosLockECPP, UnlockPP) {
SKIP_IF_CRIMSON();
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP6", "Cookie", "", NULL, 0));
ASSERT_EQ(0, ioctx.unlock("foo", "TestLockECPP6", "Cookie"));
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP6", "Cookie", "", NULL, 0));
}
// C++ API on an EC pool: list_lockers() reports no lockers after unlock,
// and exactly one locker (this client, cookie "Cookie") after re-locking.
TEST_F(LibRadosLockECPP, ListLockersPP) {
SKIP_IF_CRIMSON();
std::stringstream sstm;
sstm << "client." << cluster.get_instance_id();
std::string me = sstm.str();
ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockECPP7", "Cookie", "Tag", "", NULL, 0));
ASSERT_EQ(0, ioctx.unlock("foo", "TestLockECPP7", "Cookie"));
{
int exclusive;
std::string tag;
std::list<librados::locker_t> lockers;
// lock released above: zero lockers expected
ASSERT_EQ(0, ioctx.list_lockers("foo", "TestLockECPP7", &exclusive, &tag, &lockers));
}
ASSERT_EQ(0, ioctx.lock_shared("foo", "TestLockECPP7", "Cookie", "Tag", "", NULL, 0));
{
int exclusive;
std::string tag;
std::list<librados::locker_t> lockers;
ASSERT_EQ(1, ioctx.list_lockers("foo", "TestLockECPP7", &exclusive, &tag, &lockers));
std::list<librados::locker_t>::iterator it = lockers.begin();
ASSERT_FALSE(lockers.end() == it);
ASSERT_EQ(me, it->client);
ASSERT_EQ("Cookie", it->cookie);
}
}
// C++ API on an EC pool: the exclusive lock is visible via list_lockers()
// and can be forcibly broken with break_lock().
TEST_F(LibRadosLockECPP, BreakLockPP) {
SKIP_IF_CRIMSON();
int exclusive;
std::string tag;
std::list<librados::locker_t> lockers;
std::stringstream sstm;
sstm << "client." << cluster.get_instance_id();
std::string me = sstm.str();
ASSERT_EQ(0, ioctx.lock_exclusive("foo", "TestLockECPP8", "Cookie", "", NULL, 0));
ASSERT_EQ(1, ioctx.list_lockers("foo", "TestLockECPP8", &exclusive, &tag, &lockers));
std::list<librados::locker_t>::iterator it = lockers.begin();
ASSERT_FALSE(lockers.end() == it);
ASSERT_EQ(me, it->client);
ASSERT_EQ("Cookie", it->cookie);
ASSERT_EQ(0, ioctx.break_lock("foo", "TestLockECPP8", it->client, "Cookie"));
}
| 7,369 | 35.127451 | 102 |
cc
|
null |
ceph-main/src/test/librados/misc.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "gtest/gtest.h"
#include "mds/mdstypes.h"
#include "include/err.h"
#include "include/buffer.h"
#include "include/rbd_types.h"
#include "include/rados.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/scope_guard.h"
#include "include/stringify.h"
#include "common/Checksummer.h"
#include "global/global_context.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "gtest/gtest.h"
#include <sys/time.h>
#ifndef _WIN32
#include <sys/resource.h>
#endif
#include <errno.h>
#include <map>
#include <sstream>
#include <string>
#include <regex>
using namespace std;
using namespace librados;
typedef RadosTest LibRadosMisc;
// Smoke test: rados_version() fills its out-params without crashing.
TEST(LibRadosMiscVersion, Version) {
int major, minor, extra;
rados_version(&major, &minor, &extra);
}
// Monitor-log callback used by ConnectFailure below; only announces that
// it was invoked (arguments are intentionally unused).
static void test_rados_log_cb(void *arg,
const char *line,
const char *who,
uint64_t sec, uint64_t nsec,
uint64_t seq, const char *level,
const char *msg)
{
std::cerr << "monitor log callback invoked" << std::endl;
}
// With a 1s mount timeout, rados_connect() should eventually fail; also
// checks that rados_monitor_log() returns -ENOTCONN before connecting.
TEST(LibRadosMiscConnectFailure, ConnectFailure) {
rados_t cluster;
char *id = getenv("CEPH_CLIENT_ID");
if (id)
std::cerr << "Client id is: " << id << std::endl;
ASSERT_EQ(0, rados_create(&cluster, NULL));
ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
ASSERT_EQ(0, rados_conf_set(cluster, "client_mount_timeout", "1s"));
ASSERT_EQ(0, rados_conf_set(cluster, "debug_monc", "20"));
ASSERT_EQ(0, rados_conf_set(cluster, "debug_ms", "1"));
ASSERT_EQ(0, rados_conf_set(cluster, "log_to_stderr", "true"));
// registering a log watcher is not allowed before connect
ASSERT_EQ(-ENOTCONN, rados_monitor_log(cluster, "error",
test_rados_log_cb, NULL));
// try this a few times; sometimes we don't schedule fast enough for the
// cond to time out
int r;
for (unsigned i=0; i<16; ++i) {
cout << i << std::endl;
r = rados_connect(cluster);
if (r < 0)
break; // yay, we timed out
// try again
rados_shutdown(cluster);
ASSERT_EQ(0, rados_create(&cluster, NULL));
}
// at least one attempt must have failed to connect
ASSERT_NE(0, r);
rados_shutdown(cluster);
}
// Races in-flight AIO reads against pool creation on a second connection;
// cluster_a has objecter_debug_inject_relock_delay enabled to widen the
// race window. Completion of the first AIO ends the issuing loop.
TEST(LibRadosMiscPool, PoolCreationRace) {
rados_t cluster_a, cluster_b;
char *id = getenv("CEPH_CLIENT_ID");
if (id)
std::cerr << "Client id is: " << id << std::endl;
ASSERT_EQ(0, rados_create(&cluster_a, NULL));
ASSERT_EQ(0, rados_conf_read_file(cluster_a, NULL));
// kludge: i want to --log-file foo and only get cluster b
//ASSERT_EQ(0, rados_conf_parse_env(cluster_a, NULL));
ASSERT_EQ(0, rados_conf_set(cluster_a,
"objecter_debug_inject_relock_delay", "true"));
ASSERT_EQ(0, rados_connect(cluster_a));
ASSERT_EQ(0, rados_create(&cluster_b, NULL));
ASSERT_EQ(0, rados_conf_read_file(cluster_b, NULL));
ASSERT_EQ(0, rados_conf_parse_env(cluster_b, NULL));
ASSERT_EQ(0, rados_connect(cluster_b));
char poolname[80];
snprintf(poolname, sizeof(poolname), "poolrace.%d", rand());
rados_pool_create(cluster_a, poolname);
rados_ioctx_t a;
rados_ioctx_create(cluster_a, poolname, &a);
char pool2name[80];
snprintf(pool2name, sizeof(pool2name), "poolrace2.%d", rand());
rados_pool_create(cluster_b, pool2name);
list<rados_completion_t> cls;
// this should normally trigger pretty easily, but we need to bound
// the requests because if we get too many we'll get stuck by always
// sending enough messages that we hit the socket failure injection.
int max = 512;
while (max--) {
char buf[100];
rados_completion_t c;
rados_aio_create_completion2(nullptr, nullptr, &c);
cls.push_back(c);
rados_aio_read(a, "PoolCreationRaceObj", c, buf, 100, 0);
cout << "started " << (void*)c << std::endl;
if (rados_aio_is_complete(cls.front())) {
break;
}
}
while (!rados_aio_is_complete(cls.front())) {
cout << "waiting 1 sec" << std::endl;
sleep(1);
}
cout << " started " << cls.size() << " aios" << std::endl;
// drain and release every outstanding completion before teardown
for (auto c : cls) {
cout << "waiting " << (void*)c << std::endl;
rados_aio_wait_for_complete_and_cb(c);
rados_aio_release(c);
}
cout << "done." << std::endl;
rados_ioctx_destroy(a);
rados_pool_delete(cluster_a, poolname);
// NOTE(review): pool2name was created via cluster_b; deleting it through
// cluster_a presumably works because both handles target the same cluster
rados_pool_delete(cluster_a, pool2name);
rados_shutdown(cluster_b);
rados_shutdown(cluster_a);
}
// rados_cluster_fsid() returns -ERANGE for a too-small buffer and the
// fsid string length (36 chars) when the buffer fits.
TEST_F(LibRadosMisc, ClusterFSID) {
char fsid[37];
ASSERT_EQ(-ERANGE, rados_cluster_fsid(cluster, fsid, sizeof(fsid) - 1));
ASSERT_EQ(sizeof(fsid) - 1,
(size_t)rados_cluster_fsid(cluster, fsid, sizeof(fsid)));
}
// rados_exec() of the rbd class method get_all_features returns a
// decodable, non-zero feature bitmask.
TEST_F(LibRadosMisc, Exec) {
char buf[128];
memset(buf, 0xcc, sizeof(buf));
ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
char buf2[512];
int res = rados_exec(ioctx, "foo", "rbd", "get_all_features",
NULL, 0, buf2, sizeof(buf2));
ASSERT_GT(res, 0);
bufferlist bl;
bl.append(buf2, res);
auto iter = bl.cbegin();
uint64_t all_features;
decode(all_features, iter);
// make sure *some* features are specified; don't care which ones
ASSERT_NE(all_features, (unsigned)0);
}
// rados_writesame(): a buffer replicated across a larger extent reads back
// as repeats of that buffer; invalid length combinations return -EINVAL.
TEST_F(LibRadosMisc, WriteSame) {
char buf[128];
char full[128 * 4];
char *cmp;
/* zero the full range before using writesame */
memset(full, 0, sizeof(full));
ASSERT_EQ(0, rados_write(ioctx, "ws", full, sizeof(full), 0));
memset(buf, 0xcc, sizeof(buf));
/* write the same buf four times */
ASSERT_EQ(0, rados_writesame(ioctx, "ws", buf, sizeof(buf), sizeof(full), 0));
/* read back the full buffer and confirm that it matches */
ASSERT_EQ((int)sizeof(full), rados_read(ioctx, "ws", full, sizeof(full), 0));
for (cmp = full; cmp < full + sizeof(full); cmp += sizeof(buf)) {
ASSERT_EQ(0, memcmp(cmp, buf, sizeof(buf)));
}
/* write_len not a multiple of data_len should throw error */
ASSERT_EQ(-EINVAL, rados_writesame(ioctx, "ws", buf, sizeof(buf),
(sizeof(buf) * 4) - 1, 0));
ASSERT_EQ(-EINVAL,
rados_writesame(ioctx, "ws", buf, sizeof(buf), sizeof(buf) / 2, 0));
ASSERT_EQ(-EINVAL,
rados_writesame(ioctx, "ws", buf, 0, sizeof(buf), 0));
/* write_len = data_len, i.e. same as rados_write() */
ASSERT_EQ(0, rados_writesame(ioctx, "ws", buf, sizeof(buf), sizeof(buf), 0));
}
// rados_cmpext(): compare-extent succeeds when the on-disk bytes match;
// on mismatch it returns -MAX_ERRNO minus the offset of the first
// differing byte (offset 5 here, where '6' != '7').
TEST_F(LibRadosMisc, CmpExt) {
// NOTE: the unused bufferlists (cmp_bl, bad_cmp_bl, write_bl) copied from
// the C++ variant of this test were removed; the C API works on raw bytes.
char stored_str[] = "1234567891";
char mismatch_str[] = "1234577777";
ASSERT_EQ(0,
rados_write(ioctx, "cmpextpp", stored_str, sizeof(stored_str), 0));
ASSERT_EQ(0,
rados_cmpext(ioctx, "cmpextpp", stored_str, sizeof(stored_str), 0));
ASSERT_EQ(-MAX_ERRNO - 5,
rados_cmpext(ioctx, "cmpextpp", mismatch_str, sizeof(mismatch_str), 0));
}
// Exercises the pool-application C API: listing enabled applications
// (null-separated, sorted), enabling new ones, and per-application
// metadata set/list/remove with -ERANGE signalling short buffers.
TEST_F(LibRadosMisc, Applications) {
const char *cmd[] = {"{\"prefix\":\"osd dump\"}", nullptr};
char *buf, *st;
size_t buflen, stlen;
ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, &buf,
&buflen, &st, &stlen));
ASSERT_LT(0u, buflen);
string result(buf);
rados_buffer_free(buf);
rados_buffer_free(st);
// application tracking needs luminous or later OSDs; skip otherwise
if (!std::regex_search(result, std::regex("require_osd_release [l-z]"))) {
std::cout << "SKIPPING";
return;
}
char apps[128];
size_t app_len;
app_len = sizeof(apps);
ASSERT_EQ(0, rados_application_list(ioctx, apps, &app_len));
ASSERT_EQ(6U, app_len);
ASSERT_EQ(0, memcmp("rados\0", apps, app_len));
ASSERT_EQ(0, rados_application_enable(ioctx, "app1", 1));
// force=0 refuses to enable a second application on the pool
ASSERT_EQ(-EPERM, rados_application_enable(ioctx, "app2", 0));
ASSERT_EQ(0, rados_application_enable(ioctx, "app2", 1));
// app_len still 6 from the last call: too small now, expect -ERANGE
ASSERT_EQ(-ERANGE, rados_application_list(ioctx, apps, &app_len));
ASSERT_EQ(16U, app_len);
ASSERT_EQ(0, rados_application_list(ioctx, apps, &app_len));
ASSERT_EQ(16U, app_len);
ASSERT_EQ(0, memcmp("app1\0app2\0rados\0", apps, app_len));
char keys[128];
char vals[128];
size_t key_len;
size_t val_len;
key_len = sizeof(keys);
val_len = sizeof(vals);
ASSERT_EQ(-ENOENT, rados_application_metadata_list(ioctx, "dne", keys,
&key_len, vals, &val_len));
ASSERT_EQ(0, rados_application_metadata_list(ioctx, "app1", keys, &key_len,
vals, &val_len));
ASSERT_EQ(0U, key_len);
ASSERT_EQ(0U, val_len);
ASSERT_EQ(-ENOENT, rados_application_metadata_set(ioctx, "dne", "key",
"value"));
ASSERT_EQ(0, rados_application_metadata_set(ioctx, "app1", "key1", "value1"));
ASSERT_EQ(0, rados_application_metadata_set(ioctx, "app1", "key2", "value2"));
// lengths are 0 from the empty listing above: expect -ERANGE first
ASSERT_EQ(-ERANGE, rados_application_metadata_list(ioctx, "app1", keys,
&key_len, vals, &val_len));
ASSERT_EQ(10U, key_len);
ASSERT_EQ(14U, val_len);
ASSERT_EQ(0, rados_application_metadata_list(ioctx, "app1", keys, &key_len,
vals, &val_len));
ASSERT_EQ(10U, key_len);
ASSERT_EQ(14U, val_len);
ASSERT_EQ(0, memcmp("key1\0key2\0", keys, key_len));
ASSERT_EQ(0, memcmp("value1\0value2\0", vals, val_len));
ASSERT_EQ(0, rados_application_metadata_remove(ioctx, "app1", "key1"));
ASSERT_EQ(0, rados_application_metadata_list(ioctx, "app1", keys, &key_len,
vals, &val_len));
ASSERT_EQ(5U, key_len);
ASSERT_EQ(7U, val_len);
ASSERT_EQ(0, memcmp("key2\0", keys, key_len));
ASSERT_EQ(0, memcmp("value2\0", vals, val_len));
}
// rados_get_min_compatible_osd() yields a release id in [-1, CEPH_RELEASE_MAX).
TEST_F(LibRadosMisc, MinCompatOSD) {
int8_t require_osd_release;
ASSERT_EQ(0, rados_get_min_compatible_osd(cluster, &require_osd_release));
ASSERT_LE(-1, require_osd_release);
ASSERT_GT(CEPH_RELEASE_MAX, require_osd_release);
}
// rados_get_min_compatible_client() yields two release ids, each in
// [-1, CEPH_RELEASE_MAX).
TEST_F(LibRadosMisc, MinCompatClient) {
int8_t min_compat_client;
int8_t require_min_compat_client;
ASSERT_EQ(0, rados_get_min_compatible_client(cluster,
&min_compat_client,
&require_min_compat_client));
ASSERT_LE(-1, min_compat_client);
ASSERT_GT(CEPH_RELEASE_MAX, min_compat_client);
ASSERT_LE(-1, require_min_compat_client);
ASSERT_GT(CEPH_RELEASE_MAX, require_min_compat_client);
}
// Worker for ShutdownRace: repeatedly connect/shutdown a cluster handle.
// With ALLOW_TIMEOUTS set, a -110 (ETIMEDOUT) connect failure is tolerated.
static void shutdown_racer_func()
{
const int niter = 32;
rados_t rad;
int i;
for (i = 0; i < niter; ++i) {
auto r = connect_cluster(&rad);
if (getenv("ALLOW_TIMEOUTS")) {
ASSERT_TRUE(r == "" || r == "rados_connect failed with error -110");
} else {
ASSERT_EQ("", r);
}
rados_shutdown(rad);
}
}
#ifndef _WIN32
// See trackers #20988 and #42026
// Stress-test concurrent connect/shutdown from 128 threads (trackers
// #20988/#42026); temporarily raises RLIMIT_NOFILE to its hard maximum
// since every connection consumes several file descriptors.
TEST_F(LibRadosMisc, ShutdownRace)
{
const int nthreads = 128;
std::thread threads[nthreads];
// Need a bunch of fd's for this test
struct rlimit rold, rnew;
ASSERT_EQ(getrlimit(RLIMIT_NOFILE, &rold), 0);
rnew = rold;
rnew.rlim_cur = rnew.rlim_max;
ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rnew), 0);
for (int i = 0; i < nthreads; ++i)
threads[i] = std::thread(shutdown_racer_func);
for (int i = 0; i < nthreads; ++i)
threads[i].join();
// restore the original fd limit
ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rold), 0);
}
#endif /* _WIN32 */
| 11,343 | 31.135977 | 80 |
cc
|
null |
ceph-main/src/test/librados/misc_cxx.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <map>
#include <sstream>
#include <string>
#include <regex>
#include "gtest/gtest.h"
#include "include/err.h"
#include "include/buffer.h"
#include "include/rbd_types.h"
#include "include/rados.h"
#include "include/rados/librados.hpp"
#include "include/scope_guard.h"
#include "include/stringify.h"
#include "common/Checksummer.h"
#include "mds/mdstypes.h"
#include "global/global_context.h"
#include "test/librados/testcase_cxx.h"
#include "test/librados/test_cxx.h"
#include "crimson_utils.h"
using namespace std;
using namespace librados;
typedef RadosTestPP LibRadosMiscPP;
typedef RadosTestECPP LibRadosMiscECPP;
// Smoke test: Rados::version() fills its out-params without crashing.
TEST(LibRadosMiscVersion, VersionPP) {
int major, minor, extra;
Rados::version(&major, &minor, &extra);
}
// wait_for_latest_osdmap() completes successfully on a healthy cluster.
TEST_F(LibRadosMiscPP, WaitOSDMapPP) {
ASSERT_EQ(0, cluster.wait_for_latest_osdmap());
}
// Object names up to osd_max_object_name_len are accepted; anything
// longer fails with -ENAMETOOLONG.
TEST_F(LibRadosMiscPP, LongNamePP) {
bufferlist bl;
bl.append("content");
int maxlen = g_conf()->osd_max_object_name_len;
ASSERT_EQ(0, ioctx.write(string(maxlen/2, 'a').c_str(), bl, bl.length(), 0));
ASSERT_EQ(0, ioctx.write(string(maxlen-1, 'a').c_str(), bl, bl.length(), 0));
ASSERT_EQ(0, ioctx.write(string(maxlen, 'a').c_str(), bl, bl.length(), 0));
ASSERT_EQ(-ENAMETOOLONG, ioctx.write(string(maxlen+1, 'a').c_str(), bl, bl.length(), 0));
ASSERT_EQ(-ENAMETOOLONG, ioctx.write(string(maxlen*2, 'a').c_str(), bl, bl.length(), 0));
}
// Locator keys up to osd_max_object_name_len are accepted; anything
// longer makes the write fail with -ENAMETOOLONG.
TEST_F(LibRadosMiscPP, LongLocatorPP) {
bufferlist bl;
bl.append("content");
int maxlen = g_conf()->osd_max_object_name_len;
ioctx.locator_set_key(
string((maxlen/2), 'a'));
ASSERT_EQ(
0,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
ioctx.locator_set_key(
string(maxlen - 1, 'a'));
ASSERT_EQ(
0,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
ioctx.locator_set_key(
string(maxlen, 'a'));
ASSERT_EQ(
0,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
// one past the limit: rejected
ioctx.locator_set_key(
string(maxlen+1, 'a'));
ASSERT_EQ(
-ENAMETOOLONG,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
ioctx.locator_set_key(
string((maxlen*2), 'a'));
ASSERT_EQ(
-ENAMETOOLONG,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
}
// Namespaces up to osd_max_object_namespace_len are accepted; anything
// longer makes the write fail with -ENAMETOOLONG.
TEST_F(LibRadosMiscPP, LongNSpacePP) {
bufferlist bl;
bl.append("content");
int maxlen = g_conf()->osd_max_object_namespace_len;
ioctx.set_namespace(
string((maxlen/2), 'a'));
ASSERT_EQ(
0,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
ioctx.set_namespace(
string(maxlen - 1, 'a'));
ASSERT_EQ(
0,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
ioctx.set_namespace(
string(maxlen, 'a'));
ASSERT_EQ(
0,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
// one past the limit: rejected
ioctx.set_namespace(
string(maxlen+1, 'a'));
ASSERT_EQ(
-ENAMETOOLONG,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
ioctx.set_namespace(
string((maxlen*2), 'a'));
ASSERT_EQ(
-ENAMETOOLONG,
ioctx.write(
string("a").c_str(),
bl, bl.length(), 0));
}
// Xattr names up to osd_max_attr_name_len are accepted; anything longer
// fails with -ENAMETOOLONG.
TEST_F(LibRadosMiscPP, LongAttrNamePP) {
bufferlist bl;
bl.append("content");
int maxlen = g_conf()->osd_max_attr_name_len;
ASSERT_EQ(0, ioctx.setxattr("bigattrobj", string(maxlen/2, 'a').c_str(), bl));
ASSERT_EQ(0, ioctx.setxattr("bigattrobj", string(maxlen-1, 'a').c_str(), bl));
ASSERT_EQ(0, ioctx.setxattr("bigattrobj", string(maxlen, 'a').c_str(), bl));
ASSERT_EQ(-ENAMETOOLONG, ioctx.setxattr("bigattrobj", string(maxlen+1, 'a').c_str(), bl));
ASSERT_EQ(-ENAMETOOLONG, ioctx.setxattr("bigattrobj", string(maxlen*2, 'a').c_str(), bl));
}
// IoCtx::exec() of the rbd class method get_all_features returns a
// decodable, non-zero feature bitmask.
TEST_F(LibRadosMiscPP, ExecPP) {
bufferlist bl;
ASSERT_EQ(0, ioctx.write("foo", bl, 0, 0));
bufferlist bl2, out;
int r = ioctx.exec("foo", "rbd", "get_all_features", bl2, out);
ASSERT_EQ(0, r);
auto iter = out.cbegin();
uint64_t all_features;
decode(all_features, iter);
// make sure *some* features are specified; don't care which ones
ASSERT_NE(all_features, (unsigned)0);
}
// AIO completion callback: flips the caller-provided bool so the test can
// observe that the callback ran.
void set_completion_complete(rados_completion_t cb, void *arg)
{
auto* aio_complete_flag = static_cast<bool*>(arg);
*aio_complete_flag = true;
}
// Passing an operation flag that is invalid for remove()
// (CEPH_OSD_FLAG_PARALLELEXEC) must be rejected with -EINVAL.
TEST_F(LibRadosMiscPP, BadFlagsPP) {
unsigned badflags = CEPH_OSD_FLAG_PARALLELEXEC;
{
bufferlist bl;
bl.append("data");
ASSERT_EQ(0, ioctx.write("badfoo", bl, bl.length(), 0));
}
{
ASSERT_EQ(-EINVAL, ioctx.remove("badfoo", badflags));
}
}
// Compound ObjectWriteOperation: a write+setxattr op applies atomically
// (omap_clear leaves xattrs alone), and a failing cmpxattr guard makes
// the whole operation fail with -ECANCELED.
TEST_F(LibRadosMiscPP, Operate1PP) {
ObjectWriteOperation o;
{
bufferlist bl;
o.write(0, bl);
}
std::string val1("val1");
{
bufferlist bl;
bl.append(val1.c_str(), val1.size() + 1);
o.setxattr("key1", bl);
o.omap_clear(); // shouldn't affect attrs!
}
ASSERT_EQ(0, ioctx.operate("foo", &o));
// an empty operation is a valid no-op
ObjectWriteOperation empty;
ASSERT_EQ(0, ioctx.operate("foo", &empty));
{
bufferlist bl;
ASSERT_GT(ioctx.getxattr("foo", "key1", bl), 0);
ASSERT_EQ(0, strcmp(bl.c_str(), val1.c_str()));
}
// cmpxattr compares against val1 WITHOUT the trailing NUL stored above,
// so the guard fails and rmxattr is never applied
ObjectWriteOperation o2;
{
bufferlist bl;
bl.append(val1);
o2.cmpxattr("key1", CEPH_OSD_CMPXATTR_OP_EQ, bl);
o2.rmxattr("key1");
}
ASSERT_EQ(-ECANCELED, ioctx.operate("foo", &o2));
ObjectWriteOperation o3;
{
bufferlist bl;
bl.append(val1);
o3.cmpxattr("key1", CEPH_OSD_CMPXATTR_OP_EQ, bl);
}
ASSERT_EQ(-ECANCELED, ioctx.operate("foo", &o3));
}
// A truncate(0) later in the same compound op wins over the earlier
// write: the object ends up empty.
TEST_F(LibRadosMiscPP, Operate2PP) {
ObjectWriteOperation o;
{
bufferlist bl;
bl.append("abcdefg");
o.write(0, bl);
}
std::string val1("val1");
{
bufferlist bl;
bl.append(val1.c_str(), val1.size() + 1);
o.setxattr("key1", bl);
o.truncate(0);
}
ASSERT_EQ(0, ioctx.operate("foo", &o));
uint64_t size;
time_t mtime;
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(0U, size);
}
// Operations that would grow an object past the OSD's maximum size
// (truncate/zero/write far beyond the limit) fail with -EFBIG.
TEST_F(LibRadosMiscPP, BigObjectPP) {
bufferlist bl;
bl.append("abcdefg");
ASSERT_EQ(0, ioctx.write("foo", bl, bl.length(), 0));
{
ObjectWriteOperation o;
o.truncate(500000000000ull);
ASSERT_EQ(-EFBIG, ioctx.operate("foo", &o));
}
{
ObjectWriteOperation o;
o.zero(500000000000ull, 1);
ASSERT_EQ(-EFBIG, ioctx.operate("foo", &o));
}
{
ObjectWriteOperation o;
o.zero(1, 500000000000ull);
ASSERT_EQ(-EFBIG, ioctx.operate("foo", &o));
}
{
ObjectWriteOperation o;
o.zero(500000000000ull, 500000000000ull);
ASSERT_EQ(-EFBIG, ioctx.operate("foo", &o));
}
#ifdef __LP64__
// this test only works on 64-bit platforms
ASSERT_EQ(-EFBIG, ioctx.write("foo", bl, bl.length(), 500000000000ull));
#endif
}
// aio_operate() with a compound write/setxattr/append op: the completion
// callback fires (set_completion_complete flips the flag) and the object
// ends up sized by the 1024-byte append.
TEST_F(LibRadosMiscPP, AioOperatePP) {
bool my_aio_complete = false;
AioCompletion *my_completion = cluster.aio_create_completion(
(void*)&my_aio_complete, set_completion_complete);
AioCompletion *my_completion_null = NULL;
ASSERT_NE(my_completion, my_completion_null);
ObjectWriteOperation o;
{
bufferlist bl;
o.write(0, bl);
}
std::string val1("val1");
{
bufferlist bl;
bl.append(val1.c_str(), val1.size() + 1);
o.setxattr("key1", bl);
bufferlist bl2;
char buf2[1024];
memset(buf2, 0xdd, sizeof(buf2));
bl2.append(buf2, sizeof(buf2));
o.append(bl2);
}
ASSERT_EQ(0, ioctx.aio_operate("foo", my_completion, &o));
ASSERT_EQ(0, my_completion->wait_for_complete_and_cb());
ASSERT_EQ(my_aio_complete, true);
my_completion->release();
uint64_t size;
time_t mtime;
ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
ASSERT_EQ(1024U, size);
}
// assert_exists() guard: the op fails -ENOENT before the object exists,
// succeeds after create(); exclusive create of an existing object -EEXIST.
TEST_F(LibRadosMiscPP, AssertExistsPP) {
char buf[64];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ObjectWriteOperation op;
op.assert_exists();
op.write(0, bl);
ASSERT_EQ(-ENOENT, ioctx.operate("asdffoo", &op));
ASSERT_EQ(0, ioctx.create("asdffoo", true));
ASSERT_EQ(0, ioctx.operate("asdffoo", &op));
ASSERT_EQ(-EEXIST, ioctx.create("asdffoo", true));
}
// assert_version() guard: asserting a version greater than the object's
// fails -EOVERFLOW, less fails -ERANGE, and the exact version succeeds.
TEST_F(LibRadosMiscPP, AssertVersionPP) {
char buf[64];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
// Create test object...
ASSERT_EQ(0, ioctx.create("asdfbar", true));
// ...then write it again to guarantee that the
// (unsigned) version must be at least 1 (not 0)
// since we want to decrement it by 1 later.
ASSERT_EQ(0, ioctx.write_full("asdfbar", bl));
uint64_t v = ioctx.get_last_version();
ObjectWriteOperation op1;
op1.assert_version(v+1);
op1.write(0, bl);
ASSERT_EQ(-EOVERFLOW, ioctx.operate("asdfbar", &op1));
ObjectWriteOperation op2;
op2.assert_version(v-1);
op2.write(0, bl);
ASSERT_EQ(-ERANGE, ioctx.operate("asdfbar", &op2));
ObjectWriteOperation op3;
op3.assert_version(v);
op3.write(0, bl);
ASSERT_EQ(0, ioctx.operate("asdfbar", &op3));
}
// Xattr values: a value of exactly osd_max_attr_size round-trips, one byte
// more fails -EFBIG; then 1000 attrs of up to 1 KiB each round-trip.
TEST_F(LibRadosMiscPP, BigAttrPP) {
char buf[64];
memset(buf, 0xcc, sizeof(buf));
bufferlist bl;
bl.append(buf, sizeof(buf));
ASSERT_EQ(0, ioctx.create("foo", true));
bufferlist got;
cout << "osd_max_attr_size = " << g_conf()->osd_max_attr_size << std::endl;
if (g_conf()->osd_max_attr_size) {
bl.clear();
got.clear();
bl.append(buffer::create(g_conf()->osd_max_attr_size));
ASSERT_EQ(0, ioctx.setxattr("foo", "one", bl));
ASSERT_EQ((int)bl.length(), ioctx.getxattr("foo", "one", got));
ASSERT_TRUE(bl.contents_equal(got));
bl.clear();
bl.append(buffer::create(g_conf()->osd_max_attr_size+1));
ASSERT_EQ(-EFBIG, ioctx.setxattr("foo", "one", bl));
} else {
cout << "osd_max_attr_size == 0; skipping test" << std::endl;
}
for (int i=0; i<1000; i++) {
bl.clear();
got.clear();
bl.append(buffer::create(std::min<uint64_t>(g_conf()->osd_max_attr_size,
1024)));
char n[10];
snprintf(n, sizeof(n), "a%d", i);
ASSERT_EQ(0, ioctx.setxattr("foo", n, bl));
ASSERT_EQ((int)bl.length(), ioctx.getxattr("foo", n, got));
ASSERT_TRUE(bl.contents_equal(got));
}
}
// copy_from(): version guards (-EOVERFLOW for a future version, -ERANGE
// for a stale one), then successful copies of a small and a multi-chunk
// object, each verified for both data and the "myattr" xattr.
TEST_F(LibRadosMiscPP, CopyPP) {
SKIP_IF_CRIMSON();
bufferlist bl, x;
bl.append("hi there");
x.append("bar");
// small object
bufferlist blc = bl;
bufferlist xc = x;
ASSERT_EQ(0, ioctx.write_full("foo", blc));
ASSERT_EQ(0, ioctx.setxattr("foo", "myattr", xc));
version_t uv = ioctx.get_last_version();
{
// pass future version
ObjectWriteOperation op;
op.copy_from("foo", ioctx, uv + 1, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ASSERT_EQ(-EOVERFLOW, ioctx.operate("foo.copy", &op));
}
{
// pass old version
ObjectWriteOperation op;
op.copy_from("foo", ioctx, uv - 1, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ASSERT_EQ(-ERANGE, ioctx.operate("foo.copy", &op));
}
{
ObjectWriteOperation op;
op.copy_from("foo", ioctx, uv, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ASSERT_EQ(0, ioctx.operate("foo.copy", &op));
bufferlist bl2, x2;
ASSERT_EQ((int)bl.length(), ioctx.read("foo.copy", bl2, 10000, 0));
ASSERT_TRUE(bl.contents_equal(bl2));
ASSERT_EQ((int)x.length(), ioctx.getxattr("foo.copy", "myattr", x2));
ASSERT_TRUE(x.contents_equal(x2));
}
// small object without a version
{
ObjectWriteOperation op;
op.copy_from("foo", ioctx, 0, LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ASSERT_EQ(0, ioctx.operate("foo.copy2", &op));
bufferlist bl2, x2;
ASSERT_EQ((int)bl.length(), ioctx.read("foo.copy2", bl2, 10000, 0));
ASSERT_TRUE(bl.contents_equal(bl2));
ASSERT_EQ((int)x.length(), ioctx.getxattr("foo.copy2", "myattr", x2));
ASSERT_TRUE(x.contents_equal(x2));
}
// do a big object
bl.append(buffer::create(g_conf()->osd_copyfrom_max_chunk * 3));
bl.zero();
bl.append("tail");
blc = bl;
xc = x;
ASSERT_EQ(0, ioctx.write_full("big", blc));
ASSERT_EQ(0, ioctx.setxattr("big", "myattr", xc));
{
ObjectWriteOperation op;
op.copy_from("big", ioctx, ioctx.get_last_version(), LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
ASSERT_EQ(0, ioctx.operate("big.copy", &op));
bufferlist bl2, x2;
ASSERT_EQ((int)bl.length(), ioctx.read("big.copy", bl2, bl.length(), 0));
ASSERT_TRUE(bl.contents_equal(bl2));
// fixed: verify the xattr of the big copy itself (was "foo.copy",
// which made this check vacuous)
ASSERT_EQ((int)x.length(), ioctx.getxattr("big.copy", "myattr", x2));
ASSERT_TRUE(x.contents_equal(x2));
}
{
ObjectWriteOperation op;
op.copy_from("big", ioctx, 0, LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL);
ASSERT_EQ(0, ioctx.operate("big.copy2", &op));
bufferlist bl2, x2;
ASSERT_EQ((int)bl.length(), ioctx.read("big.copy2", bl2, bl.length(), 0));
ASSERT_TRUE(bl.contents_equal(bl2));
// fixed: was "foo.copy2"
ASSERT_EQ((int)x.length(), ioctx.getxattr("big.copy2", "myattr", x2));
ASSERT_TRUE(x.contents_equal(x2));
}
}
// Fixture providing two pools in one cluster: the shared EC pool from
// RadosTestECPP plus an additional replicated "source" pool, for tests that
// copy objects across pool types.
class LibRadosTwoPoolsECPP : public RadosTestECPP
{
public:
  LibRadosTwoPoolsECPP() {};
  ~LibRadosTwoPoolsECPP() override {};
protected:
  // Create both pools once for the whole test case and tag each with the
  // "rados" application so I/O is permitted.
  static void SetUpTestCase() {
    SKIP_IF_CRIMSON();
    pool_name = get_temp_pool_name();
    ASSERT_EQ("", create_one_ec_pool_pp(pool_name, s_cluster));
    src_pool_name = get_temp_pool_name();
    ASSERT_EQ(0, s_cluster.pool_create(src_pool_name.c_str()));
    librados::IoCtx ioctx;
    ASSERT_EQ(0, s_cluster.ioctx_create(pool_name.c_str(), ioctx));
    ioctx.application_enable("rados", true);
    librados::IoCtx src_ioctx;
    ASSERT_EQ(0, s_cluster.ioctx_create(src_pool_name.c_str(), src_ioctx));
    src_ioctx.application_enable("rados", true);
  }
  static void TearDownTestCase() {
    SKIP_IF_CRIMSON();
    ASSERT_EQ(0, s_cluster.pool_delete(src_pool_name.c_str()));
    ASSERT_EQ(0, destroy_one_ec_pool_pp(pool_name, s_cluster));
  }
  static std::string src_pool_name;
  // Per test: open an ioctx on the source pool in this test's namespace.
  void SetUp() override {
    SKIP_IF_CRIMSON();
    RadosTestECPP::SetUp();
    ASSERT_EQ(0, cluster.ioctx_create(src_pool_name.c_str(), src_ioctx));
    src_ioctx.set_namespace(nspace);
  }
  // Per test: settle maps, run the base teardown, then scrub both the
  // default and the per-test namespace of the source pool before closing.
  void TearDown() override {
    SKIP_IF_CRIMSON();
    // wait for maps to settle before next test
    cluster.wait_for_latest_osdmap();
    RadosTestECPP::TearDown();
    cleanup_default_namespace(src_ioctx);
    cleanup_namespace(src_ioctx, nspace);
    src_ioctx.close();
  }
  librados::IoCtx src_ioctx;
};
// out-of-class definition for the fixture's static source-pool name
std::string LibRadosTwoPoolsECPP::src_pool_name;
// copy_from between an EC pool and a non-EC (replicated) pool.
// copy_from into an EC pool must be rejected with -EOPNOTSUPP when the
// source object carries an omap header, for both large and small sources.
TEST_F(LibRadosTwoPoolsECPP, CopyFrom) {
  SKIP_IF_CRIMSON();
  bufferlist zeroes;
  zeroes.append_zero(4194304*2);
  bufferlist omap_hdr;
  omap_hdr.append("copyfrom");
  // big source object with an omap header
  {
    ASSERT_EQ(0, src_ioctx.write_full("foo", zeroes));
    ASSERT_EQ(0, src_ioctx.omap_set_header("foo", omap_hdr));
    version_t src_version = src_ioctx.get_last_version();
    ObjectWriteOperation op;
    op.copy_from("foo", src_ioctx, src_version, 0);
    ASSERT_EQ(-EOPNOTSUPP, ioctx.operate("foo.copy", &op));
  }
  // same with small object
  {
    ASSERT_EQ(0, src_ioctx.omap_set_header("bar", omap_hdr));
    version_t src_version = src_ioctx.get_last_version();
    ObjectWriteOperation op;
    op.copy_from("bar", src_ioctx, src_version, 0);
    ASSERT_EQ(-EOPNOTSUPP, ioctx.operate("bar.copy", &op));
  }
}
// Create objects of assorted shapes (small, big, big+omap, big+omap header),
// deep-scrub so digests are recorded, copy each object, then deep-scrub
// again so the copies' digests get verified.
TEST_F(LibRadosMiscPP, CopyScrubPP) {
  SKIP_IF_CRIMSON();
  bufferlist inbl, bl, x;
  for (int i=0; i<100; ++i)
    x.append("barrrrrrrrrrrrrrrrrrrrrrrrrr");
  bl.append(buffer::create(g_conf()->osd_copyfrom_max_chunk * 3));
  bl.zero();
  bl.append("tail");
  bufferlist cbl;
  map<string, bufferlist> to_set;
  for (int i=0; i<1000; ++i)
    to_set[string("foo") + stringify(i)] = x;
  // Ask every PG of the pool to deep-scrub and give the scrubs time to run.
  // The sleep is sloppy but is usually enough time.  (Extracted to avoid
  // duplicating this block for the before/after scrub passes.)
  auto deep_scrub_all = [&](const char *phase) {
    for (int i=0; i<10; ++i) {
      ostringstream ss;
      ss << "{\"prefix\": \"pg deep-scrub\", \"pgid\": \""
	 << ioctx.get_id() << "." << i
	 << "\"}";
      cluster.mon_command(ss.str(), inbl, NULL, NULL);
    }
    cout << "waiting for " << phase << " deep scrubs..." << std::endl;
    sleep(30);
  };
  // small
  cbl = x;
  ASSERT_EQ(0, ioctx.write_full("small", cbl));
  ASSERT_EQ(0, ioctx.setxattr("small", "myattr", x));
  // big
  cbl = bl;
  ASSERT_EQ(0, ioctx.write_full("big", cbl));
  // without header
  cbl = bl;
  ASSERT_EQ(0, ioctx.write_full("big2", cbl));
  ASSERT_EQ(0, ioctx.setxattr("big2", "myattr", x));
  ASSERT_EQ(0, ioctx.setxattr("big2", "myattr2", x));
  ASSERT_EQ(0, ioctx.omap_set("big2", to_set));
  // with header
  cbl = bl;
  ASSERT_EQ(0, ioctx.write_full("big3", cbl));
  ASSERT_EQ(0, ioctx.omap_set_header("big3", x));
  ASSERT_EQ(0, ioctx.omap_set("big3", to_set));
  // deep scrub to ensure digests are in place
  deep_scrub_all("initial");
  cout << "done waiting, doing copies" << std::endl;
  {
    ObjectWriteOperation op;
    op.copy_from("small", ioctx, 0, 0);
    ASSERT_EQ(0, ioctx.operate("small.copy", &op));
  }
  {
    ObjectWriteOperation op;
    op.copy_from("big", ioctx, 0, 0);
    ASSERT_EQ(0, ioctx.operate("big.copy", &op));
  }
  {
    ObjectWriteOperation op;
    op.copy_from("big2", ioctx, 0, 0);
    ASSERT_EQ(0, ioctx.operate("big2.copy", &op));
  }
  {
    ObjectWriteOperation op;
    op.copy_from("big3", ioctx, 0, 0);
    ASSERT_EQ(0, ioctx.operate("big3.copy", &op));
  }
  // deep scrub to ensure digests are correct
  deep_scrub_all("final");
  cout << "done waiting" << std::endl;
}
// writesame: replicate one data buffer across a larger extent, and reject
// invalid (write_len, data_len) combinations.
TEST_F(LibRadosMiscPP, WriteSamePP) {
  bufferlist pattern_bl;
  char pattern[128];
  bufferlist full_bl;
  char full[128 * 4];
  /* zero the full range before using writesame */
  memset(full, 0, sizeof(full));
  full_bl.append(full, sizeof(full));
  ASSERT_EQ(0, ioctx.write("ws", full_bl, full_bl.length(), 0));
  memset(pattern, 0xcc, sizeof(pattern));
  pattern_bl.clear();
  pattern_bl.append(pattern, sizeof(pattern));
  /* write the same buf four times */
  ASSERT_EQ(0, ioctx.writesame("ws", pattern_bl, sizeof(full), 0));
  /* read back the full buffer and confirm that it matches */
  full_bl.clear();
  full_bl.append(full, sizeof(full));
  ASSERT_EQ((int)full_bl.length(), ioctx.read("ws", full_bl, full_bl.length(), 0));
  for (char *cursor = full_bl.c_str();
       cursor < full_bl.c_str() + full_bl.length();
       cursor += sizeof(pattern)) {
    ASSERT_EQ(0, memcmp(cursor, pattern, sizeof(pattern)));
  }
  /* write_len not a multiple of data_len should throw error */
  pattern_bl.clear();
  pattern_bl.append(pattern, sizeof(pattern));
  ASSERT_EQ(-EINVAL, ioctx.writesame("ws", pattern_bl, (sizeof(pattern) * 4) - 1, 0));
  ASSERT_EQ(-EINVAL,
	    ioctx.writesame("ws", pattern_bl, pattern_bl.length() / 2, 0));
  /* write_len = data_len, i.e. same as write() */
  ASSERT_EQ(0, ioctx.writesame("ws", pattern_bl, sizeof(pattern), 0));
  /* an empty data buffer is also invalid */
  pattern_bl.clear();
  ASSERT_EQ(-EINVAL,
	    ioctx.writesame("ws", pattern_bl, sizeof(pattern), 0));
}
// Typed fixture for checksum tests: T bundles the rados checksum enum with
// the matching Checksummer algorithm and on-wire value type.
template <typename T>
class LibRadosChecksum : public LibRadosMiscPP {
public:
  typedef typename T::alg_t alg_t;
  typedef typename T::value_t value_t;
  typedef typename alg_t::init_value_t init_value_t;
  static const rados_checksum_type_t type = T::type;
  // payload written to "foo" in SetUp; checksums are computed against it
  bufferlist content_bl;
  using LibRadosMiscPP::SetUpTestCase;
  using LibRadosMiscPP::TearDownTestCase;
  // Write 4 KiB of pseudo-random printable bytes to object "foo".
  void SetUp() override {
    LibRadosMiscPP::SetUp();
    std::string content(4096, '\0');
    for (size_t i = 0; i < content.length(); ++i) {
      // printable ASCII in [33, 126)
      content[i] = static_cast<char>(rand() % (126 - 33) + 33);
    }
    content_bl.append(content);
    ASSERT_EQ(0, ioctx.write("foo", content_bl, content_bl.length(), 0));
  }
};
// Compile-time parameter pack binding a rados checksum enum value to its
// Checksummer algorithm and checksum result type.
template <rados_checksum_type_t _type, typename AlgT, typename ValueT>
class LibRadosChecksumParams {
public:
  typedef AlgT alg_t;
  typedef ValueT value_t;
  static const rados_checksum_type_t type = _type;
};
// Checksum algorithms exercised by the typed tests below.
typedef ::testing::Types<
    LibRadosChecksumParams<LIBRADOS_CHECKSUM_TYPE_XXHASH32,
			   Checksummer::xxhash32, ceph_le32>,
    LibRadosChecksumParams<LIBRADOS_CHECKSUM_TYPE_XXHASH64,
			   Checksummer::xxhash64, ceph_le64>,
    LibRadosChecksumParams<LIBRADOS_CHECKSUM_TYPE_CRC32C,
			   Checksummer::crc32c, ceph_le32>
  > LibRadosChecksumTypes;
TYPED_TEST_SUITE(LibRadosChecksum, LibRadosChecksumTypes);
// Issue one checksum op per 1 KiB chunk of the object and verify each
// returned value against a locally computed checksum of the same bytes.
TYPED_TEST(LibRadosChecksum, Subset) {
  uint32_t chunk_size = 1024;
  uint32_t csum_count = this->content_bl.length() / chunk_size;
  // initial checksum seed of -1 (all ones), encoded for the on-wire op
  typename TestFixture::init_value_t init_value = -1;
  bufferlist init_value_bl;
  encode(init_value, init_value_bl);
  std::vector<bufferlist> checksum_bls(csum_count);
  std::vector<int> checksum_rvals(csum_count);
  // individual checksum ops for each chunk
  ObjectReadOperation op;
  for (uint32_t i = 0; i < csum_count; ++i) {
    op.checksum(TestFixture::type, init_value_bl, i * chunk_size, chunk_size,
                0, &checksum_bls[i], &checksum_rvals[i]);
  }
  ASSERT_EQ(0, this->ioctx.operate("foo", &op, NULL));
  for (uint32_t i = 0; i < csum_count; ++i) {
    ASSERT_EQ(0, checksum_rvals[i]);
    // each reply encodes a count followed by that many checksum values
    auto bl_it = checksum_bls[i].cbegin();
    uint32_t count;
    decode(count, bl_it);
    ASSERT_EQ(1U, count);
    typename TestFixture::value_t value;
    decode(value, bl_it);
    // recompute the expected checksum locally over the same chunk
    bufferlist content_sub_bl;
    content_sub_bl.substr_of(this->content_bl, i * chunk_size, chunk_size);
    typename TestFixture::value_t expected_value;
    bufferptr expected_value_bp = buffer::create_static(
      sizeof(expected_value), reinterpret_cast<char*>(&expected_value));
    Checksummer::template calculate<typename TestFixture::alg_t>(
      init_value, chunk_size, 0, chunk_size, content_sub_bl,
      &expected_value_bp);
    ASSERT_EQ(expected_value, value);
  }
}
// Issue one checksum op over the whole object with a chunk size, so a single
// reply carries one checksum per chunk; verify all against local values.
TYPED_TEST(LibRadosChecksum, Chunked) {
  uint32_t chunk_size = 1024;
  uint32_t csum_count = this->content_bl.length() / chunk_size;
  typename TestFixture::init_value_t init_value = -1;
  bufferlist init_value_bl;
  encode(init_value, init_value_bl);
  bufferlist checksum_bl;
  int checksum_rval;
  // single op with chunked checksum results
  ObjectReadOperation op;
  op.checksum(TestFixture::type, init_value_bl, 0, this->content_bl.length(),
              chunk_size, &checksum_bl, &checksum_rval);
  ASSERT_EQ(0, this->ioctx.operate("foo", &op, NULL));
  ASSERT_EQ(0, checksum_rval);
  // reply layout: count, then `count` checksum values
  auto bl_it = checksum_bl.cbegin();
  uint32_t count;
  decode(count, bl_it);
  ASSERT_EQ(csum_count, count);
  // compute all expected per-chunk checksums locally in one call
  std::vector<typename TestFixture::value_t> expected_values(csum_count);
  bufferptr expected_values_bp = buffer::create_static(
    csum_count * sizeof(typename TestFixture::value_t),
    reinterpret_cast<char*>(&expected_values[0]));
  Checksummer::template calculate<typename TestFixture::alg_t>(
    init_value, chunk_size, 0, this->content_bl.length(), this->content_bl,
    &expected_values_bp);
  for (uint32_t i = 0; i < csum_count; ++i) {
    typename TestFixture::value_t value;
    decode(value, bl_it);
    ASSERT_EQ(expected_values[i], value);
  }
}
// cmpext compares on-disk object data against a supplied buffer: a match
// returns 0, a mismatch returns -MAX_ERRNO minus the offset of the first
// differing byte.
TEST_F(LibRadosMiscPP, CmpExtPP) {
  bufferlist cmp_bl, bad_cmp_bl, write_bl;
  char stored_str[] = "1234567891";
  char mismatch_str[] = "1234577777";
  write_bl.append(stored_str);
  // bugfix: the write's return value was ignored; a failed write would make
  // the comparisons below meaningless.
  ASSERT_EQ(0, ioctx.write("cmpextpp", write_bl, write_bl.length(), 0));
  cmp_bl.append(stored_str);
  ASSERT_EQ(0, ioctx.cmpext("cmpextpp", 0, cmp_bl));
  bad_cmp_bl.append(mismatch_str);
  // the strings first differ at offset 5 ('6' vs '7')
  ASSERT_EQ(-MAX_ERRNO - 5, ioctx.cmpext("cmpextpp", 0, bad_cmp_bl));
}
// Exercise pool application tags: listing, enabling (force vs. non-force),
// and per-application metadata CRUD.
TEST_F(LibRadosMiscPP, Applications) {
  bufferlist inbl, outbl;
  string outs;
  ASSERT_EQ(0, cluster.mon_command("{\"prefix\": \"osd dump\"}",
                                   inbl, &outbl, &outs));
  ASSERT_LT(0u, outbl.length());
  ASSERT_LE(0u, outs.length());
  // skip unless require_osd_release is a release letter in [l-z]
  // (i.e. luminous or later, where application tags exist)
  if (!std::regex_search(outbl.to_str(),
                         std::regex("require_osd_release [l-z]"))) {
    std::cout << "SKIPPING";
    return;
  }
  std::set<std::string> expected_apps = {"rados"};
  std::set<std::string> apps;
  ASSERT_EQ(0, ioctx.application_list(&apps));
  ASSERT_EQ(expected_apps, apps);
  // a second application needs force=true once one is already enabled
  ASSERT_EQ(0, ioctx.application_enable("app1", true));
  ASSERT_EQ(-EPERM, ioctx.application_enable("app2", false));
  ASSERT_EQ(0, ioctx.application_enable("app2", true));
  expected_apps = {"app1", "app2", "rados"};
  ASSERT_EQ(0, ioctx.application_list(&apps));
  ASSERT_EQ(expected_apps, apps);
  std::map<std::string, std::string> expected_meta;
  std::map<std::string, std::string> meta;
  // metadata ops against an unknown application fail with -ENOENT
  ASSERT_EQ(-ENOENT, ioctx.application_metadata_list("dne", &meta));
  ASSERT_EQ(0, ioctx.application_metadata_list("app1", &meta));
  ASSERT_EQ(expected_meta, meta);
  ASSERT_EQ(-ENOENT, ioctx.application_metadata_set("dne", "key1", "value1"));
  ASSERT_EQ(0, ioctx.application_metadata_set("app1", "key1", "value1"));
  ASSERT_EQ(0, ioctx.application_metadata_set("app1", "key2", "value2"));
  expected_meta = {{"key1", "value1"}, {"key2", "value2"}};
  ASSERT_EQ(0, ioctx.application_metadata_list("app1", &meta));
  ASSERT_EQ(expected_meta, meta);
  ASSERT_EQ(0, ioctx.application_metadata_remove("app1", "key1"));
  expected_meta = {{"key2", "value2"}};
  ASSERT_EQ(0, ioctx.application_metadata_list("app1", &meta));
  ASSERT_EQ(expected_meta, meta);
}
// cmpext on an EC pool: compare against the written tail of an object and
// against a zero buffer far beyond the object's end (both must match).
TEST_F(LibRadosMiscECPP, CompareExtentRange) {
  SKIP_IF_CRIMSON();
  bufferlist payload;
  payload.append("ceph");
  ObjectWriteOperation writer;
  writer.write(0, payload);
  ASSERT_EQ(0, ioctx.operate("foo", &writer));
  // "ph" plus two NUL bytes matches offset 2 of "ceph"
  bufferlist tail_cmp;
  tail_cmp.append("ph");
  tail_cmp.append(std::string(2, '\0'));
  ObjectReadOperation cmp_tail;
  cmp_tail.cmpext(2, tail_cmp, nullptr);
  ASSERT_EQ(0, ioctx.operate("foo", &cmp_tail, nullptr));
  // four NUL bytes compared at a far offset must also succeed
  bufferlist hole_cmp;
  hole_cmp.append(std::string(4, '\0'));
  ObjectReadOperation cmp_hole;
  cmp_hole.cmpext(2097152, hole_cmp, nullptr);
  ASSERT_EQ(0, ioctx.operate("foo", &cmp_hole, nullptr));
}
// get_min_compatible_osd reports a release ordinal in the valid range.
TEST_F(LibRadosMiscPP, MinCompatOSD) {
  int8_t release;
  ASSERT_EQ(0, cluster.get_min_compatible_osd(&release));
  ASSERT_LE(-1, release);
  ASSERT_GT(CEPH_RELEASE_MAX, release);
}
// get_min_compatible_client reports both the minimum and the required
// minimum client release, each within the valid range.
TEST_F(LibRadosMiscPP, MinCompatClient) {
  int8_t min_client;
  int8_t required_min_client;
  ASSERT_EQ(0, cluster.get_min_compatible_client(&min_client,
                                                 &required_min_client));
  ASSERT_LE(-1, min_client);
  ASSERT_GT(CEPH_RELEASE_MAX, min_client);
  ASSERT_LE(-1, required_min_client);
  ASSERT_GT(CEPH_RELEASE_MAX, required_min_client);
}
// conf_set/conf_get round-trip an option value, restoring the original
// setting via a scope guard.
TEST_F(LibRadosMiscPP, Conf) {
  const char* const option = "bluestore_throttle_bytes";
  size_t new_size = 1 << 20;
  std::string saved;
  ASSERT_EQ(0, cluster.conf_get(option, saved));
  auto restore_setting = make_scope_guard([&] {
      cluster.conf_set(option, saved.c_str());
    });
  const std::string wanted = std::to_string(new_size);
  ASSERT_EQ(0, cluster.conf_set(option, wanted.c_str()));
  std::string observed;
  ASSERT_EQ(0, cluster.conf_get(option, observed));
  ASSERT_EQ(wanted, observed);
}
| 26,649 | 27.841991 | 92 |
cc
|
null |
ceph-main/src/test/librados/op_speed.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include <cstdint>
#include "include/rados/librados.hpp"
// number of ObjectReadOperations to construct and tear down
constexpr int to_create = 10'000'000;
// Repeatedly build an ObjectReadOperation with a handful of queued read ops;
// nothing is submitted to a cluster.  Presumably a micro-benchmark of op
// setup/teardown cost to be run under a profiler — TODO confirm.
int main() {
  for (int i = 0; i < to_create; ++i) {
    librados::ObjectReadOperation op;
    bufferlist bl;
    std::uint64_t sz;
    struct timespec tm;
    std::map<std::string, ceph::buffer::list> xattrs;
    std::map<std::string, ceph::buffer::list> omap;
    bool more;
    op.read(0, 0, &bl, nullptr);
    op.stat2(&sz, &tm, nullptr);
    op.getxattrs(&xattrs, nullptr);
    op.omap_get_vals2({}, 1000, &omap, &more, nullptr);
  }
}
| 637 | 24.52 | 69 |
cc
|
null |
ceph-main/src/test/librados/pool.cc
|
#include <errno.h>
#include <vector>
#include "crimson_utils.h"
#include "gtest/gtest.h"
#include "include/rados/librados.h"
#include "test/librados/test.h"
#define POOL_LIST_BUF_SZ 32768
// rados_pool_list fills a buffer with NUL-separated pool names (terminated
// by an empty string); verify our pool is listed and that a short buffer is
// honored without overrun.
TEST(LibRadosPools, PoolList) {
  char pool_list_buf[POOL_LIST_BUF_SZ];
  char *buf = pool_list_buf;
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  ASSERT_LT(rados_pool_list(cluster, buf, POOL_LIST_BUF_SZ), POOL_LIST_BUF_SZ);
  // we can pass a null buffer too.
  ASSERT_LT(rados_pool_list(cluster, NULL, POOL_LIST_BUF_SZ), POOL_LIST_BUF_SZ);
  // walk the NUL-separated list looking for our pool; remember the length
  // (incl. terminator) of the first entry for the short-buffer check below
  bool found_pool = false;
  int firstlen = 0;
  while (buf[0] != '\0') {
    if ((found_pool == false) && (strcmp(buf, pool_name.c_str()) == 0)) {
      found_pool = true;
    }
    if (!firstlen)
      firstlen = strlen(buf) + 1;
    buf += strlen(buf) + 1;
  }
  ASSERT_EQ(found_pool, true);
  // make sure we honor the buffer size limit
  buf = pool_list_buf;
  memset(buf, 0, POOL_LIST_BUF_SZ);
  ASSERT_LT(rados_pool_list(cluster, buf, firstlen), POOL_LIST_BUF_SZ);
  ASSERT_NE(0, buf[0]); // include at least one pool name
  ASSERT_EQ(0, buf[firstlen]); // but don't touch the stopping point
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// NOTE(review): local re-declaration of the librados C API function —
// presumably redundant with the included librados.h header; confirm.
int64_t rados_pool_lookup(rados_t cluster, const char *pool_name);
// A freshly created pool can be looked up by name (positive pool id).
TEST(LibRadosPools, PoolLookup) {
  rados_t cluster;
  const std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  ASSERT_LT(0, rados_pool_lookup(cluster, pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// The pool id reported by an ioctx matches the id from a name lookup.
TEST(LibRadosPools, PoolLookup2) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  const int64_t id_by_name = rados_pool_lookup(cluster, pool_name.c_str());
  ASSERT_GT(id_by_name, 0);
  rados_ioctx_t ioctx;
  ASSERT_EQ(0, rados_ioctx_create(cluster, pool_name.c_str(), &ioctx));
  const int64_t id_by_ioctx = rados_ioctx_get_id(ioctx);
  ASSERT_EQ(id_by_name, id_by_ioctx);
  rados_ioctx_destroy(ioctx);
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// A pool created through one cluster handle resolves to the same id from a
// second, independent handle.
TEST(LibRadosPools, PoolLookupOtherInstance) {
  rados_t observer;
  ASSERT_EQ("", connect_cluster(&observer));
  rados_t creator;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &creator));
  const int64_t pool_id = rados_pool_lookup(creator, pool_name.c_str());
  ASSERT_GT(pool_id, 0);
  ASSERT_EQ(pool_id, rados_pool_lookup(observer, pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &creator));
  rados_shutdown(observer);
}
// Reverse lookup (id -> name) works from a second, independent handle.
TEST(LibRadosPools, PoolReverseLookupOtherInstance) {
  rados_t observer;
  ASSERT_EQ("", connect_cluster(&observer));
  rados_t creator;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &creator));
  const int64_t pool_id = rados_pool_lookup(creator, pool_name.c_str());
  ASSERT_GT(pool_id, 0);
  char name_buf[100];
  ASSERT_LT(0, rados_pool_reverse_lookup(observer, pool_id, name_buf, 100));
  ASSERT_EQ(0, strcmp(name_buf, pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &creator));
  rados_shutdown(observer);
}
// Deleting a pool makes name lookups fail; the name can then be reused.
TEST(LibRadosPools, PoolDelete) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  ASSERT_EQ(0, rados_pool_delete(cluster, pool_name.c_str()));
  ASSERT_GT(0, rados_pool_lookup(cluster, pool_name.c_str()));
  // recreate so destroy_one_pool has something to tear down
  ASSERT_EQ(0, rados_pool_create(cluster, pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// Create/delete error paths: duplicate create -> -EEXIST, double delete ->
// -ENOENT.
TEST(LibRadosPools, PoolCreateDelete) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  const std::string second_pool = pool_name + "abc123";
  ASSERT_EQ(0, rados_pool_create(cluster, second_pool.c_str()));
  ASSERT_EQ(-EEXIST, rados_pool_create(cluster, second_pool.c_str()));
  ASSERT_EQ(0, rados_pool_delete(cluster, second_pool.c_str()));
  ASSERT_EQ(-ENOENT, rados_pool_delete(cluster, second_pool.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// A pool can be created with an explicit crush rule (rule 0 here).
TEST(LibRadosPools, PoolCreateWithCrushRule) {
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  const std::string ruled_pool = get_temp_pool_name();
  ASSERT_EQ(0, rados_pool_create_with_crush_rule(cluster,
	    ruled_pool.c_str(), 0));
  ASSERT_EQ(0, rados_pool_delete(cluster, ruled_pool.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
// get_base_tier resolves a cache-tier pool to its base pool; a base pool
// maps to itself, and an unknown pool id returns -ENOENT.
TEST(LibRadosPools, PoolGetBaseTier) {
  SKIP_IF_CRIMSON();
  rados_t cluster;
  std::string pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool(pool_name, &cluster));
  std::string tier_pool_name = pool_name + "-cache";
  ASSERT_EQ(0, rados_pool_create(cluster, tier_pool_name.c_str()));
  int64_t pool_id = rados_pool_lookup(cluster, pool_name.c_str());
  ASSERT_GE(pool_id, 0);
  int64_t tier_pool_id = rados_pool_lookup(cluster, tier_pool_name.c_str());
  ASSERT_GE(tier_pool_id, 0);
  // before any tiering is configured, a pool is its own base
  int64_t base_tier = 0;
  EXPECT_EQ(0, rados_pool_get_base_tier(cluster, pool_id, &base_tier));
  EXPECT_EQ(pool_id, base_tier);
  // attach the cache pool as a read-only tier via mon commands
  std::string cmdstr = "{\"prefix\": \"osd tier add\", \"pool\": \"" +
     pool_name + "\", \"tierpool\":\"" + tier_pool_name + "\", \"force_nonempty\":\"\"}";
  char *cmd[1];
  cmd[0] = (char *)cmdstr.c_str();
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
  cmdstr = "{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" +
     tier_pool_name + "\", \"mode\":\"readonly\"," +
     " \"yes_i_really_mean_it\": true}";
  cmd[0] = (char *)cmdstr.c_str();
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
  EXPECT_EQ(0, rados_wait_for_latest_osdmap(cluster));
  // both the base pool and the tier pool now report the base pool's id
  EXPECT_EQ(0, rados_pool_get_base_tier(cluster, pool_id, &base_tier));
  EXPECT_EQ(pool_id, base_tier);
  EXPECT_EQ(0, rados_pool_get_base_tier(cluster, tier_pool_id, &base_tier));
  EXPECT_EQ(pool_id, base_tier);
  int64_t nonexistent_pool_id = (int64_t)((-1ULL) >> 1);
  EXPECT_EQ(-ENOENT, rados_pool_get_base_tier(cluster, nonexistent_pool_id, &base_tier));
  // detach the tier so both pools can be deleted
  cmdstr = "{\"prefix\": \"osd tier remove\", \"pool\": \"" +
     pool_name + "\", \"tierpool\":\"" + tier_pool_name + "\"}";
  cmd[0] = (char *)cmdstr.c_str();
  ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
  ASSERT_EQ(0, rados_pool_delete(cluster, tier_pool_name.c_str()));
  ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster));
}
| 6,540 | 33.97861 | 91 |
cc
|
null |
ceph-main/src/test/librados/service.cc
|
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/config_proxy.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#ifndef _WIN32
#include <sys/resource.h>
#endif
#include <mutex>
#include <condition_variable>
#include <algorithm>
#include <thread>
#include <errno.h>
#include "gtest/gtest.h"
#include "test/unit.cc"
using namespace std;
using namespace librados;
// Register a service daemon before connecting: registration must succeed
// pre-connect, and a duplicate registration returns -EEXIST.  The metadata
// argument is embedded key\0value\0 pairs.
TEST(LibRadosService, RegisterEarly) {
  rados_t cluster;
  ASSERT_EQ(0, rados_create(&cluster, "admin"));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, rados_service_register(cluster, "laundry", name.c_str(),
                                      "foo\0bar\0this\0that\0"));
  ASSERT_EQ(-EEXIST, rados_service_register(cluster, "laundry", name.c_str(),
                                            "foo\0bar\0this\0that\0"));
  ASSERT_EQ(0, rados_connect(cluster));
  sleep(5); // presumably to let the registration propagate — TODO confirm
  rados_shutdown(cluster);
}
// Same registration checks, but connecting first: register succeeds and a
// duplicate registration returns -EEXIST.
TEST(LibRadosService, RegisterLate) {
  rados_t cluster;
  ASSERT_EQ(0, rados_create(&cluster, "admin"));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
  ASSERT_EQ(0, rados_connect(cluster));
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, rados_service_register(cluster, "laundry", name.c_str(),
                                      "foo\0bar\0this\0that\0"));
  ASSERT_EQ(-EEXIST, rados_service_register(cluster, "laundry", name.c_str(),
                                            "foo\0bar\0this\0that\0"));
  rados_shutdown(cluster);
}
// Worker for the StatusFormat test: connect a fresh client, register a "foo"
// service daemon whose metadata depends on the thread index i (metadata is
// NUL-separated key/value pairs, built via sprintf's %c with '\0'), then
// park until the main thread sets `stopped`.
static void status_format_func(const int i, std::mutex &lock,
                               std::condition_variable &cond,
                               int &threads_started, bool &stopped)
{
  rados_t cluster;
  char metadata_buf[4096];
  ASSERT_EQ(0, rados_create(&cluster, "admin"));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
  ASSERT_EQ(0, rados_connect(cluster));
  if (i == 0) {
    ASSERT_LT(0, sprintf(metadata_buf, "%s%c%s%c",
                         "foo", '\0', "bar", '\0'));
  } else if (i == 1) {
    ASSERT_LT(0, sprintf(metadata_buf, "%s%c%s%c",
                         "daemon_type", '\0', "portal", '\0'));
  } else if (i == 2) {
    ASSERT_LT(0, sprintf(metadata_buf, "%s%c%s%c",
                         "daemon_prefix", '\0', "gateway", '\0'));
  } else {
    // remaining threads spread across 4 prefixes and 3 zones
    string prefix = string("gw") + stringify(i % 4);
    string zone = string("z") + stringify(i % 3);
    ASSERT_LT(0, sprintf(metadata_buf, "%s%c%s%c%s%c%s%c%s%c%s%c%s%c%s%c",
                         "daemon_type", '\0', "portal", '\0',
                         "daemon_prefix", '\0', prefix.c_str(), '\0',
                         "hostname", '\0', prefix.c_str(), '\0',
                         "zone_id", '\0', zone.c_str(), '\0'));
  }
  string name = string("rbd/image") + stringify(i);
  ASSERT_EQ(0, rados_service_register(cluster, "foo", name.c_str(),
                                      metadata_buf));
  // signal the main thread that we are registered, then wait for release
  std::unique_lock<std::mutex> l(lock);
  threads_started++;
  cond.notify_all();
  cond.wait(l, [&stopped] {
      return stopped;
    });
  rados_shutdown(cluster);
}
// Spin up 16 service-daemon clients with structured metadata, then poll
// `ceph status` until the service map summarizes them as
// "16 portals active (1 hosts, 3 zones)".
TEST(LibRadosService, StatusFormat) {
  const int nthreads = 16;
  std::thread threads[nthreads];
  std::mutex lock;
  std::condition_variable cond;
  bool stopped = false;
  int threads_started = 0;
  // no rlimits on Windows
#ifndef _WIN32
  // Need a bunch of fd's for this test
  struct rlimit rold, rnew;
  ASSERT_EQ(getrlimit(RLIMIT_NOFILE, &rold), 0);
  rnew = rold;
  rnew.rlim_cur = rnew.rlim_max;
  ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rnew), 0);
#endif
  for (int i = 0; i < nthreads; ++i)
    threads[i] = std::thread(status_format_func, i, std::ref(lock),
                             std::ref(cond), std::ref(threads_started),
                             std::ref(stopped));
  // wait until every worker has registered its daemon
  {
    std::unique_lock<std::mutex> l(lock);
    cond.wait(l, [&threads_started] {
        return nthreads == threads_started;
      });
  }
  int retry = 60; // mon thrashing may make this take a long time
  while (retry) {
    rados_t cluster;
    ASSERT_EQ(0, rados_create(&cluster, "admin"));
    ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
    ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
    ASSERT_EQ(0, rados_connect(cluster));
    // build and issue a {"prefix": "status"} mon command
    JSONFormatter cmd_f;
    cmd_f.open_object_section("command");
    cmd_f.dump_string("prefix", "status");
    cmd_f.close_section();
    std::ostringstream cmd_stream;
    cmd_f.flush(cmd_stream);
    const std::string serialized_cmd = cmd_stream.str();
    const char *cmd[2];
    cmd[1] = NULL;
    cmd[0] = serialized_cmd.c_str();
    char *outbuf = NULL;
    size_t outlen = 0;
    ASSERT_EQ(0, rados_mon_command(cluster, (const char **)cmd, 1, "", 0,
                                   &outbuf, &outlen, NULL, NULL));
    std::string out(outbuf, outlen);
    cout << out << std::endl;
    bool success = false;
    auto r1 = out.find("16 portals active (1 hosts, 3 zones)");
    if (std::string::npos != r1) {
      success = true;
    }
    rados_buffer_free(outbuf);
    rados_shutdown(cluster);
    if (success || !retry) {
      break;
    }
    // wait for 2 seconds to make sure all the
    // services have been successfully updated
    // to ceph mon, then retry it.
    sleep(2);
    retry--;
  }
  // release the workers and join them before asserting success
  {
    std::scoped_lock<std::mutex> l(lock);
    stopped = true;
    cond.notify_all();
  }
  for (int i = 0; i < nthreads; ++i)
    threads[i].join();
  ASSERT_NE(0, retry);
#ifndef _WIN32
  ASSERT_EQ(setrlimit(RLIMIT_NOFILE, &rold), 0);
#endif
}
// Status updates before connect fail with -ENOTCONN; after registering, a
// series of key\0value\0 status updates is accepted.
TEST(LibRadosService, Status) {
  rados_t cluster;
  ASSERT_EQ(0, rados_create(&cluster, "admin"));
  ASSERT_EQ(0, rados_conf_read_file(cluster, NULL));
  ASSERT_EQ(0, rados_conf_parse_env(cluster, NULL));
  ASSERT_EQ(-ENOTCONN, rados_service_update_status(cluster,
                                                   "testing\0testing\0"));
  ASSERT_EQ(0, rados_connect(cluster));
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, rados_service_register(cluster, "laundry", name.c_str(),
                                      "foo\0bar\0this\0that\0"));
  for (int i=0; i<20; ++i) {
    char buffer[1024];
    // build key\0value\0 pairs: testing=testing, count=<i>
    snprintf(buffer, sizeof(buffer), "%s%c%s%c%s%c%d%c",
             "testing", '\0', "testing", '\0',
             "count", '\0', i, '\0');
    ASSERT_EQ(0, rados_service_update_status(cluster, buffer));
    sleep(1);
  }
  rados_shutdown(cluster);
}
| 6,615 | 30.504762 | 77 |
cc
|
null |
ceph-main/src/test/librados/service_cxx.cc
|
#include <algorithm>
#include <thread>
#include <errno.h>
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/config_proxy.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "test/unit.cc"
using namespace std;
using namespace librados;
// C++ flavor of RegisterEarly: register before connect, duplicate gets
// -EEXIST, then connect and linger briefly before shutdown.
TEST(LibRadosServicePP, RegisterEarly) {
  Rados cluster;
  cluster.init("admin");
  ASSERT_EQ(0, cluster.conf_read_file(NULL));
  cluster.conf_parse_env(NULL);
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, cluster.service_daemon_register(
	      "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
  ASSERT_EQ(-EEXIST, cluster.service_daemon_register(
	      "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
  ASSERT_EQ(0, cluster.connect());
  sleep(5); // presumably to let the registration propagate — TODO confirm
  cluster.shutdown();
}
// Registration after connecting also succeeds; teardown is left to the
// Rados object going out of scope.
TEST(LibRadosServicePP, RegisterLate) {
  Rados cluster;
  cluster.init("admin");
  ASSERT_EQ(0, cluster.conf_read_file(NULL));
  cluster.conf_parse_env(NULL);
  ASSERT_EQ("", connect_cluster_pp(cluster));
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(0, cluster.service_daemon_register(
	      "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
}
// Status updates fail with -ENOTCONN before connect and are accepted once
// the daemon is registered.
TEST(LibRadosServicePP, Status) {
  Rados cluster;
  cluster.init("admin");
  ASSERT_EQ(0, cluster.conf_read_file(NULL));
  cluster.conf_parse_env(NULL);
  string name = string("pid") + stringify(getpid());
  ASSERT_EQ(-ENOTCONN, cluster.service_daemon_update_status(
	      {{"testing", "starting"}}));
  ASSERT_EQ(0, cluster.connect());
  ASSERT_EQ(0, cluster.service_daemon_register(
	      "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
  for (int i=0; i<20; ++i) {
    ASSERT_EQ(0, cluster.service_daemon_update_status({
	  {"testing", "running"},
	  {"count", stringify(i)}
	}));
    sleep(1);
  }
  cluster.shutdown();
}
// A daemon registration must disappear from the mgr's service map after its
// cluster handle shuts down; retried because the mgr refreshes the map on
// its tick.
TEST(LibRadosServicePP, Close) {
  int tries = 20;
  string name = string("close-test-pid") + stringify(getpid());
  int i;
  for (i = 0; i < tries; ++i) {
    cout << "attempt " << i << " of " << tries << std::endl;
    // register in a scoped handle, then shut it down
    {
      Rados cluster;
      cluster.init("admin");
      ASSERT_EQ(0, cluster.conf_read_file(NULL));
      cluster.conf_parse_env(NULL);
      ASSERT_EQ(0, cluster.connect());
      ASSERT_EQ(0, cluster.service_daemon_register(
		  "laundry", name, {{"foo", "bar"}, {"this", "that"}}));
      sleep(3); // let it register
      cluster.shutdown();
    }
    // mgr updates servicemap every tick
    //sleep(g_conf().get_val<int64_t>("mgr_tick_period"));
    std::this_thread::sleep_for(g_conf().get_val<std::chrono::seconds>(
				  "mgr_tick_period"));
    // make sure we are deregistered
    {
      Rados cluster;
      cluster.init("admin");
      ASSERT_EQ(0, cluster.conf_read_file(NULL));
      cluster.conf_parse_env(NULL);
      ASSERT_EQ(0, cluster.connect());
      bufferlist inbl, outbl;
      ASSERT_EQ(0, cluster.mon_command("{\"prefix\": \"service dump\"}",
				       inbl, &outbl, NULL));
      string s = outbl.to_str();
      cluster.shutdown();
      if (s.find(name) != string::npos) {
	cout << " failed to deregister:\n" << s << std::endl;
      } else {
	break;
      }
    }
  }
  // must have deregistered within the allotted attempts
  ASSERT_LT(i, tries);
}
| 3,208 | 29.273585 | 72 |
cc
|
null |
ceph-main/src/test/librados/snapshots.cc
|
#include "include/rados.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "crimson_utils.h"
#include <algorithm>
#include <errno.h>
#include "gtest/gtest.h"
#include <string>
using std::string;
// run the same snapshot tests against replicated and EC pool fixtures
typedef RadosTest LibRadosSnapshots;
typedef RadosTest LibRadosSnapshotsSelfManaged;
typedef RadosTestEC LibRadosSnapshotsEC;
typedef RadosTestEC LibRadosSnapshotsSelfManagedEC;
// size of the scratch object payloads used throughout these tests
const int bufsize = 128;
// snap_list reports a freshly created pool snapshot, and its id matches
// what snap_lookup returns for the same name.
TEST_F(LibRadosSnapshots, SnapList) {
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  rados_snap_t snap_ids[10];
  EXPECT_EQ(1, rados_ioctx_snap_list(ioctx, snap_ids,
				     sizeof(snap_ids) / sizeof(snap_ids[0])));
  rados_snap_t looked_up;
  EXPECT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snap1", &looked_up));
  EXPECT_EQ(looked_up, snap_ids[0]);
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
}
// Duplicate snapshot names are rejected, and a removed snapshot can no
// longer be looked up.
TEST_F(LibRadosSnapshots, SnapRemove) {
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  rados_snap_t snap_id;
  ASSERT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snap1", &snap_id));
  ASSERT_EQ(-EEXIST, rados_ioctx_snap_create(ioctx, "snap1"));
  ASSERT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
  ASSERT_EQ(-ENOENT, rados_ioctx_snap_lookup(ioctx, "snap1", &snap_id));
}
// snap_rollback restores an object's pre-snapshot contents after an
// overwrite.
TEST_F(LibRadosSnapshots, Rollback) {
  char original[bufsize];
  memset(original, 0xcc, sizeof(original));
  ASSERT_EQ(0, rados_write(ioctx, "foo", original, sizeof(original), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  // overwrite the object, then roll it back to the snapshot
  char clobber[sizeof(original)];
  memset(clobber, 0xdd, sizeof(clobber));
  EXPECT_EQ(0, rados_write_full(ioctx, "foo", clobber, sizeof(clobber)));
  EXPECT_EQ(0, rados_ioctx_snap_rollback(ioctx, "foo", "snap1"));
  char readback[sizeof(original)];
  EXPECT_EQ((int)sizeof(readback), rados_read(ioctx, "foo", readback, sizeof(readback), 0));
  EXPECT_EQ(0, memcmp(original, readback, sizeof(original)));
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
}
// A snapshot id round-trips back to its name, and its timestamp can be
// queried; lookups of unknown names fail with -ENOENT.
TEST_F(LibRadosSnapshots, SnapGetName) {
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snapfoo"));
  rados_snap_t snap_id;
  EXPECT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snapfoo", &snap_id));
  EXPECT_EQ(-ENOENT, rados_ioctx_snap_lookup(ioctx, "snapbar", &snap_id));
  char snap_name[128];
  memset(snap_name, 0, sizeof(snap_name));
  EXPECT_EQ(0, rados_ioctx_snap_get_name(ioctx, snap_id, snap_name, sizeof(snap_name)));
  time_t snaptime;
  EXPECT_EQ(0, rados_ioctx_snap_get_stamp(ioctx, snap_id, &snaptime));
  EXPECT_EQ(0, strcmp(snap_name, "snapfoo"));
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snapfoo"));
}
TEST_F(LibRadosSnapshotsSelfManaged, Snap) {
  // Self-managed snapshot lifecycle via the C API: take two snaps (the
  // second asynchronously), write under each snap context, then read
  // back through specific snap ids to verify clone contents.
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  // set_write_ctx takes the snap ids in descending order, so reverse
  // around the call and restore ascending order afterwards.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  // First write: preserved in the clone for the second snapshot.
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  my_snaps.push_back(-2);
  // Second snapshot created through the async variant.
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr,
                                            &completion));
  rados_aio_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back(), completion);
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  rados_aio_release(completion);
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  // Second write overwrites the head; the clone keeps the first write.
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
  // Reading at an id older than the first write: the object did not
  // exist yet, so the read must fail.
  rados_ioctx_snap_set_read(ioctx, my_snaps[1]-1);
  char buf3[sizeof(buf)];
  ASSERT_EQ(-ENOENT, rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  // Reading at the second snap id must see the first write's data.
  rados_ioctx_snap_set_read(ioctx, my_snaps[1]);
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  // Tear down: async removal of the newest snap, sync removal of the
  // oldest, then drop back to reading the head and delete the object.
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr,
                                            &completion));
  rados_aio_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back(), completion);
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  rados_aio_release(completion);
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
}
TEST_F(LibRadosSnapshotsSelfManaged, Rollback) {
  // Self-managed rollback: snapshot after the first write, overwrite,
  // then roll back and verify the first write's contents return.
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  // First write
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  // Second write
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
  // Rollback to my_snaps[1] - Object is expected to contain the first write
  rados_ioctx_selfmanaged_snap_rollback(ioctx, "foo", my_snaps[1]);
  char buf3[sizeof(buf)];
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf, sizeof(buf)));
  // Clean up both snapshots and the object.
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
}
TEST_F(LibRadosSnapshotsSelfManaged, FutureSnapRollback) {
  // Rolling back to a snap id newer than every existing clone must be a
  // no-op: the object keeps its head (latest write) contents.
  std::vector<uint64_t> my_snaps;
  // Snapshot 1
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  // First write
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  // Snapshot 2
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  // Second write
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
  // Snapshot 3: never written under, so it is newer than all clones.
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  // Rollback to the last snap id - Object is expected to contain
  // latest write (head object)
  rados_ioctx_selfmanaged_snap_rollback(ioctx, "foo", my_snaps[2]);
  char buf3[sizeof(buf)];
  ASSERT_EQ((int)sizeof(buf3), rados_read(ioctx, "foo", buf3, sizeof(buf3), 0));
  ASSERT_EQ(0, memcmp(buf3, buf2, sizeof(buf)));
  // Clean up all three snapshots (the original version removed only two
  // and leaked the first snapshot) plus the object.
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
}
// EC testing
TEST_F(LibRadosSnapshotsEC, SnapList) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: one pool snapshot must appear in snap_list() with
  // the id that snap_lookup() resolves.
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  rados_snap_t listed[10];
  EXPECT_EQ(1, rados_ioctx_snap_list(ioctx, listed,
                                     sizeof(listed) / sizeof(listed[0])));
  rados_snap_t snap_id;
  EXPECT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snap1", &snap_id));
  EXPECT_EQ(snap_id, listed[0]);
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
}
TEST_F(LibRadosSnapshotsEC, SnapRemove) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: duplicate snap creation fails with -EEXIST and a
  // removed snap can no longer be looked up.
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  rados_snap_t snap_id;
  ASSERT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snap1", &snap_id));
  ASSERT_EQ(-EEXIST, rados_ioctx_snap_create(ioctx, "snap1"));
  ASSERT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
  ASSERT_EQ(-ENOENT, rados_ioctx_snap_lookup(ioctx, "snap1", &snap_id));
}
TEST_F(LibRadosSnapshotsEC, Rollback) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: roll back an overwritten object to a snapshot and
  // verify the pre-overwrite contents come back.
  char before[bufsize];
  memset(before, 0xcc, sizeof(before));
  ASSERT_EQ(0, rados_write(ioctx, "foo", before, sizeof(before), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snap1"));
  char after[sizeof(before)];
  memset(after, 0xdd, sizeof(after));
  EXPECT_EQ(0, rados_write_full(ioctx, "foo", after, sizeof(after)));
  EXPECT_EQ(0, rados_ioctx_snap_rollback(ioctx, "foo", "snap1"));
  char readback[sizeof(before)];
  EXPECT_EQ((int)sizeof(readback),
            rados_read(ioctx, "foo", readback, sizeof(readback), 0));
  EXPECT_EQ(0, memcmp(before, readback, sizeof(before)));
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snap1"));
}
TEST_F(LibRadosSnapshotsEC, SnapGetName) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: name/stamp retrieval must round-trip the snap id.
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  ASSERT_EQ(0, rados_ioctx_snap_create(ioctx, "snapfoo"));
  rados_snap_t snap_id;
  EXPECT_EQ(0, rados_ioctx_snap_lookup(ioctx, "snapfoo", &snap_id));
  EXPECT_EQ(-ENOENT, rados_ioctx_snap_lookup(ioctx, "snapbar", &snap_id));
  char resolved[128] = {0};
  EXPECT_EQ(0, rados_ioctx_snap_get_name(ioctx, snap_id, resolved,
                                         sizeof(resolved)));
  time_t snaptime;
  EXPECT_EQ(0, rados_ioctx_snap_get_stamp(ioctx, snap_id, &snaptime));
  EXPECT_EQ(0, strcmp(resolved, "snapfoo"));
  EXPECT_EQ(0, rados_ioctx_snap_remove(ioctx, "snapfoo"));
}
TEST_F(LibRadosSnapshotsSelfManagedEC, Snap) {
  SKIP_IF_CRIMSON();
  // Self-managed snapshot lifecycle on an EC pool; writes are sized to
  // the pool's required alignment, hence the runtime-sized buffers.
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;
  // std::vector gives RAII cleanup: the previous raw new[] buffers
  // leaked whenever a failing ASSERT_* returned from the test early.
  std::vector<char> buf(bsize, static_cast<char>(0xcc));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf.data(), bsize, 0));
  my_snaps.push_back(-2);
  // Second snapshot taken asynchronously.
  rados_completion_t completion;
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr,
                                            &completion));
  rados_aio_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back(), completion);
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  rados_aio_release(completion);
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // Append a second aligned stripe after the snapshot.
  std::vector<char> buf2(bsize, static_cast<char>(0xdd));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2.data(), bsize, bsize));
  // Before the first write the object did not exist.
  rados_ioctx_snap_set_read(ioctx, my_snaps[1]-1);
  std::vector<char> buf3(bsize*2);
  ASSERT_EQ(-ENOENT, rados_read(ioctx, "foo", buf3.data(), bsize*2, 0));
  // At the second snap only the first stripe exists.
  rados_ioctx_snap_set_read(ioctx, my_snaps[1]);
  ASSERT_EQ(bsize, rados_read(ioctx, "foo", buf3.data(), bsize*2, 0));
  ASSERT_EQ(0, memcmp(buf3.data(), buf.data(), bsize));
  // Tear down: async removal of the newest snap, sync removal of the
  // oldest, then delete the head object.
  ASSERT_EQ(0, rados_aio_create_completion2(nullptr, nullptr,
                                            &completion));
  rados_aio_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back(), completion);
  ASSERT_EQ(0, rados_aio_wait_for_complete(completion));
  rados_aio_release(completion);
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
}
TEST_F(LibRadosSnapshotsSelfManagedEC, Rollback) {
  SKIP_IF_CRIMSON();
  // Self-managed rollback on an EC pool: snapshot after the first
  // aligned write, append a second, roll back, and verify only the
  // first write survives.
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;
  // std::vector gives RAII cleanup: the previous raw new[] buffers
  // leaked whenever a failing ASSERT_* returned from the test early.
  std::vector<char> buf(bsize, static_cast<char>(0xcc));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf.data(), bsize, 0));
  my_snaps.push_back(-2);
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
					&my_snaps[0], my_snaps.size()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  std::vector<char> buf2(bsize, static_cast<char>(0xdd));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2.data(), bsize, bsize));
  // Roll back to the snapshot taken after the first write.
  rados_ioctx_selfmanaged_snap_rollback(ioctx, "foo", my_snaps[1]);
  std::vector<char> buf3(bsize*2);
  ASSERT_EQ(bsize, rados_read(ioctx, "foo", buf3.data(), bsize*2, 0));
  ASSERT_EQ(0, memcmp(buf3.data(), buf.data(), bsize));
  // Clean up both snapshots and the object.
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, rados_remove(ioctx, "foo"));
}
| 14,249 | 38.915966 | 80 |
cc
|
null |
ceph-main/src/test/librados/snapshots_cxx.cc
|
#include <algorithm>
#include <errno.h>
#include <string>
#include "gtest/gtest.h"
#include "include/rados.h"
#include "include/rados/librados.hpp"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace librados;
typedef RadosTestPP LibRadosSnapshotsPP;
typedef RadosTestPP LibRadosSnapshotsSelfManagedPP;
typedef RadosTestECPP LibRadosSnapshotsECPP;
typedef RadosTestECPP LibRadosSnapshotsSelfManagedECPP;
const int bufsize = 128;
TEST_F(LibRadosSnapshotsPP, SnapListPP) {
  // A pool snapshot must appear in snap_list() with the id snap_lookup()
  // resolves, and pool snaps must not flip the pool into
  // selfmanaged-snaps mode at any point.
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  std::vector<snap_t> listed;
  EXPECT_EQ(0, ioctx.snap_list(&listed));
  EXPECT_EQ(1U, listed.size());
  snap_t snap_id;
  EXPECT_EQ(0, ioctx.snap_lookup("snap1", &snap_id));
  EXPECT_EQ(snap_id, listed[0]);
  EXPECT_EQ(0, ioctx.snap_remove("snap1"));
  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
}
TEST_F(LibRadosSnapshotsPP, SnapRemovePP) {
  // Once removed, a snapshot name can no longer be looked up.
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  rados_snap_t snap_id;
  ASSERT_EQ(0, ioctx.snap_lookup("snap1", &snap_id));
  ASSERT_EQ(0, ioctx.snap_remove("snap1"));
  ASSERT_EQ(-ENOENT, ioctx.snap_lookup("snap1", &snap_id));
}
TEST_F(LibRadosSnapshotsPP, RollbackPP) {
  // Overwrite an object after snapshotting it, roll back, and verify
  // the original data is restored.
  char before[bufsize];
  memset(before, 0xcc, sizeof(before));
  bufferlist before_bl;
  before_bl.append(before, sizeof(before));
  ASSERT_EQ(0, ioctx.write("foo", before_bl, sizeof(before), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  char after[sizeof(before)];
  memset(after, 0xdd, sizeof(after));
  bufferlist after_bl;
  after_bl.append(after, sizeof(after));
  EXPECT_EQ(0, ioctx.write_full("foo", after_bl));
  EXPECT_EQ(0, ioctx.snap_rollback("foo", "snap1"));
  bufferlist readback;
  EXPECT_EQ((int)sizeof(before), ioctx.read("foo", readback, sizeof(before), 0));
  EXPECT_EQ(0, memcmp(before, readback.c_str(), sizeof(before)));
  EXPECT_EQ(0, ioctx.snap_remove("snap1"));
}
TEST_F(LibRadosSnapshotsPP, SnapGetNamePP) {
  // snap_get_name()/snap_get_stamp() must round-trip the id returned by
  // snap_lookup(); an unknown name fails with -ENOENT.
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snapfoo"));
  rados_snap_t snap_id;
  EXPECT_EQ(0, ioctx.snap_lookup("snapfoo", &snap_id));
  EXPECT_EQ(-ENOENT, ioctx.snap_lookup("snapbar", &snap_id));
  std::string resolved;
  EXPECT_EQ(0, ioctx.snap_get_name(snap_id, &resolved));
  time_t snaptime;
  EXPECT_EQ(0, ioctx.snap_get_stamp(snap_id, &snaptime));
  EXPECT_EQ(0, strcmp(resolved.c_str(), "snapfoo"));
  EXPECT_EQ(0, ioctx.snap_remove("snapfoo"));
}
TEST_F(LibRadosSnapshotsPP, SnapCreateRemovePP) {
  // Regression test for http://tracker.ceph.com/issues/10262: removing
  // snapshots that cover a deleted/recreated object must succeed.
  bufferlist data;
  data.append("foo");
  ASSERT_EQ(0, ioctx.write("foo", data, data.length(), 0));
  ASSERT_EQ(0, ioctx.snap_create("snapfoo"));
  ASSERT_EQ(0, ioctx.remove("foo"));
  ASSERT_EQ(0, ioctx.snap_create("snapbar"));
  // Recreate and remove the object in a single compound op.
  librados::ObjectWriteOperation wr_op;
  wr_op.create(false);
  wr_op.remove();
  ASSERT_EQ(0, ioctx.operate("foo", &wr_op));
  EXPECT_EQ(0, ioctx.snap_remove("snapfoo"));
  EXPECT_EQ(0, ioctx.snap_remove("snapbar"));
}
TEST_F(LibRadosSnapshotsSelfManagedPP, SnapPP) {
  // C++ self-managed snapshot lifecycle: the pool must enter
  // selfmanaged-snaps mode on the first selfmanaged snap create and
  // stay in it even after the snaps are removed.
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  // set_write_ctx takes snap ids in descending order, so reverse around
  // the call and restore ascending order afterwards.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // First write; preserved in the clone for the second snapshot.
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  my_snaps.push_back(-2);
  // Second snapshot taken asynchronously.
  librados::AioCompletion *completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_create(&my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  // Overwrite the head; the clone keeps the first write.
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
  // Reading at the second snap id must see the first write's data.
  ioctx.snap_set_read(my_snaps[1]);
  bufferlist bl3;
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  // Tear down: async removal of the newest snap, sync removal of the
  // oldest; the pool remains in selfmanaged-snaps mode afterwards.
  completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_remove(my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  ASSERT_EQ(0, ioctx.remove("foo"));
}
TEST_F(LibRadosSnapshotsSelfManagedPP, RollbackPP) {
  SKIP_IF_CRIMSON();
  // Verify clone/overlap bookkeeping via list_snaps(), then a
  // selfmanaged rollback. Extents are in units of bufsize (128 bytes).
  std::vector<uint64_t> my_snaps;
  // list_snaps() is only valid when reading LIBRADOS_SNAP_DIR, so use a
  // second ioctx dedicated to that.
  IoCtx readioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), readioctx));
  readioctx.set_namespace(nspace);
  readioctx.snap_set_read(LIBRADOS_SNAP_DIR);
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  //Write 3 consecutive buffers
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*2));
  // Only the head exists so far: one clone entry, no snaps, no overlap.
  snap_set_t ss;
  snap_t head = SNAP_HEAD;
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(1u, ss.clones.size());
  ASSERT_EQ(head, ss.clones[0].cloneid);
  ASSERT_EQ(0u, ss.clones[0].snaps.size());
  ASSERT_EQ(0u, ss.clones[0].overlap.size());
  ASSERT_EQ(384u, ss.clones[0].size);
  // Second snapshot, then diverge the head from the clone.
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  //Change the middle buffer
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize));
  //Add another after
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*3));
  // list_snaps() through an ioctx that is not reading SNAP_DIR must be
  // rejected, both directly and via an ObjectReadOperation.
  ASSERT_EQ(-EINVAL, ioctx.list_snaps("foo", &ss));
  ObjectReadOperation o;
  o.list_snaps(&ss, NULL);
  ASSERT_EQ(-EINVAL, ioctx.operate("foo", &o, NULL));
  // Now there are two clones: the snapshotted one overlaps the head on
  // the untouched first and third buffers; the head has grown to 512.
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(2u, ss.clones.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].cloneid);
  ASSERT_EQ(1u, ss.clones[0].snaps.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].snaps[0]);
  ASSERT_EQ(2u, ss.clones[0].overlap.size());
  ASSERT_EQ(0u, ss.clones[0].overlap[0].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[0].second);
  ASSERT_EQ(256u, ss.clones[0].overlap[1].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[1].second);
  ASSERT_EQ(384u, ss.clones[0].size);
  ASSERT_EQ(head, ss.clones[1].cloneid);
  ASSERT_EQ(0u, ss.clones[1].snaps.size());
  ASSERT_EQ(0u, ss.clones[1].overlap.size());
  ASSERT_EQ(512u, ss.clones[1].size);
  // Roll back: all three original buffers return and the fourth is gone.
  ioctx.selfmanaged_snap_rollback("foo", my_snaps[1]);
  bufferlist bl3;
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), bufsize));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), bufsize*2));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf, sizeof(buf)));
  ASSERT_EQ((int)0, ioctx.read("foo", bl3, sizeof(buf), bufsize*3));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  readioctx.close();
}
TEST_F(LibRadosSnapshotsSelfManagedPP, SnapOverlapPP) {
  // WIP https://tracker.ceph.com/issues/58263
  SKIP_IF_CRIMSON();
  // Verify overlap tracking across two generations of interleaved
  // writes: writes land on alternating bufsize-sized extents, so each
  // clone's overlap with its successor is the set of untouched extents.
  std::vector<uint64_t> my_snaps;
  // list_snaps() requires reading LIBRADOS_SNAP_DIR; use a second ioctx.
  IoCtx readioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), readioctx));
  readioctx.set_namespace(nspace);
  readioctx.snap_set_read(LIBRADOS_SNAP_DIR);
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // Generation 1: even extents 0,2,4,6,8.
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*2));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*4));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*6));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), bufsize*8));
  snap_set_t ss;
  snap_t head = SNAP_HEAD;
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(1u, ss.clones.size());
  ASSERT_EQ(head, ss.clones[0].cloneid);
  ASSERT_EQ(0u, ss.clones[0].snaps.size());
  ASSERT_EQ(0u, ss.clones[0].overlap.size());
  ASSERT_EQ(1152u, ss.clones[0].size);
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  // Generation 2: odd extents 1,3,5,7,9 — the even extents remain
  // shared (overlapping) with the first clone.
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*1));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*3));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*5));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*7));
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), bufsize*9));
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(2u, ss.clones.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].cloneid);
  ASSERT_EQ(1u, ss.clones[0].snaps.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].snaps[0]);
  ASSERT_EQ(5u, ss.clones[0].overlap.size());
  ASSERT_EQ(0u, ss.clones[0].overlap[0].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[0].second);
  ASSERT_EQ(256u, ss.clones[0].overlap[1].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[1].second);
  ASSERT_EQ(512u, ss.clones[0].overlap[2].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[2].second);
  ASSERT_EQ(768u, ss.clones[0].overlap[3].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[3].second);
  ASSERT_EQ(1024u, ss.clones[0].overlap[4].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[4].second);
  ASSERT_EQ(1152u, ss.clones[0].size);
  ASSERT_EQ(head, ss.clones[1].cloneid);
  ASSERT_EQ(0u, ss.clones[1].snaps.size());
  ASSERT_EQ(0u, ss.clones[1].overlap.size());
  ASSERT_EQ(1280u, ss.clones[1].size);
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf3[sizeof(buf)];
  memset(buf3, 0xee, sizeof(buf3));
  bufferlist bl4;
  bl4.append(buf3, sizeof(buf3));
  // Generation 3: extents 1,4,5,8. (Fixed: these writes now use bl4 —
  // the 0xee pattern built above — where the original mistakenly reused
  // bl2 and left bl4 unused; the overlap assertions depend only on the
  // written extents, not on the payload bytes.)
  ASSERT_EQ(0, ioctx.write("foo", bl4, sizeof(buf3), bufsize*1));
  ASSERT_EQ(0, ioctx.write("foo", bl4, sizeof(buf3), bufsize*4));
  ASSERT_EQ(0, ioctx.write("foo", bl4, sizeof(buf3), bufsize*5));
  ASSERT_EQ(0, ioctx.write("foo", bl4, sizeof(buf3), bufsize*8));
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(3u, ss.clones.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].cloneid);
  ASSERT_EQ(1u, ss.clones[0].snaps.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].snaps[0]);
  ASSERT_EQ(5u, ss.clones[0].overlap.size());
  ASSERT_EQ(0u, ss.clones[0].overlap[0].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[0].second);
  ASSERT_EQ(256u, ss.clones[0].overlap[1].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[1].second);
  ASSERT_EQ(512u, ss.clones[0].overlap[2].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[2].second);
  ASSERT_EQ(768u, ss.clones[0].overlap[3].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[3].second);
  ASSERT_EQ(1024u, ss.clones[0].overlap[4].first);
  ASSERT_EQ(128u, ss.clones[0].overlap[4].second);
  ASSERT_EQ(1152u, ss.clones[0].size);
  ASSERT_EQ(my_snaps[2], ss.clones[1].cloneid);
  ASSERT_EQ(1u, ss.clones[1].snaps.size());
  ASSERT_EQ(my_snaps[2], ss.clones[1].snaps[0]);
  ASSERT_EQ(4u, ss.clones[1].overlap.size());
  ASSERT_EQ(0u, ss.clones[1].overlap[0].first);
  ASSERT_EQ(128u, ss.clones[1].overlap[0].second);
  ASSERT_EQ(256u, ss.clones[1].overlap[1].first);
  ASSERT_EQ(256u, ss.clones[1].overlap[1].second);
  ASSERT_EQ(768u, ss.clones[1].overlap[2].first);
  ASSERT_EQ(256u, ss.clones[1].overlap[2].second);
  ASSERT_EQ(1152u, ss.clones[1].overlap[3].first);
  ASSERT_EQ(128u, ss.clones[1].overlap[3].second);
  ASSERT_EQ(1280u, ss.clones[1].size);
  ASSERT_EQ(head, ss.clones[2].cloneid);
  ASSERT_EQ(0u, ss.clones[2].snaps.size());
  ASSERT_EQ(0u, ss.clones[2].overlap.size());
  ASSERT_EQ(1280u, ss.clones[2].size);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  readioctx.close();
}
TEST_F(LibRadosSnapshotsSelfManagedPP, Bug11677) {
  // Regression test (tracker issue 11677): assert_exists + remove in a
  // single op on a large object that has a snapshotted clone.
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = 1<<20;
  // std::vector gives RAII cleanup: the previous raw new[] buffer
  // leaked whenever a failing ASSERT_* returned from the test early.
  std::vector<char> buf(bsize, static_cast<char>(0xcc));
  bufferlist bl1;
  bl1.append(buf.data(), bsize);
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, 0));
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // Compound op: the object must exist, then remove it.
  librados::ObjectWriteOperation op;
  op.assert_exists();
  op.remove();
  ASSERT_EQ(0, ioctx.operate("foo", &op));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
}
TEST_F(LibRadosSnapshotsSelfManagedPP, OrderSnap) {
  // OPERATION_ORDERSNAP: a write whose snap context is older than one
  // the OSD has already seen for the object must fail with -EOLDSNAPC;
  // without the flag the same write succeeds.
  std::vector<uint64_t> my_snaps;
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  int flags = librados::OPERATION_ORDERSNAP;
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // Write with the one-snap context.
  ObjectWriteOperation op1;
  op1.write(0, bl);
  librados::AioCompletion *comp1 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp1, &op1, flags));
  ASSERT_EQ(0, comp1->wait_for_complete());
  ASSERT_EQ(0, comp1->get_return_value());
  comp1->release();
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // Write with the newer two-snap context; the OSD records it.
  ObjectWriteOperation op2;
  op2.write(0, bl);
  librados::AioCompletion *comp2 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp2, &op2, flags));
  ASSERT_EQ(0, comp2->wait_for_complete());
  ASSERT_EQ(0, comp2->get_return_value());
  comp2->release();
  // Regress to the older one-snap context.
  my_snaps.pop_back();
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // With ORDERSNAP the stale context is rejected.
  ObjectWriteOperation op3;
  op3.write(0, bl);
  librados::AioCompletion *comp3 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp3, &op3, flags));
  ASSERT_EQ(0, comp3->wait_for_complete());
  ASSERT_EQ(-EOLDSNAPC, comp3->get_return_value());
  comp3->release();
  // Without the flag the same stale context is accepted.
  ObjectWriteOperation op4;
  op4.write(0, bl);
  librados::AioCompletion *comp4 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp4, &op4, 0));
  ASSERT_EQ(0, comp4->wait_for_complete());
  ASSERT_EQ(0, comp4->get_return_value());
  comp4->release();
}
TEST_F(LibRadosSnapshotsSelfManagedPP, WriteRollback) {
  // A write and a selfmanaged_snap_rollback combined in one compound op
  // should resolve to the snapshotted (first-write) contents.
  // Currently skipped: https://tracker.ceph.com/issues/59114
  GTEST_SKIP();
  uint64_t snapid = 5;
  // buf1
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  // buf2
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  // First write
  ObjectWriteOperation op_write1;
  op_write1.write(0, bl);
  // Operate
  librados::AioCompletion *comp_write = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp_write, &op_write1, 0));
  ASSERT_EQ(0, comp_write->wait_for_complete());
  ASSERT_EQ(0, comp_write->get_return_value());
  comp_write->release();
  // Take Snapshot
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&snapid));
  // Rollback + Second write in the same op
  ObjectWriteOperation op_write2_snap_rollback;
  op_write2_snap_rollback.write(0, bl2);
  op_write2_snap_rollback.selfmanaged_snap_rollback(snapid);
  // Operate
  librados::AioCompletion *comp_write2 = cluster.aio_create_completion();
  ASSERT_EQ(0, ioctx.aio_operate("foo", comp_write2, &op_write2_snap_rollback, 0));
  ASSERT_EQ(0, comp_write2->wait_for_complete());
  ASSERT_EQ(0, comp_write2->get_return_value());
  comp_write2->release();
  // Resolved should be first write
  bufferlist bl3;
  EXPECT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  EXPECT_EQ(0, memcmp(buf, bl3.c_str(), sizeof(buf)));
}
TEST_F(LibRadosSnapshotsSelfManagedPP, ReusePurgedSnap) {
  // Write under a snap context whose newest snap has already been
  // removed (and, after the sleep, presumably purged); the write must
  // still succeed.
  std::vector<uint64_t> my_snaps;
  my_snaps.push_back(-2);  // placeholder; snap_create fills in the real id
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
  // set_write_ctx takes snap ids in descending order.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  // Take a second snap and immediately delete it again (both async).
  my_snaps.push_back(-2);
  librados::AioCompletion *completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_create(&my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  std::cout << "deleting snap " << my_snaps.back() << " in pool "
	    << ioctx.get_pool_name() << std::endl;
  completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_remove(my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  // Give the OSDs time to trim the deleted snap before reusing the
  // context that still names it. Timing-based; not guaranteed.
  std::cout << "waiting for snaps to purge" << std::endl;
  sleep(15);
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  // Write under the context that references the purged snap.
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
  // scrub it out?
  //sleep(600);
}
// EC testing
TEST_F(LibRadosSnapshotsECPP, SnapListPP) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: one pool snapshot must appear in snap_list() with
  // the id that snap_lookup() resolves.
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  std::vector<snap_t> listed;
  EXPECT_EQ(0, ioctx.snap_list(&listed));
  EXPECT_EQ(1U, listed.size());
  snap_t snap_id;
  EXPECT_EQ(0, ioctx.snap_lookup("snap1", &snap_id));
  EXPECT_EQ(snap_id, listed[0]);
  EXPECT_EQ(0, ioctx.snap_remove("snap1"));
}
TEST_F(LibRadosSnapshotsECPP, SnapRemovePP) {
  SKIP_IF_CRIMSON();
  // EC-pool variant: a removed snapshot can no longer be looked up.
  char payload[bufsize];
  memset(payload, 0xcc, sizeof(payload));
  bufferlist wbl;
  wbl.append(payload, sizeof(payload));
  ASSERT_EQ(0, ioctx.write("foo", wbl, sizeof(payload), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  rados_snap_t snap_id;
  ASSERT_EQ(0, ioctx.snap_lookup("snap1", &snap_id));
  ASSERT_EQ(0, ioctx.snap_remove("snap1"));
  ASSERT_EQ(-ENOENT, ioctx.snap_lookup("snap1", &snap_id));
}
// snap_rollback() on an EC pool restores the object's pre-snapshot
// contents after the object has been overwritten.
TEST_F(LibRadosSnapshotsECPP, RollbackPP) {
  SKIP_IF_CRIMSON();
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  // Write the original contents and snapshot them.
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.snap_create("snap1"));
  // Overwrite the whole object with different bytes.
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  EXPECT_EQ(0, ioctx.write_full("foo", bl2));
  // After rollback, reads must observe the original (0xcc) bytes.
  EXPECT_EQ(0, ioctx.snap_rollback("foo", "snap1"));
  bufferlist bl3;
  EXPECT_EQ((int)sizeof(buf), ioctx.read("foo", bl3, sizeof(buf), 0));
  EXPECT_EQ(0, memcmp(buf, bl3.c_str(), sizeof(buf)));
  EXPECT_EQ(0, ioctx.snap_remove("snap1"));
}
// Pool-snapshot metadata round trip on an EC pool: snap_lookup() maps the
// name to an id, snap_get_name()/snap_get_stamp() map the id back to its
// name and creation time.
TEST_F(LibRadosSnapshotsECPP, SnapGetNamePP) {
  SKIP_IF_CRIMSON();
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.snap_create("snapfoo"));
  rados_snap_t rid;
  // Lookup succeeds for the created snap and fails for an unknown name.
  EXPECT_EQ(0, ioctx.snap_lookup("snapfoo", &rid));
  EXPECT_EQ(-ENOENT, ioctx.snap_lookup("snapbar", &rid));
  std::string name;
  EXPECT_EQ(0, ioctx.snap_get_name(rid, &name));
  time_t snaptime;
  EXPECT_EQ(0, ioctx.snap_get_stamp(rid, &snaptime));
  // The id must resolve back to the exact name it was created under.
  EXPECT_EQ(0, strcmp(name.c_str(), "snapfoo"));
  EXPECT_EQ(0, ioctx.snap_remove("snapfoo"));
}
// Self-managed snapshots on an EC pool: write, snapshot, append past the
// snapshot, then verify that reads through the snapshot see only the
// pre-snapshot extent, and that both async and sync snap removal work.
TEST_F(LibRadosSnapshotsSelfManagedECPP, SnapPP) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  // -2 is a sentinel; selfmanaged_snap_create() replaces it with the real
  // snap id.
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  // The write context wants the snaps in descending order; my_snaps is
  // kept ascending, so reverse around each call.
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;
  // RAII buffers: the original raw new[]/delete[] leaked whenever an
  // ASSERT_* returned early from the test body.
  std::vector<char> buf(bsize, (char)0xcc);
  bufferlist bl1;
  bl1.append(buf.data(), bsize);
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, 0));
  my_snaps.push_back(-2);
  librados::AioCompletion *completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_create(&my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  std::vector<char> buf2(bsize, (char)0xdd);
  bufferlist bl2;
  bl2.append(buf2.data(), bsize);
  // Add another aligned buffer after the second snapshot was taken.
  ASSERT_EQ(0, ioctx.write("foo", bl2, bsize, bsize));
  // Reading through the first snapshot must expose only the original
  // single-buffer extent, with the original contents.
  ioctx.snap_set_read(my_snaps[1]);
  bufferlist bl3;
  ASSERT_EQ(bsize, ioctx.read("foo", bl3, bsize*3, 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf.data(), bsize));
  // Remove the newest snap asynchronously, the older one synchronously.
  completion = cluster.aio_create_completion();
  ioctx.aio_selfmanaged_snap_remove(my_snaps.back(), completion);
  ASSERT_EQ(0, completion->wait_for_complete());
  completion->release();
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
  ASSERT_EQ(0, ioctx.remove("foo"));
}
// Self-managed snapshot rollback on an EC pool: build a 3-buffer object,
// snapshot it, append a 4th buffer, verify the clone/overlap metadata via
// list_snaps() (which requires a SNAP_DIR read context), then roll back
// and confirm the object shrank to the snapshotted 3-buffer state.
TEST_F(LibRadosSnapshotsSelfManagedECPP, RollbackPP) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  // A second ioctx with a SNAP_DIR read context, needed for list_snaps().
  IoCtx readioctx;
  ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), readioctx));
  readioctx.set_namespace(nspace);
  readioctx.snap_set_read(LIBRADOS_SNAP_DIR);
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;
  // RAII buffers: the original raw new[]/delete[] leaked whenever an
  // ASSERT_* returned early from the test body.
  std::vector<char> buf(bsize, (char)0xcc);
  bufferlist bl1;
  bl1.append(buf.data(), bsize);
  // Write 3 consecutive buffers.
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, 0));
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, bsize));
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, bsize*2));
  // Before the second snapshot, only the head clone exists.
  snap_set_t ss;
  snap_t head = SNAP_HEAD;
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(1u, ss.clones.size());
  ASSERT_EQ(head, ss.clones[0].cloneid);
  ASSERT_EQ(0u, ss.clones[0].snaps.size());
  ASSERT_EQ(0u, ss.clones[0].overlap.size());
  ASSERT_EQ((unsigned)(bsize*3), ss.clones[0].size);
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  std::vector<char> buf2(bsize, (char)0xdd);
  bufferlist bl2;
  bl2.append(buf2.data(), bsize);
  // Add another buffer after the snapshot (extends the head clone).
  ASSERT_EQ(0, ioctx.write("foo", bl2, bsize, bsize*3));
  // list_snaps() is rejected on an ioctx whose read context is not
  // SNAP_DIR, both directly and via an ObjectReadOperation.
  ASSERT_EQ(-EINVAL, ioctx.list_snaps("foo", &ss));
  ObjectReadOperation o;
  o.list_snaps(&ss, NULL);
  ASSERT_EQ(-EINVAL, ioctx.operate("foo", &o, NULL));
  // Through the SNAP_DIR ioctx we now see the snap clone plus the head.
  ASSERT_EQ(0, readioctx.list_snaps("foo", &ss));
  ASSERT_EQ(2u, ss.clones.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].cloneid);
  ASSERT_EQ(1u, ss.clones[0].snaps.size());
  ASSERT_EQ(my_snaps[1], ss.clones[0].snaps[0]);
  ASSERT_EQ(1u, ss.clones[0].overlap.size());
  ASSERT_EQ(0u, ss.clones[0].overlap[0].first);
  ASSERT_EQ((unsigned)bsize*3, ss.clones[0].overlap[0].second);
  ASSERT_EQ((unsigned)bsize*3, ss.clones[0].size);
  ASSERT_EQ(head, ss.clones[1].cloneid);
  ASSERT_EQ(0u, ss.clones[1].snaps.size());
  ASSERT_EQ(0u, ss.clones[1].overlap.size());
  ASSERT_EQ((unsigned)bsize*4, ss.clones[1].size);
  // Roll back to the snapshot. The original ignored this return value;
  // a silent rollback failure would make the reads below misleading.
  ASSERT_EQ(0, ioctx.selfmanaged_snap_rollback("foo", my_snaps[1]));
  // The first three buffers hold the original data again ...
  bufferlist bl3;
  ASSERT_EQ(bsize, ioctx.read("foo", bl3, bsize, 0));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf.data(), bsize));
  ASSERT_EQ(bsize, ioctx.read("foo", bl3, bsize, bsize));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf.data(), bsize));
  ASSERT_EQ(bsize, ioctx.read("foo", bl3, bsize, bsize*2));
  ASSERT_EQ(0, memcmp(bl3.c_str(), buf.data(), bsize));
  // ... and the post-snapshot 4th buffer is gone.
  ASSERT_EQ(0, ioctx.read("foo", bl3, bsize, bsize*3));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  readioctx.close();
}
// Regression test for bug 11677: assert_exists + remove in one write op
// must succeed on an EC pool that has self-managed snapshots in the
// write context.
TEST_F(LibRadosSnapshotsSelfManagedECPP, Bug11677) {
  SKIP_IF_CRIMSON();
  std::vector<uint64_t> my_snaps;
  // -2 is a sentinel; selfmanaged_snap_create() replaces it with the real
  // snap id.
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  int bsize = alignment;
  // RAII buffer: the original raw new[]/delete[] leaked whenever an
  // ASSERT_* returned early from the test body.
  std::vector<char> buf(bsize, (char)0xcc);
  bufferlist bl1;
  bl1.append(buf.data(), bsize);
  ASSERT_EQ(0, ioctx.write("foo", bl1, bsize, 0));
  my_snaps.push_back(-2);
  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
  ::std::reverse(my_snaps.begin(), my_snaps.end());
  // The combination that used to misbehave: guard + remove in one op.
  std::unique_ptr<librados::ObjectWriteOperation> op(new librados::ObjectWriteOperation());
  op->assert_exists();
  op->remove();
  ASSERT_EQ(0, ioctx.operate("foo", op.get()));
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
  my_snaps.pop_back();
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
}
| 29,252 | 35.982301 | 91 |
cc
|
null |
ceph-main/src/test/librados/snapshots_stats.cc
|
#include "include/rados.h"
#include "json_spirit/json_spirit.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include <algorithm>
#include <errno.h>
#include "gtest/gtest.h"
#include <string>
#include <vector>
using std::string;
// Fixture for the C-API snaptrim-stats test. Before the base fixture
// creates its pool, it turns off the pg autoscaler and both scrub kinds
// (via "ceph config set" / "ceph osd set" mon commands) so that PG counts
// and stats stay stable while the test polls "pg dump"; TearDown restores
// both settings. Mon commands run against the shared s_cluster handle.
class LibRadosSnapshotStatsSelfManaged : public RadosTest {
public:
  LibRadosSnapshotStatsSelfManaged() {};
  ~LibRadosSnapshotStatsSelfManaged() override {};
protected:
  void SetUp() override {
    // disable pg autoscaler for the tests
    string c =
    "{"
      "\"prefix\": \"config set\", "
      "\"who\": \"global\", "
      "\"name\": \"osd_pool_default_pg_autoscale_mode\", "
      "\"value\": \"off\""
    "}";
    char *cmd[1];
    cmd[0] = (char *)c.c_str();
    std::cout << "Setting pg_autoscaler to 'off'" << std::endl;
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL,
              0, NULL, 0));
    // disable scrubs for the test
    c = string("{\"prefix\": \"osd set\",\"key\":\"noscrub\"}");
    cmd[0] = (char *)c.c_str();
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
    c = string("{\"prefix\": \"osd set\",\"key\":\"nodeep-scrub\"}");
    cmd[0] = (char *)c.c_str();
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
    // Run the base setup (pool/ioctx creation) only after the cluster
    // flags are in place.
    RadosTest::SetUp();
  }
  void TearDown() override {
    // re-enable pg autoscaler
    string c =
    "{"
      "\"prefix\": \"config set\", "
      "\"who\": \"global\", "
      "\"name\": \"osd_pool_default_pg_autoscale_mode\", "
      "\"value\": \"on\""
    "}";
    char *cmd[1];
    cmd[0] = (char *)c.c_str();
    std::cout << "Setting pg_autoscaler to 'on'" << std::endl;
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL,
              0, NULL, 0));
    // re-enable scrubs
    c = string("{\"prefix\": \"osd unset\",\"key\":\"noscrub\"}");
    cmd[0] = (char *)c.c_str();
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
    c = string("{\"prefix\": \"osd unset\",\"key\":\"nodeep-scrub\"}");
    cmd[0] = (char *)c.c_str();
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
    RadosTest::TearDown();
  }
};
// Erasure-coded variant of the snaptrim-stats fixture: identical flag
// handling (autoscaler off, scrubs off in SetUp; restored in TearDown),
// but the base fixture creates an EC pool instead of a replicated one.
class LibRadosSnapshotStatsSelfManagedEC : public RadosTestEC {
public:
  LibRadosSnapshotStatsSelfManagedEC() {};
  ~LibRadosSnapshotStatsSelfManagedEC() override {};
protected:
  void SetUp() override {
    // disable pg autoscaler for the tests
    string c =
    "{"
      "\"prefix\": \"config set\", "
      "\"who\": \"global\", "
      "\"name\": \"osd_pool_default_pg_autoscale_mode\", "
      "\"value\": \"off\""
    "}";
    char *cmd[1];
    cmd[0] = (char *)c.c_str();
    std::cout << "Setting pg_autoscaler to 'off'" << std::endl;
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL,
              0, NULL, 0));
    // disable scrubs for the test
    c = string("{\"prefix\": \"osd set\",\"key\":\"noscrub\"}");
    cmd[0] = (char *)c.c_str();
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
    c = string("{\"prefix\": \"osd set\",\"key\":\"nodeep-scrub\"}");
    cmd[0] = (char *)c.c_str();
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
    // Base setup (EC pool/ioctx creation) runs after the flags are set.
    RadosTestEC::SetUp();
  }
  void TearDown() override {
    // re-enable pg autoscaler
    string c =
    "{"
      "\"prefix\": \"config set\", "
      "\"who\": \"global\", "
      "\"name\": \"osd_pool_default_pg_autoscale_mode\", "
      "\"value\": \"on\""
    "}";
    char *cmd[1];
    cmd[0] = (char *)c.c_str();
    std::cout << "Setting pg_autoscaler to 'on'" << std::endl;
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL,
              0, NULL, 0));
    // re-enable scrubs
    c = string("{\"prefix\": \"osd unset\",\"key\":\"noscrub\"}");
    cmd[0] = (char *)c.c_str();
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
    c = string("{\"prefix\": \"osd unset\",\"key\":\"nodeep-scrub\"}");
    cmd[0] = (char *)c.c_str();
    ASSERT_EQ(0, rados_mon_command(s_cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0));
    RadosTestEC::TearDown();
  }
};
// Walk a parsed "pg dump" JSON object and accumulate the snaptrim
// counters (objects_trimmed, snaptrim_duration) across all PGs into the
// caller-supplied out-parameters.
void get_snaptrim_stats(json_spirit::Object& pg_dump,
                        int *objs_trimmed,
                        double *trim_duration) {
  // Locate the "pg_map" sub-object of the dump.
  json_spirit::Object pgmap;
  for (json_spirit::Pair& entry : pg_dump) {
    if (entry.name_ == "pg_map") {
      pgmap = entry.value_.get_obj();
      break;
    }
  }
  // Locate the per-PG stats array inside pg_map.
  json_spirit::Array pgs;
  for (json_spirit::Pair& entry : pgmap) {
    if (entry.name_ == "pg_stats") {
      pgs = entry.value_.get_array();
      break;
    }
  }
  // Sum the snaptrim fields over every PG entry.
  for (json_spirit::Value& pg : pgs) {
    json_spirit::Object& pg_stat = pg.get_obj();
    for (json_spirit::Pair& stats : pg_stat) {
      if (stats.name_ == "objects_trimmed") {
        *objs_trimmed += stats.value_.get_int();
      }
      if (stats.name_ == "snaptrim_duration") {
        *trim_duration += stats.value_.get_real();
      }
    }
  }
}
const int bufsize = 128;
// Removing self-managed snapshots must populate the per-PG snaptrim
// counters (objects_trimmed / snaptrim_duration) reported by "pg dump".
TEST_F(LibRadosSnapshotStatsSelfManaged, SnaptrimStats) {
  int num_objs = 10;
  // create objects
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, rados_write(ioctx, obj.c_str(), buf, sizeof(buf), 0));
  }
  std::vector<uint64_t> my_snaps;
  for (int snap = 0; snap < 1; ++snap) {
    // create a snapshot, then overwrite every object so each one gets a
    // clone that snaptrim will later have to remove
    std::vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps[0]));
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
                                        &my_snaps[0], my_snaps.size()));
    char buf2[sizeof(buf)];
    memset(buf2, 0xdd, sizeof(buf2));
    for (int i = 0; i < num_objs; ++i) {
      string obj = string("foo") + std::to_string(i);
      ASSERT_EQ(0, rados_write(ioctx, obj.c_str(), buf2, sizeof(buf2), 0));
    }
  }
  // wait for maps to settle
  ASSERT_EQ(0, rados_wait_for_latest_osdmap(cluster));
  // remove snaps - should trigger snaptrim
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps[snap]));
  }
  // sleep for few secs for the trim stats to populate
  std::cout << "Waiting for snaptrim stats to be generated" << std::endl;
  sleep(30);
  // Dump pg stats and determine if snaptrim stats are getting set
  int objects_trimmed = 0;
  double snaptrim_duration = 0.0;
  int tries = 0;
  do {
    char *reply, *status;
    size_t reply_len, status_len;
    string c = string("{\"prefix\": \"pg dump\",\"format\":\"json\"}");
    const char *cmd = c.c_str();
    ASSERT_EQ(0, rados_mon_command(cluster, (const char **)&cmd, 1, "", 0,
                                   &reply, &reply_len, &status, &status_len));
    string outstr(reply, reply_len);
    // rados_mon_command() transfers ownership of its output buffers to
    // the caller; the original code leaked them on every retry.
    rados_buffer_free(reply);
    rados_buffer_free(status);
    json_spirit::Value v;
    ASSERT_NE(0, json_spirit::read(outstr, v)) << "unable to parse json."
                                               << '\n' << outstr;
    // pg dump object
    json_spirit::Object& obj = v.get_obj();
    get_snaptrim_stats(obj, &objects_trimmed, &snaptrim_duration);
    if (objects_trimmed < num_objs) {
      // trimming may still be in flight; reset the tally and poll again
      tries++;
      objects_trimmed = 0;
      std::cout << "Still waiting for all objects to be trimmed... " <<std::endl;
      sleep(30);
    }
  } while(objects_trimmed < num_objs && tries < 5);
  // final check for objects trimmed
  ASSERT_EQ(objects_trimmed, num_objs);
  std::cout << "Snaptrim duration: " << snaptrim_duration << std::endl;
  ASSERT_GT(snaptrim_duration, 0.0);
  // clean-up remaining objects
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, rados_remove(ioctx, obj.c_str()));
  }
}
// EC testing
// EC-pool variant: removing self-managed snapshots must populate the
// per-PG snaptrim counters reported by "pg dump".
TEST_F(LibRadosSnapshotStatsSelfManagedEC, SnaptrimStats) {
  int num_objs = 10;
  int bsize = alignment;
  // RAII buffer: the original raw new[]/delete[] leaked on any early
  // ASSERT return.
  std::vector<char> buf(bsize, (char)0xcc);
  // create objects
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, rados_write(ioctx, obj.c_str(), buf.data(), bsize, 0));
  }
  std::vector<uint64_t> my_snaps;
  for (int snap = 0; snap < 1; ++snap) {
    // create a snapshot, then append to every object so each one gets a
    // clone that snaptrim will later have to remove
    std::vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_create(ioctx, &my_snaps[0]));
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_set_write_ctx(ioctx, my_snaps[0],
                                        &my_snaps[0], my_snaps.size()));
    std::vector<char> buf2(bsize, (char)0xdd);
    for (int i = 0; i < num_objs; ++i) {
      string obj = string("foo") + std::to_string(i);
      ASSERT_EQ(0, rados_write(ioctx, obj.c_str(), buf2.data(), bsize, bsize));
    }
  }
  // wait for maps to settle
  ASSERT_EQ(0, rados_wait_for_latest_osdmap(cluster));
  // remove snaps - should trigger snaptrim
  rados_ioctx_snap_set_read(ioctx, LIBRADOS_SNAP_HEAD);
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    ASSERT_EQ(0, rados_ioctx_selfmanaged_snap_remove(ioctx, my_snaps[snap]));
  }
  // sleep for few secs for the trim stats to populate
  std::cout << "Waiting for snaptrim stats to be generated" << std::endl;
  sleep(30);
  // Dump pg stats and determine if snaptrim stats are getting set
  int objects_trimmed = 0;
  double snaptrim_duration = 0.0;
  int tries = 0;
  do {
    // renamed from the original's "buf"/"st", which shadowed the heap
    // buffer above
    char *reply, *status;
    size_t reply_len, status_len;
    string c = string("{\"prefix\": \"pg dump\",\"format\":\"json\"}");
    const char *cmd = c.c_str();
    // pass "" for the (empty) inbuf, matching the replicated-pool test
    ASSERT_EQ(0, rados_mon_command(cluster, (const char **)&cmd, 1, "", 0,
                                   &reply, &reply_len, &status, &status_len));
    string outstr(reply, reply_len);
    // rados_mon_command() transfers ownership of its output buffers to
    // the caller; the original code leaked them on every retry.
    rados_buffer_free(reply);
    rados_buffer_free(status);
    json_spirit::Value v;
    ASSERT_NE(0, json_spirit::read(outstr, v)) << "unable to parse json."
                                               << '\n' << outstr;
    // pg dump object
    json_spirit::Object& obj = v.get_obj();
    get_snaptrim_stats(obj, &objects_trimmed, &snaptrim_duration);
    if (objects_trimmed != num_objs) {
      // trimming may still be in flight; reset the tally and poll again
      tries++;
      objects_trimmed = 0;
      std::cout << "Still waiting for all objects to be trimmed... " <<std::endl;
      sleep(30);
    }
  } while (objects_trimmed != num_objs && tries < 5);
  // final check for objects trimmed
  ASSERT_EQ(objects_trimmed, num_objs);
  std::cout << "Snaptrim duration: " << snaptrim_duration << std::endl;
  ASSERT_GT(snaptrim_duration, 0.0);
  // clean-up remaining objects
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, rados_remove(ioctx, obj.c_str()));
  }
}
| 11,177 | 32.567568 | 95 |
cc
|
null |
ceph-main/src/test/librados/snapshots_stats_cxx.cc
|
#include <algorithm>
#include <errno.h>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "include/rados.h"
#include "include/rados/librados.hpp"
#include "json_spirit/json_spirit.h"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
using namespace librados;
using std::string;
// C++-API fixture for the snaptrim-stats test: disables the pg autoscaler
// and both scrub kinds via mon commands before the base fixture creates
// its pool, so PG counts and stats stay stable while the test polls
// "pg dump"; TearDown restores both settings.
class LibRadosSnapshotStatsSelfManagedPP : public RadosTestPP {
public:
  LibRadosSnapshotStatsSelfManagedPP() {};
  ~LibRadosSnapshotStatsSelfManagedPP() override {};
protected:
  void SetUp() override {
    // disable pg autoscaler for the tests
    string cmd =
    "{"
      "\"prefix\": \"config set\", "
      "\"who\": \"global\", "
      "\"name\": \"osd_pool_default_pg_autoscale_mode\", "
      "\"value\": \"off\""
    "}";
    std::cout << "Setting pg_autoscaler to 'off'" << std::endl;
    bufferlist inbl;
    bufferlist outbl;
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    // disable scrubs for the test
    cmd = "{\"prefix\": \"osd set\",\"key\":\"noscrub\"}";
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    cmd = "{\"prefix\": \"osd set\",\"key\":\"nodeep-scrub\"}";
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    // Base setup (pool/ioctx creation) runs after the flags are set.
    RadosTestPP::SetUp();
  }
  void TearDown() override {
    // re-enable pg autoscaler
    string cmd =
    "{"
      "\"prefix\": \"config set\", "
      "\"who\": \"global\", "
      "\"name\": \"osd_pool_default_pg_autoscale_mode\", "
      "\"value\": \"on\""
    "}";
    std::cout << "Setting pg_autoscaler to 'on'" << std::endl;
    bufferlist inbl;
    bufferlist outbl;
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    // re-enable scrubs
    cmd = string("{\"prefix\": \"osd unset\",\"key\":\"noscrub\"}");
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    cmd = string("{\"prefix\": \"osd unset\",\"key\":\"nodeep-scrub\"}");
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    RadosTestPP::TearDown();
  }
};
// Erasure-coded C++-API variant of the snaptrim-stats fixture: identical
// flag handling (autoscaler off, scrubs off in SetUp; restored in
// TearDown), but the base fixture creates an EC pool.
class LibRadosSnapshotStatsSelfManagedECPP : public RadosTestECPP {
public:
  LibRadosSnapshotStatsSelfManagedECPP() {};
  ~LibRadosSnapshotStatsSelfManagedECPP() override {};
protected:
  void SetUp() override {
    // disable pg autoscaler for the tests
    string cmd =
    "{"
      "\"prefix\": \"config set\", "
      "\"who\": \"global\", "
      "\"name\": \"osd_pool_default_pg_autoscale_mode\", "
      "\"value\": \"off\""
    "}";
    std::cout << "Setting pg_autoscaler to 'off'" << std::endl;
    bufferlist inbl;
    bufferlist outbl;
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    // disable scrubs for the test
    cmd = string("{\"prefix\": \"osd set\",\"key\":\"noscrub\"}");
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    cmd = string("{\"prefix\": \"osd set\",\"key\":\"nodeep-scrub\"}");
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    // Base setup (EC pool/ioctx creation) runs after the flags are set.
    RadosTestECPP::SetUp();
  }
  void TearDown() override {
    // re-enable pg autoscaler
    string cmd =
    "{"
      "\"prefix\": \"config set\", "
      "\"who\": \"global\", "
      "\"name\": \"osd_pool_default_pg_autoscale_mode\", "
      "\"value\": \"on\""
    "}";
    std::cout << "Setting pg_autoscaler to 'on'" << std::endl;
    bufferlist inbl;
    bufferlist outbl;
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    // re-enable scrubs
    cmd = string("{\"prefix\": \"osd unset\",\"key\":\"noscrub\"}");
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    cmd = string("{\"prefix\": \"osd unset\",\"key\":\"nodeep-scrub\"}");
    ASSERT_EQ(0, s_cluster.mon_command(cmd, inbl, &outbl, NULL));
    RadosTestECPP::TearDown();
  }
};
// Extract the snaptrim counters from a parsed "pg dump" JSON object,
// summing objects_trimmed and snaptrim_duration over every PG into the
// caller-supplied out-parameters.
void get_snaptrim_stats(json_spirit::Object& pg_dump,
                        int *objs_trimmed,
                        double *trim_duration) {
  // Find the "pg_map" member of the dump.
  json_spirit::Object pgmap;
  for (json_spirit::Pair& member : pg_dump) {
    if (member.name_ == "pg_map") {
      pgmap = member.value_.get_obj();
      break;
    }
  }
  // Find the per-PG "pg_stats" array inside pg_map.
  json_spirit::Array pgs;
  for (json_spirit::Pair& member : pgmap) {
    if (member.name_ == "pg_stats") {
      pgs = member.value_.get_array();
      break;
    }
  }
  // Accumulate the two snaptrim fields across all PG entries.
  for (json_spirit::Value& pg : pgs) {
    json_spirit::Object& pg_stat = pg.get_obj();
    for (json_spirit::Pair& stats : pg_stat) {
      if (stats.name_ == "objects_trimmed") {
        *objs_trimmed += stats.value_.get_int();
      }
      if (stats.name_ == "snaptrim_duration") {
        *trim_duration += stats.value_.get_real();
      }
    }
  }
}
const int bufsize = 128;
// Removing self-managed snapshots (C++ API) must populate the per-PG
// snaptrim counters (objects_trimmed / snaptrim_duration) in "pg dump".
TEST_F(LibRadosSnapshotStatsSelfManagedPP, SnaptrimStatsPP) {
  int num_objs = 10;
  // create objects
  char buf[bufsize];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, ioctx.write(obj, bl, sizeof(buf), 0));
  }
  std::vector<uint64_t> my_snaps;
  char buf2[sizeof(buf)];
  memset(buf2, 0xdd, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  for (int snap = 0; snap < 1; ++snap) {
    // create a snapshot, then overwrite every object so each one gets a
    // clone that snaptrim will later have to remove
    std::vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
    ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
    for (int i = 0; i < num_objs; ++i) {
      string obj = string("foo") + std::to_string(i);
      ASSERT_EQ(0, ioctx.write(obj, bl2, sizeof(buf2), 0));
    }
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // remove snaps - should trigger snaptrim
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    // assert the removal; the original ignored this return value while
    // the C-API sibling test checks it
    ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps[snap]));
  }
  // sleep for few secs for the trim stats to populate
  std::cout << "Waiting for snaptrim stats to be generated" << std::endl;
  sleep(30);
  // Dump pg stats and determine if snaptrim stats are getting set
  int objects_trimmed = 0;
  double snaptrim_duration = 0.0;
  int tries = 0;
  do {
    string cmd = string("{\"prefix\": \"pg dump\",\"format\":\"json\"}");
    bufferlist inbl;
    bufferlist outbl;
    ASSERT_EQ(0, cluster.mon_command(cmd, inbl, &outbl, NULL));
    string outstr(outbl.c_str(), outbl.length());
    json_spirit::Value v;
    ASSERT_NE(0, json_spirit::read(outstr, v)) << "unable to parse json." << '\n' << outstr;
    // pg_map
    json_spirit::Object& obj = v.get_obj();
    get_snaptrim_stats(obj, &objects_trimmed, &snaptrim_duration);
    if (objects_trimmed < num_objs) {
      // trimming may still be in flight; reset the tally and poll again
      tries++;
      objects_trimmed = 0;
      std::cout << "Still waiting for all objects to be trimmed... " <<std::endl;
      sleep(30);
    }
  } while(objects_trimmed < num_objs && tries < 5);
  // final check for objects trimmed
  ASSERT_EQ(objects_trimmed, num_objs);
  std::cout << "Snaptrim duration: " << snaptrim_duration << std::endl;
  ASSERT_GT(snaptrim_duration, 0.0);
  // clean-up remaining objects
  ioctx.snap_set_read(librados::SNAP_HEAD);
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, ioctx.remove(obj));
  }
}
// EC testing
// EC-pool C++-API variant: removing self-managed snapshots must populate
// the per-PG snaptrim counters in "pg dump".
TEST_F(LibRadosSnapshotStatsSelfManagedECPP, SnaptrimStatsECPP) {
  int num_objs = 10;
  int bsize = alignment;
  // RAII buffers: the original raw new[]/delete[] leaked on any early
  // ASSERT return.
  std::vector<char> buf(bsize, (char)0xcc);
  bufferlist bl;
  bl.append(buf.data(), bsize);
  // create objects
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, ioctx.write(obj, bl, bsize, 0));
  }
  std::vector<uint64_t> my_snaps;
  std::vector<char> buf2(bsize, (char)0xdd);
  bufferlist bl2;
  bl2.append(buf2.data(), bsize);
  for (int snap = 0; snap < 1; ++snap) {
    // create a snapshot, then append to every object so each one gets a
    // clone that snaptrim will later have to remove
    std::vector<uint64_t> ns(1);
    ns.insert(ns.end(), my_snaps.begin(), my_snaps.end());
    my_snaps.swap(ns);
    ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps[0]));
    ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
    for (int i = 0; i < num_objs; ++i) {
      string obj = string("foo") + std::to_string(i);
      ASSERT_EQ(0, ioctx.write(obj, bl2, bsize, bsize));
    }
  }
  // wait for maps to settle
  cluster.wait_for_latest_osdmap();
  // remove snaps - should trigger snaptrim
  for (unsigned snap = 0; snap < my_snaps.size(); ++snap) {
    // assert the removal; the original ignored this return value while
    // the C-API sibling test checks it
    ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps[snap]));
  }
  // sleep for few secs for the trim stats to populate
  std::cout << "Waiting for snaptrim stats to be generated" << std::endl;
  sleep(30);
  // Dump pg stats and determine if snaptrim stats are getting set
  int objects_trimmed = 0;
  double snaptrim_duration = 0.0;
  int tries = 0;
  do {
    string cmd = string("{\"prefix\": \"pg dump\",\"format\":\"json\"}");
    bufferlist inbl;
    bufferlist outbl;
    ASSERT_EQ(0, cluster.mon_command(cmd, inbl, &outbl, NULL));
    string outstr(outbl.c_str(), outbl.length());
    json_spirit::Value v;
    ASSERT_NE(0, json_spirit::read(outstr, v)) << "unable to parse json." << '\n' << outstr;
    // pg_map
    json_spirit::Object& obj = v.get_obj();
    get_snaptrim_stats(obj, &objects_trimmed, &snaptrim_duration);
    if (objects_trimmed < num_objs) {
      // trimming may still be in flight; reset the tally and poll again
      tries++;
      objects_trimmed = 0;
      std::cout << "Still waiting for all objects to be trimmed... " <<std::endl;
      sleep(30);
    }
  } while(objects_trimmed < num_objs && tries < 5);
  // final check for objects trimmed
  ASSERT_EQ(objects_trimmed, num_objs);
  std::cout << "Snaptrim duration: " << snaptrim_duration << std::endl;
  ASSERT_GT(snaptrim_duration, 0.0);
  // clean-up remaining objects
  ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
  for (int i = 0; i < num_objs; ++i) {
    string obj = string("foo") + std::to_string(i);
    ASSERT_EQ(0, ioctx.remove(obj));
  }
}
| 10,251 | 30.544615 | 92 |
cc
|
null |
ceph-main/src/test/librados/stat.cc
|
#include "include/rados/librados.h"
#include "test/librados/test.h"
#include "test/librados/TestCase.h"
#include "common/ceph_time.h"
#include <algorithm>
#include <errno.h>
#include "gtest/gtest.h"
#include "crimson_utils.h"
typedef RadosTest LibRadosStat;
typedef RadosTestEC LibRadosStatEC;
// rados_stat() reports the written size for an existing object and
// -ENOENT for a missing one.
TEST_F(LibRadosStat, Stat) {
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  uint64_t size = 0;
  time_t mtime = 0;
  ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
  ASSERT_EQ(sizeof(payload), size);
  ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
}
// rados_write_op_operate2() lets the caller pin the object's mtime with
// nanosecond precision; rados_stat() reports whole seconds, while
// rados_stat2() returns the full timespec.
TEST_F(LibRadosStat, Stat2) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  rados_write_op_t op = rados_create_write_op();
  rados_write_op_write(op, buf, sizeof(buf), 0);
  struct timespec ts;
  ts.tv_sec = 1457129052;
  ts.tv_nsec = 123456789;
  // Apply the write with an explicit mtime.
  ASSERT_EQ(0, rados_write_op_operate2(op, ioctx, "foo", &ts, 0));
  rados_release_write_op(op);
  uint64_t size = 0;
  time_t mtime = 0;
  ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf), size);
  // Legacy stat() exposes the seconds component only.
  ASSERT_EQ(mtime, ts.tv_sec);
  struct timespec ts2 = {};
  ASSERT_EQ(0, rados_stat2(ioctx, "foo", &size, &ts2));
  ASSERT_EQ(sizeof(buf), size);
  // stat2() preserves the nanosecond component as well.
  ASSERT_EQ(ts2.tv_sec, ts.tv_sec);
  ASSERT_EQ(ts2.tv_nsec, ts.tv_nsec);
  ASSERT_EQ(-ENOENT, rados_stat2(ioctx, "nonexistent", &size, &ts2));
}
// Objects in different namespaces are independent: the same object name
// stats to different sizes per namespace, and an object written only in
// one namespace is -ENOENT in the other.
TEST_F(LibRadosStat, StatNS) {
  // "foo" (128 bytes) and "foo2" exist in the default namespace.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_write(ioctx, "foo2", buf, sizeof(buf), 0));
  // A smaller "foo" (64 bytes) exists only in namespace "nspace".
  char buf2[64];
  memset(buf2, 0xcc, sizeof(buf2));
  rados_ioctx_set_namespace(ioctx, "nspace");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
  uint64_t size = 0;
  time_t mtime = 0;
  // Default namespace sees the 128-byte "foo".
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf), size);
  ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
  // "nspace" sees the 64-byte "foo" and has no "foo2" at all.
  rados_ioctx_set_namespace(ioctx, "nspace");
  ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf2), size);
  ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
  ASSERT_EQ(-ENOENT, rados_stat(ioctx, "foo2", &size, &mtime));
}
// Cluster-wide stats should be retrievable without error.
TEST_F(LibRadosStat, ClusterStat) {
  struct rados_cluster_stat_t stats;
  ASSERT_EQ(0, rados_cluster_stat(cluster, &stats));
}
// The ioctx reports the pool name it was created for, and per-pool stats
// are retrievable after a write.
TEST_F(LibRadosStat, PoolStat) {
  char buf[128];
  // get_pool_name() returns the name length; verify both length and text.
  char actual_pool_name[80];
  unsigned l = rados_ioctx_get_pool_name(ioctx, actual_pool_name, sizeof(actual_pool_name));
  ASSERT_EQ(strlen(actual_pool_name), l);
  ASSERT_EQ(0, strcmp(actual_pool_name, pool_name.c_str()));
  memset(buf, 0xff, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  struct rados_pool_stat_t stats;
  memset(&stats, 0, sizeof(stats));
  ASSERT_EQ(0, rados_ioctx_pool_stat(ioctx, &stats));
}
// EC-pool variant: rados_stat() reports the written size for an existing
// object and -ENOENT for a missing one.
TEST_F(LibRadosStatEC, Stat) {
  SKIP_IF_CRIMSON();
  char payload[128];
  memset(payload, 0xcc, sizeof(payload));
  ASSERT_EQ(0, rados_write(ioctx, "foo", payload, sizeof(payload), 0));
  uint64_t size = 0;
  time_t mtime = 0;
  ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
  ASSERT_EQ(sizeof(payload), size);
  ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
}
// EC-pool variant of StatNS: namespaces isolate objects, so identical
// names stat to different sizes per namespace and objects written only in
// one namespace are -ENOENT in the other.
TEST_F(LibRadosStatEC, StatNS) {
  SKIP_IF_CRIMSON();
  // "foo" (128 bytes) and "foo2" exist in the default namespace.
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  ASSERT_EQ(0, rados_write(ioctx, "foo2", buf, sizeof(buf), 0));
  // A smaller "foo" (64 bytes) exists only in namespace "nspace".
  char buf2[64];
  memset(buf2, 0xcc, sizeof(buf2));
  rados_ioctx_set_namespace(ioctx, "nspace");
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf2, sizeof(buf2), 0));
  uint64_t size = 0;
  time_t mtime = 0;
  // Default namespace sees the 128-byte "foo".
  rados_ioctx_set_namespace(ioctx, "");
  ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf), size);
  ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
  // "nspace" sees the 64-byte "foo" and has no "foo2" at all.
  rados_ioctx_set_namespace(ioctx, "nspace");
  ASSERT_EQ(0, rados_stat(ioctx, "foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf2), size);
  ASSERT_EQ(-ENOENT, rados_stat(ioctx, "nonexistent", &size, &mtime));
  ASSERT_EQ(-ENOENT, rados_stat(ioctx, "foo2", &size, &mtime));
}
// EC-pool variant: cluster-wide stats should be retrievable without error.
TEST_F(LibRadosStatEC, ClusterStat) {
  SKIP_IF_CRIMSON();
  struct rados_cluster_stat_t stats;
  ASSERT_EQ(0, rados_cluster_stat(cluster, &stats));
}
// EC-pool variant: the ioctx reports its pool name, and per-pool stats
// are retrievable after a write.
TEST_F(LibRadosStatEC, PoolStat) {
  SKIP_IF_CRIMSON();
  char buf[128];
  // get_pool_name() returns the name length; verify both length and text.
  char actual_pool_name[80];
  unsigned l = rados_ioctx_get_pool_name(ioctx, actual_pool_name, sizeof(actual_pool_name));
  ASSERT_EQ(strlen(actual_pool_name), l);
  ASSERT_EQ(0, strcmp(actual_pool_name, pool_name.c_str()));
  memset(buf, 0xff, sizeof(buf));
  ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
  struct rados_pool_stat_t stats;
  memset(&stats, 0, sizeof(stats));
  ASSERT_EQ(0, rados_ioctx_pool_stat(ioctx, &stats));
}
| 4,992 | 31.422078 | 92 |
cc
|
null |
ceph-main/src/test/librados/stat_cxx.cc
|
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
#include "test/librados/test_cxx.h"
#include "test/librados/testcase_cxx.h"
#include "crimson_utils.h"
using namespace librados;
typedef RadosTestPP LibRadosStatPP;
typedef RadosTestECPP LibRadosStatECPP;
// C++ API: stat returns the written size for an existing object and
// -ENOENT for a missing one.
TEST_F(LibRadosStatPP, StatPP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  uint64_t size;
  time_t mtime;
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf), size);
  ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
}
// C++ API: an explicit mtime2() set on a write operation must be reported
// back by stat (whole seconds) and stat2 (full timespec, ns precision).
TEST_F(LibRadosStatPP, Stat2Mtime2PP) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  librados::ObjectWriteOperation op;
  struct timespec ts;
  ts.tv_sec = 1457129052;
  ts.tv_nsec = 123456789;
  op.mtime2(&ts);
  op.write(0, bl);
  ASSERT_EQ(0, ioctx.operate("foo", &op));
  /* XXX time comparison asserts could spuriously fail */
  uint64_t size;
  time_t mtime;
  // stat reports the mtime truncated to whole seconds.
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf), size);
  ASSERT_EQ(mtime, ts.tv_sec);
  // stat2 must preserve the nanosecond component as well.
  struct timespec ts2;
  ASSERT_EQ(0, ioctx.stat2("foo", &size, &ts2));
  ASSERT_EQ(sizeof(buf), size);
  ASSERT_EQ(ts2.tv_sec, ts.tv_sec);
  ASSERT_EQ(ts2.tv_nsec, ts.tv_nsec);
  ASSERT_EQ(-ENOENT, ioctx.stat2("nonexistent", &size, &ts2));
}
// C++ API: cluster_stat succeeds on a connected cluster (return code only).
TEST_F(LibRadosStatPP, ClusterStatPP) {
  cluster_stat_t cstat;
  ASSERT_EQ(0, cluster.cluster_stat(cstat));
}
// C++ API: pool-name round trip and get_pool_stats after a write.
TEST_F(LibRadosStatPP, PoolStatPP) {
  std::string n = ioctx.get_pool_name();
  ASSERT_EQ(n, pool_name);
  char buf[128];
  memset(buf, 0xff, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  // Empty pool list requests stats for all pools; contents not validated.
  std::list<std::string> v;
  std::map<std::string,stats_map> stats;
  ASSERT_EQ(0, cluster.get_pool_stats(v, stats));
}
// EC pool, C++ API: same contract as LibRadosStatPP.StatPP — stat returns
// the written size for an existing object and -ENOENT for a missing one.
TEST_F(LibRadosStatECPP, StatPP) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  uint64_t size;
  time_t mtime;
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf), size);
  ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
}
// EC pool, C++ API: cluster_stat succeeds (return code only).
TEST_F(LibRadosStatECPP, ClusterStatPP) {
  SKIP_IF_CRIMSON();
  cluster_stat_t cstat;
  ASSERT_EQ(0, cluster.cluster_stat(cstat));
}
// EC pool, C++ API: pool-name round trip and get_pool_stats after a write.
TEST_F(LibRadosStatECPP, PoolStatPP) {
  SKIP_IF_CRIMSON();
  std::string n = ioctx.get_pool_name();
  ASSERT_EQ(n, pool_name);
  char buf[128];
  memset(buf, 0xff, sizeof(buf));
  bufferlist bl1;
  bl1.append(buf, sizeof(buf));
  ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
  // Empty pool list requests stats for all pools; contents not validated.
  std::list<std::string> v;
  std::map<std::string,stats_map> stats;
  ASSERT_EQ(0, cluster.get_pool_stats(v, stats));
}
// C++ API: objects with the same oid in different namespaces are
// independent — stat in each namespace must report that namespace's size,
// and objects from the other namespace must not be visible.
TEST_F(LibRadosStatPP, StatPPNS) {
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  // Write "foo" (128 bytes) and "foo2" in the default namespace.
  ioctx.set_namespace("");
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo2", bl, sizeof(buf), 0));
  // Write a smaller "foo" (64 bytes) in "nspace".
  char buf2[64];
  memset(buf2, 0xbb, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ioctx.set_namespace("nspace");
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
  uint64_t size;
  time_t mtime;
  // Default namespace must report the 128-byte "foo".
  ioctx.set_namespace("");
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf), size);
  ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
  // "nspace" must report the 64-byte "foo" and must not see "foo2".
  ioctx.set_namespace("nspace");
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf2), size);
  ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
  ASSERT_EQ(-ENOENT, ioctx.stat("foo2", &size, &mtime));
}
// EC pool, C++ API: same namespace-isolation contract as
// LibRadosStatPP.StatPPNS, run against an erasure-coded pool.
TEST_F(LibRadosStatECPP, StatPPNS) {
  SKIP_IF_CRIMSON();
  char buf[128];
  memset(buf, 0xcc, sizeof(buf));
  bufferlist bl;
  bl.append(buf, sizeof(buf));
  // Write "foo" (128 bytes) and "foo2" in the default namespace.
  ioctx.set_namespace("");
  ASSERT_EQ(0, ioctx.write("foo", bl, sizeof(buf), 0));
  ASSERT_EQ(0, ioctx.write("foo2", bl, sizeof(buf), 0));
  // Write a smaller "foo" (64 bytes) in "nspace".
  char buf2[64];
  memset(buf2, 0xbb, sizeof(buf2));
  bufferlist bl2;
  bl2.append(buf2, sizeof(buf2));
  ioctx.set_namespace("nspace");
  ASSERT_EQ(0, ioctx.write("foo", bl2, sizeof(buf2), 0));
  uint64_t size;
  time_t mtime;
  // Default namespace must report the 128-byte "foo".
  ioctx.set_namespace("");
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf), size);
  ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
  // "nspace" must report the 64-byte "foo" and must not see "foo2".
  ioctx.set_namespace("nspace");
  ASSERT_EQ(0, ioctx.stat("foo", &size, &mtime));
  ASSERT_EQ(sizeof(buf2), size);
  ASSERT_EQ(-ENOENT, ioctx.stat("nonexistent", &size, &mtime));
  ASSERT_EQ(-ENOENT, ioctx.stat("foo2", &size, &mtime));
}
| 4,718 | 26.923077 | 63 |
cc
|
null |
ceph-main/src/test/librados/test.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "test/librados/test.h"
#include "include/stringify.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include <errno.h>
#include <sstream>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <unistd.h>
#include <iostream>
#include "gtest/gtest.h"
// Connect to the cluster and create a replicated pool named `pool_name`.
// On success stores the connected handle in *cluster and returns "";
// on failure shuts the handle down and returns an error description.
// NOTE(review): the pg_num parameter is unused by this implementation —
// the pool is created with default pg settings; confirm intended.
std::string create_one_pool(
    const std::string &pool_name, rados_t *cluster, uint32_t pg_num)
{
  std::string err_str = connect_cluster(cluster);
  if (err_str.length())
    return err_str;
  int ret = rados_pool_create(*cluster, pool_name.c_str());
  if (ret) {
    rados_shutdown(*cluster);
    std::ostringstream oss;
    oss << "create_one_pool(" << pool_name << ") failed with error " << ret;
    return oss.str();
  }
  rados_ioctx_t ioctx;
  ret = rados_ioctx_create(*cluster, pool_name.c_str(), &ioctx);
  if (ret < 0) {
    rados_shutdown(*cluster);
    std::ostringstream oss;
    oss << "rados_ioctx_create(" << pool_name << ") failed with error " << ret;
    return oss.str();
  }
  // Tag the pool for the "rados" application; the return value is ignored.
  rados_application_enable(ioctx, "rados", 1);
  rados_ioctx_destroy(ioctx);
  return "";
}
// Remove the "testprofile-<pool_name>" erasure-code profile via a mon
// command. On failure logs a description to `oss`; returns the mon-command
// return code (0 on success).
int destroy_ec_profile(rados_t *cluster,
                       const std::string& pool_name,
                       std::ostream &oss)
{
  // Build the JSON command with std::string: avoids the silent-truncation
  // risk of the previous fixed-size snprintf buffer and matches how
  // destroy_rule() constructs its command.
  std::string cmdstr =
    "{\"prefix\": \"osd erasure-code-profile rm\", \"name\": \"testprofile-" +
    pool_name + "\"}";
  char *cmd[2];
  cmd[0] = (char *)cmdstr.c_str();
  cmd[1] = NULL;
  int ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL,
                              0, NULL, 0);
  if (ret)
    oss << "rados_mon_command: erasure-code-profile rm testprofile-"
        << pool_name << " failed with error " << ret;
  return ret;
}
// Remove the named crush rule via a mon command. On failure logs a
// description to `oss`; returns the mon-command return code (0 on success).
int destroy_rule(rados_t *cluster,
                 const std::string &rule,
                 std::ostream &oss)
{
  const std::string json =
    "{\"prefix\": \"osd crush rule rm\", \"name\":\"" + rule + "\"}";
  char *cmd[2] = { (char*)json.c_str(), NULL };
  const int ret =
    rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0);
  if (ret) {
    oss << "rados_mon_command: osd crush rule rm " + rule + " failed with error " << ret;
  }
  return ret;
}
// Remove the test EC profile and then the crush rule named `rule`,
// stopping at (and returning) the first error.
int destroy_ec_profile_and_rule(rados_t *cluster,
                                const std::string &rule,
                                std::ostream &oss)
{
  const int ret = destroy_ec_profile(cluster, rule, oss);
  return ret ? ret : destroy_rule(cluster, rule, oss);
}
// Connect and create an erasure-coded pool backed by a freshly created
// "testprofile-<pool_name>" EC profile (k=2, m=1, crush-failure-domain=osd).
// Returns "" on success; otherwise shuts the handle down and returns an
// error description.
std::string create_one_ec_pool(const std::string &pool_name, rados_t *cluster)
{
  std::string err = connect_cluster(cluster);
  if (err.length())
    return err;
  // Clear any profile/rule left over from a previous run of this test.
  std::ostringstream oss;
  int ret = destroy_ec_profile_and_rule(cluster, pool_name, oss);
  if (ret) {
    rados_shutdown(*cluster);
    return oss.str();
  }
  char *cmd[2];
  cmd[1] = NULL;
  // Create the EC profile the pool will reference.
  std::string profile_create = "{\"prefix\": \"osd erasure-code-profile set\", \"name\": \"testprofile-" + pool_name + "\", \"profile\": [ \"k=2\", \"m=1\", \"crush-failure-domain=osd\"]}";
  cmd[0] = (char *)profile_create.c_str();
  ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0);
  if (ret) {
    rados_shutdown(*cluster);
    oss << "rados_mon_command erasure-code-profile set name:testprofile-" << pool_name << " failed with error " << ret;
    return oss.str();
  }
  // Create the erasure pool itself (8 PGs) using that profile.
  std::string cmdstr = "{\"prefix\": \"osd pool create\", \"pool\": \"" +
     pool_name + "\", \"pool_type\":\"erasure\", \"pg_num\":8, \"pgp_num\":8, \"erasure_code_profile\":\"testprofile-" + pool_name + "\"}";
  cmd[0] = (char *)cmdstr.c_str();
  ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL, 0, NULL, 0);
  if (ret) {
    // Pool creation failed: best-effort removal of the profile just created.
    destroy_ec_profile(cluster, pool_name, oss);
    rados_shutdown(*cluster);
    oss << "rados_mon_command osd pool create failed with error " << ret;
    return oss.str();
  }
  // Ensure callers immediately see the new pool in their osdmap.
  rados_wait_for_latest_osdmap(*cluster);
  return "";
}
// Create a rados handle, read the default conf file, apply environment
// overrides and connect. Returns "" on success; on failure shuts the
// handle down and returns an error description.
// NOTE(review): CEPH_CLIENT_ID is only logged here — rados_create is
// called with NULL, while the C++ variant passes the id to init();
// confirm this asymmetry is intended.
std::string connect_cluster(rados_t *cluster)
{
  char *id = getenv("CEPH_CLIENT_ID");
  if (id) std::cerr << "Client id is: " << id << std::endl;
  int ret;
  ret = rados_create(cluster, NULL);
  if (ret) {
    std::ostringstream oss;
    oss << "rados_create failed with error " << ret;
    return oss.str();
  }
  ret = rados_conf_read_file(*cluster, NULL);
  if (ret) {
    rados_shutdown(*cluster);
    std::ostringstream oss;
    oss << "rados_conf_read_file failed with error " << ret;
    return oss.str();
  }
  rados_conf_parse_env(*cluster, NULL);
  ret = rados_connect(*cluster);
  if (ret) {
    rados_shutdown(*cluster);
    std::ostringstream oss;
    oss << "rados_connect failed with error " << ret;
    return oss.str();
  }
  return "";
}
// Delete the pool and always release the cluster handle afterwards.
// Returns the rados_pool_delete result (0 on success).
int destroy_one_pool(const std::string &pool_name, rados_t *cluster)
{
  const int ret = rados_pool_delete(*cluster, pool_name.c_str());
  rados_shutdown(*cluster);
  return ret;
}
// Delete an EC pool plus its "testprofile-<pool_name>" profile and rule,
// then shut the handle down. Returns 0 or the first error encountered.
int destroy_one_ec_pool(const std::string &pool_name, rados_t *cluster)
{
  int ret = rados_pool_delete(*cluster, pool_name.c_str());
  if (ret) {
    rados_shutdown(*cluster);
    return ret;
  }
  // Skip profile/rule cleanup when pool deletes are faked (they may still
  // be referenced by the not-really-deleted pool).
  CephContext *cct = static_cast<CephContext*>(rados_cct(*cluster));
  if (!cct->_conf->mon_fake_pool_delete) { // hope this is in [global]
    std::ostringstream oss;
    ret = destroy_ec_profile_and_rule(cluster, pool_name, oss);
    if (ret) {
      rados_shutdown(*cluster);
      return ret;
    }
  }
  rados_wait_for_latest_osdmap(*cluster);
  rados_shutdown(*cluster);
  return ret;
}
| 5,555 | 26.919598 | 189 |
cc
|
null |
ceph-main/src/test/librados/test.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_TEST_RADOS_API_TEST_H
#define CEPH_TEST_RADOS_API_TEST_H
#include "include/rados/librados.h"
#include "test/librados/test_shared.h"
#include <map>
#include <string>
#include <unistd.h>
std::string create_one_pool(const std::string &pool_name, rados_t *cluster,
uint32_t pg_num=0);
std::string create_one_ec_pool(const std::string &pool_name, rados_t *cluster);
std::string connect_cluster(rados_t *cluster);
int destroy_one_pool(const std::string &pool_name, rados_t *cluster);
int destroy_one_ec_pool(const std::string &pool_name, rados_t *cluster);
#endif
| 988 | 28.969697 | 79 |
h
|
null |
ceph-main/src/test/librados/test_common.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Formatter.h"
#include "include/stringify.h"
#include "json_spirit/json_spirit.h"
#include "test_common.h"
using namespace std;
namespace {
using namespace ceph;
// Poll `ceph status` (JSON) until every PG is active+clean.
// Returns 0 once healthy, -ETIMEDOUT after ~1 hour of polling, or the
// error from a failed mon command.
int wait_for_healthy(rados_t *cluster)
{
  bool healthy = false;
  // This timeout is very long because the tests are sometimes
  // run on a thrashing cluster
  int timeout = 3600;
  int slept = 0;
  while(!healthy) {
    // Build {"prefix": "status", "format": "json"}.
    JSONFormatter cmd_f;
    cmd_f.open_object_section("command");
    cmd_f.dump_string("prefix", "status");
    cmd_f.dump_string("format", "json");
    cmd_f.close_section();
    std::ostringstream cmd_stream;
    cmd_f.flush(cmd_stream);
    const std::string serialized_cmd = cmd_stream.str();
    const char *cmd[2];
    cmd[1] = NULL;
    cmd[0] = serialized_cmd.c_str();
    char *outbuf = NULL;
    size_t outlen = 0;
    int ret = rados_mon_command(*cluster, (const char **)cmd, 1, "", 0,
        &outbuf, &outlen, NULL, NULL);
    if (ret) {
      return ret;
    }
    std::string out(outbuf, outlen);
    rados_buffer_free(outbuf);
    json_spirit::mValue root;
    [[maybe_unused]] bool json_parse_success = json_spirit::read(out, root);
    ceph_assert(json_parse_success);
    json_spirit::mObject root_obj = root.get_obj();
    json_spirit::mObject pgmap = root_obj["pgmap"].get_obj();
    json_spirit::mArray pgs_by_state = pgmap["pgs_by_state"].get_array();
    // Healthy iff there is exactly one PG state bucket and it is
    // "active+clean".
    if (pgs_by_state.size() == 1) {
      json_spirit::mObject state = pgs_by_state[0].get_obj();
      std::string state_name = state["state_name"].get_str();
      if (state_name != std::string("active+clean")) {
        healthy = false;
      } else {
        healthy = true;
      }
    } else {
      healthy = false;
    }
    if (slept >= timeout) {
      return -ETIMEDOUT;
    };
    if (!healthy) {
      sleep(1);
      slept += 1;
    }
  }
  return 0;
}
// Issue "osd pool set <pool> <var> <val>" as a JSON mon command and
// return the mon-command result (0 on success).
int rados_pool_set(
    rados_t *cluster,
    const std::string &pool_name,
    const std::string &var,
    const std::string &val)
{
  // Serialize the command object.
  JSONFormatter fmt;
  fmt.open_object_section("command");
  fmt.dump_string("prefix", "osd pool set");
  fmt.dump_string("pool", pool_name);
  fmt.dump_string("var", var);
  fmt.dump_string("val", val);
  fmt.close_section();
  std::ostringstream ss;
  fmt.flush(ss);
  const std::string serialized = ss.str();
  const char *cmd[2] = { serialized.c_str(), NULL };
  return rados_mon_command(*cluster, (const char **)cmd, 1, "", 0, NULL,
                           NULL, NULL, NULL);
}
// Exception describing a failed pool-management helper call; the message
// has the form "<func_name>(<pool_name>) failed with error <err>".
struct pool_op_error : std::exception {
  std::string msg;
  pool_op_error(const std::string& pool_name,
                const std::string& func_name,
                int err)
    : msg(func_name + "(" + pool_name + ") failed with error " +
          std::to_string(err)) {}
  const char* what() const noexcept override { return msg.c_str(); }
};
template<typename Func>
std::string with_healthy_cluster(rados_t* cluster,
const std::string& pool_name,
Func&& func)
{
try {
// Wait for 'creating/backfilling' to clear
if (int r = wait_for_healthy(cluster); r != 0) {
throw pool_op_error{pool_name, "wait_for_healthy", r};
}
func();
// Wait for 'creating/backfilling' to clear
if (int r = wait_for_healthy(cluster); r != 0) {
throw pool_op_error{pool_name, "wait_for_healthy", r};
}
} catch (const pool_op_error& e) {
return e.what();
}
return "";
}
}
// Set the pool's pg_num, waiting for a healthy cluster before and after.
// Returns "" on success or an error description.
std::string set_pg_num(
    rados_t *cluster, const std::string &pool_name, uint32_t pg_num)
{
  return with_healthy_cluster(cluster, pool_name, [&] {
    const int r = rados_pool_set(cluster, pool_name, "pg_num",
                                 stringify(pg_num));
    if (r != 0) {
      throw pool_op_error{pool_name, "set_pg_num", r};
    }
  });
}
// Set the pool's pgp_num, waiting for a healthy cluster before and after.
// Returns "" on success or an error description.
std::string set_pgp_num(
    rados_t *cluster, const std::string &pool_name, uint32_t pgp_num)
{
  return with_healthy_cluster(cluster, pool_name, [&] {
    const int r = rados_pool_set(cluster, pool_name, "pgp_num",
                                 stringify(pgp_num));
    if (r != 0) {
      throw pool_op_error{pool_name, "set_pgp_num", r};
    }
  });
}
| 4,246 | 24.279762 | 76 |
cc
|
null |
ceph-main/src/test/librados/test_common.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.h"
std::string set_pg_num(
rados_t *cluster, const std::string &pool_name, uint32_t pg_num);
std::string set_pgp_num(
rados_t *cluster, const std::string &pool_name, uint32_t pgp_num);
| 325 | 31.6 | 70 |
h
|
null |
ceph-main/src/test/librados/test_cxx.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#include "test_cxx.h"
#include "include/stringify.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include <errno.h>
#include <sstream>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <unistd.h>
#include <iostream>
#include "gtest/gtest.h"
using namespace librados;
// Convenience overload: create a pool with no extra config overrides.
std::string create_one_pool_pp(const std::string &pool_name, Rados &cluster)
{
  return create_one_pool_pp(pool_name, cluster, {});
}
// Connect (applying `config` overrides) and create a replicated pool.
// Returns "" on success; on failure shuts the cluster down and returns an
// error description.
std::string create_one_pool_pp(const std::string &pool_name, Rados &cluster,
                               const std::map<std::string, std::string> &config)
{
  std::string err = connect_cluster_pp(cluster, config);
  if (err.length())
    return err;
  int ret = cluster.pool_create(pool_name.c_str());
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.pool_create(" << pool_name << ") failed with error " << ret;
    return oss.str();
  }
  IoCtx ioctx;
  ret = cluster.ioctx_create(pool_name.c_str(), ioctx);
  if (ret < 0) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.ioctx_create(" << pool_name << ") failed with error "
        << ret;
    return oss.str();
  }
  // Tag the pool for the "rados" application; the return value is ignored.
  ioctx.application_enable("rados", true);
  return "";
}
// Remove the named crush rule via a mon command. On failure logs a
// description to `oss`; returns the mon-command return code (0 on success).
int destroy_rule_pp(Rados &cluster,
                    const std::string &rule,
                    std::ostream &oss)
{
  const std::string json =
    "{\"prefix\": \"osd crush rule rm\", \"name\":\"" + rule + "\"}";
  bufferlist inbl;
  const int ret = cluster.mon_command(json, inbl, NULL, NULL);
  if (ret) {
    oss << "mon_command: osd crush rule rm " + rule + " failed with error " << ret << std::endl;
  }
  return ret;
}
// Remove the "testprofile-<pool_name>" erasure-code profile.
// On failure logs a description to `oss`; returns the mon-command
// return code (0 on success).
int destroy_ec_profile_pp(Rados &cluster, const std::string& pool_name,
                          std::ostream &oss)
{
  bufferlist inbl;
  int ret = cluster.mon_command("{\"prefix\": \"osd erasure-code-profile rm\", \"name\": \"testprofile-" + pool_name + "\"}",
                                inbl, NULL, NULL);
  if (ret)
    oss << "mon_command: osd erasure-code-profile rm testprofile-" << pool_name << " failed with error " << ret << std::endl;
  return ret;
}
// Remove the test EC profile and then the crush rule named `rule`,
// stopping at (and returning) the first error.
int destroy_ec_profile_and_rule_pp(Rados &cluster,
                                   const std::string &rule,
                                   std::ostream &oss)
{
  const int ret = destroy_ec_profile_pp(cluster, rule, oss);
  return ret ? ret : destroy_rule_pp(cluster, rule, oss);
}
// Connect and create an erasure-coded pool backed by a freshly created
// "testprofile-<pool_name>" EC profile (k=2, m=1, crush-failure-domain=osd).
// Returns "" on success; otherwise shuts the cluster down and returns an
// error description.
std::string create_one_ec_pool_pp(const std::string &pool_name, Rados &cluster)
{
  std::string err = connect_cluster_pp(cluster);
  if (err.length())
    return err;
  // Clear any profile/rule left over from a previous run of this test.
  std::ostringstream oss;
  int ret = destroy_ec_profile_and_rule_pp(cluster, pool_name, oss);
  if (ret) {
    cluster.shutdown();
    return oss.str();
  }
  bufferlist inbl;
  // Create the EC profile the pool will reference.
  ret = cluster.mon_command(
    "{\"prefix\": \"osd erasure-code-profile set\", \"name\": \"testprofile-" + pool_name + "\", \"profile\": [ \"k=2\", \"m=1\", \"crush-failure-domain=osd\"]}",
    inbl, NULL, NULL);
  if (ret) {
    cluster.shutdown();
    oss << "mon_command erasure-code-profile set name:testprofile-" << pool_name << " failed with error " << ret;
    return oss.str();
  }
  // Create the erasure pool itself (8 PGs) using that profile.
  ret = cluster.mon_command(
    "{\"prefix\": \"osd pool create\", \"pool\": \"" + pool_name + "\", \"pool_type\":\"erasure\", \"pg_num\":8, \"pgp_num\":8, \"erasure_code_profile\":\"testprofile-" + pool_name + "\"}",
    inbl, NULL, NULL);
  if (ret) {
    // (Fixed: a shadowing, unused `bufferlist inbl;` declared here was
    // removed.) Pool creation failed: best-effort profile cleanup.
    destroy_ec_profile_pp(cluster, pool_name, oss);
    cluster.shutdown();
    oss << "mon_command osd pool create pool:" << pool_name << " pool_type:erasure failed with error " << ret;
    return oss.str();
  }
  // Ensure callers immediately see the new pool in their osdmap.
  cluster.wait_for_latest_osdmap();
  return "";
}
// Convenience overload: connect with no extra config overrides.
std::string connect_cluster_pp(librados::Rados &cluster)
{
  return connect_cluster_pp(cluster, {});
}
// Initialize the handle (client id taken from CEPH_CLIENT_ID if set),
// read the default conf file, apply environment and explicit `config`
// overrides, then connect. Returns "" on success; on failure shuts the
// cluster down (where a handle exists) and returns an error description.
std::string connect_cluster_pp(librados::Rados &cluster,
                               const std::map<std::string, std::string> &config)
{
  char *id = getenv("CEPH_CLIENT_ID");
  if (id) std::cerr << "Client id is: " << id << std::endl;
  int ret;
  ret = cluster.init(id);
  if (ret) {
    std::ostringstream oss;
    oss << "cluster.init failed with error " << ret;
    return oss.str();
  }
  ret = cluster.conf_read_file(NULL);
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.conf_read_file failed with error " << ret;
    return oss.str();
  }
  cluster.conf_parse_env(NULL);
  // Apply per-test config overrides after file/env so they win.
  for (auto &setting : config) {
    ret = cluster.conf_set(setting.first.c_str(), setting.second.c_str());
    if (ret) {
      std::ostringstream oss;
      oss << "failed to set config value " << setting.first << " to '"
          << setting.second << "': " << strerror(-ret);
      return oss.str();
    }
  }
  ret = cluster.connect();
  if (ret) {
    cluster.shutdown();
    std::ostringstream oss;
    oss << "cluster.connect failed with error " << ret;
    return oss.str();
  }
  return "";
}
// Delete the pool and always shut the cluster handle down afterwards.
// Returns the pool_delete result (0 on success).
int destroy_one_pool_pp(const std::string &pool_name, Rados &cluster)
{
  const int ret = cluster.pool_delete(pool_name.c_str());
  cluster.shutdown();
  return ret;
}
// Delete an EC pool plus its "testprofile-<pool_name>" profile and rule,
// then shut the cluster down. Returns 0 or the first error encountered.
int destroy_one_ec_pool_pp(const std::string &pool_name, Rados &cluster)
{
  int ret = cluster.pool_delete(pool_name.c_str());
  if (ret) {
    cluster.shutdown();
    return ret;
  }
  // Skip profile/rule cleanup when pool deletes are faked (they may still
  // be referenced by the not-really-deleted pool).
  CephContext *cct = static_cast<CephContext*>(cluster.cct());
  if (!cct->_conf->mon_fake_pool_delete) { // hope this is in [global]
    std::ostringstream oss;
    ret = destroy_ec_profile_and_rule_pp(cluster, pool_name, oss);
    if (ret) {
      cluster.shutdown();
      return ret;
    }
  }
  cluster.wait_for_latest_osdmap();
  cluster.shutdown();
  return ret;
}
| 5,801 | 27.441176 | 189 |
cc
|
null |
ceph-main/src/test/librados/test_cxx.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/rados/librados.hpp"
#include "test/librados/test_shared.h"
std::string create_one_pool_pp(const std::string &pool_name,
librados::Rados &cluster);
std::string create_one_pool_pp(const std::string &pool_name,
librados::Rados &cluster,
const std::map<std::string, std::string> &config);
std::string create_one_ec_pool_pp(const std::string &pool_name,
librados::Rados &cluster);
std::string connect_cluster_pp(librados::Rados &cluster);
std::string connect_cluster_pp(librados::Rados &cluster,
const std::map<std::string, std::string> &config);
int destroy_one_pool_pp(const std::string &pool_name, librados::Rados &cluster);
int destroy_one_ec_pool_pp(const std::string &pool_name, librados::Rados &cluster);
| 882 | 43.15 | 83 |
h
|
null |
ceph-main/src/test/librados/test_shared.cc
|
#include "test_shared.h"
#include <cstring>
#include "gtest/gtest.h"
#include "include/buffer.h"
using namespace ceph;
// Build a unique pool name of the form "<prefix><hostname>-<pid>-<n>".
// A per-process counter makes successive calls yield distinct names.
std::string get_temp_pool_name(const std::string &prefix)
{
  static int seq = 0;
  char host[80] = {0};
  gethostname(host, sizeof(host) - 1);
  ++seq;
  char suffix[160] = {0};
  snprintf(suffix, sizeof(suffix), "%s-%d-%d", host, getpid(), seq);
  return prefix + suffix;
}
// gtest helper: verify that a sparse read result matches a full buffer.
// `extents` maps offset -> length of the data returned in `actual`
// (consumed sequentially); for each extent the bytes of `expected` in the
// extent must equal the next bytes of `actual`, and the bytes of
// `expected` in the hole before the extent must be zero. Finally the
// extents must cover exactly expected.length().
void assert_eq_sparse(bufferlist& expected,
                      const std::map<uint64_t, uint64_t>& extents,
                      bufferlist& actual) {
  auto i = expected.begin();
  auto p = actual.begin();
  uint64_t pos = 0;
  for (auto extent : extents) {
    const uint64_t start = extent.first;
    const uint64_t end = start + extent.second;
    for (; pos < end; ++i, ++pos) {
      ASSERT_FALSE(i.end());
      if (pos < start) {
        // check the hole
        ASSERT_EQ('\0', *i);
      } else {
        // then the extent
        ASSERT_EQ(*i, *p);
        ++p;
      }
    }
  }
  ASSERT_EQ(expected.length(), pos);
}
| 1,097 | 23.4 | 66 |
cc
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.