Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | ceph-main/src/test/crimson/test_errorator.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include <boost/iterator/counting_iterator.hpp>
#include <numeric>
#include "test/crimson/gtest_seastar.h"
#include "crimson/common/errorator.h"
#include "crimson/common/errorator-loop.h"
#include "crimson/common/log.h"
#include "seastar/core/sleep.hh"
// Test fixture exercising crimson::errorator combinators (repeat,
// parallel_for_each, safe_then, implicit futurization) on an errorator
// whose only allowed error is ct_error::invarg.
struct errorator_test_t : public seastar_test_suite_t {
  using ertr = crimson::errorator<crimson::ct_error::invarg>;

  // Loop via crimson::repeat until the lambda yields stop_iteration::yes
  // (after five iterations); verifies repeat() terminates cleanly.
  ertr::future<> test_do_until() {
    return crimson::repeat([i=0]() mutable {
      if (i < 5) {
        ++i;
        return ertr::make_ready_future<seastar::stop_iteration>(
          seastar::stop_iteration::no);
      } else {
        return ertr::make_ready_future<seastar::stop_iteration>(
          seastar::stop_iteration::yes);
      }
    });
  }

  // Number of iterations fed to parallel_for_each below.
  static constexpr int SIZE = 42;

  // Sum [0, SIZE) concurrently into a heap-allocated accumulator, then
  // compare against std::accumulate over the same range.  The unique_ptr
  // is moved into the continuation to keep the accumulator alive until
  // the check runs.
  ertr::future<> test_parallel_for_each() {
    auto sum = std::make_unique<int>(0);
    return ertr::parallel_for_each(
      boost::make_counting_iterator(0),
      boost::make_counting_iterator(SIZE),
      [sum=sum.get()](int i) {
        *sum += i;
      }).safe_then([sum=std::move(sum)] {
        int expected = std::accumulate(boost::make_counting_iterator(0),
                                       boost::make_counting_iterator(SIZE),
                                       0);
        ASSERT_EQ(*sum, expected);
      });
  }

  // Move-only type: copy operations deleted, move construction allowed.
  // Used to prove safe_then() can pass values by move.
  struct noncopyable_t {
    constexpr noncopyable_t() = default;
    ~noncopyable_t() = default;
    noncopyable_t(noncopyable_t&&) = default;
  private:
    noncopyable_t(const noncopyable_t&) = delete;
    noncopyable_t& operator=(const noncopyable_t&) = delete;
  };

  // safe_then() must accept a move-only payload by value.
  ertr::future<> test_non_copy_then() {
    return create_noncopyable().safe_then([](auto t) {
      return ertr::now();
    });
  }

  ertr::future<int> test_futurization() {
    // we don't want to be enforced to always do `make_ready_future(...)`.
    // as in seastar::future, the futurization should take care about
    // turning non-future types (e.g. int) into futurized ones (e.g.
    // ertr::future<int>).
    return ertr::now().safe_then([] {
      return 42;
    }).safe_then([](int life) {
      return ertr::make_ready_future<int>(life);
    });
  }

private:
  // Helper producing a ready future carrying a move-only value.
  ertr::future<noncopyable_t> create_noncopyable() {
    return ertr::make_ready_future<noncopyable_t>();
  }
};
// Drive test_do_until() to completion on the reactor.
TEST_F(errorator_test_t, basic)
{
  run_async([this] {
    test_do_until().unsafe_get0();
  });
}
// Drive test_parallel_for_each() to completion on the reactor.
TEST_F(errorator_test_t, parallel_for_each)
{
  run_async([this] {
    test_parallel_for_each().unsafe_get0();
  });
}
// Drive test_non_copy_then() to completion on the reactor.
TEST_F(errorator_test_t, non_copy_then)
{
  run_async([this] {
    test_non_copy_then().unsafe_get0();
  });
}
// Drive test_futurization() to completion on the reactor.
TEST_F(errorator_test_t, test_futurization)
{
  run_async([this] {
    test_futurization().unsafe_get0();
  });
}
| 2,732 | 26.33 | 74 | cc |
null | ceph-main/src/test/crimson/test_fixed_kv_node_layout.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <stdio.h>
#include <iostream>
#include "gtest/gtest.h"
#include "crimson/common/fixed_kv_node_layout.h"
using namespace crimson;
using namespace crimson::common;
/// Host-order key/value payload used by the fixed-KV layout tests.
struct test_val_t {
  uint32_t t1 = 0;  // unsigned component
  int32_t t2 = 0;   // signed component

  /// Two values compare equal iff both components match.
  bool operator==(const test_val_t &other) const {
    if (t1 != other.t1) {
      return false;
    }
    return t2 == other.t2;
  }

  /// Logical negation of operator==.
  bool operator!=(const test_val_t &other) const {
    return !operator==(other);
  }
};
// Little-endian on-disk counterpart of test_val_t; converts implicitly
// to and from the host-order struct.
struct test_val_le_t {
  ceph_le32 t1{0};
  ceph_les32 t2{0};

  test_val_le_t() = default;
  test_val_le_t(const test_val_le_t &) = default;
  // Encode a host-order value into little-endian form.
  test_val_le_t(const test_val_t &nv)
    : t1(nv.t1), t2(nv.t2) {}
  // Decode back to the host-order representation.
  operator test_val_t() const {
    return test_val_t{t1, t2};
  }
};
/// Host-order node metadata describing the half-open key interval [t1, t2).
struct test_meta_t {
  uint32_t t1 = 0;  // inclusive lower bound
  uint32_t t2 = 0;  // exclusive upper bound

  /// Intervals are equal iff both bounds match.
  bool operator==(const test_meta_t &other) const {
    return t1 == other.t1 && t2 == other.t2;
  }
  bool operator!=(const test_meta_t &other) const {
    return !(*this == other);
  }

  /// Split [t1, t2) at pivot into [t1, pivot) and [pivot, t2).
  std::pair<test_meta_t, test_meta_t> split_into(uint32_t pivot) const {
    test_meta_t lower{t1, pivot};
    test_meta_t upper{pivot, t2};
    return {lower, upper};
  }

  /// Join two adjacent intervals into one spanning both.
  static test_meta_t merge_from(const test_meta_t &lhs, const test_meta_t &rhs) {
    return test_meta_t{lhs.t1, rhs.t2};
  }

  /// Re-split the union of two adjacent intervals at a new pivot.
  static std::pair<test_meta_t, test_meta_t>
  rebalance(const test_meta_t &lhs, const test_meta_t &rhs, uint32_t pivot) {
    return {test_meta_t{lhs.t1, pivot}, test_meta_t{pivot, rhs.t2}};
  }
};
// Little-endian on-disk counterpart of test_meta_t; converts implicitly
// to and from the host-order struct.
struct test_meta_le_t {
  ceph_le32 t1{0};
  ceph_le32 t2{0};

  test_meta_le_t() = default;
  test_meta_le_t(const test_meta_le_t &) = default;
  // Encode a host-order value into little-endian form.
  test_meta_le_t(const test_meta_t &nv)
    : t1(nv.t1), t2(nv.t2) {}
  // Decode back to the host-order representation.
  operator test_meta_t() const {
    return test_meta_t{t1, t2};
  }
};
constexpr size_t CAPACITY = 339;
// Concrete FixedKVNodeLayout instantiation for testing: CAPACITY entries
// of uint32_t key -> test_val_t value plus test_meta_t metadata, each
// paired with its little-endian wire type, backed by an in-object 4 KiB
// buffer.
struct TestNode : FixedKVNodeLayout<
  CAPACITY,
  test_meta_t, test_meta_le_t,
  uint32_t, ceph_le32,
  test_val_t, test_val_le_t> {
  char buf[4096];

  // Fresh node: zero the buffer and cover the full key space in meta.
  TestNode() : FixedKVNodeLayout(buf) {
    memset(buf, 0, sizeof(buf));
    set_meta({0, std::numeric_limits<uint32_t>::max()});
  }
  // Copy: the layout view must point at *our* buffer, with the bytes
  // copied over from rhs.
  TestNode(const TestNode &rhs)
    : FixedKVNodeLayout(buf) {
    ::memcpy(buf, rhs.buf, sizeof(buf));
  }
  // Assignment copies the raw bytes; the layout view already targets buf.
  TestNode &operator=(const TestNode &rhs) {
    memcpy(buf, rhs.buf, sizeof(buf));
    return *this;
  }
};
// Insert a single entry and verify size, key, value, and that the "next
// key" of the last entry is the meta upper bound (uint32 max).
TEST(FixedKVNodeTest, basic) {
  auto node = TestNode();
  ASSERT_EQ(node.get_size(), 0);

  auto val = test_val_t{ 1, 1 };
  node.journal_insert(node.begin(), 1, val, nullptr);

  ASSERT_EQ(node.get_size(), 1);

  auto iter = node.begin();
  ASSERT_EQ(iter.get_key(), 1);
  ASSERT_EQ(val, iter.get_val());
  ASSERT_EQ(std::numeric_limits<uint32_t>::max(), iter.get_next_key_or_max());
}
// Fill the node to CAPACITY and verify every entry's key, value, and
// next-key linkage (last entry reports uint32 max).
TEST(FixedKVNodeTest, at_capacity) {
  auto node = TestNode();
  ASSERT_EQ(CAPACITY, node.get_capacity());
  ASSERT_EQ(node.get_size(), 0);

  unsigned short num = 0;
  auto iter = node.begin();
  while (num < CAPACITY) {
    node.journal_insert(iter, num, test_val_t{num, num}, nullptr);
    ++num;
    ++iter;
  }
  ASSERT_EQ(node.get_size(), CAPACITY);

  num = 0;
  for (auto &i : node) {
    ASSERT_EQ(i.get_key(), num);
    ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
    if (num < (CAPACITY - 1)) {
      ASSERT_EQ(i.get_next_key_or_max(), num + 1);
    } else {
      ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
    }
    ++num;
  }
}
// Fill a node, split it into two, and verify the halves partition the
// entries and the meta bounds line up (left end == right start).
TEST(FixedKVNodeTest, split) {
  auto node = TestNode();
  ASSERT_EQ(node.get_size(), 0);

  unsigned short num = 0;
  auto iter = node.begin();
  while (num < CAPACITY) {
    node.journal_insert(iter, num, test_val_t{num, num}, nullptr);
    ++num;
    ++iter;
  }
  ASSERT_EQ(node.get_size(), CAPACITY);

  auto split_left = TestNode();
  auto split_right = TestNode();
  node.split_into(split_left, split_right);

  // All entries preserved, meta intervals adjacent.
  ASSERT_EQ(split_left.get_size() + split_right.get_size(), CAPACITY);
  ASSERT_EQ(split_left.get_meta().t1, split_left.begin()->get_key());
  ASSERT_EQ(split_left.get_meta().t2, split_right.get_meta().t1);
  ASSERT_EQ(split_right.get_meta().t2, std::numeric_limits<uint32_t>::max());

  // Entries [0, left_size) land in the left node, the rest in the right.
  num = 0;
  for (auto &i : split_left) {
    ASSERT_EQ(i.get_key(), num);
    ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
    if (num < split_left.get_size() - 1) {
      ASSERT_EQ(i.get_next_key_or_max(), num + 1);
    } else {
      ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
    }
    ++num;
  }
  for (auto &i : split_right) {
    ASSERT_EQ(i.get_key(), num);
    ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
    if (num < CAPACITY - 1) {
      ASSERT_EQ(i.get_next_key_or_max(), num + 1);
    } else {
      ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
    }
    ++num;
  }
  ASSERT_EQ(num, CAPACITY);
}
// Build two half-full adjacent nodes, merge them, and verify the merged
// node covers the full key space with every entry intact and linked.
TEST(FixedKVNodeTest, merge) {
  auto node = TestNode();
  auto node2 = TestNode();
  ASSERT_EQ(node.get_size(), 0);
  ASSERT_EQ(node2.get_size(), 0);

  // Left node holds keys [0, CAPACITY/2).
  unsigned short num = 0;
  auto iter = node.begin();
  while (num < CAPACITY/2) {
    node.journal_insert(iter, num, test_val_t{num, num}, nullptr);
    ++num;
    ++iter;
  }
  node.set_meta({0, num});
  node2.set_meta({num, std::numeric_limits<uint32_t>::max()});

  // Right node holds keys [CAPACITY/2, 2*(CAPACITY/2)).
  iter = node2.begin();
  while (num < (2 * (CAPACITY / 2))) {
    node2.journal_insert(iter, num, test_val_t{num, num}, nullptr);
    ++num;
    ++iter;
  }
  ASSERT_EQ(node.get_size(), CAPACITY / 2);
  ASSERT_EQ(node2.get_size(), CAPACITY / 2);

  auto total = node.get_size() + node2.get_size();

  auto node_merged = TestNode();
  node_merged.merge_from(node, node2);

  ASSERT_EQ(
    node_merged.get_meta(),
    (test_meta_t{0, std::numeric_limits<uint32_t>::max()}));

  ASSERT_EQ(node_merged.get_size(), total);
  num = 0;
  for (auto &i : node_merged) {
    ASSERT_EQ(i.get_key(), num);
    ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
    if (num < node_merged.get_size() - 1) {
      ASSERT_EQ(i.get_next_key_or_max(), num + 1);
    } else {
      ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
    }
    ++num;
  }
  ASSERT_EQ(num, total);
}
// Populate two adjacent nodes with `left` and `right` entries, rebalance
// them into two fresh nodes, and verify the entry counts, pivot, meta
// bounds, and per-entry contents.  With an odd total, `prefer_left`
// decides which side receives the extra entry.
void run_balance_test(unsigned left, unsigned right, bool prefer_left)
{
  auto node = TestNode();
  auto node2 = TestNode();
  ASSERT_EQ(node.get_size(), 0);
  ASSERT_EQ(node2.get_size(), 0);

  // Left node holds keys [0, left).
  unsigned short num = 0;
  auto iter = node.begin();
  while (num < left) {
    node.journal_insert(iter, num, test_val_t{num, num}, nullptr);
    ++num;
    ++iter;
  }
  node.set_meta({0, num});
  node2.set_meta({num, std::numeric_limits<uint32_t>::max()});

  // Right node holds keys [left, left + right).
  iter = node2.begin();
  while (num < (left + right)) {
    node2.journal_insert(iter, num, test_val_t{num, num}, nullptr);
    ++num;
    ++iter;
  }
  ASSERT_EQ(node.get_size(), left);
  ASSERT_EQ(node2.get_size(), right);

  auto total = node.get_size() + node2.get_size();

  auto node_balanced = TestNode();
  auto node_balanced2 = TestNode();
  auto pivot = TestNode::balance_into_new_nodes(
    node,
    node2,
    prefer_left,
    node_balanced,
    node_balanced2);

  ASSERT_EQ(total, node_balanced.get_size() + node_balanced2.get_size());

  // Expected post-balance sizes: an even total splits evenly; an odd
  // total gives the extra entry to the preferred side.
  unsigned left_size, right_size;
  if (total % 2) {
    if (prefer_left) {
      left_size = (total/2) + 1;
      right_size = total/2;
    } else {
      left_size = total/2;
      right_size = (total/2) + 1;
    }
  } else {
    left_size = right_size = total/2;
  }

  // Keys are dense from 0, so the pivot key equals the left size.
  ASSERT_EQ(pivot, left_size);
  ASSERT_EQ(left_size, node_balanced.get_size());
  ASSERT_EQ(right_size, node_balanced2.get_size());

  ASSERT_EQ(
    node_balanced.get_meta(),
    (test_meta_t{0, left_size}));
  ASSERT_EQ(
    node_balanced2.get_meta(),
    (test_meta_t{left_size, std::numeric_limits<uint32_t>::max()}));

  // Walk both nodes end to end verifying contents and linkage.
  num = 0;
  for (auto &i: node_balanced) {
    ASSERT_EQ(i.get_key(), num);
    ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
    if (num < node_balanced.get_size() - 1) {
      ASSERT_EQ(i.get_next_key_or_max(), num + 1);
    } else {
      ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
    }
    ++num;
  }
  for (auto &i: node_balanced2) {
    ASSERT_EQ(i.get_key(), num);
    ASSERT_EQ(i.get_val(), (test_val_t{num, num}));
    if (num < total - 1) {
      ASSERT_EQ(i.get_next_key_or_max(), num + 1);
    } else {
      ASSERT_EQ(std::numeric_limits<uint32_t>::max(), i.get_next_key_or_max());
    }
    ++num;
  }
}
// Exercise run_balance_test over uneven/even splits with both values of
// prefer_left.
TEST(FixedKVNodeTest, balanced) {
  run_balance_test(CAPACITY / 2, CAPACITY, true);
  run_balance_test(CAPACITY / 2, CAPACITY, false);
  run_balance_test(CAPACITY, CAPACITY / 2, true);
  run_balance_test(CAPACITY, CAPACITY / 2, false);
  run_balance_test(CAPACITY - 1, CAPACITY / 2, true);
  run_balance_test(CAPACITY / 2, CAPACITY - 1, false);
  run_balance_test(CAPACITY / 2, CAPACITY / 2, false);
}
// Apply each mutator to a live node while capturing its delta buffer,
// replay that buffer onto a copy taken beforehand, and require the two
// nodes to end up identical in size and content after every step.
void run_replay_test(
  std::vector<std::function<void(TestNode&, TestNode::delta_buffer_t&)>> &&f
) {
  TestNode node;
  for (auto &mutate : f) {
    TestNode::delta_buffer_t deltas;
    TestNode replayed = node;
    mutate(node, deltas);
    deltas.replay(replayed);
    ASSERT_EQ(node.get_size(), replayed.get_size());
    ASSERT_EQ(node, replayed);
  }
}
// Delta-replay round-trip: each step inserts or removes an entry while
// journaling deltas; run_replay_test checks the replayed copy matches.
TEST(FixedKVNodeTest, replay) {
  run_replay_test({
    [](auto &n, auto &b) {
      n.journal_insert(n.lower_bound(1), 1, test_val_t{1, 1}, &b);
      ASSERT_EQ(1, n.get_size());
    },
    [](auto &n, auto &b) {
      n.journal_insert(n.lower_bound(3), 3, test_val_t{1, 2}, &b);
      ASSERT_EQ(2, n.get_size());
    },
    [](auto &n, auto &b) {
      n.journal_remove(n.find(3), &b);
      ASSERT_EQ(1, n.get_size());
    },
    [](auto &n, auto &b) {
      n.journal_insert(n.lower_bound(2), 2, test_val_t{5, 1}, &b);
      ASSERT_EQ(2, n.get_size());
    }
  });
}
| 9,711 | 24.761273 | 81 | cc |
null | ceph-main/src/test/crimson/test_interruptible_future.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <seastar/core/sleep.hh>
#include "test/crimson/gtest_seastar.h"
#include "crimson/common/interruptible_future.h"
#include "crimson/common/log.h"
using namespace crimson;
// Marker exception thrown by TestInterruptCondition to signal an
// interruption to the interruptible-future machinery.
class test_interruption : public std::exception
{};
// Interrupt condition used with crimson::interruptible: when constructed
// with interrupt=true, every check yields an exceptional future carrying
// test_interruption; otherwise checks report "no interruption".
class TestInterruptCondition {
public:
  TestInterruptCondition(bool interrupt)
    : interrupt(interrupt) {}

  // Returns an exceptional future of type T if interruption is armed,
  // or an empty optional to let execution continue.
  template <typename T>
  std::optional<T> may_interrupt() {
    if (interrupt) {
      return seastar::futurize<T>::make_exception_future(test_interruption());
    } else {
      return std::optional<T>();
    }
  }

  // Compile-time check: only test_interruption counts as an interruption.
  template <typename T>
  static constexpr bool is_interruption_v = std::is_same_v<T, test_interruption>;

  // Runtime check on a captured exception_ptr.  NOTE(review):
  // __cxa_exception_type() is a GCC/libstdc++ extension — presumably
  // acceptable for this test environment; confirm if portability matters.
  static bool is_interruption(std::exception_ptr& eptr) {
    if (*eptr.__cxa_exception_type() == typeid(test_interruption))
      return true;
    return false;
  }
private:
  bool interrupt = false;
};
namespace crimson::interruptible {
template
thread_local interrupt_cond_t<TestInterruptCondition>
interrupt_cond<TestInterruptCondition>;
}
// Verify that interrupt_cond is set inside every continuation style
// (then_interruptible / safe_then_interruptible / error handler), and
// that it is cleared inside the interruption handler when an
// interruption actually fires (second with_interruption, armed=true).
TEST_F(seastar_test_suite_t, basic)
{
  using interruptor =
    interruptible::interruptor<TestInterruptCondition>;
  run_async([] {
    // Case 1: no interruption — interrupt_cond visible throughout.
    interruptor::with_interruption(
      [] {
        ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
        return interruptor::make_interruptible(seastar::now())
        .then_interruptible([] {
          ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
        }).then_interruptible([] {
          ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
          return errorator<ct_error::enoent>::make_ready_future<>();
        }).safe_then_interruptible([] {
          ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
          return seastar::now();
        }, errorator<ct_error::enoent>::all_same_way([] {
          ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
        })
        );
      }, [](std::exception_ptr) {}, false).get0();

    // Case 2: interruption armed — handler must run without interrupt_cond.
    interruptor::with_interruption(
      [] {
        ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
        return interruptor::make_interruptible(seastar::now())
        .then_interruptible([] {
          ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
        });
      }, [](std::exception_ptr) {
        ceph_assert(!interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
        return seastar::now();
      }, true).get0();
  });
}
// Verify interrupt_cond is visible inside every loop combinator
// (do_for_each / repeat) across plain-seastar, interruptible, and
// errorated futures, including a million-element do_for_each to guard
// against stack overflow.
// Fix: corrected "interruptiion" typo in the progress log message.
TEST_F(seastar_test_suite_t, loops)
{
  using interruptor =
    interruptible::interruptor<TestInterruptCondition>;
  std::cout << "testing interruptible loops" << std::endl;
  run_async([] {
    std::cout << "beginning" << std::endl;
    interruptor::with_interruption(
      [] {
        std::cout << "interruption enabled" << std::endl;
        ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
        return interruptor::make_interruptible(seastar::now())
        .then_interruptible([] {
          std::cout << "test seastar future do_for_each" << std::endl;
          std::vector<int> vec = {1, 2};
          return seastar::do_with(std::move(vec), [](auto& vec) {
            return interruptor::do_for_each(std::begin(vec), std::end(vec), [](int) {
              ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
              return seastar::now();
            });
          });
        }).then_interruptible([] {
          std::cout << "test interruptible seastar future do_for_each" << std::endl;
          std::vector<int> vec = {1, 2};
          return seastar::do_with(std::move(vec), [](auto& vec) {
            return interruptor::do_for_each(std::begin(vec), std::end(vec), [](int) {
              ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
              return interruptor::make_interruptible(seastar::now());
            });
          });
        }).then_interruptible([] {
          std::cout << "test seastar future repeat" << std::endl;
          return interruptor::repeat([] {
            ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
            return interruptor::make_interruptible(
              seastar::make_ready_future<
                seastar::stop_iteration>(
                  seastar::stop_iteration::yes));
          });
        }).then_interruptible([] {
          std::cout << "test interruptible seastar future repeat" << std::endl;
          return interruptor::repeat([] {
            ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
            return seastar::make_ready_future<
              seastar::stop_iteration>(
                seastar::stop_iteration::yes);
          });
        }).then_interruptible([] {
          std::cout << "test interruptible errorated future do_for_each" << std::endl;
          std::vector<int> vec = {1, 2};
          return seastar::do_with(std::move(vec), [](auto& vec) {
            using namespace std::chrono_literals;
            return interruptor::make_interruptible(seastar::now()).then_interruptible([&vec] {
              return interruptor::do_for_each(std::begin(vec), std::end(vec), [](int) {
                ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
                return interruptor::make_interruptible(
                  errorator<ct_error::enoent>::make_ready_future<>());
              }).safe_then_interruptible([] {
                ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
                return seastar::now();
              }, errorator<ct_error::enoent>::all_same_way([] {
                ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
              }));
            });
          });
        }).then_interruptible([] {
          std::cout << "test errorated future do_for_each" << std::endl;
          std::vector<int> vec;
          // set a big enough iteration times to test if there is stack overflow in do_for_each
          for (int i = 0; i < 1000000; i++) {
            vec.push_back(i);
          }
          return seastar::do_with(std::move(vec), [](auto& vec) {
            using namespace std::chrono_literals;
            return interruptor::make_interruptible(seastar::now()).then_interruptible([&vec] {
              return interruptor::do_for_each(std::begin(vec), std::end(vec), [](int) {
                ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
                return errorator<ct_error::enoent>::make_ready_future<>();
              }).safe_then_interruptible([] {
                ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
                return seastar::now();
              }, errorator<ct_error::enoent>::all_same_way([] {
                ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
              }));
            });
          });
        }).then_interruptible([] {
          ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
          return seastar::now();
        });
      }, [](std::exception_ptr) {}, false).get0();
  });
}
using base_intr = interruptible::interruptor<TestInterruptCondition>;
using base_ertr = errorator<ct_error::enoent, ct_error::eagain>;
using base_iertr = interruptible::interruptible_errorator<
TestInterruptCondition,
base_ertr>;
using base2_ertr = base_ertr::extend<ct_error::input_output_error>;
using base2_iertr = interruptible::interruptible_errorator<
TestInterruptCondition,
base2_ertr>;
// Run f under a non-armed TestInterruptCondition, mapping any
// interruption to the eagain error of the resulting errorated future.
template <typename F>
auto with_intr(F &&f) {
  return base_intr::with_interruption_to_error<ct_error::eagain>(
    std::forward<F>(f),
    TestInterruptCondition(false));
}
// An interruptible errorated future converts back to a plain errorated
// future via with_intr and completes cleanly.
TEST_F(seastar_test_suite_t, errorated)
{
  run_async([] {
    base_ertr::future<> ret = with_intr(
      []() {
        return base_iertr::now();
      }
    );
    ret.unsafe_get0();
  });
}
// A value carried through an interruptible errorated future survives the
// conversion back to a plain errorated future.
TEST_F(seastar_test_suite_t, errorated_value)
{
  run_async([] {
    base_ertr::future<int> ret = with_intr(
      []() {
        return base_iertr::make_ready_future<int>(
          1
        );
      });
    EXPECT_EQ(ret.unsafe_get0(), 1);
  });
}
// si_then may widen the error set (base_iertr -> base2_iertr); the
// result converts to the wider plain errorator type.
TEST_F(seastar_test_suite_t, expand_errorated_value)
{
  run_async([] {
    base2_ertr::future<> ret = with_intr(
      []() {
        return base_iertr::make_ready_future<int>(
          1
        ).si_then([](auto) {
          return base2_iertr::make_ready_future<>();
        });
      });
    ret.unsafe_get0();
  });
}
// interrupt_cond must be propagated into an interruptor::async thread
// (including across a blocking get()) and hold ref_count == 1 both
// inside and outside the async body.
TEST_F(seastar_test_suite_t, interruptible_async)
{
  using interruptor =
    interruptible::interruptor<TestInterruptCondition>;
  run_async([] {
    interruptor::with_interruption([] {
      auto fut = interruptor::async([] {
        interruptor::make_interruptible(
          seastar::sleep(std::chrono::milliseconds(10))).get();
        ceph_assert(interruptible::interrupt_cond<
          TestInterruptCondition>.interrupt_cond);
        ceph_assert(interruptible::interrupt_cond<
          TestInterruptCondition>.ref_count == 1);
      });
      ceph_assert(interruptible::interrupt_cond<
        TestInterruptCondition>.interrupt_cond);
      ceph_assert(interruptible::interrupt_cond<
        TestInterruptCondition>.ref_count == 1);
      return fut;
    }, [](std::exception_ptr) {}, false).get0();
  });
}
// Nested with_intr inside a continuation of an outer with_intr.
// Currently DISABLED_ (not run by default); presumably nesting is not
// yet supported — confirm before re-enabling.
TEST_F(seastar_test_suite_t, DISABLED_nested_interruptors)
{
  run_async([] {
    base_ertr::future<> ret = with_intr(
      []() {
        return base_iertr::now().safe_then_interruptible([]() {
          return with_intr(
            []() {
              return base_iertr::now();
            }
          );
        });
      }
    );
    ret.unsafe_get0();
  });
}
#if 0
// This seems to cause a hang in the gcc-9 linker on bionic
TEST_F(seastar_test_suite_t, handle_error)
{
run_async([] {
base_ertr::future<> ret = with_intr(
[]() {
return base2_iertr::make_ready_future<int>(
1
).handle_error_interruptible(
base_iertr::pass_further{},
ct_error::assert_all{"crash on eio"}
).si_then([](auto) {
return base_iertr::now();
});
});
ret.unsafe_get0();
});
}
#endif
| 9,561 | 30.662252 | 89 | cc |
null | ceph-main/src/test/crimson/test_lru.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
*
* Author: Loic Dachary <loic@dachary.org>
* Cheng Cheng <ccheng.leo@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library Public License for more details.
*
*/
#include <stdio.h>
#include "gtest/gtest.h"
#include "crimson/common/shared_lru.h"
// Thin test wrapper over SharedLRU<unsigned int, int> exposing add(),
// which inserts a heap-allocated value and optionally reports whether
// the key was already present (insert returned a different pointer).
class LRUTest : public SharedLRU<unsigned int, int> {
public:
  auto add(unsigned int key, int value, bool* existed = nullptr) {
    auto pv = new int{value};
    auto ptr = insert(key, std::unique_ptr<int>{pv});
    if (existed) {
      // insert() keeps the existing value on key collision, so the
      // returned pointer differs from the one we just allocated.
      *existed = (ptr.get() != pv);
    }
    return ptr;
  }
};
// Inserting a fresh key yields a live pointer to the new value; a second
// insert under the same key hands back the existing value and reports
// the collision via `existed`.
TEST(LRU, add) {
  LRUTest cache;
  const unsigned int key = 1;
  const int first_value = 2;
  bool existed = false;
  {
    auto ref = cache.add(key, first_value, &existed);
    ASSERT_TRUE(ref);
    ASSERT_TRUE(ref.get());
    ASSERT_EQ(first_value, *ref);
    ASSERT_FALSE(existed);
  }
  {
    auto ref = cache.add(key, 3, &existed);
    ASSERT_EQ(first_value, *ref);
    ASSERT_TRUE(existed);
  }
}
// empty() is true for a fresh cache, false after one insert, and true
// again after clear().
TEST(LRU, empty) {
  LRUTest cache;
  unsigned int key = 1;
  bool existed = false;

  ASSERT_TRUE(cache.empty());
  {
    int value1 = 2;
    auto ptr = cache.add(key, value1, &existed);
    ASSERT_EQ(value1, *ptr);
    ASSERT_FALSE(existed);
  }
  ASSERT_FALSE(cache.empty());

  cache.clear();
  ASSERT_TRUE(cache.empty());
}
// find() returns a live pointer to an inserted value, both while the
// insert's pointer is held and after it goes out of scope.
TEST(LRU, lookup) {
  LRUTest cache;
  unsigned int key = 1;
  {
    int value = 2;
    auto ptr = cache.add(key, value);
    ASSERT_TRUE(ptr);
    ASSERT_TRUE(ptr.get());
    ASSERT_TRUE(cache.find(key).get());
    ASSERT_EQ(value, *cache.find(key));
  }
  ASSERT_TRUE(cache.find(key).get());
}
// operator[] returns the existing value for a present key and
// default-constructs (value 0) for an absent one; both stay findable.
TEST(LRU, lookup_or_create) {
  LRUTest cache;
  {
    int value = 2;
    unsigned int key = 1;
    ASSERT_TRUE(cache.add(key, value).get());
    ASSERT_TRUE(cache[key].get());
    ASSERT_EQ(value, *cache.find(key));
  }
  {
    unsigned int key = 2;
    ASSERT_TRUE(cache[key].get());
    ASSERT_EQ(0, *cache.find(key));
  }
  ASSERT_TRUE(cache.find(1).get());
  ASSERT_TRUE(cache.find(2).get());
}
// lower_bound() is empty on a missing key and returns the value once the
// key is inserted.
TEST(LRU, lower_bound) {
  LRUTest cache;

  {
    unsigned int key = 1;
    ASSERT_FALSE(cache.lower_bound(key));
    int value = 2;

    ASSERT_TRUE(cache.add(key, value).get());
    ASSERT_TRUE(cache.lower_bound(key).get());
    EXPECT_EQ(value, *cache.lower_bound(key));
  }
}
// upper_bound() is empty on an empty cache and otherwise steps through
// keys in ascending order.
TEST(LRU, get_next) {
  {
    LRUTest cache;
    const unsigned int key = 0;
    EXPECT_FALSE(cache.upper_bound(key));
  }
  {
    LRUTest cache;
    // Hold strong refs so neither entry can be evicted mid-test.
    const unsigned int key1 = 111;
    auto ptr1 = cache[key1];
    const unsigned int key2 = 222;
    auto ptr2 = cache[key2];

    auto i = cache.upper_bound(0);
    ASSERT_TRUE(i);
    EXPECT_EQ(i->first, key1);
    auto j = cache.upper_bound(i->first);
    ASSERT_TRUE(j);
    EXPECT_EQ(j->first, key2);
  }
}
// clear() drops all entries: a previously findable key disappears and
// the cache reports empty.
TEST(LRU, clear) {
  LRUTest cache;
  const unsigned int k = 1;
  const int v = 2;
  cache.add(k, v);
  {
    auto hit = cache.find(k);
    ASSERT_TRUE(hit);
    ASSERT_EQ(v, *hit);
  }
  ASSERT_TRUE(cache.find(k).get());

  cache.clear();
  ASSERT_FALSE(cache.find(k));
  ASSERT_TRUE(cache.empty());
}
// With capacity 5, inserting 2*capacity entries evicts the oldest half;
// only the most recent `capacity` keys remain findable.
TEST(LRU, eviction) {
  LRUTest cache{5};
  bool existed;
  // add a bunch of elements, some of them will be evicted
  for (size_t i = 0; i < 2 * cache.capacity(); ++i) {
    cache.add(i, i, &existed);
    ASSERT_FALSE(existed);
  }
  size_t i = 0;
  for (; i < cache.capacity(); ++i) {
    ASSERT_FALSE(cache.find(i));
  }
  for (; i < 2 * cache.capacity(); ++i) {
    ASSERT_TRUE(cache.find(i));
  }
}
// An entry evicted from the LRU list stays reachable while a strong
// reference exists; touching it re-promotes it (evicting another entry),
// and dropping the strong ref keeps it cached.
TEST(LRU, track_weak) {
  constexpr int SIZE = 5;
  LRUTest cache{SIZE};

  bool existed = false;
  // strong reference to keep 0 alive
  auto ptr = cache.add(0, 0, &existed);
  ASSERT_FALSE(existed);

  // add a bunch of elements to get 0 evicted
  for (size_t i = 1; i < 2 * cache.capacity(); ++i) {
    cache.add(i, i, &existed);
    ASSERT_FALSE(existed);
  }
  // 0 is still reachable via the cache
  ASSERT_TRUE(cache.find(0));
  ASSERT_TRUE(cache.find(0).get());
  ASSERT_EQ(0, *cache.find(0));

  // [0..SIZE) are evicted when adding [SIZE..2*SIZE)
  // [SIZE..SIZE * 2) were still in the cache before accessing 0,
  // but SIZE got evicted when accessing 0
  ASSERT_FALSE(cache.find(SIZE-1));
  ASSERT_FALSE(cache.find(SIZE));
  ASSERT_TRUE(cache.find(SIZE+1));
  ASSERT_TRUE(cache.find(SIZE+1).get());
  ASSERT_EQ((int)SIZE+1, *cache.find(SIZE+1));

  ptr.reset();
  // 0 is still reachable, as it is now put back into LRU cache
  ASSERT_TRUE(cache.find(0));
}
// Local Variables:
// compile-command: "cmake --build ../../../build -j 8 --target unittest_seastar_lru && ctest -R unittest_seastar_lru # --gtest_filter=*.* --log-to-stderr=true"
// End:
| 5,213 | 23.364486 | 161 | cc |
null | ceph-main/src/test/crimson/test_messenger.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_argparse.h"
#include "common/ceph_time.h"
#include "messages/MPing.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "crimson/auth/DummyAuth.h"
#include "crimson/common/log.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/net/Messenger.h"
#include "crimson/net/Interceptor.h"
#include <map>
#include <random>
#include <boost/program_options.hpp>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <seastar/core/app-template.hh>
#include <seastar/core/do_with.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/sleep.hh>
#include <seastar/core/with_timeout.hh>
#include "test_messenger.h"
using namespace std::chrono_literals;
namespace bpo = boost::program_options;
using crimson::common::local_conf;
namespace {
// Shared logger for this test binary (ceph_subsys_test subsystem).
seastar::logger& logger() {
  return crimson::get_logger(ceph_subsys_test);
}
static std::random_device rd;
static std::default_random_engine rng{rd()};
static bool verbose = false;
// Hand out a fresh loopback address per call: a function-local static
// port counter starts at 9030 and is pre-incremented on each call, so
// concurrent test servers never collide on a port within this process.
static entity_addr_t get_server_addr() {
  static int port = 9030;
  ++port;
  entity_addr_t saddr;
  saddr.parse("127.0.0.1", nullptr);
  saddr.set_port(port);
  return saddr;
}
// Ping/pong echo test: two servers reply MPing-for-MPing; two clients
// each open a connection (cross-wired to the other server), send
// `rounds` pings interleaved with keepalives (probability
// keepalive_ratio per attempt), wait until all pongs arrive, then
// everything is shut down.  Handshake and ping-pong durations are logged.
static seastar::future<> test_echo(unsigned rounds,
                                   double keepalive_ratio)
{
  struct test_state {
    // Echo server: answers every received message with an MPing.
    struct Server final
        : public crimson::net::Dispatcher {
      crimson::net::MessengerRef msgr;
      crimson::auth::DummyAuthClientServer dummy_auth;

      std::optional<seastar::future<>> ms_dispatch(
          crimson::net::ConnectionRef c, MessageRef m) override {
        if (verbose) {
          logger().info("server got {}", *m);
        }
        // reply with a pong
        std::ignore = c->send(crimson::make_message<MPing>());
        return {seastar::now()};
      }

      // Create the messenger, bind it to addr, and start dispatching.
      // Aborts if the address is already in use (another instance).
      seastar::future<> init(const entity_name_t& name,
                             const std::string& lname,
                             const uint64_t nonce,
                             const entity_addr_t& addr) {
        msgr = crimson::net::Messenger::create(
            name, lname, nonce, true);
        msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
        msgr->set_auth_client(&dummy_auth);
        msgr->set_auth_server(&dummy_auth);
        return msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
          return msgr->start({this});
        }, crimson::net::Messenger::bind_ertr::all_same_way(
            [addr] (const std::error_code& e) {
          logger().error("test_echo(): "
                         "there is another instance running at {}", addr);
          ceph_abort();
        }));
      }
      seastar::future<> shutdown() {
        ceph_assert(msgr);
        msgr->stop();
        return msgr->shutdown();
      }
    };

    // Ping client: tracks one PingSession per connection and resolves a
    // per-connection promise once `rounds` pongs have been received.
    struct Client final
        : public crimson::net::Dispatcher {
      struct PingSession : public seastar::enable_shared_from_this<PingSession> {
        unsigned count = 0u;       // pongs received so far
        mono_time connected_time;  // set on ms_handle_connect
        mono_time finish_time;     // set when the last pong arrives
      };
      using PingSessionRef = seastar::shared_ptr<PingSession>;

      unsigned rounds;
      std::bernoulli_distribution keepalive_dist;
      crimson::net::MessengerRef msgr;
      std::map<crimson::net::ConnectionRef, seastar::promise<>> pending_conns;
      std::map<crimson::net::ConnectionRef, PingSessionRef> sessions;
      crimson::auth::DummyAuthClientServer dummy_auth;

      Client(unsigned rounds, double keepalive_ratio)
        : rounds(rounds),
          keepalive_dist(std::bernoulli_distribution{keepalive_ratio}) {}

      // A session must exist for every dispatched connection.
      PingSessionRef find_session(crimson::net::ConnectionRef c) {
        auto found = sessions.find(c);
        if (found == sessions.end()) {
          ceph_assert(false);
        }
        return found->second;
      }

      // Record the session and its handshake-complete timestamp.
      void ms_handle_connect(
          crimson::net::ConnectionRef conn,
          seastar::shard_id new_shard) override {
        assert(new_shard == seastar::this_shard_id());
        auto session = seastar::make_shared<PingSession>();
        auto [i, added] = sessions.emplace(conn, session);
        std::ignore = i;
        ceph_assert(added);
        session->connected_time = mono_clock::now();
      }

      // Count pongs; on the final one, stamp finish_time and wake the
      // waiter blocked in do_dispatch_pingpong().
      std::optional<seastar::future<>> ms_dispatch(
          crimson::net::ConnectionRef c, MessageRef m) override {
        auto session = find_session(c);
        ++(session->count);
        if (verbose) {
          logger().info("client ms_dispatch {}", session->count);
        }
        if (session->count == rounds) {
          logger().info("{}: finished receiving {} pongs", *c, session->count);
          session->finish_time = mono_clock::now();
          auto found = pending_conns.find(c);
          ceph_assert(found != pending_conns.end());
          found->second.set_value();
        }
        return {seastar::now()};
      }

      seastar::future<> init(const entity_name_t& name,
                             const std::string& lname,
                             const uint64_t nonce) {
        msgr = crimson::net::Messenger::create(
            name, lname, nonce, true);
        msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
        msgr->set_auth_client(&dummy_auth);
        msgr->set_auth_server(&dummy_auth);
        return msgr->start({this});
      }

      seastar::future<> shutdown() {
        ceph_assert(msgr);
        msgr->stop();
        return msgr->shutdown();
      }

      // Connect to peer_addr, run the full ping exchange, then log the
      // handshake and ping-pong wall-clock durations.
      seastar::future<> dispatch_pingpong(const entity_addr_t& peer_addr) {
        mono_time start_time = mono_clock::now();
        auto conn = msgr->connect(peer_addr, entity_name_t::TYPE_OSD);
        return seastar::futurize_invoke([this, conn] {
          return do_dispatch_pingpong(conn);
        }).then([this, conn, start_time] {
          auto session = find_session(conn);
          std::chrono::duration<double> dur_handshake = session->connected_time - start_time;
          std::chrono::duration<double> dur_pingpong = session->finish_time - session->connected_time;
          logger().info("{}: handshake {}, pingpong {}",
                        *conn, dur_handshake.count(), dur_pingpong.count());
        });
      }

     private:
      // Send `rounds` pings; before each ping, keepalives are sent with
      // probability keepalive_ratio (the inner repeat loops until a real
      // ping goes out).  Then wait for ms_dispatch to signal that all
      // pongs have been received.
      seastar::future<> do_dispatch_pingpong(crimson::net::ConnectionRef conn) {
        auto [i, added] = pending_conns.emplace(conn, seastar::promise<>());
        std::ignore = i;
        ceph_assert(added);
        return seastar::do_with(0u, 0u,
            [this, conn](auto &count_ping, auto &count_keepalive) {
          return seastar::do_until(
            [this, conn, &count_ping, &count_keepalive] {
              bool stop = (count_ping == rounds);
              if (stop) {
                logger().info("{}: finished sending {} pings with {} keepalives",
                              *conn, count_ping, count_keepalive);
              }
              return stop;
            },
            [this, conn, &count_ping, &count_keepalive] {
              return seastar::repeat([this, conn, &count_ping, &count_keepalive] {
                if (keepalive_dist(rng)) {
                  return conn->send_keepalive()
                  .then([&count_keepalive] {
                    count_keepalive += 1;
                    return seastar::make_ready_future<seastar::stop_iteration>(
                      seastar::stop_iteration::no);
                  });
                } else {
                  return conn->send(crimson::make_message<MPing>())
                  .then([&count_ping] {
                    count_ping += 1;
                    return seastar::make_ready_future<seastar::stop_iteration>(
                      seastar::stop_iteration::yes);
                  });
                }
              });
            }).then([this, conn] {
              // Block until ms_dispatch counted all expected pongs.
              auto found = pending_conns.find(conn);
              return found->second.get_future();
            }
          );
        });
      }
    };
  };

  logger().info("test_echo(rounds={}, keepalive_ratio={}):",
                rounds, keepalive_ratio);
  auto server1 = seastar::make_shared<test_state::Server>();
  auto server2 = seastar::make_shared<test_state::Server>();
  auto client1 = seastar::make_shared<test_state::Client>(rounds, keepalive_ratio);
  auto client2 = seastar::make_shared<test_state::Client>(rounds, keepalive_ratio);
  // start servers and clients
  auto addr1 = get_server_addr();
  auto addr2 = get_server_addr();
  addr1.set_type(entity_addr_t::TYPE_MSGR2);
  addr2.set_type(entity_addr_t::TYPE_MSGR2);
  return seastar::when_all_succeed(
      server1->init(entity_name_t::OSD(0), "server1", 1, addr1),
      server2->init(entity_name_t::OSD(1), "server2", 2, addr2),
      client1->init(entity_name_t::OSD(2), "client1", 3),
      client2->init(entity_name_t::OSD(3), "client2", 4)
  // dispatch pingpong
  ).then_unpack([client1, client2, server1, server2] {
    return seastar::when_all_succeed(
        // test connecting in parallel, accepting in parallel
        client1->dispatch_pingpong(server2->msgr->get_myaddr()),
        client2->dispatch_pingpong(server1->msgr->get_myaddr()));
  // shutdown
  }).then_unpack([] {
    return seastar::now();
  }).then([client1] {
    logger().info("client1 shutdown...");
    return client1->shutdown();
  }).then([client2] {
    logger().info("client2 shutdown...");
    return client2->shutdown();
  }).then([server1] {
    logger().info("server1 shutdown...");
    return server1->shutdown();
  }).then([server2] {
    logger().info("server2 shutdown...");
    return server2->shutdown();
  }).then([] {
    logger().info("test_echo() done!\n");
  }).handle_exception([server1, server2, client1, client2] (auto eptr) {
    logger().error("test_echo() failed: got exception {}", eptr);
    throw;
  });
}
// Verify that ms_dispatch can be re-entered concurrently: the first dispatch
// blocks until the second one arrives, so the test only completes if the
// messenger delivers the second message while the first is still pending.
static seastar::future<> test_concurrent_dispatch()
{
  struct test_state {
    struct Server final
      : public crimson::net::Dispatcher {
      crimson::net::MessengerRef msgr;
      int count = 0;
      seastar::promise<> on_second; // satisfied on second dispatch
      seastar::promise<> on_done; // satisfied when first dispatch unblocks
      crimson::auth::DummyAuthClientServer dummy_auth;
      std::optional<seastar::future<>> ms_dispatch(
          crimson::net::ConnectionRef, MessageRef m) override {
        switch (++count) {
        case 1:
          // block on the first request until we reenter with the second
          std::ignore = on_second.get_future().then([this] { on_done.set_value(); });
          break;
        case 2:
          on_second.set_value();
          break;
        default:
          throw std::runtime_error("unexpected count");
        }
        return {seastar::now()};
      }
      // resolves once the first dispatch has been unblocked by the second
      seastar::future<> wait() { return on_done.get_future(); }
      seastar::future<> init(const entity_name_t& name,
                             const std::string& lname,
                             const uint64_t nonce,
                             const entity_addr_t& addr) {
        msgr = crimson::net::Messenger::create(
            name, lname, nonce, true);
        msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
        msgr->set_auth_client(&dummy_auth);
        msgr->set_auth_server(&dummy_auth);
        return msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
          return msgr->start({this});
        }, crimson::net::Messenger::bind_ertr::all_same_way(
            [addr] (const std::error_code& e) {
          // bind failure usually means the port is taken by another instance
          logger().error("test_concurrent_dispatch(): "
                         "there is another instance running at {}", addr);
          ceph_abort();
        }));
      }
    };
    struct Client final
      : public crimson::net::Dispatcher {
      crimson::net::MessengerRef msgr;
      crimson::auth::DummyAuthClientServer dummy_auth;
      std::optional<seastar::future<>> ms_dispatch(
          crimson::net::ConnectionRef, MessageRef m) override {
        return {seastar::now()};
      }
      seastar::future<> init(const entity_name_t& name,
                             const std::string& lname,
                             const uint64_t nonce) {
        msgr = crimson::net::Messenger::create(
            name, lname, nonce, true);
        msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
        msgr->set_auth_client(&dummy_auth);
        msgr->set_auth_server(&dummy_auth);
        return msgr->start({this});
      }
    };
  };
  logger().info("test_concurrent_dispatch():");
  auto server = seastar::make_shared<test_state::Server>();
  auto client = seastar::make_shared<test_state::Client>();
  auto addr = get_server_addr();
  addr.set_type(entity_addr_t::TYPE_MSGR2);
  addr.set_family(AF_INET);
  return seastar::when_all_succeed(
      server->init(entity_name_t::OSD(4), "server3", 5, addr),
      client->init(entity_name_t::OSD(5), "client3", 6)
  ).then_unpack([server, client] {
    auto conn = client->msgr->connect(server->msgr->get_myaddr(),
                                      entity_name_t::TYPE_OSD);
    // send two messages back to back; the server blocks on the first
    return conn->send(crimson::make_message<MPing>()).then([conn] {
      return conn->send(crimson::make_message<MPing>());
    });
  }).then([server] {
    return server->wait();
  }).then([client] {
    logger().info("client shutdown...");
    client->msgr->stop();
    return client->msgr->shutdown();
  }).then([server] {
    logger().info("server shutdown...");
    server->msgr->stop();
    return server->msgr->shutdown();
  }).then([] {
    logger().info("test_concurrent_dispatch() done!\n");
  }).handle_exception([server, client] (auto eptr) {
    logger().error("test_concurrent_dispatch() failed: got exception {}", eptr);
    throw;
  });
}
// Verify that a messenger can be shut down while the client is still busily
// sending: the client's detached send loop is stopped only after shutdown()
// completes, and its result is forwarded through stopped_send_promise.
seastar::future<> test_preemptive_shutdown() {
  struct test_state {
    class Server final
      : public crimson::net::Dispatcher {
      crimson::net::MessengerRef msgr;
      crimson::auth::DummyAuthClientServer dummy_auth;
      std::optional<seastar::future<>> ms_dispatch(
          crimson::net::ConnectionRef c, MessageRef m) override {
        // echo a ping back; the send completion is deliberately ignored
        std::ignore = c->send(crimson::make_message<MPing>());
        return {seastar::now()};
      }
     public:
      seastar::future<> init(const entity_name_t& name,
                             const std::string& lname,
                             const uint64_t nonce,
                             const entity_addr_t& addr) {
        msgr = crimson::net::Messenger::create(
            name, lname, nonce, true);
        msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0));
        msgr->set_auth_client(&dummy_auth);
        msgr->set_auth_server(&dummy_auth);
        return msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
          return msgr->start({this});
        }, crimson::net::Messenger::bind_ertr::all_same_way(
            [addr] (const std::error_code& e) {
          // bind failure usually means the port is taken by another instance
          logger().error("test_preemptive_shutdown(): "
                         "there is another instance running at {}", addr);
          ceph_abort();
        }));
      }
      entity_addr_t get_addr() const {
        return msgr->get_myaddr();
      }
      seastar::future<> shutdown() {
        msgr->stop();
        return msgr->shutdown();
      }
    };
    class Client final
      : public crimson::net::Dispatcher {
      crimson::net::MessengerRef msgr;
      crimson::auth::DummyAuthClientServer dummy_auth;
      bool stop_send = false;
      seastar::promise<> stopped_send_promise;
      std::optional<seastar::future<>> ms_dispatch(
          crimson::net::ConnectionRef, MessageRef m) override {
        return {seastar::now()};
      }
     public:
      seastar::future<> init(const entity_name_t& name,
                             const std::string& lname,
                             const uint64_t nonce) {
        msgr = crimson::net::Messenger::create(
            name, lname, nonce, true);
        msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0));
        msgr->set_auth_client(&dummy_auth);
        msgr->set_auth_server(&dummy_auth);
        return msgr->start({this});
      }
      // starts a detached ping loop; its outcome (success or exception) is
      // forwarded to stopped_send_promise
      void send_pings(const entity_addr_t& addr) {
        auto conn = msgr->connect(addr, entity_name_t::TYPE_OSD);
        // forwarded to stopped_send_promise
        (void) seastar::do_until(
          [this] { return stop_send; },
          [conn] {
            return conn->send(crimson::make_message<MPing>()).then([] {
              // yield to the reactor between pings
              return seastar::sleep(0ms);
            });
          }
        ).then_wrapped([this, conn] (auto fut) {
          fut.forward_to(std::move(stopped_send_promise));
        });
      }
      seastar::future<> shutdown() {
        msgr->stop();
        return msgr->shutdown().then([this] {
          // stop the loop only after the messenger is fully down
          stop_send = true;
          return stopped_send_promise.get_future();
        });
      }
    };
  };
  logger().info("test_preemptive_shutdown():");
  auto server = seastar::make_shared<test_state::Server>();
  auto client = seastar::make_shared<test_state::Client>();
  auto addr = get_server_addr();
  addr.set_type(entity_addr_t::TYPE_MSGR2);
  addr.set_family(AF_INET);
  return seastar::when_all_succeed(
      server->init(entity_name_t::OSD(6), "server4", 7, addr),
      client->init(entity_name_t::OSD(7), "client4", 8)
  ).then_unpack([server, client] {
    client->send_pings(server->get_addr());
    // let the ping loop run a while before shutting down underneath it
    return seastar::sleep(100ms);
  }).then([client] {
    logger().info("client shutdown...");
    return client->shutdown();
  }).then([server] {
    logger().info("server shutdown...");
    return server->shutdown();
  }).then([] {
    logger().info("test_preemptive_shutdown() done!\n");
  }).handle_exception([server, client] (auto eptr) {
    logger().error("test_preemptive_shutdown() failed: got exception {}", eptr);
    throw;
  });
}
using ceph::msgr::v2::Tag;
using crimson::net::bp_action_t;
using crimson::net::bp_type_t;
using crimson::net::Breakpoint;
using crimson::net::Connection;
using crimson::net::ConnectionRef;
using crimson::net::custom_bp_t;
using crimson::net::Dispatcher;
using crimson::net::Interceptor;
using crimson::net::Messenger;
using crimson::net::MessengerRef;
using crimson::net::SocketPolicy;
using crimson::net::tag_bp_t;
using namespace ceph::net::test;
struct counter_t { unsigned counter = 0; };
// Lifecycle states a tracked test connection can be observed in.
enum class conn_state_t {
  unknown = 0,  // not yet established, closed, or replaced
  established,  // connected with no pending sends/receives (see wait_ready)
  closed,       // closed without having been replaced
  replaced,     // superseded by a newly accepted connection
};
// Render a conn_state_t as its human-readable name for log output.
std::ostream& operator<<(std::ostream& out, const conn_state_t& state) {
  const char* name = nullptr;
  switch (state) {
  case conn_state_t::unknown:
    name = "unknown";
    break;
  case conn_state_t::established:
    name = "established";
    break;
  case conn_state_t::closed:
    name = "closed";
    break;
  case conn_state_t::replaced:
    name = "replaced";
    break;
  default:
    // all enumerators are handled above; anything else is corruption
    ceph_abort();
  }
  return out << name;
}
} // anonymous namespace
#if FMT_VERSION >= 90000
// fmt v9 no longer falls back to operator<< implicitly; opt conn_state_t
// back in via ostream_formatter so it stays formattable in log calls.
template<>
struct fmt::formatter<conn_state_t> : fmt::ostream_formatter {};
#endif
namespace {
// Per-connection bookkeeping for a failover test run: the observed state
// plus counters for connect/accept attempts and dispatcher callbacks.
// The assert_* helpers throw (rather than abort) so a failing expectation
// surfaces as a test failure with a descriptive message.
struct ConnResult {
  ConnectionRef conn;
  unsigned index;                           // position in ConnResults
  conn_state_t state = conn_state_t::unknown;

  unsigned connect_attempts = 0;            // SOCKET_CONNECTING hits
  unsigned client_connect_attempts = 0;     // CLIENT_IDENT writes
  unsigned client_reconnect_attempts = 0;   // SESSION_RECONNECT writes
  unsigned cnt_connect_dispatched = 0;      // ms_handle_connect calls

  unsigned accept_attempts = 0;             // SOCKET_ACCEPTED hits
  unsigned server_connect_attempts = 0;     // CLIENT_IDENT reads
  unsigned server_reconnect_attempts = 0;   // SESSION_RECONNECT reads
  unsigned cnt_accept_dispatched = 0;       // ms_handle_accept calls

  unsigned cnt_reset_dispatched = 0;        // ms_handle_reset calls
  unsigned cnt_remote_reset_dispatched = 0; // ms_handle_remote_reset calls

  ConnResult(ConnectionRef conn, unsigned index)
    : conn(conn), index(index) {}

  // Throws with both the expression text and the values when they differ.
  template <typename T>
  void _assert_eq(const char* expr_actual, T actual,
                  const char* expr_expected, T expected) const {
    if (actual != expected) {
      throw std::runtime_error(fmt::format(
            "[{}] {} '{}' is actually {}, not the expected '{}' {}",
            index, *conn, expr_actual, actual, expr_expected, expected));
    }
  }

// NOTE: this macro stays defined for the rest of the translation unit.
#define ASSERT_EQUAL(actual, expected) \
  _assert_eq(#actual, actual, #expected, expected)

  void assert_state_at(conn_state_t expected) const {
    ASSERT_EQUAL(state, expected);
  }

  void assert_connect(unsigned attempts,
                      unsigned connects,
                      unsigned reconnects,
                      unsigned dispatched) const {
    ASSERT_EQUAL(connect_attempts, attempts);
    ASSERT_EQUAL(client_connect_attempts, connects);
    ASSERT_EQUAL(client_reconnect_attempts, reconnects);
    ASSERT_EQUAL(cnt_connect_dispatched, dispatched);
  }

  // Overload that only checks attempt/dispatch totals.
  void assert_connect(unsigned attempts,
                      unsigned dispatched) const {
    ASSERT_EQUAL(connect_attempts, attempts);
    ASSERT_EQUAL(cnt_connect_dispatched, dispatched);
  }

  void assert_accept(unsigned attempts,
                     unsigned accepts,
                     unsigned reaccepts,
                     unsigned dispatched) const {
    ASSERT_EQUAL(accept_attempts, attempts);
    ASSERT_EQUAL(server_connect_attempts, accepts);
    ASSERT_EQUAL(server_reconnect_attempts, reaccepts);
    ASSERT_EQUAL(cnt_accept_dispatched, dispatched);
  }

  // Overload that only checks attempt/dispatch totals.
  void assert_accept(unsigned attempts,
                     unsigned dispatched) const {
    ASSERT_EQUAL(accept_attempts, attempts);
    ASSERT_EQUAL(cnt_accept_dispatched, dispatched);
  }

  void assert_reset(unsigned local, unsigned remote) const {
    ASSERT_EQUAL(cnt_reset_dispatched, local);
    ASSERT_EQUAL(cnt_remote_reset_dispatched, remote);
  }

  // Log every counter, for diagnosing a failed expectation.
  void dump() const {
    logger().info("\nResult({}):\n"
                  "  conn: [{}] {}:\n"
                  "  state: {}\n"
                  "  connect_attempts: {}\n"
                  "  client_connect_attempts: {}\n"
                  "  client_reconnect_attempts: {}\n"
                  "  cnt_connect_dispatched: {}\n"
                  "  accept_attempts: {}\n"
                  "  server_connect_attempts: {}\n"
                  "  server_reconnect_attempts: {}\n"
                  "  cnt_accept_dispatched: {}\n"
                  "  cnt_reset_dispatched: {}\n"
                  "  cnt_remote_reset_dispatched: {}\n",
                  static_cast<const void*>(this),
                  index, *conn,
                  state,
                  connect_attempts,
                  client_connect_attempts,
                  client_reconnect_attempts,
                  cnt_connect_dispatched,
                  accept_attempts,
                  server_connect_attempts,
                  server_reconnect_attempts,
                  cnt_accept_dispatched,
                  cnt_reset_dispatched,
                  cnt_remote_reset_dispatched);
  }
};
using ConnResults = std::vector<ConnResult>;
// Observes connection lifecycle events and intercepts protocol breakpoints
// for a failover-test run.  Breakpoint actions (FAULT/BLOCK/STALL) are
// configured per (breakpoint, nth hit) before the run starts; per-run state
// (conns, results, hit counters) is populated as connections appear.
//
// Fix: the intercept() log line for CLIENT_IDENT reads printed
// "server_connect_attemps" -- corrected to "server_connect_attempts" so it
// matches the counter name and the other log lines.
struct TestInterceptor : public Interceptor {
  // breakpoint -> (nth hit -> action to take on exactly that hit)
  std::map<Breakpoint, std::map<unsigned, bp_action_t>> breakpoints;
  // breakpoint -> number of times it has been hit so far
  std::map<Breakpoint, counter_t> breakpoints_counter;
  // connection -> index into results
  std::map<ConnectionRef, unsigned> conns;
  ConnResults results;
  // armed by wait(), fired by notify() whenever something interesting happens
  std::optional<seastar::abort_source> signal;

  TestInterceptor() = default;
  // only used for copy breakpoint configurations
  TestInterceptor(const TestInterceptor& other) {
    assert(other.breakpoints_counter.empty());
    assert(other.conns.empty());
    assert(other.results.empty());
    breakpoints = other.breakpoints;
    assert(!other.signal);
  }

  // Arrange for `bp` to inject a fault on its `round`-th hit (1-based).
  void make_fault(Breakpoint bp, unsigned round = 1) {
    assert(round >= 1);
    breakpoints[bp][round] = bp_action_t::FAULT;
  }

  // Arrange for `bp` to block on its `round`-th hit (1-based).
  void make_block(Breakpoint bp, unsigned round = 1) {
    assert(round >= 1);
    breakpoints[bp][round] = bp_action_t::BLOCK;
  }

  // Arrange for `bp` to stall on its `round`-th hit (1-based).
  void make_stall(Breakpoint bp, unsigned round = 1) {
    assert(round >= 1);
    breakpoints[bp][round] = bp_action_t::STALL;
  }

  // Returns the tracked result for conn, or nullptr if untracked.
  ConnResult* find_result(ConnectionRef conn) {
    auto it = conns.find(conn);
    if (it == conns.end()) {
      return nullptr;
    } else {
      return &results[it->second];
    }
  }

  // Sleep until notify() is called; throws if nothing happens within 10s.
  seastar::future<> wait() {
    assert(!signal);
    signal = seastar::abort_source();
    return seastar::sleep_abortable(10s, *signal).then([] {
      throw std::runtime_error("Timeout (10s) in TestInterceptor::wait()");
    }).handle_exception_type([] (const seastar::sleep_aborted& e) {
      // wait done!
    });
  }

  // Wake up a pending wait(), if any; safe to call when nobody waits.
  void notify() {
    if (signal) {
      signal->request_abort();
      signal = std::nullopt;
    }
  }

 private:
  // A new connection appeared: allocate a result slot and start tracking it.
  void register_conn(ConnectionRef conn) override {
    auto result = find_result(conn);
    if (result != nullptr) {
      logger().error("The connection [{}] {} already exists when register {}",
                     result->index, *result->conn, *conn);
      ceph_abort();
    }
    unsigned index = results.size();
    results.emplace_back(conn, index);
    conns[conn] = index;
    notify();
    logger().info("[{}] {} new connection registered", index, *conn);
  }

  // A tracked connection closed; keep `replaced` sticky over `closed`.
  void register_conn_closed(ConnectionRef conn) override {
    auto result = find_result(conn);
    if (result == nullptr) {
      logger().error("Untracked closed connection: {}", *conn);
      ceph_abort();
    }
    if (result->state != conn_state_t::replaced) {
      result->state = conn_state_t::closed;
    }
    notify();
    logger().info("[{}] {} closed({})", result->index, *conn, result->state);
  }

  // A tracked connection became ready (connected).
  void register_conn_ready(ConnectionRef conn) override {
    auto result = find_result(conn);
    if (result == nullptr) {
      logger().error("Untracked ready connection: {}", *conn);
      ceph_abort();
    }
    ceph_assert(conn->is_connected());
    notify();
    logger().info("[{}] {} ready", result->index, *conn);
  }

  // A tracked connection was replaced by a newly accepted one.
  void register_conn_replaced(ConnectionRef conn) override {
    auto result = find_result(conn);
    if (result == nullptr) {
      logger().error("Untracked replaced connection: {}", *conn);
      ceph_abort();
    }
    result->state = conn_state_t::replaced;
    logger().info("[{}] {} {}", result->index, *conn, result->state);
  }

  // Called on every breakpoint hit: bump the per-connection counters the
  // breakpoint maps to, then return the configured action for this exact
  // hit count (or CONTINUE when none is configured).
  bp_action_t intercept(ConnectionRef conn, Breakpoint bp) override {
    ++breakpoints_counter[bp].counter;

    auto result = find_result(conn);
    if (result == nullptr) {
      logger().error("Untracked intercepted connection: {}, at breakpoint {}({})",
                     *conn, bp, breakpoints_counter[bp].counter);
      ceph_abort();
    }

    if (bp == custom_bp_t::SOCKET_CONNECTING) {
      ++result->connect_attempts;
      logger().info("[Test] connect_attempts={}", result->connect_attempts);
    } else if (bp == tag_bp_t{Tag::CLIENT_IDENT, bp_type_t::WRITE}) {
      ++result->client_connect_attempts;
      logger().info("[Test] client_connect_attempts={}", result->client_connect_attempts);
    } else if (bp == tag_bp_t{Tag::SESSION_RECONNECT, bp_type_t::WRITE}) {
      ++result->client_reconnect_attempts;
      logger().info("[Test] client_reconnect_attempts={}", result->client_reconnect_attempts);
    } else if (bp == custom_bp_t::SOCKET_ACCEPTED) {
      ++result->accept_attempts;
      logger().info("[Test] accept_attempts={}", result->accept_attempts);
    } else if (bp == tag_bp_t{Tag::CLIENT_IDENT, bp_type_t::READ}) {
      ++result->server_connect_attempts;
      logger().info("[Test] server_connect_attempts={}", result->server_connect_attempts);
    } else if (bp == tag_bp_t{Tag::SESSION_RECONNECT, bp_type_t::READ}) {
      ++result->server_reconnect_attempts;
      logger().info("[Test] server_reconnect_attempts={}", result->server_reconnect_attempts);
    }

    auto it_bp = breakpoints.find(bp);
    if (it_bp != breakpoints.end()) {
      auto it_cnt = it_bp->second.find(breakpoints_counter[bp].counter);
      if (it_cnt != it_bp->second.end()) {
        logger().info("[{}] {} intercepted {}({}) => {}",
                      result->index, *conn, bp,
                      breakpoints_counter[bp].counter, it_cnt->second);
        return it_cnt->second;
      }
    }
    logger().info("[{}] {} intercepted {}({})",
                  result->index, *conn, bp, breakpoints_counter[bp].counter);
    return bp_action_t::CONTINUE;
  }
};
// Map a test policy enumerator to the corresponding crimson socket policy
// (all constructed with zero feature bits); aborts on unknown values.
SocketPolicy to_socket_policy(policy_t policy) {
  switch (policy) {
  case policy_t::stateful_server:
    return SocketPolicy::stateful_server(0);
  case policy_t::stateless_server:
    return SocketPolicy::stateless_server(0);
  case policy_t::lossless_peer:
    return SocketPolicy::lossless_peer(0);
  case policy_t::lossless_peer_reuse:
    return SocketPolicy::lossless_peer_reuse(0);
  case policy_t::lossy_client:
    return SocketPolicy::lossy_client(0);
  case policy_t::lossless_client:
    return SocketPolicy::lossless_client(0);
  default:
    logger().error("unexpected policy type");
    ceph_abort();
  }
}
/**
 * FailoverSuite drives the messenger under test during a failover test.
 *
 * It owns the test messenger, tracks at most one connection at a time
 * (tracked_conn/tracked_index), and accounts for in-flight ops:
 *  - pending_send:         ops queued before a connection is available
 *  - pending_peer_receive: ops we sent whose peer ack has not arrived
 *  - pending_receive:      ops the peer was asked to send to us
 * The embedded TestInterceptor records connection events and breakpoints.
 *
 * Fixes (log/message text only, no behavior change):
 *  - ms_handle_accept: "traced_conn" -> "tracked_conn" (matches the member)
 *  - connect_peer: "an closed one" -> "a closed one"
 *  - wait_results: log said "wait_result" -- aligned with the function name
 */
class FailoverSuite : public Dispatcher {
  crimson::auth::DummyAuthClientServer dummy_auth;
  MessengerRef test_msgr;
  const entity_addr_t test_peer_addr;
  TestInterceptor interceptor;

  unsigned tracked_index = 0;
  ConnectionRef tracked_conn;
  unsigned pending_send = 0;
  unsigned pending_peer_receive = 0;
  unsigned pending_receive = 0;

  // Count an incoming op and wake wait_ready() when all expected arrived.
  std::optional<seastar::future<>> ms_dispatch(ConnectionRef c, MessageRef m) override {
    auto result = interceptor.find_result(c);
    if (result == nullptr) {
      logger().error("Untracked ms dispatched connection: {}", *c);
      ceph_abort();
    }

    if (tracked_conn != c) {
      logger().error("[{}] {} got op, but doesn't match tracked_conn [{}] {}",
                     result->index, *c, tracked_index, *tracked_conn);
      ceph_abort();
    }
    ceph_assert(result->index == tracked_index);

    ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
    ceph_assert(pending_receive > 0);
    --pending_receive;
    if (pending_receive == 0) {
      interceptor.notify();
    }
    logger().info("[Test] got op, left {} ops -- [{}] {}",
                  pending_receive, result->index, *c);
    return {seastar::now()};
  }

  // Track the newly accepted connection and flush any queued sends onto it.
  void ms_handle_accept(
      ConnectionRef conn,
      seastar::shard_id new_shard,
      bool is_replace) override {
    assert(new_shard == seastar::this_shard_id());
    auto result = interceptor.find_result(conn);
    if (result == nullptr) {
      logger().error("Untracked accepted connection: {}", *conn);
      ceph_abort();
    }

    if (tracked_conn &&
        !tracked_conn->is_closed() &&
        tracked_conn != conn) {
      logger().error("[{}] {} got accepted, but there's already tracked_conn [{}] {}",
                     result->index, *conn, tracked_index, *tracked_conn);
      ceph_abort();
    }
    tracked_index = result->index;
    tracked_conn = conn;

    ++result->cnt_accept_dispatched;
    logger().info("[Test] got accept (cnt_accept_dispatched={}), track [{}] {}",
                  result->cnt_accept_dispatched, result->index, *conn);
    std::ignore = flush_pending_send();
  }

  // Count a connect completion on the tracked connection.
  void ms_handle_connect(
      ConnectionRef conn,
      seastar::shard_id new_shard) override {
    assert(new_shard == seastar::this_shard_id());
    auto result = interceptor.find_result(conn);
    if (result == nullptr) {
      logger().error("Untracked connected connection: {}", *conn);
      ceph_abort();
    }

    if (tracked_conn != conn) {
      logger().error("[{}] {} got connected, but doesn't match tracked_conn [{}] {}",
                     result->index, *conn, tracked_index, *tracked_conn);
      ceph_abort();
    }
    ceph_assert(result->index == tracked_index);

    ++result->cnt_connect_dispatched;
    logger().info("[Test] got connected (cnt_connect_dispatched={}) -- [{}] {}",
                  result->cnt_connect_dispatched, result->index, *conn);
  }

  // The tracked connection was reset locally: stop tracking it.
  void ms_handle_reset(ConnectionRef conn, bool is_replace) override {
    auto result = interceptor.find_result(conn);
    if (result == nullptr) {
      logger().error("Untracked reset connection: {}", *conn);
      ceph_abort();
    }

    if (tracked_conn != conn) {
      logger().error("[{}] {} got reset, but doesn't match tracked_conn [{}] {}",
                     result->index, *conn, tracked_index, *tracked_conn);
      ceph_abort();
    }
    ceph_assert(result->index == tracked_index);

    tracked_index = 0;
    tracked_conn = nullptr;

    ++result->cnt_reset_dispatched;
    logger().info("[Test] got reset (cnt_reset_dispatched={}), untrack [{}] {}",
                  result->cnt_reset_dispatched, result->index, *conn);
  }

  // Count a remote reset on the tracked connection (tracking is kept).
  void ms_handle_remote_reset(ConnectionRef conn) override {
    auto result = interceptor.find_result(conn);
    if (result == nullptr) {
      logger().error("Untracked remotely reset connection: {}", *conn);
      ceph_abort();
    }

    if (tracked_conn != conn) {
      logger().error("[{}] {} got remotely reset, but doesn't match tracked_conn [{}] {}",
                     result->index, *conn, tracked_index, *tracked_conn);
      ceph_abort();
    }
    ceph_assert(result->index == tracked_index);

    ++result->cnt_remote_reset_dispatched;
    logger().info("[Test] got remote reset (cnt_remote_reset_dispatched={}) -- [{}] {}",
                  result->cnt_remote_reset_dispatched, result->index, *conn);
  }

 private:
  // Bind and start the test messenger with the interceptor installed.
  seastar::future<> init(entity_addr_t test_addr, SocketPolicy policy) {
    test_msgr->set_default_policy(policy);
    test_msgr->set_auth_client(&dummy_auth);
    test_msgr->set_auth_server(&dummy_auth);
    test_msgr->set_interceptor(&interceptor);
    return test_msgr->bind(entity_addrvec_t{test_addr}).safe_then([this] {
      return test_msgr->start({this});
    }, Messenger::bind_ertr::all_same_way([test_addr] (const std::error_code& e) {
      logger().error("FailoverSuite: "
                     "there is another instance running at {}", test_addr);
      ceph_abort();
    }));
  }

  // Send one dummy MOSDOp; when expect_reply, count it against the peer ack.
  seastar::future<> send_op(bool expect_reply=true) {
    ceph_assert(tracked_conn);
    if (expect_reply) {
      ++pending_peer_receive;
    }
    pg_t pgid;
    object_locator_t oloc;
    hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(),
                   pgid.pool(), oloc.nspace);
    spg_t spgid(pgid);
    return tracked_conn->send(crimson::make_message<MOSDOp>(0, 0, hobj, spgid, 0, 0, 0));
  }

  // Drain every op queued while no connection was available.
  seastar::future<> flush_pending_send() {
    if (pending_send != 0) {
      logger().info("[Test] flush sending {} ops", pending_send);
    }
    ceph_assert(tracked_conn);
    return seastar::do_until(
        [this] { return pending_send == 0; },
        [this] {
      --pending_send;
      return send_op();
    });
  }

  // Poll connection/result state until the requested conditions hold,
  // re-waiting on the interceptor each time something is still pending:
  //  - num_ready_conns: expected total number of registered connections
  //  - num_replaced:    expected number of cleanly replaced connections
  //  - wait_received:   also require all pending ops to be fully exchanged
  seastar::future<> wait_ready(unsigned num_ready_conns,
                               unsigned num_replaced,
                               bool wait_received) {
    unsigned pending_conns = 0;
    unsigned pending_establish = 0;
    unsigned replaced_conns = 0;
    for (auto& result : interceptor.results) {
      if (result.conn->is_closed_clean()) {
        if (result.state == conn_state_t::replaced) {
          ++replaced_conns;
        }
      } else if (result.conn->is_connected()) {
        if (tracked_conn != result.conn || tracked_index != result.index) {
          throw std::runtime_error(fmt::format(
                "The connected connection [{}] {} doesn't"
                " match the tracked connection [{}] {}",
                result.index, *result.conn, tracked_index, *tracked_conn));
        }
        if (pending_send == 0 && pending_peer_receive == 0 && pending_receive == 0) {
          result.state = conn_state_t::established;
        } else {
          ++pending_establish;
        }
      } else {
        ++pending_conns;
      }
    }

    bool do_wait = false;
    if (num_ready_conns > 0) {
      if (interceptor.results.size() > num_ready_conns) {
        throw std::runtime_error(fmt::format(
              "{} connections, more than expected: {}",
              interceptor.results.size(), num_ready_conns));
      } else if (interceptor.results.size() < num_ready_conns || pending_conns > 0) {
        logger().info("[Test] wait_ready(): wait for connections,"
                      " currently {} out of {}, pending {} ready ...",
                      interceptor.results.size(), num_ready_conns, pending_conns);
        do_wait = true;
      }
    }
    if (wait_received &&
        (pending_send || pending_peer_receive || pending_receive)) {
      if (pending_conns || pending_establish) {
        logger().info("[Test] wait_ready(): wait for pending_send={},"
                      " pending_peer_receive={}, pending_receive={},"
                      " pending {}/{} ready/establish connections ...",
                      pending_send, pending_peer_receive, pending_receive,
                      pending_conns, pending_establish);
        do_wait = true;
      }
    }
    if (num_replaced > 0) {
      if (replaced_conns > num_replaced) {
        throw std::runtime_error(fmt::format(
            "{} replaced connections, more than expected: {}",
            replaced_conns, num_replaced));
      }
      if (replaced_conns < num_replaced) {
        logger().info("[Test] wait_ready(): wait for {} replaced connections,"
                      " currently {} ...",
                      num_replaced, replaced_conns);
        do_wait = true;
      }
    }

    if (do_wait) {
      return interceptor.wait(
      ).then([this, num_ready_conns, num_replaced, wait_received] {
        return wait_ready(num_ready_conns, num_replaced, wait_received);
      });
    } else {
      logger().info("[Test] wait_ready(): wait done!");
      return seastar::now();
    }
  }

 // called by FailoverTest
 public:
  FailoverSuite(MessengerRef test_msgr,
                entity_addr_t test_peer_addr,
                const TestInterceptor& interceptor)
    : test_msgr(test_msgr),
      test_peer_addr(test_peer_addr),
      interceptor(interceptor) { }

  entity_addr_t get_addr() const {
    return test_msgr->get_myaddr();
  }

  seastar::future<> shutdown() {
    test_msgr->stop();
    return test_msgr->shutdown();
  }

  // Expect one more op from the peer (paired with peer_send_me()).
  void needs_receive() {
    ++pending_receive;
  }

  // The peer acknowledged one of our ops; wake wait_ready() on the last ack.
  void notify_peer_reply() {
    ceph_assert(pending_peer_receive > 0);
    --pending_peer_receive;
    logger().info("[Test] TestPeer said got op, left {} ops",
                  pending_peer_receive);
    if (pending_peer_receive == 0) {
      interceptor.notify();
    }
  }

  // Verify every configured breakpoint was reached often enough.
  void post_check() const {
    // make sure all breakpoints were hit
    for (auto& kv : interceptor.breakpoints) {
      auto it = interceptor.breakpoints_counter.find(kv.first);
      if (it == interceptor.breakpoints_counter.end()) {
        throw std::runtime_error(fmt::format("{} was missed", kv.first));
      }
      auto expected = kv.second.rbegin()->first;
      if (expected > it->second.counter) {
        throw std::runtime_error(fmt::format(
              "{} only triggered {} times, not the expected {}",
              kv.first, it->second.counter, expected));
      }
    }
  }

  void dump_results() const {
    for (auto& result : interceptor.results) {
      result.dump();
    }
  }

  // Create, bind and start a suite; returns it once the messenger is up.
  static seastar::future<std::unique_ptr<FailoverSuite>>
  create(entity_addr_t test_addr,
         SocketPolicy test_policy,
         entity_addr_t test_peer_addr,
         const TestInterceptor& interceptor) {
    auto suite = std::make_unique<FailoverSuite>(
        Messenger::create(
          entity_name_t::OSD(TEST_OSD),
          "Test",
          TEST_NONCE,
          true),
        test_peer_addr, interceptor);
    return suite->init(test_addr, test_policy
    ).then([suite = std::move(suite)] () mutable {
      return std::move(suite);
    });
  }

 // called by tests
 public:
  // Connect to the peer, start tracking the connection, flush queued sends.
  seastar::future<> connect_peer() {
    logger().info("[Test] connect_peer({})", test_peer_addr);
    auto conn = test_msgr->connect(test_peer_addr, entity_name_t::TYPE_OSD);
    auto result = interceptor.find_result(conn);
    ceph_assert(result != nullptr);

    if (tracked_conn) {
      if (tracked_conn->is_closed()) {
        ceph_assert(tracked_conn != conn);
        logger().info("[Test] this is a new session replacing a closed one");
      } else {
        ceph_assert(tracked_index == result->index);
        ceph_assert(tracked_conn == conn);
        logger().info("[Test] this is not a new session");
      }
    } else {
      logger().info("[Test] this is a new session");
    }
    tracked_index = result->index;
    tracked_conn = conn;

    return flush_pending_send();
  }

  // Send one op now, or queue it if no connection is tracked yet.
  seastar::future<> send_peer() {
    if (tracked_conn) {
      logger().info("[Test] send_peer()");
      ceph_assert(!pending_send);
      return send_op();
    } else {
      ++pending_send;
      logger().info("[Test] send_peer() (pending {})", pending_send);
      return seastar::now();
    }
  }

  seastar::future<> keepalive_peer() {
    logger().info("[Test] keepalive_peer()");
    ceph_assert(tracked_conn);
    return tracked_conn->send_keepalive();
  }

  // Send an op without expecting the peer to acknowledge it.
  seastar::future<> try_send_peer() {
    logger().info("[Test] try_send_peer()");
    ceph_assert(tracked_conn);
    return send_op(false);
  }

  // Mark the tracked connection down after letting in-flight acks drain.
  seastar::future<> markdown() {
    logger().info("[Test] markdown() in 100ms ...");
    ceph_assert(tracked_conn);
    // sleep to propagate potential remaining acks
    return seastar::sleep(100ms
    ).then([this] {
      tracked_conn->mark_down();
    });
  }

  // blocker is provided by the Interceptor base class.
  seastar::future<> wait_blocked() {
    logger().info("[Test] wait_blocked() ...");
    return interceptor.blocker.wait_blocked();
  }

  void unblock() {
    logger().info("[Test] unblock()");
    return interceptor.blocker.unblock();
  }

  seastar::future<> wait_replaced(unsigned count) {
    logger().info("[Test] wait_replaced({}) ...", count);
    return wait_ready(0, count, false);
  }

  seastar::future<> wait_established() {
    logger().info("[Test] wait_established() ...");
    return wait_ready(0, 0, true);
  }

  // Wait until `count` connections are ready and all ops are exchanged,
  // then hand the accumulated per-connection results to the caller.
  seastar::future<std::reference_wrapper<ConnResults>>
  wait_results(unsigned count) {
    logger().info("[Test] wait_results({}) ...", count);
    return wait_ready(count, 0, true).then([this] {
      return std::reference_wrapper<ConnResults>(interceptor.results);
    });
  }

  // Neither connected nor closed: the connection is waiting to reconnect.
  bool is_standby() {
    ceph_assert(tracked_conn);
    return !(tracked_conn->is_connected() || tracked_conn->is_closed());
  }
};
class FailoverTest : public Dispatcher {
crimson::auth::DummyAuthClientServer dummy_auth;
MessengerRef cmd_msgr;
ConnectionRef cmd_conn;
const entity_addr_t test_addr;
const entity_addr_t test_peer_addr;
std::optional<seastar::promise<>> recv_pong;
std::optional<seastar::promise<>> recv_cmdreply;
std::unique_ptr<FailoverSuite> test_suite;
// Handle control-channel traffic from CmdSrv: pong acks for pingpong(),
// command replies for prepare_cmd(), and suite_recv_op notifications which
// are forwarded to the running test suite.
std::optional<seastar::future<>> ms_dispatch(ConnectionRef c, MessageRef m) override {
  switch (m->get_type()) {
  case CEPH_MSG_PING:
    // reply to our pingpong() probe
    ceph_assert(recv_pong);
    recv_pong->set_value();
    recv_pong = std::nullopt;
    break;
  case MSG_COMMAND_REPLY:
    // peer finished executing a command issued by prepare_cmd()
    ceph_assert(recv_cmdreply);
    recv_cmdreply->set_value();
    recv_cmdreply = std::nullopt;
    break;
  case MSG_COMMAND: {
    // the only unsolicited command is the peer reporting a received op
    auto m_cmd = boost::static_pointer_cast<MCommand>(m);
    ceph_assert(static_cast<cmd_t>(m_cmd->cmd[0][0]) == cmd_t::suite_recv_op);
    ceph_assert(test_suite);
    test_suite->notify_peer_reply();
    break;
  }
  default:
    logger().error("{} got unexpected msg from cmd server: {}", *c, *m);
    ceph_abort();
  }
  return {seastar::now()};
}
private:
// Send `cmd` to CmdSrv (optionally customized by f_prepare) and resolve
// once the matching MSG_COMMAND_REPLY arrives via ms_dispatch().
seastar::future<> prepare_cmd(
    cmd_t cmd,
    std::function<void(MCommand&)>
      f_prepare = [] (auto& m) { return; }) {
  // only one command may be outstanding at a time
  assert(!recv_cmdreply);
  recv_cmdreply = seastar::promise<>();
  auto fut = recv_cmdreply->get_future();
  auto m = crimson::make_message<MCommand>();
  m->cmd.emplace_back(1, static_cast<char>(cmd));
  f_prepare(*m);
  return cmd_conn->send(std::move(m)).then([fut = std::move(fut)] () mutable {
    return std::move(fut);
  });
}
// Ask CmdSrv to start a peer suite with the given socket policy.
seastar::future<> start_peer(policy_t peer_policy) {
  return prepare_cmd(cmd_t::suite_start,
      [peer_policy] (auto& m) {
    // the policy travels as a one-byte second command argument
    m.cmd.emplace_back(1, static_cast<char>(peer_policy));
  });
}
// Ask CmdSrv to stop the currently running peer suite.
seastar::future<> stop_peer() {
  return prepare_cmd(cmd_t::suite_stop);
}
// Probe the command channel: send an MPing and resolve when the pong
// arrives via ms_dispatch().
seastar::future<> pingpong() {
  // only one ping may be outstanding at a time
  assert(!recv_pong);
  recv_pong = seastar::promise<>();
  auto fut = recv_pong->get_future();
  return cmd_conn->send(crimson::make_message<MPing>()
  ).then([fut = std::move(fut)] () mutable {
    return std::move(fut);
  });
}
// Start the command-channel messenger, connect to CmdSrv, and verify the
// channel with a ping/pong round trip.
seastar::future<> init(entity_addr_t cmd_peer_addr) {
  cmd_msgr->set_default_policy(SocketPolicy::lossy_client(0));
  cmd_msgr->set_auth_client(&dummy_auth);
  cmd_msgr->set_auth_server(&dummy_auth);
  return cmd_msgr->start({this}).then([this, cmd_peer_addr] {
    logger().info("CmdCli connect to CmdSrv({}) ...", cmd_peer_addr);
    cmd_conn = cmd_msgr->connect(cmd_peer_addr, entity_name_t::TYPE_OSD);
    return pingpong();
  });
}
public:
// test_addr/test_peer_addr are the addresses the suites under test will
// use; cmd_msgr carries the out-of-band control channel.
FailoverTest(MessengerRef cmd_msgr,
             entity_addr_t test_addr,
             entity_addr_t test_peer_addr)
  : cmd_msgr(cmd_msgr),
    test_addr(test_addr),
    test_peer_addr(test_peer_addr) { }
// Tell CmdSrv to shut down (fire-and-forget), give the message time to be
// delivered, then tear down the local command messenger.
seastar::future<> shutdown() {
  logger().info("CmdCli shutdown...");
  assert(!recv_cmdreply);
  auto m = crimson::make_message<MCommand>();
  m->cmd.emplace_back(1, static_cast<char>(cmd_t::shutdown));
  return cmd_conn->send(std::move(m)).then([] {
    // no reply is expected for shutdown; sleep to let the send drain
    return seastar::sleep(200ms);
  }).then([this] {
    cmd_msgr->stop();
    return cmd_msgr->shutdown();
  });
}
// Factory: build the command-channel client, connect it to CmdSrv at
// cmd_peer_addr, and return the ready FailoverTest.
static seastar::future<seastar::lw_shared_ptr<FailoverTest>>
create(entity_addr_t test_addr,
       entity_addr_t cmd_peer_addr,
       entity_addr_t test_peer_addr) {
  auto test = seastar::make_lw_shared<FailoverTest>(
      Messenger::create(
        entity_name_t::OSD(CMD_CLI_OSD),
        "CmdCli",
        CMD_CLI_NONCE,
        true),
      test_addr, test_peer_addr);
  return test->init(cmd_peer_addr).then([test] {
    logger().info("CmdCli ready");
    return test;
  });
}
// called by tests
public:
// Run one named failover scenario: create the local FailoverSuite with the
// given interceptor and policy, start the remote peer suite, execute the
// test body `f`, check all breakpoints fired, and tear everything down.
// On failure the per-connection results are dumped before rethrowing.
seastar::future<> run_suite(
    std::string name,
    const TestInterceptor& interceptor,
    policy_t test_policy,
    policy_t peer_policy,
    std::function<seastar::future<>(FailoverSuite&)>&& f) {
  logger().info("\n\n[{}]", name);
  ceph_assert(!test_suite);
  SocketPolicy test_policy_ = to_socket_policy(test_policy);
  return FailoverSuite::create(
      test_addr, test_policy_, test_peer_addr, interceptor
  ).then([this, peer_policy, f = std::move(f)] (auto suite) mutable {
    ceph_assert(suite->get_addr() == test_addr);
    test_suite.swap(suite);
    return start_peer(peer_policy).then([this, f = std::move(f)] {
      return f(*test_suite);
    }).then([this] {
      test_suite->post_check();
      logger().info("\n[SUCCESS]");
    }).handle_exception([this] (auto eptr) {
      logger().info("\n[FAIL: {}]", eptr);
      test_suite->dump_results();
      throw;
    }).then([this] {
      // cleanup runs only on success; a failure propagated above
      return stop_peer();
    }).then([this] {
      return test_suite->shutdown().then([this] {
        test_suite.reset();
      });
    });
  });
}
// Ask the remote peer to initiate a connection to our test address
// (address is passed as a formatted string in the command payload).
seastar::future<> peer_connect_me() {
  logger().info("[Test] peer_connect_me({})", test_addr);
  return prepare_cmd(cmd_t::suite_connect_me,
      [this] (auto& m) {
    m.cmd.emplace_back(fmt::format("{}", test_addr));
  });
}
// Ask the remote peer to send us an op, registering the expectation with
// the local suite so post-checks can verify the op was received.
seastar::future<> peer_send_me() {
  logger().info("[Test] peer_send_me()");
  ceph_assert(test_suite);
  test_suite->needs_receive();
  return prepare_cmd(cmd_t::suite_send_me);
}
// Ask the remote peer to send us an op WITHOUT registering a receive
// expectation — for scenarios where the op may legitimately be dropped.
seastar::future<> try_peer_send_me() {
  logger().info("[Test] try_peer_send_me()");
  ceph_assert(test_suite);
  return prepare_cmd(cmd_t::suite_send_me);
}
// Queue one op in each direction: local suite -> peer, then peer -> us.
seastar::future<> send_bidirectional() {
  ceph_assert(test_suite);
  return test_suite->send_peer().then([this] {
    return peer_send_me();
  });
}
// Ask the remote peer to send us a keepalive on its tracked connection.
seastar::future<> peer_keepalive_me() {
  logger().info("[Test] peer_keepalive_me()");
  ceph_assert(test_suite);
  return prepare_cmd(cmd_t::suite_keepalive_me);
}
// Ask the remote peer to mark its connection down. The surrounding sleeps
// (50ms before + 100ms after = the 150ms mentioned in the log) let any
// in-flight acks drain first and let the markdown propagate afterwards.
seastar::future<> markdown_peer() {
  logger().info("[Test] markdown_peer() in 150ms ...");
  // sleep to propagate potential remaining acks
  return seastar::sleep(50ms
  ).then([this] {
    return prepare_cmd(cmd_t::suite_markdown);
  }).then([] {
    // sleep awhile for peer markdown propagated
    return seastar::sleep(100ms);
  });
}
};
// Test-side peer endpoint, driven remotely by FailoverTestPeer (CmdSrv).
// Owns the "TestPeer" messenger, tracks the single connection under test,
// and queues ops requested before that connection exists.
//
// Fix: corrected the grammar of the reconnect log message
// ("an closed one" -> "a closed one"); no behavioral change otherwise.
class FailoverSuitePeer : public Dispatcher {
  using cb_t = std::function<seastar::future<>()>;
  crimson::auth::DummyAuthClientServer dummy_auth;
  MessengerRef peer_msgr;
  cb_t op_callback;            // fired whenever an op arrives from the Test side
  ConnectionRef tracked_conn;  // the single connection under test (may be null)
  unsigned pending_send = 0;   // sends requested before tracked_conn was ready

  // Every message from Test must be an OSD op on the tracked connection;
  // notify the owner via op_callback (result deliberately ignored).
  std::optional<seastar::future<>> ms_dispatch(ConnectionRef c, MessageRef m) override {
    logger().info("[TestPeer] got op from Test");
    ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
    ceph_assert(tracked_conn == c);
    std::ignore = op_callback();
    return {seastar::now()};
  }

  // Adopt the accepted connection — it must be the first one, a replacement
  // of a closed one, or the one already tracked — then flush queued sends.
  void ms_handle_accept(
      ConnectionRef conn,
      seastar::shard_id new_shard,
      bool is_replace) override {
    assert(new_shard == seastar::this_shard_id());
    logger().info("[TestPeer] got accept from Test");
    ceph_assert(!tracked_conn ||
                tracked_conn->is_closed() ||
                tracked_conn == conn);
    tracked_conn = conn;
    std::ignore = flush_pending_send();
  }

  // Forget the tracked connection on reset; later sends queue up again.
  void ms_handle_reset(ConnectionRef conn, bool is_replace) override {
    logger().info("[TestPeer] got reset from Test");
    ceph_assert(tracked_conn == conn);
    tracked_conn = nullptr;
  }

 private:
  // Bind and start the peer messenger; abort if the address is taken by
  // another running instance.
  seastar::future<> init(entity_addr_t test_peer_addr, SocketPolicy policy) {
    peer_msgr->set_default_policy(policy);
    peer_msgr->set_auth_client(&dummy_auth);
    peer_msgr->set_auth_server(&dummy_auth);
    return peer_msgr->bind(entity_addrvec_t{test_peer_addr}).safe_then([this] {
      return peer_msgr->start({this});
    }, Messenger::bind_ertr::all_same_way([test_peer_addr] (const std::error_code& e) {
      logger().error("FailoverSuitePeer: "
                     "there is another instance running at {}", test_peer_addr);
      ceph_abort();
    }));
  }

  // Send a minimal MOSDOp over the tracked connection.
  seastar::future<> send_op() {
    ceph_assert(tracked_conn);
    pg_t pgid;
    object_locator_t oloc;
    hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(),
                   pgid.pool(), oloc.nspace);
    spg_t spgid(pgid);
    return tracked_conn->send(crimson::make_message<MOSDOp>(0, 0, hobj, spgid, 0, 0, 0));
  }

  // Drain all ops queued while no connection was available.
  seastar::future<> flush_pending_send() {
    if (pending_send != 0) {
      logger().info("[TestPeer] flush sending {} ops", pending_send);
    }
    ceph_assert(tracked_conn);
    return seastar::do_until(
        [this] { return pending_send == 0; },
        [this] {
      --pending_send;
      return send_op();
    });
  }

 public:
  // op_callback: invoked once per op received from the Test side.
  FailoverSuitePeer(MessengerRef peer_msgr, cb_t op_callback)
    : peer_msgr(peer_msgr), op_callback(op_callback) { }

  seastar::future<> shutdown() {
    peer_msgr->stop();
    return peer_msgr->shutdown();
  }

  // Connect to the Test endpoint; keep tracking the same session when the
  // messenger returns an existing connection, otherwise adopt the new one.
  // Any queued sends are flushed afterwards.
  seastar::future<> connect_peer(entity_addr_t test_addr_decoded) {
    logger().info("[TestPeer] connect_peer({})", test_addr_decoded);
    auto new_tracked_conn = peer_msgr->connect(test_addr_decoded, entity_name_t::TYPE_OSD);
    if (tracked_conn) {
      if (tracked_conn->is_closed()) {
        ceph_assert(tracked_conn != new_tracked_conn);
        logger().info("[TestPeer] this is a new session"
                      " replacing a closed one");
      } else {
        ceph_assert(tracked_conn == new_tracked_conn);
        logger().info("[TestPeer] this is not a new session");
      }
    } else {
      logger().info("[TestPeer] this is a new session");
    }
    tracked_conn = new_tracked_conn;
    return flush_pending_send();
  }

  // Send an op now if connected, otherwise queue it for the next accept
  // or connect.
  seastar::future<> send_peer() {
    if (tracked_conn) {
      logger().info("[TestPeer] send_peer()");
      return send_op();
    } else {
      ++pending_send;
      logger().info("[TestPeer] send_peer() (pending {})", pending_send);
      return seastar::now();
    }
  }

  seastar::future<> keepalive_peer() {
    logger().info("[TestPeer] keepalive_peer()");
    ceph_assert(tracked_conn);
    return tracked_conn->send_keepalive();
  }

  // Mark the tracked connection down (synchronous; future resolves at once).
  seastar::future<> markdown() {
    logger().info("[TestPeer] markdown()");
    ceph_assert(tracked_conn);
    tracked_conn->mark_down();
    return seastar::now();
  }

  // Factory: build the TestPeer messenger, bind/start it at test_peer_addr
  // with the given policy, and return the ready suite.
  static seastar::future<std::unique_ptr<FailoverSuitePeer>>
  create(entity_addr_t test_peer_addr, const SocketPolicy& policy, cb_t op_callback) {
    auto suite = std::make_unique<FailoverSuitePeer>(
        Messenger::create(
          entity_name_t::OSD(TEST_PEER_OSD),
          "TestPeer",
          TEST_PEER_NONCE,
          true),
        op_callback
    );
    return suite->init(test_peer_addr, policy
    ).then([suite = std::move(suite)] () mutable {
      return std::move(suite);
    });
  }
};
// Remote command server (CmdSrv). Listens on the command channel for
// cmd_t commands from FailoverTest (CmdCli), and manages the lifecycle of
// the peer-side FailoverSuitePeer accordingly.
class FailoverTestPeer : public Dispatcher {
  crimson::auth::DummyAuthClientServer dummy_auth;
  MessengerRef cmd_msgr;
  ConnectionRef cmd_conn;                       // control connection from CmdCli
  const entity_addr_t test_peer_addr;           // where the test suite peer binds
  std::unique_ptr<FailoverSuitePeer> test_suite;

  // Command dispatcher: answer pings, execute suite commands (replying with
  // MCommandReply when done), and handle the final shutdown command.
  std::optional<seastar::future<>> ms_dispatch(ConnectionRef c, MessageRef m) override {
    ceph_assert(cmd_conn == c);
    switch (m->get_type()) {
     case CEPH_MSG_PING:
      std::ignore = c->send(crimson::make_message<MPing>());
      break;
     case MSG_COMMAND: {
      auto m_cmd = boost::static_pointer_cast<MCommand>(m);
      auto cmd = static_cast<cmd_t>(m_cmd->cmd[0][0]);
      if (cmd == cmd_t::shutdown) {
        logger().info("CmdSrv shutdown...");
        // forwarded to FailoverTestPeer::wait()
        cmd_msgr->stop();
        std::ignore = cmd_msgr->shutdown();
      } else {
        // background: run the command, then acknowledge to CmdCli
        std::ignore = handle_cmd(cmd, m_cmd).then([c] {
          return c->send(crimson::make_message<MCommandReply>());
        });
      }
      break;
     }
     default:
      logger().error("{} got unexpected msg from cmd client: {}", *c, *m);
      ceph_abort();
    }
    return {seastar::now()};
  }

  // Track the latest accepted command connection.
  void ms_handle_accept(
      ConnectionRef conn,
      seastar::shard_id new_shard,
      bool is_replace) override {
    assert(new_shard == seastar::this_shard_id());
    cmd_conn = conn;
  }

 private:
  // Tell CmdCli that the suite peer received an op (used as the
  // FailoverSuitePeer op_callback).
  seastar::future<> notify_recv_op() {
    ceph_assert(cmd_conn);
    auto m = crimson::make_message<MCommand>();
    m->cmd.emplace_back(1, static_cast<char>(cmd_t::suite_recv_op));
    return cmd_conn->send(std::move(m));
  }

  // Execute one suite command. Commands other than suite_start require a
  // running test_suite.
  seastar::future<> handle_cmd(cmd_t cmd, MRef<MCommand> m_cmd) {
    switch (cmd) {
     case cmd_t::suite_start: {
      ceph_assert(!test_suite);
      // second command token carries the requested socket policy
      auto policy = to_socket_policy(static_cast<policy_t>(m_cmd->cmd[1][0]));
      return FailoverSuitePeer::create(
          test_peer_addr, policy, [this] { return notify_recv_op(); }
      ).then([this] (auto suite) {
        test_suite.swap(suite);
      });
     }
     case cmd_t::suite_stop:
      ceph_assert(test_suite);
      return test_suite->shutdown().then([this] {
        test_suite.reset();
      });
     case cmd_t::suite_connect_me: {
      ceph_assert(test_suite);
      // second command token carries the Test address to connect to
      entity_addr_t test_addr_decoded = entity_addr_t();
      test_addr_decoded.parse(m_cmd->cmd[1].c_str(), nullptr);
      return test_suite->connect_peer(test_addr_decoded);
     }
     case cmd_t::suite_send_me:
      ceph_assert(test_suite);
      return test_suite->send_peer();
     case cmd_t::suite_keepalive_me:
      ceph_assert(test_suite);
      return test_suite->keepalive_peer();
     case cmd_t::suite_markdown:
      ceph_assert(test_suite);
      return test_suite->markdown();
     default:
      logger().error("TestPeer got unexpected command {} from Test",
                     fmt::ptr(m_cmd.get()));
      ceph_abort();
      return seastar::now();  // unreachable; keeps the compiler happy
    }
  }

  // Bind and start the command messenger as a stateless server; abort if
  // the address is taken by another running instance.
  seastar::future<> init(entity_addr_t cmd_peer_addr) {
    cmd_msgr->set_default_policy(SocketPolicy::stateless_server(0));
    cmd_msgr->set_auth_client(&dummy_auth);
    cmd_msgr->set_auth_server(&dummy_auth);
    return cmd_msgr->bind(entity_addrvec_t{cmd_peer_addr}).safe_then([this] {
      return cmd_msgr->start({this});
    }, Messenger::bind_ertr::all_same_way([cmd_peer_addr] (const std::error_code& e) {
      logger().error("FailoverTestPeer: "
                     "there is another instance running at {}", cmd_peer_addr);
      ceph_abort();
    }));
  }

 public:
  FailoverTestPeer(MessengerRef cmd_msgr,
                   entity_addr_t test_peer_addr)
    : cmd_msgr(cmd_msgr),
      test_peer_addr(test_peer_addr) { }

  // Blocks until the shutdown command stops the command messenger.
  seastar::future<> wait() {
    return cmd_msgr->wait();
  }

  // Factory: build the CmdSrv messenger, bind/start it at cmd_peer_addr,
  // and return the ready command server.
  static seastar::future<std::unique_ptr<FailoverTestPeer>>
  create(entity_addr_t cmd_peer_addr, entity_addr_t test_peer_addr) {
    auto test_peer = std::make_unique<FailoverTestPeer>(
        Messenger::create(
          entity_name_t::OSD(CMD_SRV_OSD),
          "CmdSrv",
          CMD_SRV_NONCE,
          true),
        test_peer_addr);
    return test_peer->init(cmd_peer_addr
    ).then([test_peer = std::move(test_peer)] () mutable {
      logger().info("CmdSrv ready");
      return std::move(test_peer);
    });
  }
};
// Lossy client vs stateless server: inject one fault at each breakpoint of
// the early connect phase (socket/banner/hello/auth). The client must retry
// transparently and still reach established on a single logical connection.
seastar::future<>
test_v2_lossy_early_connect_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {custom_bp_t::SOCKET_CONNECTING},
    {custom_bp_t::BANNER_WRITE},
    {custom_bp_t::BANNER_READ},
    {custom_bp_t::BANNER_PAYLOAD_READ},
    {Tag::HELLO, bp_type_t::WRITE},
    {Tag::HELLO, bp_type_t::READ},
    {Tag::AUTH_REQUEST, bp_type_t::WRITE},
    {Tag::AUTH_DONE, bp_type_t::READ},
    {Tag::AUTH_SIGNATURE, bp_type_t::WRITE},
    {Tag::AUTH_SIGNATURE, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossy_early_connect_fault -- {}", bp),
          interceptor,
          policy_t::lossy_client,
          policy_t::stateless_server,
          [] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(2, 1, 0, 1);
          results[0].assert_accept(0, 0, 0, 0);
          results[0].assert_reset(0, 0);
        });
      });
    });
  });
}
// Lossy client vs stateless server: fault at the ident exchange
// (CLIENT_IDENT write / SERVER_IDENT read). The client retries the connect
// and the connection still ends up established.
seastar::future<>
test_v2_lossy_connect_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {Tag::CLIENT_IDENT, bp_type_t::WRITE},
    {Tag::SERVER_IDENT, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossy_connect_fault -- {}", bp),
          interceptor,
          policy_t::lossy_client,
          policy_t::stateless_server,
          [] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(2, 2, 0, 1);
          results[0].assert_accept(0, 0, 0, 0);
          results[0].assert_reset(0, 0);
        });
      });
    });
  });
}
// Lossy client vs stateless server: fault a MESSAGE frame after the session
// is established. A lossy client does not reconnect, so the connection is
// reset and ends closed.
seastar::future<>
test_v2_lossy_connected_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {Tag::MESSAGE, bp_type_t::WRITE},
    {Tag::MESSAGE, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossy_connected_fault -- {}", bp),
          interceptor,
          policy_t::lossy_client,
          policy_t::stateless_server,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.send_bidirectional();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::closed);
          results[0].assert_connect(1, 1, 0, 1);
          results[0].assert_accept(0, 0, 0, 0);
          results[0].assert_reset(1, 0);
        });
      });
    });
  });
}
// Stateless server accepting from a lossy client: inject one fault at each
// early accept-phase breakpoint. The first accept attempt is closed and the
// client's retry produces a second, established accepted connection.
seastar::future<>
test_v2_lossy_early_accept_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {custom_bp_t::BANNER_WRITE},
    {custom_bp_t::BANNER_READ},
    {custom_bp_t::BANNER_PAYLOAD_READ},
    {Tag::HELLO, bp_type_t::WRITE},
    {Tag::HELLO, bp_type_t::READ},
    {Tag::AUTH_REQUEST, bp_type_t::READ},
    {Tag::AUTH_DONE, bp_type_t::WRITE},
    {Tag::AUTH_SIGNATURE, bp_type_t::WRITE},
    {Tag::AUTH_SIGNATURE, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossy_early_accept_fault -- {}", bp),
          interceptor,
          policy_t::stateless_server,
          policy_t::lossy_client,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.peer_send_me();
        }).then([&test] {
          return test.peer_connect_me();
        }).then([&suite] {
          return suite.wait_results(2);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::closed);
          results[0].assert_connect(0, 0, 0, 0);
          results[0].assert_accept(1, 0, 0, 0);
          results[0].assert_reset(0, 0);
          results[1].assert_state_at(conn_state_t::established);
          results[1].assert_connect(0, 0, 0, 0);
          results[1].assert_accept(1, 1, 0, 1);
          results[1].assert_reset(0, 0);
        });
      });
    });
  });
}
// Stateless server: fault reading the peer's CLIENT_IDENT during accept.
// The failed accept is closed; the client's retry establishes a second
// accepted connection.
seastar::future<>
test_v2_lossy_accept_fault(FailoverTest& test) {
  auto bp = Breakpoint{Tag::CLIENT_IDENT, bp_type_t::READ};
  TestInterceptor interceptor;
  interceptor.make_fault(bp);
  return test.run_suite(
      fmt::format("test_v2_lossy_accept_fault -- {}", bp),
      interceptor,
      policy_t::stateless_server,
      policy_t::lossy_client,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_send_me();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 1);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stateless server: fault writing SERVER_IDENT while establishing the
// accepted session. The first accepted connection is reset and closed; the
// client's retry establishes the second one.
seastar::future<>
test_v2_lossy_establishing_fault(FailoverTest& test) {
  auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::WRITE};
  TestInterceptor interceptor;
  interceptor.make_fault(bp);
  return test.run_suite(
      fmt::format("test_v2_lossy_establishing_fault -- {}", bp),
      interceptor,
      policy_t::stateless_server,
      policy_t::lossy_client,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_send_me();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 1);
      results[0].assert_reset(1, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 1);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stateless server: fault a MESSAGE frame after the accepted session is
// established. With a lossy client there is no recovery — the accepted
// connection is reset and ends closed.
seastar::future<>
test_v2_lossy_accepted_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {Tag::MESSAGE, bp_type_t::WRITE},
    {Tag::MESSAGE, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossy_accepted_fault -- {}", bp),
          interceptor,
          policy_t::stateless_server,
          policy_t::lossy_client,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.send_bidirectional();
        }).then([&test] {
          return test.peer_connect_me();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::closed);
          results[0].assert_connect(0, 0, 0, 0);
          results[0].assert_accept(1, 1, 0, 1);
          results[0].assert_reset(1, 0);
        });
      });
    });
  });
}
// Lossless client vs stateful server: fault at the ident exchange during
// connect. The lossless client retries the connect and the connection
// reaches established.
seastar::future<>
test_v2_lossless_connect_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {Tag::CLIENT_IDENT, bp_type_t::WRITE},
    {Tag::SERVER_IDENT, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossless_connect_fault -- {}", bp),
          interceptor,
          policy_t::lossless_client,
          policy_t::stateful_server,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.send_bidirectional();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(2, 2, 0, 1);
          results[0].assert_accept(0, 0, 0, 0);
          results[0].assert_reset(0, 0);
        });
      });
    });
  });
}
// Lossless client vs stateful server: fault a MESSAGE frame after
// establishment. Unlike the lossy case, the session reconnects and the
// connection remains established.
seastar::future<>
test_v2_lossless_connected_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {Tag::MESSAGE, bp_type_t::WRITE},
    {Tag::MESSAGE, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossless_connected_fault -- {}", bp),
          interceptor,
          policy_t::lossless_client,
          policy_t::stateful_server,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.send_bidirectional();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(2, 1, 1, 2);
          results[0].assert_accept(0, 0, 0, 0);
          results[0].assert_reset(0, 0);
        });
      });
    });
  });
}
// Lossless client vs stateful server: fault ACK/KEEPALIVE2/KEEPALIVE2_ACK
// frames while exchanging ops and keepalives in both directions. Each
// wait_established() lets the session recover before the next step; the
// single connection must survive with one reconnect.
seastar::future<>
test_v2_lossless_connected_fault2(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {Tag::ACK, bp_type_t::READ},
    {Tag::ACK, bp_type_t::WRITE},
    {Tag::KEEPALIVE2, bp_type_t::READ},
    {Tag::KEEPALIVE2, bp_type_t::WRITE},
    {Tag::KEEPALIVE2_ACK, bp_type_t::READ},
    {Tag::KEEPALIVE2_ACK, bp_type_t::WRITE},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossless_connected_fault2 -- {}", bp),
          interceptor,
          policy_t::lossless_client,
          policy_t::stateful_server,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_established();
        }).then([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.keepalive_peer();
        }).then([&suite] {
          return suite.wait_established();
        }).then([&test] {
          return test.peer_send_me();
        }).then([&test] {
          return test.peer_keepalive_me();
        }).then([&suite] {
          return suite.wait_established();
        }).then([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.wait_established();
        }).then([&test] {
          return test.peer_send_me();
        }).then([&suite] {
          return suite.wait_established();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(2, 1, 1, 2);
          results[0].assert_accept(0, 0, 0, 0);
          results[0].assert_reset(0, 0);
        });
      });
    });
  });
}
// Lossless client vs stateful server: fault a MESSAGE frame AND a frame of
// the subsequent reconnect attempt (SESSION_RECONNECT write or
// SESSION_RECONNECT_OK read). The client must reconnect again and still
// end established.
seastar::future<>
test_v2_lossless_reconnect_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<std::pair<Breakpoint, Breakpoint>>{
    {{Tag::MESSAGE, bp_type_t::WRITE},
     {Tag::SESSION_RECONNECT, bp_type_t::WRITE}},
    {{Tag::MESSAGE, bp_type_t::WRITE},
     {Tag::SESSION_RECONNECT_OK, bp_type_t::READ}},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp_pair) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp_pair.first);
      interceptor.make_fault(bp_pair.second);
      return test.run_suite(
          fmt::format("test_v2_lossless_reconnect_fault -- {}, {}",
                      bp_pair.first, bp_pair.second),
          interceptor,
          policy_t::lossless_client,
          policy_t::stateful_server,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.send_bidirectional();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(3, 1, 2, 2);
          results[0].assert_accept(0, 0, 0, 0);
          results[0].assert_reset(0, 0);
        });
      });
    });
  });
}
// Stateful server accepting from a lossless client: fault reading
// CLIENT_IDENT. The failed accept is closed; the client's retried attempt
// is accepted and established.
seastar::future<>
test_v2_lossless_accept_fault(FailoverTest& test) {
  auto bp = Breakpoint{Tag::CLIENT_IDENT, bp_type_t::READ};
  TestInterceptor interceptor;
  interceptor.make_fault(bp);
  return test.run_suite(
      fmt::format("test_v2_lossless_accept_fault -- {}", bp),
      interceptor,
      policy_t::stateful_server,
      policy_t::lossless_client,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.send_bidirectional();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 1);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stateful server: fault writing SERVER_IDENT while establishing. With a
// lossless client the session is preserved — the retried attempt REPLACES
// the first accepted connection, which carries on established.
seastar::future<>
test_v2_lossless_establishing_fault(FailoverTest& test) {
  auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::WRITE};
  TestInterceptor interceptor;
  interceptor.make_fault(bp);
  return test.run_suite(
      fmt::format("test_v2_lossless_establishing_fault -- {}", bp),
      interceptor,
      policy_t::stateful_server,
      policy_t::lossless_client,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.send_bidirectional();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stateful server: fault a MESSAGE frame after the accepted session is
// established. The lossless client reconnects; the reconnect attempt
// replaces into the original connection, which stays established.
seastar::future<>
test_v2_lossless_accepted_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {Tag::MESSAGE, bp_type_t::WRITE},
    {Tag::MESSAGE, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_lossless_accepted_fault -- {}", bp),
          interceptor,
          policy_t::stateful_server,
          policy_t::lossless_client,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.send_bidirectional();
        }).then([&test] {
          return test.peer_connect_me();
        }).then([&suite] {
          return suite.wait_results(2);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(0, 0, 0, 0);
          results[0].assert_accept(1, 1, 0, 2);
          results[0].assert_reset(0, 0);
          results[1].assert_state_at(conn_state_t::replaced);
          results[1].assert_connect(0, 0, 0, 0);
          results[1].assert_accept(1, 0);
          results[1].assert_reset(0, 0);
        });
      });
    });
  });
}
// Stateful server: fault a MESSAGE frame AND a frame of the client's
// reconnect as seen by the acceptor (SESSION_RECONNECT read or
// SESSION_RECONNECT_OK write). Three accept attempts result; the expected
// per-connection outcome depends on which reconnect frame was faulted.
seastar::future<>
test_v2_lossless_reaccept_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<std::pair<Breakpoint, Breakpoint>>{
    {{Tag::MESSAGE, bp_type_t::READ},
     {Tag::SESSION_RECONNECT, bp_type_t::READ}},
    {{Tag::MESSAGE, bp_type_t::READ},
     {Tag::SESSION_RECONNECT_OK, bp_type_t::WRITE}},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp_pair) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp_pair.first);
      interceptor.make_fault(bp_pair.second);
      return test.run_suite(
          fmt::format("test_v2_lossless_reaccept_fault -- {}, {}",
                      bp_pair.first, bp_pair.second),
          interceptor,
          policy_t::stateful_server,
          policy_t::lossless_client,
          [&test, bp = bp_pair.second] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.send_bidirectional();
        }).then([&test] {
          return test.peer_connect_me();
        }).then([&suite] {
          return suite.wait_results(3);
        }).then([bp] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(0, 0, 0, 0);
          // the expected event counts differ by which reconnect frame failed
          if (bp == Breakpoint{Tag::SESSION_RECONNECT, bp_type_t::READ}) {
            results[0].assert_accept(1, 1, 0, 2);
          } else {
            results[0].assert_accept(1, 1, 0, 3);
          }
          results[0].assert_reset(0, 0);
          if (bp == Breakpoint{Tag::SESSION_RECONNECT, bp_type_t::READ}) {
            results[1].assert_state_at(conn_state_t::closed);
          } else {
            results[1].assert_state_at(conn_state_t::replaced);
          }
          results[1].assert_connect(0, 0, 0, 0);
          results[1].assert_accept(1, 0, 1, 0);
          results[1].assert_reset(0, 0);
          results[2].assert_state_at(conn_state_t::replaced);
          results[2].assert_connect(0, 0, 0, 0);
          results[2].assert_accept(1, 0, 1, 0);
          results[2].assert_reset(0, 0);
        });
      });
    });
  });
}
// Lossless peer on both sides: fault at the ident exchange while we are
// the connector. The connect is retried and ends established.
seastar::future<>
test_v2_peer_connect_fault(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {Tag::CLIENT_IDENT, bp_type_t::WRITE},
    {Tag::SERVER_IDENT, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      interceptor.make_fault(bp);
      return test.run_suite(
          fmt::format("test_v2_peer_connect_fault -- {}", bp),
          interceptor,
          policy_t::lossless_peer,
          policy_t::lossless_peer,
          [] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_results(1);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(2, 2, 0, 1);
          results[0].assert_accept(0, 0, 0, 0);
          results[0].assert_reset(0, 0);
        });
      });
    });
  });
}
// Lossless peer on both sides: fault reading CLIENT_IDENT while we are the
// acceptor. First accept closes; the peer's retry is accepted and
// established.
seastar::future<>
test_v2_peer_accept_fault(FailoverTest& test) {
  auto bp = Breakpoint{Tag::CLIENT_IDENT, bp_type_t::READ};
  TestInterceptor interceptor;
  interceptor.make_fault(bp);
  return test.run_suite(
      fmt::format("test_v2_peer_accept_fault -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_send_me();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 1);
      results[1].assert_reset(0, 0);
    });
  });
}
// Lossless peer on both sides: fault writing SERVER_IDENT while we are the
// acceptor. The peer's retry replaces into the first connection, which
// stays established.
seastar::future<>
test_v2_peer_establishing_fault(FailoverTest& test) {
  auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::WRITE};
  TestInterceptor interceptor;
  interceptor.make_fault(bp);
  return test.run_suite(
      fmt::format("test_v2_peer_establishing_fault -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_send_me();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
    });
  });
}
// Lossless peer on both sides: fault a MESSAGE write after establishment
// while we are the connector. The session reconnects and stays established.
seastar::future<>
test_v2_peer_connected_fault_reconnect(FailoverTest& test) {
  auto bp = Breakpoint{Tag::MESSAGE, bp_type_t::WRITE};
  TestInterceptor interceptor;
  interceptor.make_fault(bp);
  return test.run_suite(
      fmt::format("test_v2_peer_connected_fault_reconnect -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(2, 1, 1, 2);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
    });
  });
}
// Lossless peer on both sides: fault a MESSAGE read after establishment.
// The peer reconnects to us; its reconnect attempt replaces into the
// original connection, which stays established.
seastar::future<>
test_v2_peer_connected_fault_reaccept(FailoverTest& test) {
  auto bp = Breakpoint{Tag::MESSAGE, bp_type_t::READ};
  TestInterceptor interceptor;
  interceptor.make_fault(bp);
  return test.run_suite(
      fmt::format("test_v2_peer_connected_fault_reaccept -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 1);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 0, 1, 0);
      results[1].assert_reset(0, 0);
    });
  });
}
// Probe which side wins a connection race: establish a plain connection
// (no interceptor) and report the established connection's peer_wins().
// Racing tests use the result to pick which side should lose.
seastar::future<bool>
check_peer_wins(FailoverTest& test) {
  return seastar::do_with(bool(), [&test] (auto& ret) {
    return test.run_suite("check_peer_wins",
                          TestInterceptor(),
                          policy_t::lossy_client,
                          policy_t::stateless_server,
                          [&ret] (FailoverSuite& suite) {
      return suite.connect_peer().then([&suite] {
        return suite.wait_results(1);
      }).then([&ret] (ConnResults& results) {
        results[0].assert_state_at(conn_state_t::established);
        ret = results[0].conn->peer_wins();
        logger().info("check_peer_wins: {}", ret);
      });
    }).then([&ret] {
      return ret;
    });
  });
}
// Lossless peers, reconnect race where the acceptor must lose:
// fault a MESSAGE read on our (acceptor) side, then BLOCK the peer's
// reconnect attempt at the given breakpoint (count in bp.first) while we
// also reconnect out. Once our outgoing reconnect is established, unblock;
// the peer's racing accept attempt must end closed.
seastar::future<>
test_v2_racing_reconnect_acceptor_lose(FailoverTest& test) {
  return seastar::do_with(std::vector<std::pair<unsigned, Breakpoint>>{
    {1, {Tag::SESSION_RECONNECT, bp_type_t::READ}},
    {2, {custom_bp_t::BANNER_WRITE}},
    {2, {custom_bp_t::BANNER_READ}},
    {2, {custom_bp_t::BANNER_PAYLOAD_READ}},
    {2, {Tag::HELLO, bp_type_t::WRITE}},
    {2, {Tag::HELLO, bp_type_t::READ}},
    {2, {Tag::AUTH_REQUEST, bp_type_t::READ}},
    {2, {Tag::AUTH_DONE, bp_type_t::WRITE}},
    {2, {Tag::AUTH_SIGNATURE, bp_type_t::WRITE}},
    {2, {Tag::AUTH_SIGNATURE, bp_type_t::READ}},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      // fault acceptor
      interceptor.make_fault({Tag::MESSAGE, bp_type_t::READ});
      // block acceptor
      interceptor.make_block(bp.second, bp.first);
      return test.run_suite(
          fmt::format("test_v2_racing_reconnect_acceptor_lose -- {}({})",
                      bp.second, bp.first),
          interceptor,
          policy_t::lossless_peer,
          policy_t::lossless_peer,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.peer_send_me();
        }).then([&test] {
          return test.peer_connect_me();
        }).then([&suite] {
          return suite.wait_blocked();
        }).then([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.wait_established();
        }).then([&suite] {
          suite.unblock();
          return suite.wait_results(2);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(1, 0, 1, 1);
          results[0].assert_accept(1, 1, 0, 1);
          results[0].assert_reset(0, 0);
          results[1].assert_state_at(conn_state_t::closed);
          results[1].assert_connect(0, 0, 0, 0);
          results[1].assert_accept(1, 0);
          results[1].assert_reset(0, 0);
        });
      });
    });
  });
}
// Race an in-progress reconnect against the peer's reconnect, arranged so
// the acceptor side wins the race.  For each {unblock-count, breakpoint}
// case: inject a fault at MESSAGE/WRITE on the connector and block the
// connector path at the given breakpoint while the peer races in.  After
// unblocking, the original connection must end up established and the
// racing accepted one must replace (results[1] in state replaced).
// NOTE(review): assert_connect()/assert_accept() arguments are counter
// expectations defined by ConnResult elsewhere -- verify there first.
seastar::future<>
test_v2_racing_reconnect_acceptor_win(FailoverTest& test) {
  return seastar::do_with(std::vector<std::pair<unsigned, Breakpoint>>{
    {1, {Tag::SESSION_RECONNECT, bp_type_t::WRITE}},
    {2, {custom_bp_t::SOCKET_CONNECTING}},
    {2, {custom_bp_t::BANNER_WRITE}},
    {2, {custom_bp_t::BANNER_READ}},
    {2, {custom_bp_t::BANNER_PAYLOAD_READ}},
    {2, {Tag::HELLO, bp_type_t::WRITE}},
    {2, {Tag::HELLO, bp_type_t::READ}},
    {2, {Tag::AUTH_REQUEST, bp_type_t::WRITE}},
    {2, {Tag::AUTH_DONE, bp_type_t::READ}},
    {2, {Tag::AUTH_SIGNATURE, bp_type_t::WRITE}},
    {2, {Tag::AUTH_SIGNATURE, bp_type_t::READ}},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      // fault connector
      interceptor.make_fault({Tag::MESSAGE, bp_type_t::WRITE});
      // block connector
      interceptor.make_block(bp.second, bp.first);
      return test.run_suite(
          fmt::format("test_v2_racing_reconnect_acceptor_win -- {}({})",
                      bp.second, bp.first),
          interceptor,
          policy_t::lossless_peer,
          policy_t::lossless_peer,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_blocked();
        }).then([&test] {
          return test.peer_send_me();
        }).then([&suite] {
          return suite.wait_replaced(1);
        }).then([&suite] {
          suite.unblock();
          return suite.wait_results(2);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(2, 1);
          results[0].assert_accept(0, 0, 0, 1);
          results[0].assert_reset(0, 0);
          results[1].assert_state_at(conn_state_t::replaced);
          results[1].assert_connect(0, 0, 0, 0);
          results[1].assert_accept(1, 0, 1, 0);
          results[1].assert_reset(0, 0);
        });
      });
    });
  });
}
// Race two simultaneous initial connects, arranged so the acceptor side
// loses.  For each breakpoint: block the acceptor path at that point,
// have the peer connect first (and hit the block), then connect out
// ourselves.  After unblocking, the blocked accepted connection must end
// up closed and our outgoing one established.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_racing_connect_acceptor_lose(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {custom_bp_t::BANNER_WRITE},
    {custom_bp_t::BANNER_READ},
    {custom_bp_t::BANNER_PAYLOAD_READ},
    {Tag::HELLO, bp_type_t::WRITE},
    {Tag::HELLO, bp_type_t::READ},
    {Tag::AUTH_REQUEST, bp_type_t::READ},
    {Tag::AUTH_DONE, bp_type_t::WRITE},
    {Tag::AUTH_SIGNATURE, bp_type_t::WRITE},
    {Tag::AUTH_SIGNATURE, bp_type_t::READ},
    {Tag::CLIENT_IDENT, bp_type_t::READ},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      // block acceptor
      interceptor.make_block(bp);
      return test.run_suite(
          fmt::format("test_v2_racing_connect_acceptor_lose -- {}", bp),
          interceptor,
          policy_t::lossless_peer,
          policy_t::lossless_peer,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&test] {
          return test.peer_send_me();
        }).then([&test] {
          return test.peer_connect_me();
        }).then([&suite] {
          return suite.wait_blocked();
        }).then([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_established();
        }).then([&suite] {
          suite.unblock();
          return suite.wait_results(2);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::closed);
          results[0].assert_connect(0, 0, 0, 0);
          results[0].assert_accept(1, 0);
          results[0].assert_reset(0, 0);
          results[1].assert_state_at(conn_state_t::established);
          results[1].assert_connect(1, 1, 0, 1);
          results[1].assert_accept(0, 0, 0, 0);
          results[1].assert_reset(0, 0);
        });
      });
    });
  });
}
// Race two simultaneous initial connects, arranged so the acceptor side
// wins.  For each breakpoint: block the connector path at that point,
// connect out first (and hit the block), then let the peer connect in.
// The incoming connection replaces the blocked attempt; after unblocking,
// our connection must still end up established and the accepted racing
// one in state replaced.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_racing_connect_acceptor_win(FailoverTest& test) {
  return seastar::do_with(std::vector<Breakpoint>{
    {custom_bp_t::SOCKET_CONNECTING},
    {custom_bp_t::BANNER_WRITE},
    {custom_bp_t::BANNER_READ},
    {custom_bp_t::BANNER_PAYLOAD_READ},
    {Tag::HELLO, bp_type_t::WRITE},
    {Tag::HELLO, bp_type_t::READ},
    {Tag::AUTH_REQUEST, bp_type_t::WRITE},
    {Tag::AUTH_DONE, bp_type_t::READ},
    {Tag::AUTH_SIGNATURE, bp_type_t::WRITE},
    {Tag::AUTH_SIGNATURE, bp_type_t::READ},
    {Tag::CLIENT_IDENT, bp_type_t::WRITE},
  }, [&test] (auto& failure_cases) {
    return seastar::do_for_each(failure_cases, [&test] (auto bp) {
      TestInterceptor interceptor;
      // block connector
      interceptor.make_block(bp);
      return test.run_suite(
          fmt::format("test_v2_racing_connect_acceptor_win -- {}", bp),
          interceptor,
          policy_t::lossless_peer,
          policy_t::lossless_peer,
          [&test] (FailoverSuite& suite) {
        return seastar::futurize_invoke([&suite] {
          return suite.send_peer();
        }).then([&suite] {
          return suite.connect_peer();
        }).then([&suite] {
          return suite.wait_blocked();
        }).then([&test] {
          return test.peer_send_me();
        }).then([&test] {
          return test.peer_connect_me();
        }).then([&suite] {
          return suite.wait_replaced(1);
        }).then([&suite] {
          suite.unblock();
          return suite.wait_results(2);
        }).then([] (ConnResults& results) {
          results[0].assert_state_at(conn_state_t::established);
          results[0].assert_connect(1, 0);
          results[0].assert_accept(0, 0, 0, 1);
          results[0].assert_reset(0, 0);
          results[1].assert_state_at(conn_state_t::replaced);
          results[1].assert_connect(0, 0, 0, 0);
          results[1].assert_accept(1, 1, 0, 0);
          results[1].assert_reset(0, 0);
        });
      });
    });
  });
}
// Race an initial connect against a reconnect where the connect path
// loses: a fault is injected at SERVER_IDENT/READ to force a reconnect,
// and the second CLIENT_IDENT/WRITE is blocked so the peer's connection
// replaces ours before we unblock.  The original connection must still
// end up established (after 2 connect attempts) and the accepted racing
// connection in state replaced.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_racing_connect_reconnect_lose(FailoverTest& test) {
  TestInterceptor interceptor;
  interceptor.make_fault({Tag::SERVER_IDENT, bp_type_t::READ});
  interceptor.make_block({Tag::CLIENT_IDENT, bp_type_t::WRITE}, 2);
  return test.run_suite("test_v2_racing_connect_reconnect_lose",
                        interceptor,
                        policy_t::lossless_peer,
                        policy_t::lossless_peer,
                        [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.wait_blocked();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_replaced(1);
    }).then([&suite] {
      suite.unblock();
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(2, 2, 0, 0);
      results[0].assert_accept(0, 0, 0, 1);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 1, 0);
      results[1].assert_reset(0, 0);
    });
  });
}
// Race an initial connect against a reconnect where the connect path
// wins: a fault at SERVER_IDENT/READ forces a reconnect while the peer's
// SESSION_RECONNECT/READ is blocked.  We wait until our connection is
// established before unblocking; the accepted racing connection must then
// end up closed.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_racing_connect_reconnect_win(FailoverTest& test) {
  TestInterceptor interceptor;
  interceptor.make_fault({Tag::SERVER_IDENT, bp_type_t::READ});
  interceptor.make_block({Tag::SESSION_RECONNECT, bp_type_t::READ});
  return test.run_suite("test_v2_racing_connect_reconnect_win",
                        interceptor,
                        policy_t::lossless_peer,
                        policy_t::lossless_peer,
                        [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.wait_blocked();
    }).then([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.wait_established();
    }).then([&suite] {
      suite.unblock();
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(2, 2, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::closed);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 0, 1, 0);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stall (rather than block/unblock cleanly) an outgoing connect at
// SERVER_IDENT/READ, then let the peer drive a new connection which must
// replace the stale attempt.  After unblocking, the stale connection must
// recover to established and the replacing accepted one end in state
// replaced.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_stale_connect(FailoverTest& test) {
  auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::READ};
  TestInterceptor interceptor;
  interceptor.make_stall(bp);
  return test.run_suite(
      fmt::format("test_v2_stale_connect -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.wait_blocked();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_replaced(1);
    }).then([&suite] {
      suite.unblock();
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(1, 1, 0, 0);
      results[0].assert_accept(0, 0, 0, 1);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 1, 0);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stall a reconnect at SESSION_RECONNECT_OK/READ: first inject a fault at
// MESSAGE/WRITE to trigger the reconnect, stall it, and let the peer's
// new connection replace the stale attempt.  After unblocking, the stale
// connection must recover to established and the replacing accepted one
// end in state replaced.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_stale_reconnect(FailoverTest& test) {
  auto bp = Breakpoint{Tag::SESSION_RECONNECT_OK, bp_type_t::READ};
  TestInterceptor interceptor;
  interceptor.make_fault({Tag::MESSAGE, bp_type_t::WRITE});
  interceptor.make_stall(bp);
  return test.run_suite(
      fmt::format("test_v2_stale_reconnect -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.wait_blocked();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_replaced(1);
    }).then([&suite] {
      suite.unblock();
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(2, 1, 1, 1);
      results[0].assert_accept(0, 0, 0, 1);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 0, 1, 0);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stall an accept at CLIENT_IDENT/READ, then let the peer connect again;
// the second accepted connection must become established while the stale
// first accept, once unblocked, ends up closed.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_stale_accept(FailoverTest& test) {
  auto bp = Breakpoint{Tag::CLIENT_IDENT, bp_type_t::READ};
  TestInterceptor interceptor;
  interceptor.make_stall(bp);
  return test.run_suite(
      fmt::format("test_v2_stale_accept -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_blocked();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_established();
    }).then([&suite] {
      suite.unblock();
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 1);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stall an accept while it is establishing (SERVER_IDENT/WRITE), then let
// the peer reconnect; the reconnect replaces the stalled attempt.  After
// unblocking, the original accepted connection must reach established
// (with 2 establish events) and the replacing one end in state replaced.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_stale_establishing(FailoverTest& test) {
  auto bp = Breakpoint{Tag::SERVER_IDENT, bp_type_t::WRITE};
  TestInterceptor interceptor;
  interceptor.make_stall(bp);
  return test.run_suite(
      fmt::format("test_v2_stale_establishing -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_blocked();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_replaced(1);
    }).then([&suite] {
      suite.unblock();
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 0);
      results[1].assert_reset(0, 0);
    });
  });
}
// Stall a re-accept at SESSION_RECONNECT_OK/WRITE after a MESSAGE/READ
// fault forces reconnection.  The broken REPLACING attempt is left
// blocked for 210ms of real time -- presumably long enough for the peer
// to retry (TODO confirm against the reconnect retry interval) -- so a
// third connection appears.  Final expectation: the original connection
// established (3 establish events), both racing attempts replaced, and
// the last one having seen at least one server-side reconnect attempt.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_stale_reaccept(FailoverTest& test) {
  auto bp = Breakpoint{Tag::SESSION_RECONNECT_OK, bp_type_t::WRITE};
  TestInterceptor interceptor;
  interceptor.make_fault({Tag::MESSAGE, bp_type_t::READ});
  interceptor.make_stall(bp);
  return test.run_suite(
      fmt::format("test_v2_stale_reaccept -- {}", bp),
      interceptor,
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      return test.peer_send_me();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&suite] {
      return suite.wait_blocked();
    }).then([] {
      logger().info("[Test] block the broken REPLACING for 210ms...");
      return seastar::sleep(210ms);
    }).then([&suite] {
      suite.unblock();
      return suite.wait_results(3);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 3);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 0, 1, 0);
      results[1].assert_reset(0, 0);
      results[2].assert_state_at(conn_state_t::replaced);
      results[2].assert_connect(0, 0, 0, 0);
      results[2].assert_accept(1, 0);
      results[2].assert_reset(0, 0);
      ceph_assert(results[2].server_reconnect_attempts >= 1);
    });
  });
}
// Full lifecycle of a lossy_client against a stateless_server, in four
// phases: (0) setup + bidirectional traffic; (1) local client markdown
// and reconnect -> a second, fresh connection; (2) server markdown ->
// the client connection is reset remotely and closed (no auto-retry for
// a lossy client); (3) explicit client reconnect -> a third connection.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_lossy_client(FailoverTest& test) {
  return test.run_suite(
      "test_v2_lossy_client",
      TestInterceptor(),
      policy_t::lossy_client,
      policy_t::stateless_server,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&suite] {
      logger().info("-- 0 --");
      logger().info("[Test] setup connection...");
      return suite.connect_peer();
    }).then([&test] {
      return test.send_bidirectional();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
    }).then([&suite] {
      logger().info("-- 1 --");
      logger().info("[Test] client markdown...");
      return suite.markdown();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(1, 1, 0, 1);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 2 --");
      logger().info("[Test] server markdown...");
      return test.markdown_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::closed);
      results[1].assert_connect(1, 1, 0, 1);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(1, 0);
    }).then([&suite] {
      logger().info("-- 3 --");
      logger().info("[Test] client reconnect...");
      return suite.connect_peer();
    }).then([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.wait_results(3);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::closed);
      results[1].assert_connect(1, 1, 0, 1);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(1, 0);
      results[2].assert_state_at(conn_state_t::established);
      results[2].assert_connect(1, 1, 0, 1);
      results[2].assert_accept(0, 0, 0, 0);
      results[2].assert_reset(0, 0);
    });
  });
}
// Mirror of test_v2_lossy_client from the server's perspective: a
// stateless_server accepting from a lossy_client peer, in four phases:
// (0) setup + bidirectional traffic; (1) peer (client) markdown and
// reconnect -> first accepted connection is reset, a second is accepted;
// (2) local server markdown -> both connections closed; (3) peer
// reconnects -> a third accepted connection becomes established.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_stateless_server(FailoverTest& test) {
  return test.run_suite(
      "test_v2_stateless_server",
      TestInterceptor(),
      policy_t::stateless_server,
      policy_t::lossy_client,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      logger().info("-- 0 --");
      logger().info("[Test] setup connection...");
      return test.peer_connect_me();
    }).then([&test] {
      return test.send_bidirectional();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 1);
      results[0].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 1 --");
      logger().info("[Test] client markdown...");
      return test.markdown_peer();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 1);
      results[0].assert_reset(1, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 1);
      results[1].assert_reset(0, 0);
    }).then([&suite] {
      logger().info("-- 2 --");
      logger().info("[Test] server markdown...");
      return suite.markdown();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 1);
      results[0].assert_reset(1, 0);
      results[1].assert_state_at(conn_state_t::closed);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 1);
      results[1].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 3 --");
      logger().info("[Test] client reconnect...");
      return test.peer_connect_me();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_results(3);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 1);
      results[0].assert_reset(1, 0);
      results[1].assert_state_at(conn_state_t::closed);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 1);
      results[1].assert_reset(0, 0);
      results[2].assert_state_at(conn_state_t::established);
      results[2].assert_connect(0, 0, 0, 0);
      results[2].assert_accept(1, 1, 0, 1);
      results[2].assert_reset(0, 0);
    });
  });
}
// Full lifecycle of a lossless_client against a stateful_server, in four
// phases: (0) setup + bidirectional traffic; (1) local client markdown
// and reconnect -> a second, fresh connection; (2) server markdown ->
// the lossless client auto-reconnects the SAME connection (note the
// connect counters on results[1] grow to 2 rather than a third result
// appearing); (3) explicit reconnect is a no-op on the already-recovered
// connection, so expectations are unchanged from phase 2.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_lossless_client(FailoverTest& test) {
  return test.run_suite(
      "test_v2_lossless_client",
      TestInterceptor(),
      policy_t::lossless_client,
      policy_t::stateful_server,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&suite] {
      logger().info("-- 0 --");
      logger().info("[Test] setup connection...");
      return suite.connect_peer();
    }).then([&test] {
      return test.send_bidirectional();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
    }).then([&suite] {
      logger().info("-- 1 --");
      logger().info("[Test] client markdown...");
      return suite.markdown();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(1, 1, 0, 1);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 2 --");
      logger().info("[Test] server markdown...");
      return test.markdown_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(2, 2, 1, 2);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(0, 1);
    }).then([&suite] {
      logger().info("-- 3 --");
      logger().info("[Test] client reconnect...");
      return suite.connect_peer();
    }).then([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(2, 2, 1, 2);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(0, 1);
    });
  });
}
// Mirror of test_v2_lossless_client from the server's perspective: a
// stateful_server accepting from a lossless_client, in four phases:
// (0) setup + bidirectional traffic; (1) peer (client) markdown and
// reconnect -> the original connection survives via replacement
// (results[1] ends replaced, results[0] re-established); (2) local
// server markdown -> client auto-reconnects, producing a third accepted
// connection; (3) explicit peer reconnect changes nothing further.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_stateful_server(FailoverTest& test) {
  return test.run_suite(
      "test_v2_stateful_server",
      TestInterceptor(),
      policy_t::stateful_server,
      policy_t::lossless_client,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      logger().info("-- 0 --");
      logger().info("[Test] setup connection...");
      return test.peer_connect_me();
    }).then([&test] {
      return test.send_bidirectional();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 1);
      results[0].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 1 --");
      logger().info("[Test] client markdown...");
      return test.markdown_peer();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 1);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&suite] {
      logger().info("-- 2 --");
      logger().info("[Test] server markdown...");
      return suite.markdown();
    }).then([&suite] {
      return suite.wait_results(3);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 1);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
      results[2].assert_state_at(conn_state_t::established);
      results[2].assert_connect(0, 0, 0, 0);
      results[2].assert_accept(1, 1, 1, 1);
      results[2].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 3 --");
      logger().info("[Test] client reconnect...");
      return test.peer_connect_me();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_results(3);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 1);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
      results[2].assert_state_at(conn_state_t::established);
      results[2].assert_connect(0, 0, 0, 0);
      results[2].assert_accept(1, 1, 1, 1);
      results[2].assert_reset(0, 0);
    });
  });
}
// Lifecycle of the connector side under lossless_peer_reuse on both ends:
// (0) setup + bidirectional traffic; (1) local markdown and reconnect ->
// a second fresh connection; (2) acceptor markdown -> the local side is
// expected to sit in standby (ceph_assert(is_standby())); (3) explicit
// reconnect + try_send -> the second connection recovers in place (its
// connect/reset counters grow) rather than a third result appearing.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_peer_reuse_connector(FailoverTest& test) {
  return test.run_suite(
      "test_v2_peer_reuse_connector",
      TestInterceptor(),
      policy_t::lossless_peer_reuse,
      policy_t::lossless_peer_reuse,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&suite] {
      logger().info("-- 0 --");
      logger().info("[Test] setup connection...");
      return suite.connect_peer();
    }).then([&test] {
      return test.send_bidirectional();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
    }).then([&suite] {
      logger().info("-- 1 --");
      logger().info("[Test] connector markdown...");
      return suite.markdown();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(1, 1, 0, 1);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 2 --");
      logger().info("[Test] acceptor markdown...");
      return test.markdown_peer();
    }).then([&suite] {
      ceph_assert(suite.is_standby());
      logger().info("-- 3 --");
      logger().info("[Test] connector reconnect...");
      return suite.connect_peer();
    }).then([&suite] {
      return suite.try_send_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(2, 2, 1, 2);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(0, 1);
    });
  });
}
// Lifecycle of the acceptor side under lossless_peer_reuse on both ends:
// (0) setup + bidirectional traffic; (1) peer (connector) markdown and
// reconnect -> the original accepted connection is re-established via a
// replacement (results[1] ends replaced); (2) local acceptor markdown ->
// both results end closed/replaced; (3) peer reconnect + try_send -> a
// third accepted connection becomes established.
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_peer_reuse_acceptor(FailoverTest& test) {
  return test.run_suite(
      "test_v2_peer_reuse_acceptor",
      TestInterceptor(),
      policy_t::lossless_peer_reuse,
      policy_t::lossless_peer_reuse,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      logger().info("-- 0 --");
      logger().info("[Test] setup connection...");
      return test.peer_connect_me();
    }).then([&test] {
      return test.send_bidirectional();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 1);
      results[0].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 1 --");
      logger().info("[Test] connector markdown...");
      return test.markdown_peer();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 1);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&suite] {
      logger().info("-- 2 --");
      logger().info("[Test] acceptor markdown...");
      return suite.markdown();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 1);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 3 --");
      logger().info("[Test] connector reconnect...");
      return test.peer_connect_me();
    }).then([&test] {
      return test.try_peer_send_me();
    }).then([&suite] {
      return suite.wait_results(3);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 1);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
      results[2].assert_state_at(conn_state_t::established);
      results[2].assert_connect(0, 0, 0, 0);
      results[2].assert_accept(1, 1, 1, 1);
      results[2].assert_reset(0, 0);
    });
  });
}
// Lifecycle of the connector side under lossless_peer on both ends.
// Same phase structure as test_v2_peer_reuse_connector: (0) setup,
// (1) local markdown + reconnect, (2) acceptor markdown with the local
// side asserted to be in standby, (3) reconnect + try_send recovering
// the second connection in place (connect counters grow to 2/2/1/2).
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_lossless_peer_connector(FailoverTest& test) {
  return test.run_suite(
      "test_v2_lossless_peer_connector",
      TestInterceptor(),
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&suite] {
      logger().info("-- 0 --");
      logger().info("[Test] setup connection...");
      return suite.connect_peer();
    }).then([&test] {
      return test.send_bidirectional();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
    }).then([&suite] {
      logger().info("-- 1 --");
      logger().info("[Test] connector markdown...");
      return suite.markdown();
    }).then([&suite] {
      return suite.connect_peer();
    }).then([&suite] {
      return suite.send_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(1, 1, 0, 1);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 2 --");
      logger().info("[Test] acceptor markdown...");
      return test.markdown_peer();
    }).then([&suite] {
      ceph_assert(suite.is_standby());
      logger().info("-- 3 --");
      logger().info("[Test] connector reconnect...");
      return suite.connect_peer();
    }).then([&suite] {
      return suite.try_send_peer();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(1, 1, 0, 1);
      results[0].assert_accept(0, 0, 0, 0);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::established);
      results[1].assert_connect(2, 2, 1, 2);
      results[1].assert_accept(0, 0, 0, 0);
      results[1].assert_reset(0, 1);
    });
  });
}
// Lifecycle of the acceptor side under lossless_peer on both ends.
// Same phase structure as test_v2_peer_reuse_acceptor: (0) setup,
// (1) peer markdown + reconnect (original connection re-established via
// replacement), (2) local acceptor markdown, (3) peer reconnect +
// try_send producing a third established accepted connection.  Note the
// reset expectations differ from the peer_reuse variant (all zeros here).
// NOTE(review): assert_connect()/assert_accept() take expected counter
// values defined by ConnResult elsewhere -- confirm before editing.
seastar::future<>
test_v2_lossless_peer_acceptor(FailoverTest& test) {
  return test.run_suite(
      "test_v2_lossless_peer_acceptor",
      TestInterceptor(),
      policy_t::lossless_peer,
      policy_t::lossless_peer,
      [&test] (FailoverSuite& suite) {
    return seastar::futurize_invoke([&test] {
      logger().info("-- 0 --");
      logger().info("[Test] setup connection...");
      return test.peer_connect_me();
    }).then([&test] {
      return test.send_bidirectional();
    }).then([&suite] {
      return suite.wait_results(1);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 1);
      results[0].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 1 --");
      logger().info("[Test] connector markdown...");
      return test.markdown_peer();
    }).then([&test] {
      return test.peer_connect_me();
    }).then([&test] {
      return test.peer_send_me();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::established);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&suite] {
      logger().info("-- 2 --");
      logger().info("[Test] acceptor markdown...");
      return suite.markdown();
    }).then([&suite] {
      return suite.wait_results(2);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
    }).then([&test] {
      logger().info("-- 3 --");
      logger().info("[Test] connector reconnect...");
      return test.peer_connect_me();
    }).then([&test] {
      return test.try_peer_send_me();
    }).then([&suite] {
      return suite.wait_results(3);
    }).then([] (ConnResults& results) {
      results[0].assert_state_at(conn_state_t::closed);
      results[0].assert_connect(0, 0, 0, 0);
      results[0].assert_accept(1, 1, 0, 2);
      results[0].assert_reset(0, 0);
      results[1].assert_state_at(conn_state_t::replaced);
      results[1].assert_connect(0, 0, 0, 0);
      results[1].assert_accept(1, 1, 0, 0);
      results[1].assert_reset(0, 0);
      results[2].assert_state_at(conn_state_t::established);
      results[2].assert_connect(0, 0, 0, 0);
      results[2].assert_accept(1, 1, 1, 1);
      results[2].assert_reset(0, 0);
    });
  });
}
// Drive the full msgr v2 failover test suite against a TestPeer.
//
// If test_peer_islocal is set, spawn a FailoverTestPeer in-process at
// cmd_peer_addr and recurse with test_peer_islocal=false; otherwise
// connect a FailoverTest driver to the (possibly remote) peer and run
// every fault/racing/stale/policy scenario in sequence.  peer_wins is
// asserted against the arbitration result reported by check_peer_wins().
seastar::future<>
test_v2_protocol(entity_addr_t test_addr,
                 entity_addr_t cmd_peer_addr,
                 entity_addr_t test_peer_addr,
                 bool test_peer_islocal,
                 bool peer_wins) {
  ceph_assert_always(test_addr.is_msgr2());
  ceph_assert_always(cmd_peer_addr.is_msgr2());
  ceph_assert_always(test_peer_addr.is_msgr2());
  if (test_peer_islocal) {
    // initiate crimson test peer locally
    logger().info("test_v2_protocol: start local TestPeer at {}...", cmd_peer_addr);
    return FailoverTestPeer::create(cmd_peer_addr, test_peer_addr
    ).then([test_addr, cmd_peer_addr, test_peer_addr, peer_wins](auto peer) {
      // re-enter with the peer already running; keep it alive until it
      // has been asked to shut down and its wait() resolves
      return test_v2_protocol(
          test_addr,
          cmd_peer_addr,
          test_peer_addr,
          false,
          peer_wins
      ).then([peer = std::move(peer)] () mutable {
        return peer->wait().then([peer = std::move(peer)] {});
      });
    }).handle_exception([] (auto eptr) {
      logger().error("FailoverTestPeer failed: got exception {}", eptr);
      throw;
    });
  }
  return FailoverTest::create(test_addr, cmd_peer_addr, test_peer_addr
  ).then([peer_wins](auto test) {
    return seastar::futurize_invoke([test] {
      // lossy connect/accept fault injection
      return test_v2_lossy_early_connect_fault(*test);
    }).then([test] {
      return test_v2_lossy_connect_fault(*test);
    }).then([test] {
      return test_v2_lossy_connected_fault(*test);
    }).then([test] {
      return test_v2_lossy_early_accept_fault(*test);
    }).then([test] {
      return test_v2_lossy_accept_fault(*test);
    }).then([test] {
      return test_v2_lossy_establishing_fault(*test);
    }).then([test] {
      return test_v2_lossy_accepted_fault(*test);
    }).then([test] {
      // lossless (reconnectable) fault injection
      return test_v2_lossless_connect_fault(*test);
    }).then([test] {
      return test_v2_lossless_connected_fault(*test);
    }).then([test] {
      return test_v2_lossless_connected_fault2(*test);
    }).then([test] {
      return test_v2_lossless_reconnect_fault(*test);
    }).then([test] {
      return test_v2_lossless_accept_fault(*test);
    }).then([test] {
      return test_v2_lossless_establishing_fault(*test);
    }).then([test] {
      return test_v2_lossless_accepted_fault(*test);
    }).then([test] {
      return test_v2_lossless_reaccept_fault(*test);
    }).then([test] {
      // faults injected on the peer side
      return test_v2_peer_connect_fault(*test);
    }).then([test] {
      return test_v2_peer_accept_fault(*test);
    }).then([test] {
      return test_v2_peer_establishing_fault(*test);
    }).then([test] {
      return test_v2_peer_connected_fault_reconnect(*test);
    }).then([test] {
      return test_v2_peer_connected_fault_reaccept(*test);
    }).then([test] {
      return check_peer_wins(*test);
    }).then([test, peer_wins](bool ret_peer_wins) {
      // the racing scenarios depend on which side wins arbitration,
      // which must match what the caller predicted from the addresses
      ceph_assert(peer_wins == ret_peer_wins);
      if (ret_peer_wins) {
        return seastar::futurize_invoke([test] {
          return test_v2_racing_connect_acceptor_win(*test);
        }).then([test] {
          return test_v2_racing_reconnect_acceptor_win(*test);
        });
      } else {
        return seastar::futurize_invoke([test] {
          return test_v2_racing_connect_acceptor_lose(*test);
        }).then([test] {
          return test_v2_racing_reconnect_acceptor_lose(*test);
        });
      }
    }).then([test] {
      return test_v2_racing_connect_reconnect_win(*test);
    }).then([test] {
      return test_v2_racing_connect_reconnect_lose(*test);
    }).then([test] {
      // stale-session handling
      return test_v2_stale_connect(*test);
    }).then([test] {
      return test_v2_stale_reconnect(*test);
    }).then([test] {
      return test_v2_stale_accept(*test);
    }).then([test] {
      return test_v2_stale_establishing(*test);
    }).then([test] {
      return test_v2_stale_reaccept(*test);
    }).then([test] {
      // per-policy end-to-end behavior
      return test_v2_lossy_client(*test);
    }).then([test] {
      return test_v2_stateless_server(*test);
    }).then([test] {
      return test_v2_lossless_client(*test);
    }).then([test] {
      return test_v2_stateful_server(*test);
    }).then([test] {
      return test_v2_peer_reuse_connector(*test);
    }).then([test] {
      return test_v2_peer_reuse_acceptor(*test);
    }).then([test] {
      return test_v2_lossless_peer_connector(*test);
    }).then([test] {
      return test_v2_lossless_peer_acceptor(*test);
    }).then([test] {
      // keep `test` alive until shutdown completes
      return test->shutdown().then([test] {});
    });
  }).handle_exception([] (auto eptr) {
    logger().error("FailoverTest failed: got exception {}", eptr);
    throw;
  });
}
}
// Parse configuration, run every test group (echo, concurrent dispatch,
// preemptive shutdown, v2 protocol failover) and resolve to a process
// exit code: 0 on success, 1 if any test threw.
seastar::future<int> do_test(seastar::app_template& app)
{
  std::vector<const char*> args;
  std::string cluster;
  std::string conf_file_list;
  auto init_params = ceph_argparse_early_args(args,
                                              CEPH_ENTITY_TYPE_CLIENT,
                                              &cluster,
                                              &conf_file_list);
  return crimson::common::sharded_conf().start(init_params.name, cluster)
  .then([conf_file_list] {
    return local_conf().parse_config_files(conf_file_list);
  }).then([&app] {
    auto&& config = app.configuration();
    verbose = config["verbose"].as<bool>();
    auto rounds = config["rounds"].as<unsigned>();
    auto keepalive_ratio = config["keepalive-ratio"].as<double>();
    auto testpeer_islocal = config["testpeer-islocal"].as<bool>();
    entity_addr_t test_addr;
    ceph_assert(test_addr.parse(
        config["test-addr"].as<std::string>().c_str(), nullptr));
    test_addr.set_nonce(TEST_NONCE);
    entity_addr_t cmd_peer_addr;
    ceph_assert(cmd_peer_addr.parse(
        config["testpeer-addr"].as<std::string>().c_str(), nullptr));
    cmd_peer_addr.set_nonce(CMD_SRV_NONCE);
    // TestPeer address is derived from the CmdSrv address (port + 1)
    entity_addr_t test_peer_addr = get_test_peer_addr(cmd_peer_addr);
    // arbitration winner is decided by address comparison
    bool peer_wins = (test_addr > test_peer_addr);
    logger().info("test configuration: verbose={}, rounds={}, keepalive_ratio={}, "
                  "test_addr={}, cmd_peer_addr={}, test_peer_addr={}, "
                  "testpeer_islocal={}, peer_wins={}",
                  verbose, rounds, keepalive_ratio,
                  test_addr, cmd_peer_addr, test_peer_addr,
                  testpeer_islocal, peer_wins);
    return test_echo(rounds, keepalive_ratio
    ).then([] {
      return test_concurrent_dispatch();
    }).then([] {
      return test_preemptive_shutdown();
    }).then([test_addr, cmd_peer_addr, test_peer_addr, testpeer_islocal, peer_wins] {
      return test_v2_protocol(
          test_addr,
          cmd_peer_addr,
          test_peer_addr,
          testpeer_islocal,
          peer_wins);
    }).then([] {
      logger().info("All tests succeeded");
      // Seastar has bugs to have events undispatched during shutdown,
      // which will result in memory leak and thus fail LeakSanitizer.
      return seastar::sleep(100ms);
    });
  }).then([] {
    return crimson::common::sharded_conf().stop();
  }).then([] {
    return 0;
  }).handle_exception([] (auto eptr) {
    logger().error("Test failed: got exception {}", eptr);
    return 1;
  });
}
int main(int argc, char** argv)
{
seastar::app_template app;
app.add_options()
("verbose,v", bpo::value<bool>()->default_value(false),
"chatty if true")
("rounds", bpo::value<unsigned>()->default_value(512),
"number of pingpong rounds")
("keepalive-ratio", bpo::value<double>()->default_value(0.1),
"ratio of keepalive in ping messages")
("test-addr", bpo::value<std::string>()->default_value("v2:127.0.0.1:9014"),
"address of v2 failover tests")
("testpeer-addr", bpo::value<std::string>()->default_value("v2:127.0.0.1:9012"),
"addresses of v2 failover testpeer"
" (This is CmdSrv address, and TestPeer address is at port+=1)")
("testpeer-islocal", bpo::value<bool>()->default_value(true),
"create a local crimson testpeer, or connect to a remote testpeer");
return app.run(argc, argv, [&app] {
// This test normally succeeds within 60 seconds, so kill it after 300
// seconds in case it is blocked forever due to unaddressed bugs.
return seastar::with_timeout(seastar::lowres_clock::now() + 300s, do_test(app))
.handle_exception_type([](seastar::timed_out_error&) {
logger().error("test_messenger timeout after 300s, abort! "
"Consider to extend the period if the test is still running.");
// use the retcode of timeout(1)
return 124;
});
});
}
| 130,632 | 33.92861 | 102 | cc |
null | ceph-main/src/test/crimson/test_messenger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "msg/msg_types.h"
namespace ceph::net::test {
// Nonces and OSD ids used to tell the messenger instances apart:
// CmdCli/CmdSrv carry the out-of-band command channel, while
// Test/TestPeer are the messengers actually under test.
constexpr uint64_t CMD_CLI_NONCE = 1;
constexpr int64_t CMD_CLI_OSD = 1;
constexpr uint64_t TEST_NONCE = 2;
constexpr int64_t TEST_OSD = 2;
constexpr uint64_t CMD_SRV_NONCE = 3;
constexpr int64_t CMD_SRV_OSD = 3;
// NOTE(review): TEST_PEER_NONCE shares the value 2 with TEST_NONCE
// even though TEST_PEER_OSD gets its own id -- confirm this is intended.
constexpr uint64_t TEST_PEER_NONCE = 2;
constexpr int64_t TEST_PEER_OSD = 4;
// Derive the TestPeer listening address from the CmdSrv address:
// one port above it, tagged with the TestPeer nonce.
inline entity_addr_t get_test_peer_addr(
    const entity_addr_t &cmd_peer_addr) {
  auto addr = cmd_peer_addr;
  addr.set_port(addr.get_port() + 1);
  addr.set_nonce(TEST_PEER_NONCE);
  return addr;
}
// Single-byte commands exchanged on the CmdCli/CmdSrv channel to
// remote-control the TestPeer suite.
enum class cmd_t : char {
  none = '\0',
  shutdown,           // stop the TestPeer process
  suite_start,        // start a suite with a given policy_t
  suite_stop,         // tear the current suite down
  suite_connect_me,   // TestPeer should connect back to the Test addr
  suite_send_me,      // TestPeer should send an op
  suite_keepalive_me, // TestPeer should send a keepalive
  suite_markdown,     // TestPeer should mark its connection down
  suite_recv_op       // notification: TestPeer received an op
};
// Single-byte encoding of the messenger SocketPolicy the TestPeer
// suite should run with (see to_socket_policy() in the peer binary).
enum class policy_t : char {
  none = '\0',
  stateful_server,
  stateless_server,
  lossless_peer,
  lossless_peer_reuse,
  lossy_client,
  lossless_client
};
// Pretty-print a cmd_t for logging; aborts on an unknown value.
inline std::ostream& operator<<(std::ostream& out, const cmd_t& cmd) {
  const char *name = nullptr;
  switch(cmd) {
  case cmd_t::none:
    name = "none";
    break;
  case cmd_t::shutdown:
    name = "shutdown";
    break;
  case cmd_t::suite_start:
    name = "suite_start";
    break;
  case cmd_t::suite_stop:
    name = "suite_stop";
    break;
  case cmd_t::suite_connect_me:
    name = "suite_connect_me";
    break;
  case cmd_t::suite_send_me:
    name = "suite_send_me";
    break;
  case cmd_t::suite_keepalive_me:
    name = "suite_keepalive_me";
    break;
  case cmd_t::suite_markdown:
    name = "suite_markdown";
    break;
  case cmd_t::suite_recv_op:
    name = "suite_recv_op";
    break;
  default:
    ceph_abort();
  }
  return out << name;
}
// Pretty-print a policy_t for logging; aborts on an unknown value.
inline std::ostream& operator<<(std::ostream& out, const policy_t& policy) {
  const char *name = nullptr;
  switch(policy) {
  case policy_t::none:
    name = "none";
    break;
  case policy_t::stateful_server:
    name = "stateful_server";
    break;
  case policy_t::stateless_server:
    name = "stateless_server";
    break;
  case policy_t::lossless_peer:
    name = "lossless_peer";
    break;
  case policy_t::lossless_peer_reuse:
    name = "lossless_peer_reuse";
    break;
  case policy_t::lossy_client:
    name = "lossy_client";
    break;
  case policy_t::lossless_client:
    name = "lossless_client";
    break;
  default:
    ceph_abort();
  }
  return out << name;
}
} // namespace ceph::net::test
| 2,397 | 23.979167 | 76 | h |
null | ceph-main/src/test/crimson/test_messenger_peer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
#include <boost/pointer_cast.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>
#include "auth/DummyAuth.h"
#include "common/dout.h"
#include "global/global_init.h"
#include "messages/MPing.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"
#include "messages/MOSDOp.h"
#include "msg/Dispatcher.h"
#include "msg/Messenger.h"
#include "test_messenger.h"
namespace {
#define dout_subsys ceph_subsys_test
using namespace ceph::net::test;
using SocketPolicy = Messenger::Policy;
constexpr int CEPH_OSD_PROTOCOL = 10;
// The classic-messenger side of one failover test suite.  It tracks a
// single Connection to the crimson Test messenger, reacts to dispatcher
// events (connect/accept/reset), and exposes the operations the remote
// CmdSrv can trigger: connect, send, keepalive and markdown.  Each
// received op is reported upward through op_callback.
class FailoverSuitePeer : public Dispatcher {
  using cb_t = std::function<void()>;
  DummyAuthClientServer dummy_auth;
  std::unique_ptr<Messenger> peer_msgr;
  cb_t op_callback;                     // invoked for every op received from Test
  Connection* tracked_conn = nullptr;   // the single connection under test
  unsigned pending_send = 0;            // sends queued before a connection exists
  bool ms_can_fast_dispatch_any() const override { return true; }
  bool ms_can_fast_dispatch(const Message* m) const override { return true; }
  // Ops may arrive before any connect/accept event is observed, in which
  // case the connection starts being tracked here.
  void ms_fast_dispatch(Message* m) override {
    auto conn = m->get_connection().get();
    if (tracked_conn == nullptr) {
      ldout(cct, 0) << "[!TestPeer] got op from Test(conn "
                    << conn << "not tracked yet)" << dendl;
      tracked_conn = conn;
    } else if (tracked_conn != conn) {
      lderr(cct) << "[TestPeer] got op from Test: conn(" << conn
                 << ") != tracked_conn(" << tracked_conn
                 << ")" << dendl;
      ceph_abort();
    } else {
      ldout(cct, 0) << "[TestPeer] got op from Test" << dendl;
    }
    op_callback();
  }
  // everything is fast-dispatched; the slow path must never be taken
  bool ms_dispatch(Message* m) override { ceph_abort(); }
  void ms_handle_fast_connect(Connection* conn) override {
    if (tracked_conn == conn) {
      ldout(cct, 0) << "[TestPeer] connected: " << conn << dendl;
    } else {
      lderr(cct) << "[TestPeer] connected: conn(" << conn
                 << ") != tracked_conn(" << tracked_conn
                 << ")" << dendl;
      ceph_abort();
    }
  }
  void ms_handle_fast_accept(Connection* conn) override {
    if (tracked_conn == nullptr) {
      ldout(cct, 0) << "[TestPeer] accepted: " << conn << dendl;
      tracked_conn = conn;
    } else if (tracked_conn != conn) {
      lderr(cct) << "[TestPeer] accepted: conn(" << conn
                 << ") != tracked_conn(" << tracked_conn
                 << ")" << dendl;
      ceph_abort();
    } else {
      ldout(cct, 0) << "[!TestPeer] accepted(stale event): " << conn << dendl;
    }
    // an accepted connection can carry any sends queued while disconnected
    flush_pending_send();
  }
  bool ms_handle_reset(Connection* conn) override {
    if (tracked_conn == conn) {
      ldout(cct, 0) << "[TestPeer] reset: " << conn << dendl;
      tracked_conn = nullptr;
    } else {
      ldout(cct, 0) << "[!TestPeer] reset(invalid event): conn(" << conn
                    << ") != tracked_conn(" << tracked_conn
                    << ")" << dendl;
    }
    return true;
  }
  void ms_handle_remote_reset(Connection* conn) override {
    if (tracked_conn == conn) {
      ldout(cct, 0) << "[TestPeer] remote reset: " << conn << dendl;
    } else {
      ldout(cct, 0) << "[!TestPeer] reset(invalid event): conn(" << conn
                    << ") != tracked_conn(" << tracked_conn
                    << ")" << dendl;
    }
  }
  bool ms_handle_refused(Connection* conn) override {
    ldout(cct, 0) << "[!TestPeer] refused: " << conn << dendl;
    return true;
  }
 private:
  // Build, bind and start the messenger under test with the suite's policy.
  void init(entity_addr_t test_peer_addr, SocketPolicy policy) {
    peer_msgr.reset(Messenger::create(
        cct, "async",
        entity_name_t::OSD(TEST_PEER_OSD),
        "TestPeer",
        TEST_PEER_NONCE));
    dummy_auth.auth_registry.refresh_config();
    peer_msgr->set_cluster_protocol(CEPH_OSD_PROTOCOL);
    peer_msgr->set_default_policy(policy);
    peer_msgr->set_auth_client(&dummy_auth);
    peer_msgr->set_auth_server(&dummy_auth);
    peer_msgr->bind(test_peer_addr);
    peer_msgr->add_dispatcher_head(this);
    peer_msgr->start();
  }
  // Send one (empty) MOSDOp over the tracked connection.
  void send_op() {
    ceph_assert(tracked_conn);
    pg_t pgid;
    object_locator_t oloc;
    hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(),
                   pgid.pool(), oloc.nspace);
    spg_t spgid(pgid);
    tracked_conn->send_message2(make_message<MOSDOp>(0, 0, hobj, spgid, 0, 0, 0));
  }
  // Drain any sends that were requested while no connection was tracked.
  void flush_pending_send() {
    if (pending_send != 0) {
      ldout(cct, 0) << "[TestPeer] flush sending "
                    << pending_send << " ops" << dendl;
    }
    ceph_assert(tracked_conn);
    while (pending_send) {
      send_op();
      --pending_send;
    }
  }
 public:
  FailoverSuitePeer(CephContext* cct, cb_t op_callback)
    : Dispatcher(cct), dummy_auth(cct), op_callback(op_callback) { }
  void shutdown() {
    peer_msgr->shutdown();
    peer_msgr->wait();
  }
  // Open (or replace) the tracked connection towards the Test messenger
  // and flush any queued sends.
  void connect_peer(entity_addr_t test_addr) {
    ldout(cct, 0) << "[TestPeer] connect_peer(" << test_addr << ")" << dendl;
    auto conn = peer_msgr->connect_to_osd(entity_addrvec_t{test_addr});
    if (tracked_conn) {
      if (tracked_conn == conn.get()) {
        ldout(cct, 0) << "[TestPeer] this is not a new session " << conn.get() << dendl;
      } else {
        ldout(cct, 0) << "[TestPeer] this is a new session " << conn.get()
                      << ", replacing old one " << tracked_conn << dendl;
      }
    } else {
      ldout(cct, 0) << "[TestPeer] this is a new session " << conn.get() << dendl;
    }
    tracked_conn = conn.get();
    flush_pending_send();
  }
  // Send an op now, or queue it until a connection is established.
  void send_peer() {
    if (tracked_conn) {
      ldout(cct, 0) << "[TestPeer] send_peer()" << dendl;
      send_op();
    } else {
      ++pending_send;
      ldout(cct, 0) << "[TestPeer] send_peer() (pending " << pending_send << ")" << dendl;
    }
  }
  void keepalive_peer() {
    ldout(cct, 0) << "[TestPeer] keepalive_peer()" << dendl;
    ceph_assert(tracked_conn);
    tracked_conn->send_keepalive();
  }
  // Mark the tracked connection down and stop tracking it.
  void markdown() {
    ldout(cct, 0) << "[TestPeer] markdown()" << dendl;
    ceph_assert(tracked_conn);
    tracked_conn->mark_down();
    tracked_conn = nullptr;
  }
  static std::unique_ptr<FailoverSuitePeer>
  create(CephContext* cct, entity_addr_t test_peer_addr,
         SocketPolicy policy, cb_t op_callback) {
    auto suite = std::make_unique<FailoverSuitePeer>(cct, op_callback);
    suite->init(test_peer_addr, policy);
    return suite;
  }
};
// Translate the wire-level policy_t command argument into a concrete
// messenger SocketPolicy; aborts on an unrecognized value.
SocketPolicy to_socket_policy(CephContext* cct, policy_t policy) {
  if (policy == policy_t::stateful_server) {
    return SocketPolicy::stateful_server(0);
  } else if (policy == policy_t::stateless_server) {
    return SocketPolicy::stateless_server(0);
  } else if (policy == policy_t::lossless_peer) {
    return SocketPolicy::lossless_peer(0);
  } else if (policy == policy_t::lossless_peer_reuse) {
    return SocketPolicy::lossless_peer_reuse(0);
  } else if (policy == policy_t::lossy_client) {
    return SocketPolicy::lossy_client(0);
  } else if (policy == policy_t::lossless_client) {
    return SocketPolicy::lossless_client(0);
  } else {
    lderr(cct) << "[CmdSrv] unexpected policy type" << dendl;
    ceph_abort();
  }
}
// Command server (CmdSrv) of the standalone test peer binary.  It
// listens for MCommand messages from the crimson-side CmdCli, and
// starts/stops/drives a FailoverSuitePeer accordingly.  Suite events
// (received ops) are reported back to the client as suite_recv_op.
class FailoverTestPeer : public Dispatcher {
  DummyAuthClientServer dummy_auth;
  std::unique_ptr<Messenger> cmd_msgr;
  Connection *cmd_conn = nullptr;          // the single CmdCli connection
  const entity_addr_t test_peer_addr;      // where suite messengers bind
  std::unique_ptr<FailoverSuitePeer> test_suite;
  const bool nonstop;                      // keep serving after shutdown cmd
  // the command channel uses regular (non-fast) dispatch only
  bool ms_can_fast_dispatch_any() const override { return false; }
  bool ms_can_fast_dispatch(const Message* m) const override { return false; }
  void ms_fast_dispatch(Message* m) override { ceph_abort(); }
  // Handle PING (liveness check) and MSG_COMMAND (suite control) from CmdCli.
  bool ms_dispatch(Message* m) override {
    auto conn = m->get_connection().get();
    if (cmd_conn == nullptr) {
      ldout(cct, 0) << "[!CmdSrv] got msg from CmdCli(conn "
                    << conn << "not tracked yet)" << dendl;
      cmd_conn = conn;
    } else if (cmd_conn != conn) {
      lderr(cct) << "[CmdSrv] got msg from CmdCli: conn(" << conn
                 << ") != cmd_conn(" << cmd_conn
                 << ")" << dendl;
      ceph_abort();
    } else {
      // good!
    }
    switch (m->get_type()) {
     case CEPH_MSG_PING: {
      ldout(cct, 0) << "[CmdSrv] got PING, sending PONG ..." << dendl;
      cmd_conn->send_message2(make_message<MPing>());
      break;
     }
     case MSG_COMMAND: {
      auto m_cmd = boost::static_pointer_cast<MCommand>(m);
      auto cmd = static_cast<cmd_t>(m_cmd->cmd[0][0]);
      if (cmd == cmd_t::shutdown) {
        ldout(cct, 0) << "All tests succeeded" << dendl;
        if (!nonstop) {
          ldout(cct, 0) << "[CmdSrv] shutdown ..." << dendl;
          cmd_msgr->shutdown();
        } else {
          ldout(cct, 0) << "[CmdSrv] nonstop set ..." << dendl;
        }
      } else {
        ldout(cct, 0) << "[CmdSrv] got cmd " << cmd << dendl;
        handle_cmd(cmd, m_cmd);
        ldout(cct, 0) << "[CmdSrv] done, send cmd reply ..." << dendl;
        cmd_conn->send_message2(make_message<MCommandReply>());
      }
      break;
     }
     default:
      lderr(cct) << "[CmdSrv] " << __func__ << " " << cmd_conn
                 << " got unexpected msg from CmdCli: "
                 << m << dendl;
      ceph_abort();
    }
    m->put();
    return true;
  }
  void ms_handle_fast_connect(Connection*) override { ceph_abort(); }
  void ms_handle_fast_accept(Connection *conn) override {
    if (cmd_conn == nullptr) {
      ldout(cct, 0) << "[CmdSrv] accepted: " << conn << dendl;
      cmd_conn = conn;
    } else if (cmd_conn != conn) {
      lderr(cct) << "[CmdSrv] accepted: conn(" << conn
                 << ") != cmd_conn(" << cmd_conn
                 << ")" << dendl;
      ceph_abort();
    } else {
      ldout(cct, 0) << "[!CmdSrv] accepted(stale event): " << conn << dendl;
    }
  }
  bool ms_handle_reset(Connection* conn) override {
    if (cmd_conn == conn) {
      ldout(cct, 0) << "[CmdSrv] reset: " << conn << dendl;
      cmd_conn = nullptr;
    } else {
      ldout(cct, 0) << "[!CmdSrv] reset(invalid event): conn(" << conn
                    << ") != cmd_conn(" << cmd_conn
                    << ")" << dendl;
    }
    return true;
  }
  void ms_handle_remote_reset(Connection*) override { ceph_abort(); }
  bool ms_handle_refused(Connection*) override { ceph_abort(); }
 private:
  // Tell CmdCli that the suite received an op from the Test messenger.
  void notify_recv_op() {
    ceph_assert(cmd_conn);
    auto m = make_message<MCommand>();
    m->cmd.emplace_back(1, static_cast<char>(cmd_t::suite_recv_op));
    cmd_conn->send_message2(m);
  }
  // Dispatch one suite-control command received from CmdCli.
  void handle_cmd(cmd_t cmd, MRef<MCommand> m_cmd) {
    switch (cmd) {
     case cmd_t::suite_start: {
      // force-stop any leftover suite before starting a fresh one
      if (test_suite) {
        test_suite->shutdown();
        test_suite.reset();
        ldout(cct, 0) << "-------- suite stopped (force) --------\n\n" << dendl;
      }
      auto p = static_cast<policy_t>(m_cmd->cmd[1][0]);
      ldout(cct, 0) << "[CmdSrv] suite starting (" << p
                    <<", " << test_peer_addr << ") ..." << dendl;
      auto policy = to_socket_policy(cct, p);
      auto suite = FailoverSuitePeer::create(cct, test_peer_addr, policy,
                                             [this] { notify_recv_op(); });
      test_suite.swap(suite);
      return;
     }
     case cmd_t::suite_stop:
      ceph_assert(test_suite);
      test_suite->shutdown();
      test_suite.reset();
      ldout(cct, 0) << "-------- suite stopped --------\n\n" << dendl;
      return;
     case cmd_t::suite_connect_me: {
      ceph_assert(test_suite);
      entity_addr_t test_addr = entity_addr_t();
      // NOTE(review): parse() result is ignored here -- a malformed
      // address would be used silently; confirm the caller guarantees
      // a valid address string.
      test_addr.parse(m_cmd->cmd[1].c_str(), nullptr);
      test_suite->connect_peer(test_addr);
      return;
     }
     case cmd_t::suite_send_me:
      ceph_assert(test_suite);
      test_suite->send_peer();
      return;
     case cmd_t::suite_keepalive_me:
      ceph_assert(test_suite);
      test_suite->keepalive_peer();
      return;
     case cmd_t::suite_markdown:
      ceph_assert(test_suite);
      test_suite->markdown();
      return;
     default:
      lderr(cct) << "[CmdSrv] got unexpected command " << m_cmd
                 << " from CmdCli" << dendl;
      ceph_abort();
    }
  }
  // Build, bind and start the command-channel messenger.
  void init(entity_addr_t cmd_peer_addr) {
    cmd_msgr.reset(Messenger::create(
        cct, "async",
        entity_name_t::OSD(CMD_SRV_OSD),
        "CmdSrv",
        CMD_SRV_NONCE));
    dummy_auth.auth_registry.refresh_config();
    cmd_msgr->set_cluster_protocol(CEPH_OSD_PROTOCOL);
    cmd_msgr->set_default_policy(Messenger::Policy::stateless_server(0));
    cmd_msgr->set_auth_client(&dummy_auth);
    cmd_msgr->set_auth_server(&dummy_auth);
    cmd_msgr->bind(cmd_peer_addr);
    cmd_msgr->add_dispatcher_head(this);
    cmd_msgr->start();
  }
 public:
  FailoverTestPeer(CephContext* cct,
                   entity_addr_t test_peer_addr,
                   bool nonstop)
    : Dispatcher(cct),
      dummy_auth(cct),
      test_peer_addr(test_peer_addr),
      nonstop(nonstop) { }
  // Block until the command messenger shuts down.
  void wait() { cmd_msgr->wait(); }
  static std::unique_ptr<FailoverTestPeer>
  create(CephContext* cct,
         entity_addr_t cmd_peer_addr,
         entity_addr_t test_peer_addr,
         bool nonstop) {
    auto test_peer = std::make_unique<FailoverTestPeer>(
        cct, test_peer_addr, nonstop);
    test_peer->init(cmd_peer_addr);
    ldout(cct, 0) << "[CmdSrv] ready" << dendl;
    return test_peer;
  }
};
}
// Entry point of the standalone test peer binary: parse options,
// initialize a Ceph context, and run a FailoverTestPeer (CmdSrv at
// --addr, TestPeer messengers at port+1) until it is shut down.
// Returns 0 on success, 1 on bad options or an unparsable address.
int main(int argc, char** argv)
{
  namespace po = boost::program_options;
  po::options_description desc{"Allowed options"};
  desc.add_options()
    ("help,h", "show help message")
    ("addr", po::value<std::string>()->default_value("v2:127.0.0.1:9012"),
     "This is CmdSrv address, and TestPeer address is at port+=1")
    ("nonstop", po::value<bool>()->default_value(false),
     "Do not shutdown TestPeer when all tests are successful");
  po::variables_map vm;
  std::vector<std::string> unrecognized_options;
  try {
    auto parsed = po::command_line_parser(argc, argv)
      .options(desc)
      .allow_unregistered()
      .run();
    po::store(parsed, vm);
    if (vm.count("help")) {
      std::cout << desc << std::endl;
      return 0;
    }
    po::notify(vm);
    unrecognized_options = po::collect_unrecognized(parsed.options, po::include_positional);
  } catch(const po::error& e) {
    std::cerr << "error: " << e.what() << std::endl;
    return 1;
  }
  std::vector<const char*> args(argv, argv + argc);
  auto cct = global_init(nullptr, args,
                         CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
                         CINIT_FLAG_NO_MON_CONFIG);
  common_init_finish(cct.get());
  auto addr = vm["addr"].as<std::string>();
  entity_addr_t cmd_peer_addr;
  // Validate the user-supplied address instead of silently proceeding
  // with a default-constructed one (parse() result was ignored before).
  if (!cmd_peer_addr.parse(addr.c_str(), nullptr)) {
    std::cerr << "error: failed to parse --addr " << addr << std::endl;
    return 1;
  }
  cmd_peer_addr.set_nonce(CMD_SRV_NONCE);
  ceph_assert_always(cmd_peer_addr.is_msgr2());
  auto test_peer_addr = get_test_peer_addr(cmd_peer_addr);
  auto nonstop = vm["nonstop"].as<bool>();
  ldout(cct, 0) << "test configuration: cmd_peer_addr=" << cmd_peer_addr
                << ", test_peer_addr=" << test_peer_addr
                << ", nonstop=" << nonstop
                << dendl;
  auto test_peer = FailoverTestPeer::create(
      cct.get(),
      cmd_peer_addr,
      test_peer_addr,
      nonstop);
  test_peer->wait();
}
| 15,161 | 31.7473 | 92 | cc |
null | ceph-main/src/test/crimson/test_messenger_thrash.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <map>
#include <random>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <seastar/core/app-template.hh>
#include <seastar/core/do_with.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/sleep.hh>
#include <seastar/core/with_timeout.hh>
#include "common/ceph_argparse.h"
#include "messages/MPing.h"
#include "messages/MCommand.h"
#include "crimson/auth/DummyAuth.h"
#include "crimson/common/log.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/net/Messenger.h"
using namespace std::chrono_literals;
namespace bpo = boost::program_options;
using crimson::common::local_conf;
using payload_seq_t = uint64_t;
// On-wire payload carried inside MPing messages for the thrash test:
// a direction tag (PING request or PONG echo), a sequence number, and
// filler data that the receiver checks byte-for-byte on the echo.
struct Payload {
  enum Who : uint8_t {
    PING = 0,
    PONG = 1,
  };
  uint8_t who = 0;       // Who: PING on the way out, PONG on the echo
  payload_seq_t seq = 0; // sender-assigned sequence number
  bufferlist data;       // opaque filler, echoed back verbatim
  Payload(Who who, uint64_t seq, const bufferlist& data)
    : who(who), seq(seq), data(data)
  {}
  Payload() = default;
  // standard Ceph denc encode/decode support
  DENC(Payload, v, p) {
    DENC_START(1, 1, p);
    denc(v.who, p);
    denc(v.seq, p);
    denc(v.data, p);
    DENC_FINISH(p);
  }
};
WRITE_CLASS_DENC(Payload)
// fmt support so a Payload can be passed directly to logger().info().
template<>
struct fmt::formatter<Payload> : fmt::formatter<std::string_view> {
  template <typename FormatContext>
  auto format(const Payload& pl, FormatContext& ctx) const {
    return fmt::format_to(ctx.out(), "reply={} i={}", pl.who, pl.seq);
  }
};
namespace {
// All thrash-test output goes through the ceph_subsys_test logger.
seastar::logger& logger() {
  return crimson::get_logger(ceph_subsys_test);
}
// Shared randomness for the whole test; `prob` is the percentage dice
// used to pick the next random operation.
std::random_device rd;
std::default_random_engine rng{rd()};
std::uniform_int_distribution<> prob(0,99);
bool verbose = false;
// Hand out a fresh loopback server address, one port at a time
// starting from 16801.
entity_addr_t get_server_addr() {
  static int next_port = 16800;
  entity_addr_t addr;
  addr.parse("127.0.0.1", nullptr);
  addr.set_port(++next_port);
  return addr;
}
// Hand out a fresh, process-unique messenger nonce (2, 3, 4, ...).
uint64_t get_nonce() {
  static uint64_t counter = 1;
  return ++counter;
}
// Knobs for one thrash run: how many server/client messengers to
// create, how many connections to seed, and how many random operations
// to perform.
struct thrash_params_t {
  std::size_t servers;
  std::size_t clients;
  std::size_t connections;
  std::size_t random_op;
};
class SyntheticWorkload;
// Dispatcher shared by all messengers in the thrash workload.  It sends
// PING payloads with increasing sequence numbers, echoes them back as
// PONGs, and verifies on each PONG that the sequence order and the data
// bytes match what was sent on that connection.
class SyntheticDispatcher final
    : public crimson::net::Dispatcher {
  public:
  // per-connection FIFO of in-flight sequence numbers
  std::map<crimson::net::Connection*, std::deque<payload_seq_t> > conn_sent;
  // seq -> payload data, for verification on the echo
  std::map<payload_seq_t, bufferlist> sent;
  unsigned index;              // next sequence number to assign
  SyntheticWorkload *workload;
  // NOTE(review): the bool constructor parameter is unused -- confirm
  // it can be dropped at the call sites.
  SyntheticDispatcher(bool s, SyntheticWorkload *wl):
    index(0), workload(wl) {
  }
  std::optional<seastar::future<>> ms_dispatch(crimson::net::ConnectionRef con,
                                               MessageRef m) final {
    if (verbose) {
      logger().warn("{}: con = {}", __func__, *con);
    }
    // MSG_COMMAND is used to disorganize regular message flow
    if (m->get_type() == MSG_COMMAND) {
      return seastar::now();
    }
    Payload pl;
    auto p = m->get_data().cbegin();
    decode(pl, p);
    if (pl.who == Payload::PING) {
      logger().info(" {} conn= {} {}", __func__, *con, pl);
      return reply_message(m, con, pl);
    } else {
      ceph_assert(pl.who == Payload::PONG);
      if (sent.count(pl.seq)) {
        logger().info(" {} conn= {} {}", __func__, *con, pl);
        // echoes must come back in send order with identical bytes
        ceph_assert(conn_sent[&*con].front() == pl.seq);
        ceph_assert(pl.data.contents_equal(sent[pl.seq]));
        conn_sent[&*con].pop_front();
        sent.erase(pl.seq);
      }
      return seastar::now();
    }
  }
  void ms_handle_accept(
      crimson::net::ConnectionRef conn,
      seastar::shard_id new_shard,
      bool is_replace) final {
    logger().info("{} - Connection:{}", __func__, *conn);
    assert(new_shard == seastar::this_shard_id());
  }
  void ms_handle_connect(
      crimson::net::ConnectionRef conn,
      seastar::shard_id new_shard) final {
    logger().info("{} - Connection:{}", __func__, *conn);
    assert(new_shard == seastar::this_shard_id());
  }
  // defined after SyntheticWorkload (needs its full declaration)
  void ms_handle_reset(crimson::net::ConnectionRef con, bool is_replace) final;
  void ms_handle_remote_reset(crimson::net::ConnectionRef con) final {
    clear_pending(con);
  }
  // Echo a PING back as a PONG carrying the same seq and data.
  std::optional<seastar::future<>> reply_message(
      const MessageRef m,
      crimson::net::ConnectionRef con,
      Payload& pl) {
    pl.who = Payload::PONG;
    bufferlist bl;
    encode(pl, bl);
    auto rm = crimson::make_message<MPing>();
    rm->set_data(bl);
    if (verbose) {
      logger().info("{} conn= {} reply i= {}",
                    __func__, *con, pl.seq);
    }
    return con->send(std::move(rm));
  }
  // Send a PING with a fresh sequence number and record it for
  // verification when the PONG arrives.
  seastar::future<> send_message_wrap(crimson::net::ConnectionRef con,
                                      const bufferlist& data) {
    auto m = crimson::make_message<MPing>();
    Payload pl{Payload::PING, index++, data};
    bufferlist bl;
    encode(pl, bl);
    m->set_data(bl);
    sent[pl.seq] = pl.data;
    conn_sent[&*con].push_back(pl.seq);
    logger().info("{} conn= {} send i= {}",
                  __func__, *con, pl.seq);
    return con->send(std::move(m));
  }
  uint64_t get_num_pending_msgs() {
    return sent.size();
  }
  // Forget all in-flight state for a connection (used on reset/markdown).
  void clear_pending(crimson::net::ConnectionRef con) {
    for (std::deque<uint64_t>::iterator it = conn_sent[&*con].begin();
         it != conn_sent[&*con].end(); ++it)
      sent.erase(*it);
    conn_sent.erase(&*con);
  }
  // Log every connection that still has unacknowledged messages.
  void print() {
    for (auto && [connptr, list] : conn_sent) {
      if (!list.empty()) {
        logger().info("{} {} wait {}", __func__,
                      (void*)connptr, list.size());
      }
    }
  }
};
class SyntheticWorkload {
// messengers must be freed after its connections
std::set<crimson::net::MessengerRef> available_servers;
std::set<crimson::net::MessengerRef> available_clients;
crimson::net::SocketPolicy server_policy;
crimson::net::SocketPolicy client_policy;
std::map<crimson::net::ConnectionRef,
std::pair<crimson::net::MessengerRef,
crimson::net::MessengerRef>> available_connections;
SyntheticDispatcher dispatcher;
std::vector<bufferlist> rand_data;
crimson::auth::DummyAuthClientServer dummy_auth;
seastar::future<crimson::net::ConnectionRef> get_random_connection() {
return seastar::do_until(
[this] { return dispatcher.get_num_pending_msgs() <= max_in_flight; },
[] { return seastar::sleep(100ms); }
).then([this] {
boost::uniform_int<> choose(0, available_connections.size() - 1);
int index = choose(rng);
std::map<crimson::net::ConnectionRef,
std::pair<crimson::net::MessengerRef, crimson::net::MessengerRef>>::iterator i
= available_connections.begin();
for (; index > 0; --index, ++i) ;
return seastar::make_ready_future<crimson::net::ConnectionRef>(i->first);
});
}
public:
const unsigned min_connections = 10;
const unsigned max_in_flight = 64;
const unsigned max_connections = 128;
const unsigned max_message_len = 1024 * 1024 * 4;
const uint64_t servers, clients;
SyntheticWorkload(int servers, int clients, int random_num,
crimson::net::SocketPolicy srv_policy,
crimson::net::SocketPolicy cli_policy)
: server_policy(srv_policy),
client_policy(cli_policy),
dispatcher(false, this),
servers(servers),
clients(clients) {
for (int i = 0; i < random_num; i++) {
bufferlist bl;
boost::uniform_int<> u(32, max_message_len);
uint64_t value_len = u(rng);
bufferptr bp(value_len);
bp.zero();
for (uint64_t j = 0; j < value_len-sizeof(i); ) {
memcpy(bp.c_str()+j, &i, sizeof(i));
j += 4096;
}
bl.append(bp);
rand_data.push_back(bl);
}
}
bool can_create_connection() {
return available_connections.size() < max_connections;
}
seastar::future<> maybe_generate_connection() {
if (!can_create_connection()) {
return seastar::now();
}
crimson::net::MessengerRef server, client;
{
boost::uniform_int<> choose(0, available_servers.size() - 1);
int index = choose(rng);
std::set<crimson::net::MessengerRef>::iterator i
= available_servers.begin();
for (; index > 0; --index, ++i) ;
server = *i;
}
{
boost::uniform_int<> choose(0, available_clients.size() - 1);
int index = choose(rng);
std::set<crimson::net::MessengerRef>::iterator i
= available_clients.begin();
for (; index > 0; --index, ++i) ;
client = *i;
}
std::pair<crimson::net::MessengerRef, crimson::net::MessengerRef>
connected_pair;
{
crimson::net::ConnectionRef conn = client->connect(
server->get_myaddr(),
entity_name_t::TYPE_OSD);
connected_pair = std::make_pair(client, server);
available_connections[conn] = connected_pair;
}
return seastar::now();
}
seastar::future<> random_op (const uint64_t& iter) {
return seastar::do_with(iter, [this] (uint64_t& iter) {
return seastar::do_until(
[&] { return iter == 0; },
[&, this]
{
if (!(iter % 10)) {
logger().info("{} Op {} : ", __func__ ,iter);
print_internal_state();
}
--iter;
int val = prob(rng);
if(val > 90) {
return maybe_generate_connection();
} else if (val > 80) {
return drop_connection();
} else if (val > 10) {
return send_message();
} else {
return seastar::sleep(
std::chrono::milliseconds(rand() % 1000 + 500));
}
});
});
}
seastar::future<> generate_connections (const uint64_t& iter) {
return seastar::do_with(iter, [this] (uint64_t& iter) {
return seastar::do_until(
[&] { return iter == 0; },
[&, this]
{
--iter;
if (!(connections_count() % 10)) {
logger().info("seeding connection {}",
connections_count());
}
return maybe_generate_connection();
});
});
}
seastar::future<> init_server(const entity_name_t& name,
const std::string& lname,
const uint64_t nonce,
const entity_addr_t& addr) {
crimson::net::MessengerRef msgr =
crimson::net::Messenger::create(
name, lname, nonce, true);
msgr->set_default_policy(server_policy);
msgr->set_auth_client(&dummy_auth);
msgr->set_auth_server(&dummy_auth);
available_servers.insert(msgr);
return msgr->bind(entity_addrvec_t{addr}).safe_then(
[this, msgr] {
return msgr->start({&dispatcher});
}, crimson::net::Messenger::bind_ertr::all_same_way(
[addr] (const std::error_code& e) {
logger().error("{} test_messenger_thrash(): "
"there is another instance running at {}",
__func__, addr);
ceph_abort();
}));
}
seastar::future<> init_client(const entity_name_t& name,
const std::string& lname,
const uint64_t nonce) {
crimson::net::MessengerRef msgr =
crimson::net::Messenger::create(
name, lname, nonce, true);
msgr->set_default_policy(client_policy);
msgr->set_auth_client(&dummy_auth);
msgr->set_auth_server(&dummy_auth);
available_clients.insert(msgr);
return msgr->start({&dispatcher});
}
  // Pick a random live connection and send one message over it: ~5% of the
  // time a high-priority MCommand, otherwise a random pre-generated data
  // message via the dispatcher (which tracks it as pending).
  seastar::future<> send_message() {
    return get_random_connection()
    .then([this] (crimson::net::ConnectionRef conn) {
      boost::uniform_int<> true_false(0, 99);
      int val = true_false(rng);
      if (val >= 95) {
        uuid_d uuid;
        uuid.generate_random();
        auto m = crimson::make_message<MCommand>(uuid);
        std::vector<std::string> cmds;
        cmds.push_back("command");
        m->cmd = cmds;
        m->set_priority(200);
        return conn->send(std::move(m));
      } else {
        boost::uniform_int<> u(0, rand_data.size()-1);
        return dispatcher.send_message_wrap(conn, rand_data[u(rng)]);
      }
    });
  }
  // Tear down a random connection (no-op while fewer than min_connections
  // exist). For a lossless (standby, non-server) client policy the peer side
  // must be marked down explicitly as well, or it would keep trying to
  // reconnect forever.
  seastar::future<> drop_connection() {
    if (available_connections.size() < min_connections) {
      return seastar::now();
    }
    return get_random_connection()
    .then([this] (crimson::net::ConnectionRef conn) {
      dispatcher.clear_pending(conn);
      conn->mark_down();
      if (!client_policy.server &&
          client_policy.standby) {
        // it's a lossless policy, so we need to mark down each side
        std::pair<crimson::net::MessengerRef, crimson::net::MessengerRef> &p =
          available_connections[conn];
        if (!p.first->get_default_policy().server &&
            !p.second->get_default_policy().server) {
          //verify that equal-to operator applies here
          ceph_assert(p.first->owns_connection(*conn));
          // connect() here returns the existing reverse-direction connection
          crimson::net::ConnectionRef peer = p.second->connect(
            p.first->get_myaddr(), p.first->get_mytype());
          peer->mark_down();
          dispatcher.clear_pending(peer);
          available_connections.erase(peer);
        }
      }
      ceph_assert(available_connections.erase(conn) == 1U);
      return seastar::now();
    });
  }
void print_internal_state(bool detail=false) {
logger().info("available_connections: {} inflight messages: {}",
available_connections.size(),
dispatcher.get_num_pending_msgs());
if (detail && !available_connections.empty()) {
dispatcher.print();
}
}
seastar::future<> wait_for_done() {
int i = 0;
return seastar::do_until(
[this] { return !dispatcher.get_num_pending_msgs(); },
[this, &i]
{
if (i++ % 50 == 0){
print_internal_state(true);
}
return seastar::sleep(100ms);
}).then([this] {
return seastar::do_for_each(available_servers, [] (auto server) {
if (verbose) {
logger().info("server {} shutdown" , server->get_myaddrs());
}
server->stop();
return server->shutdown();
});
}).then([this] {
return seastar::do_for_each(available_clients, [] (auto client) {
if (verbose) {
logger().info("client {} shutdown" , client->get_myaddrs());
}
client->stop();
return client->shutdown();
});
});
}
  // Called by the dispatcher on a remote reset: forget the connection.
  void handle_reset(crimson::net::ConnectionRef con) {
    available_connections.erase(con);
  }
  // Number of server messengers created so far.
  uint64_t servers_count() {
    return available_servers.size();
  }
  // Number of client messengers created so far.
  uint64_t clients_count() {
    return available_clients.size();
  }
  // Number of currently tracked live connections.
  uint64_t connections_count() {
    return available_connections.size();
  }
};
// Connection reset callback: drop the connection from the workload's
// bookkeeping and discard any messages still tracked as pending on it.
// `is_replace` is currently unused here.
void SyntheticDispatcher::ms_handle_reset(crimson::net::ConnectionRef con,
                                          bool is_replace) {
  workload->handle_reset(con);
  clear_pending(con);
}
// Reset all ms_inject_* failure/delay knobs back to 0 after an injection
// test, so later tests run without socket-level fault injection.
seastar::future<> reset_conf() {
  return seastar::when_all_succeed(
    local_conf().set_val("ms_inject_socket_failures", "0"),
    local_conf().set_val("ms_inject_internal_delays", "0"),
    local_conf().set_val("ms_inject_delay_probability", "0"),
    local_conf().set_val("ms_inject_delay_max", "0")
  ).then_unpack([] {
    return seastar::now();
  });
}
// Testing Crimson messenger (with msgr-v2 protocol) robustness against
// network delays and failures. The test includes stress tests and
// socket level delays/failures injection tests, letting time
// and randomness achieve the best test coverage.
// Test Parameters:
// Clients: 8 (stateful)
// Servers: 32 (lossless)
// Connections: 100 (Generated between random clients/server)
// Random Operations: 120 (Generate/Drop Connection, Send Message, Sleep)
// Stress test: spin up tp.servers stateful servers and tp.clients lossless
// clients, seed tp.connections random connections, run tp.random_op random
// operations, then drain and shut everything down. Any exception aborts.
seastar::future<> test_stress(thrash_params_t tp)
{
  logger().info("test_stress():");
  SyntheticWorkload test_msg(tp.servers, tp.clients, 100,
                             crimson::net::SocketPolicy::stateful_server(0),
                             crimson::net::SocketPolicy::lossless_client(0));
  return seastar::do_with(test_msg, [tp]
    (SyntheticWorkload& test_msg) {
    return seastar::do_until([&test_msg] {
      return test_msg.servers_count() == test_msg.servers; },
      [&test_msg] {
      entity_addr_t bind_addr = get_server_addr();
      bind_addr.set_type(entity_addr_t::TYPE_MSGR2);
      uint64_t server_num = get_nonce();
      return test_msg.init_server(entity_name_t::OSD(server_num),
                                  "server", server_num , bind_addr);
    }).then([&test_msg] {
      return seastar::do_until([&test_msg] {
        return test_msg.clients_count() == test_msg.clients; },
        [&test_msg] {
        return test_msg.init_client(entity_name_t::CLIENT(-1),
                                    "client", get_nonce());
      });
    }).then([&test_msg, tp] {
      return test_msg.generate_connections(tp.connections);
    }).then([&test_msg, tp] {
      return test_msg.random_op(tp.random_op);
    }).then([&test_msg] {
      return test_msg.wait_for_done();
    }).then([] {
      logger().info("test_stress() DONE");
    }).handle_exception([] (auto eptr) {
      logger().error(
        "test_stress() failed: got exception {}",
        eptr);
      throw;
    });
  });
}
// Test Parameters:
// Clients: 8 (stateful)
// Servers: 32 (lossless)
// Connections: 100 (Generated between random clients/server)
// Random Operations: 120 (Generate/Drop Connection, Send Message, Sleep)
// Same topology as test_stress(), but with socket-level failure and delay
// injection (ms_inject_*) enabled while the random operations run; the
// injection knobs are reset via reset_conf() once the workload drains.
seastar::future<> test_injection(thrash_params_t tp)
{
  logger().info("test_injection():");
  SyntheticWorkload test_msg(tp.servers, tp.clients, 100,
                             crimson::net::SocketPolicy::stateful_server(0),
                             crimson::net::SocketPolicy::lossless_client(0));
  return seastar::do_with(test_msg, [tp]
    (SyntheticWorkload& test_msg) {
    return seastar::do_until([&test_msg] {
      return test_msg.servers_count() == test_msg.servers; },
      [&test_msg] {
      entity_addr_t bind_addr = get_server_addr();
      bind_addr.set_type(entity_addr_t::TYPE_MSGR2);
      uint64_t server_num = get_nonce();
      return test_msg.init_server(entity_name_t::OSD(server_num),
                                  "server", server_num , bind_addr);
    }).then([&test_msg] {
      return seastar::do_until([&test_msg] {
        return test_msg.clients_count() == test_msg.clients; },
        [&test_msg] {
        return test_msg.init_client(entity_name_t::CLIENT(-1),
                                    "client", get_nonce());
      });
    }).then([] {
      // enable failure/delay injection only after all messengers are up
      return seastar::when_all_succeed(
        local_conf().set_val("ms_inject_socket_failures", "30"),
        local_conf().set_val("ms_inject_internal_delays", "0.1"),
        local_conf().set_val("ms_inject_delay_probability", "1"),
        local_conf().set_val("ms_inject_delay_max", "5"));
    }).then_unpack([] {
      return seastar::now();
    }).then([&test_msg, tp] {
      return test_msg.generate_connections(tp.connections);
    }).then([&test_msg, tp] {
      return test_msg.random_op(tp.random_op);
    }).then([&test_msg] {
      return test_msg.wait_for_done();
    }).then([] {
      // fixed: this completion message was previously misspelled
      // as "test_inejction() DONE"
      logger().info("test_injection() DONE");
      return seastar::now();
    }).then([] {
      return reset_conf();
    }).handle_exception([] (auto eptr) {
      logger().error(
        "test_injection() failed: got exception {}",
        eptr);
      throw;
    });
  });
}
}
// Parse early ceph args, load config, then run the stress and injection
// test sequences. Returns the process exit code (0 on success, 1 on any
// exception).
seastar::future<int> do_test(seastar::app_template& app)
{
  std::vector<const char*> args;
  std::string cluster;
  std::string conf_file_list;
  auto init_params = ceph_argparse_early_args(args,
                                              CEPH_ENTITY_TYPE_CLIENT,
                                              &cluster,
                                              &conf_file_list);
  return crimson::common::sharded_conf().start(init_params.name, cluster)
  .then([conf_file_list] {
    return local_conf().parse_config_files(conf_file_list);
  }).then([&app] {
    auto&& config = app.configuration();
    verbose = config["verbose"].as<bool>();
    return test_stress(thrash_params_t{8, 32, 50, 120})
    .then([] {
      return test_injection(thrash_params_t{16, 32, 50, 120});
    }).then([] {
      logger().info("All tests succeeded");
      // Seastar has bugs to have events undispatched during shutdown,
      // which will result in memory leak and thus fail LeakSanitizer.
      return seastar::sleep(100ms);
    });
  }).then([] {
    return crimson::common::sharded_conf().stop();
  }).then([] {
    return 0;
  }).handle_exception([] (auto eptr) {
    logger().error("Test failed: got exception {}", eptr);
    return 1;
  });
}
// Entry point: register the --verbose option and hand control to do_test()
// inside the seastar reactor.
int main(int argc, char** argv)
{
  seastar::app_template app;
  app.add_options()
    ("verbose,v", bpo::value<bool>()->default_value(false),
     "chatty if true");
  return app.run(argc, argv, [&app] {
    return do_test(app);
  });
}
| 21,085 | 30.471642 | 86 | cc |
null | ceph-main/src/test/crimson/test_monc.cc | #include <seastar/core/app-template.hh>
#include "common/ceph_argparse.h"
#include "crimson/common/auth_handler.h"
#include "crimson/common/config_proxy.h"
#include "crimson/mon/MonClient.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Messenger.h"
using Config = crimson::common::ConfigProxy;
using MonClient = crimson::mon::Client;
namespace {
// No-op auth handler: the monc test does not care about the authenticated
// entity or its caps, it only needs a handler to satisfy the interface.
class DummyAuthHandler : public crimson::common::AuthHandler {
public:
  void handle_authentication(const EntityName& name,
                             const AuthCapsInfo& caps) final
  {}
};
// single shared instance used by test_monc()
DummyAuthHandler dummy_handler;
}
using namespace std::literals;
// Smoke test for the crimson MonClient: bring up config and perf-counter
// services, create an OSD messenger, start the mon client with a 10s
// timeout on the initial monmap/auth exchange, then stop and tear down.
static seastar::future<> test_monc()
{
  return crimson::common::sharded_conf().start(EntityName{}, "ceph"sv).then([] {
    std::vector<const char*> args;
    std::string cluster;
    std::string conf_file_list;
    auto init_params = ceph_argparse_early_args(args,
                                                CEPH_ENTITY_TYPE_CLIENT,
                                                &cluster,
                                                &conf_file_list);
    auto& conf = crimson::common::local_conf();
    conf->name = init_params.name;
    conf->cluster = cluster;
    return conf.parse_config_files(conf_file_list);
  }).then([] {
    return crimson::common::sharded_perf_coll().start();
  }).then([]() mutable {
    auto msgr = crimson::net::Messenger::create(entity_name_t::OSD(0), "monc", 0, true);
    return seastar::do_with(MonClient{*msgr, dummy_handler},
                            [msgr](auto& monc) mutable {
      return msgr->start({&monc}).then([&monc] {
        // fail the test if the monitor handshake takes longer than 10s
        return seastar::with_timeout(
          seastar::lowres_clock::now() + std::chrono::seconds{10},
          monc.start());
      }).then([&monc] {
        return monc.stop();
      });
    }).finally([msgr] {
      return msgr->shutdown();
    });
  }).finally([] {
    return crimson::common::sharded_perf_coll().stop().then([] {
      return crimson::common::sharded_conf().stop();
    });
  });
}
// Entry point: run test_monc() inside the seastar reactor and report
// success/failure on stdout; a failure re-raises the exception so the
// process exits non-zero.
int main(int argc, char** argv)
{
  seastar::app_template app;
  return app.run(argc, argv, [&] {
    return test_monc().then([] {
      std::cout << "All tests succeeded" << std::endl;
    }).handle_exception([] (auto eptr) {
      std::cout << "Test failure" << std::endl;
      return seastar::make_exception_future<>(eptr);
    });
  });
}
/*
* Local Variables:
* compile-command: "make -j4 \
* -C ../../../build \
* unittest_seastar_monc"
* End:
*/
| 2,474 | 28.117647 | 88 | cc |
null | ceph-main/src/test/crimson/test_perfcounters.cc | #include <pthread.h>
#include <stdlib.h>
#include <iostream>
#include <fmt/format.h>
#include "common/Formatter.h"
#include "common/perf_counters.h"
#include "crimson/common/perf_counters_collection.h"
#include <seastar/core/app-template.hh>
#include <seastar/core/sharded.hh>
// Perf-counter index range for this test; PERFTEST_INDEX is the single
// counter exercised, bracketed by FIRST/LAST as required by
// PerfCountersBuilder.
enum {
  PERFTEST_FIRST = 1000000,
  PERFTEST_INDEX,
  PERFTEST_LAST,
};
// value written to (and expected back from) the counter
static constexpr uint64_t PERF_VAL = 42;
// On every shard: build a perf-counter set, bump PERFTEST_INDEX by
// PERF_VAL, register it with the shard's collection, then read it back on
// all shards and throw if any counter does not hold PERF_VAL.
static seastar::future<> test_perfcounters(){
  return crimson::common::sharded_perf_coll().start().then([] {
    return crimson::common::sharded_perf_coll().invoke_on_all([] (auto& s){
      std::string name =fmt::format("seastar-osd::shard-{}",seastar::this_shard_id());
      PerfCountersBuilder plb(NULL, name, PERFTEST_FIRST,PERFTEST_LAST);
      plb.add_u64_counter(PERFTEST_INDEX, "perftest_count", "count perftest");
      auto perf_logger = plb.create_perf_counters();
      perf_logger->inc(PERFTEST_INDEX,PERF_VAL);
      s.get_perf_collection()->add(perf_logger);
    });
  }).then([]{
    return crimson::common::sharded_perf_coll().invoke_on_all([] (auto& s){
      auto pcc = s.get_perf_collection();
      pcc->with_counters([](auto& by_path){
        for (auto& perf_counter : by_path) {
          if (PERF_VAL != perf_counter.second.perf_counters->get(PERFTEST_INDEX)) {
            throw std::runtime_error("perf counter does not match");
          }
        }
      });
    });
  }).finally([] {
    return crimson::common::sharded_perf_coll().stop();
  });
}
// Entry point: run test_perfcounters() inside the seastar reactor,
// reporting success/failure on stdout; a failure re-raises so the process
// exits non-zero.
int main(int argc, char** argv)
{
  seastar::app_template app;
  return app.run(argc, argv, [&] {
    return test_perfcounters().then([] {
      std::cout << "All tests succeeded" << std::endl;
    }).handle_exception([] (auto eptr) {
      std::cout << "Test failure" << std::endl;
      return seastar::make_exception_future<>(eptr);
    });
  });
}
| 1,836 | 28.15873 | 86 | cc |
null | ceph-main/src/test/crimson/test_socket.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_argparse.h"
#include <fmt/os.h>
#include <seastar/core/app-template.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/sharded.hh>
#include <seastar/core/sleep.hh>
#include <seastar/core/when_all.hh>
#include <seastar/util/later.hh>
#include "crimson/common/log.h"
#include "crimson/net/Errors.h"
#include "crimson/net/Fwd.h"
#include "crimson/net/Socket.h"
using crimson::common::local_conf;
namespace {
using namespace std::chrono_literals;
using seastar::engine;
using seastar::future;
using crimson::net::error;
using crimson::net::listen_ertr;
using crimson::net::ShardedServerSocket;
using crimson::net::Socket;
using crimson::net::SocketRef;
using crimson::net::stop_t;
using SocketFRef = seastar::foreign_ptr<SocketRef>;
// Shortcut for this test's log channel.
seastar::logger &logger() {
  return crimson::get_logger(ceph_subsys_test);
}
// Fixed loopback address (127.0.0.1:9020) shared by all socket tests;
// callers set the address type themselves when needed.
entity_addr_t get_server_addr() {
  entity_addr_t addr;
  addr.parse("127.0.0.1", nullptr);
  addr.set_port(9020);
  return addr;
}
// Open a client Socket to `saddr`, logging before and after the connect.
future<SocketRef> socket_connect(const entity_addr_t& saddr) {
  logger().debug("socket_connect() to {} ...", saddr);
  return Socket::connect(saddr).then([](auto socket) {
    logger().debug("socket_connect() connected");
    return socket;
  });
}
// Connecting to the server address while nothing is listening must fail
// with ECONNREFUSED; any other outcome (success or a different error)
// aborts the test.
future<> test_refused() {
  logger().info("test_refused()...");
  auto saddr = get_server_addr();
  return socket_connect(saddr).discard_result().then([saddr] {
    logger().error("test_refused(): connection to {} is not refused", saddr);
    ceph_abort();
  }).handle_exception_type([](const std::system_error& e) {
    if (e.code() != std::errc::connection_refused) {
      logger().error("test_refused() got unexpeted error {}", e);
      ceph_abort();
    } else {
      logger().info("test_refused() ok\n");
    }
  }).handle_exception([](auto eptr) {
    logger().error("test_refused() got unexpeted exception {}", eptr);
    ceph_abort();
  });
}
// Binding a second server socket to an already-bound address must fail
// with EADDRINUSE; both sockets are destroyed afterwards.
future<> test_bind_same(bool is_fixed_cpu) {
  logger().info("test_bind_same()...");
  return ShardedServerSocket::create(is_fixed_cpu
  ).then([is_fixed_cpu](auto pss1) {
    auto saddr = get_server_addr();
    return pss1->listen(saddr).safe_then([saddr, is_fixed_cpu] {
      // try to bind the same address
      return ShardedServerSocket::create(is_fixed_cpu
      ).then([saddr](auto pss2) {
        return pss2->listen(saddr).safe_then([] {
          logger().error("test_bind_same() should raise address_in_use");
          ceph_abort();
        }, listen_ertr::all_same_way(
            [](const std::error_code& e) {
          if (e == std::errc::address_in_use) {
            // successful!
            logger().info("test_bind_same() ok\n");
          } else {
            logger().error("test_bind_same() got unexpected error {}", e);
            ceph_abort();
          }
          // Note: need to return a explicit ready future, or there will be a
          // runtime error: member access within null pointer of type 'struct promise_base'
          return seastar::now();
        })).then([pss2] {
          return pss2->shutdown_destroy();
        });
      });
    }, listen_ertr::all_same_way(
        [saddr](const std::error_code& e) {
      logger().error("test_bind_same(): there is another instance running at {}",
                     saddr);
      ceph_abort();
    })).then([pss1] {
      return pss1->shutdown_destroy();
    }).handle_exception([](auto eptr) {
      logger().error("test_bind_same() got unexpeted exception {}", eptr);
      ceph_abort();
    });
  });
}
// Listen on the test address, then open three client connections that are
// immediately closed; the accept callback just sleeps briefly and closes
// the accepted socket. Verifies the basic listen/accept/close path.
future<> test_accept(bool is_fixed_cpu) {
  logger().info("test_accept()");
  return ShardedServerSocket::create(is_fixed_cpu
  ).then([](auto pss) {
    auto saddr = get_server_addr();
    return pss->listen(saddr
    ).safe_then([pss] {
      return pss->accept([](auto socket, auto paddr) {
        logger().info("test_accept(): accepted at shard {}", seastar::this_shard_id());
        // simple accept
        return seastar::sleep(100ms
        ).then([socket = std::move(socket)]() mutable {
          return socket->close(
          ).finally([cleanup = std::move(socket)] {});
        });
      });
    }, listen_ertr::all_same_way(
        [saddr](const std::error_code& e) {
      logger().error("test_accept(): there is another instance running at {}",
                     saddr);
      ceph_abort();
    })).then([saddr] {
      return seastar::when_all(
        socket_connect(saddr).then([](auto socket) {
          return socket->close().finally([cleanup = std::move(socket)] {}); }),
        socket_connect(saddr).then([](auto socket) {
          return socket->close().finally([cleanup = std::move(socket)] {}); }),
        socket_connect(saddr).then([](auto socket) {
          return socket->close().finally([cleanup = std::move(socket)] {}); })
      ).discard_result();
    }).then([] {
      // should be enough to be connected locally
      return seastar::sleep(50ms);
    }).then([] {
      logger().info("test_accept() ok\n");
    }).then([pss] {
      return pss->shutdown_destroy();
    }).handle_exception([](auto eptr) {
      logger().error("test_accept() got unexpeted exception {}", eptr);
      ceph_abort();
    });
  });
}
// Builds one client/server socket pair across shards and runs a callback
// on each end: the client socket lives on shard 0, the server listener on
// shard 1 (the accepted socket may land on another shard unless fixed-cpu
// dispatching is enabled). Both callbacks run concurrently; each socket is
// closed after its callback completes, and any callback exception aborts.
class SocketFactory {
  static constexpr seastar::shard_id CLIENT_CPU = 0u;
  SocketRef client_socket;
  seastar::promise<> server_connected;
  static constexpr seastar::shard_id SERVER_CPU = 1u;
  ShardedServerSocket *pss = nullptr;
  seastar::shard_id server_socket_CPU;
  SocketFRef server_socket;
 public:
  template <typename FuncC, typename FuncS>
  static future<> dispatch_sockets(
      bool is_fixed_cpu,
      FuncC&& cb_client,
      FuncS&& cb_server) {
    ceph_assert_always(seastar::this_shard_id() == CLIENT_CPU);
    // `owner` keeps the factory alive until the final .finally() below
    auto owner = std::make_unique<SocketFactory>();
    auto psf = owner.get();
    auto saddr = get_server_addr();
    return seastar::smp::submit_to(SERVER_CPU, [psf, saddr, is_fixed_cpu] {
      return ShardedServerSocket::create(is_fixed_cpu
      ).then([psf, saddr](auto pss) {
        psf->pss = pss;
        return pss->listen(saddr
        ).safe_then([] {
        }, listen_ertr::all_same_way([saddr](const std::error_code& e) {
          logger().error("dispatch_sockets(): there is another instance running at {}",
                         saddr);
          ceph_abort();
        }));
      });
    }).then([psf, saddr] {
      // connect from shard 0 and accept on the server side concurrently
      return seastar::when_all_succeed(
        seastar::smp::submit_to(CLIENT_CPU, [psf, saddr] {
          return socket_connect(saddr).then([psf](auto socket) {
            ceph_assert_always(seastar::this_shard_id() == CLIENT_CPU);
            psf->client_socket = std::move(socket);
          });
        }),
        seastar::smp::submit_to(SERVER_CPU, [psf] {
          return psf->pss->accept([psf](auto _socket, auto paddr) {
            logger().info("dispatch_sockets(): accepted at shard {}",
                          seastar::this_shard_id());
            psf->server_socket_CPU = seastar::this_shard_id();
            if (psf->pss->is_fixed_shard_dispatching()) {
              ceph_assert_always(SERVER_CPU == seastar::this_shard_id());
            }
            SocketFRef socket = seastar::make_foreign(std::move(_socket));
            psf->server_socket = std::move(socket);
            // the promise must be resolved on the shard that waits on it
            return seastar::smp::submit_to(CLIENT_CPU, [psf] {
              psf->server_connected.set_value();
            });
          });
        })
      );
    }).then_unpack([] {
      return seastar::now();
    }).then([psf] {
      return psf->server_connected.get_future();
    }).then([psf] {
      // the listener is no longer needed once the pair is established
      if (psf->pss) {
        return seastar::smp::submit_to(SERVER_CPU, [psf] {
          return psf->pss->shutdown_destroy();
        });
      }
      return seastar::now();
    }).then([psf,
             cb_client = std::move(cb_client),
             cb_server = std::move(cb_server)]() mutable {
      logger().debug("dispatch_sockets(): client/server socket are ready");
      return seastar::when_all_succeed(
        seastar::smp::submit_to(CLIENT_CPU,
            [socket = psf->client_socket.get(), cb_client = std::move(cb_client)] {
          return cb_client(socket).then([socket] {
            logger().debug("closing client socket...");
            return socket->close();
          }).handle_exception([](auto eptr) {
            logger().error("dispatch_sockets():"
                           " cb_client() got unexpeted exception {}", eptr);
            ceph_abort();
          });
        }),
        seastar::smp::submit_to(psf->server_socket_CPU,
            [socket = psf->server_socket.get(), cb_server = std::move(cb_server)] {
          return cb_server(socket).then([socket] {
            logger().debug("closing server socket...");
            return socket->close();
          }).handle_exception([](auto eptr) {
            logger().error("dispatch_sockets():"
                           " cb_server() got unexpeted exception {}", eptr);
            ceph_abort();
          });
        })
      );
    }).then_unpack([] {
      return seastar::now();
    }).finally([cleanup = std::move(owner)] {});
  }
};
// Drives one side of a socket: writes fixed-size payloads whose first word
// is a sequence number and whose last word is DATA_TAIL, and/or reads them
// back and verifies both. "Bounded" variants run a fixed number of rounds;
// "unbounded" variants run until the peer goes away and treat the resulting
// disconnect error as success.
class Connection {
  static const uint64_t DATA_TAIL = 5327;
  static const unsigned DATA_SIZE = 4096;
  std::array<uint64_t, DATA_SIZE> data = {0};

  // Check a received payload: word 0 must be the expected sequence number
  // and the last word must be the DATA_TAIL marker.
  void verify_data_read(const uint64_t read_data[]) {
    ceph_assert(read_data[0] == read_count);
    // fixed: this previously was `data[DATA_SIZE - 1] = DATA_TAIL` — an
    // assignment to the local send buffer (already holding DATA_TAIL), so
    // the assert always passed and the received tail was never checked.
    ceph_assert(read_data[DATA_SIZE - 1] == DATA_TAIL);
  }

  Socket* socket = nullptr;
  uint64_t write_count = 0;
  uint64_t read_count = 0;

  Connection(Socket* socket) : socket{socket} {
    assert(socket);
    data[DATA_SIZE - 1] = DATA_TAIL;
  }

  // Send payloads until `round` writes complete (round == 0: forever);
  // optionally force-shutdown the output direction when done.
  future<> dispatch_write(unsigned round = 0, bool force_shut = false) {
    logger().debug("dispatch_write(round={}, force_shut={})...", round, force_shut);
    return seastar::repeat([this, round, force_shut] {
      if (round != 0 && round <= write_count) {
        return seastar::futurize_invoke([this, force_shut] {
          if (force_shut) {
            logger().debug("dispatch_write() done, force shutdown output");
            socket->force_shutdown_out();
          } else {
            logger().debug("dispatch_write() done");
          }
        }).then([] {
          return seastar::make_ready_future<stop_t>(stop_t::yes);
        });
      } else {
        data[0] = write_count;
        bufferlist bl;
        bl.append(buffer::copy(
            reinterpret_cast<const char*>(&data), sizeof(data)));
        return socket->write(bl
        ).then([this] {
          return socket->flush();
        }).then([this] {
          write_count += 1;
          return seastar::make_ready_future<stop_t>(stop_t::no);
        });
      }
    });
  }

  // Write until the peer disappears; broken-pipe/connection-reset is the
  // expected outcome, anything else is re-thrown.
  future<> dispatch_write_unbounded() {
    return dispatch_write(
    ).then([] {
      ceph_abort();
    }).handle_exception_type([this](const std::system_error& e) {
      if (e.code() != std::errc::broken_pipe &&
          e.code() != std::errc::connection_reset) {
        logger().error("dispatch_write_unbounded(): "
                       "unexpected error {}", e);
        throw;
      }
      // successful
      logger().debug("dispatch_write_unbounded(): "
                     "expected error {}", e);
      shutdown();
    });
  }

  // Receive and verify payloads until `round` reads complete (round == 0:
  // forever); optionally force-shutdown the input direction when done.
  future<> dispatch_read(unsigned round = 0, bool force_shut = false) {
    logger().debug("dispatch_read(round={}, force_shut={})...", round, force_shut);
    return seastar::repeat([this, round, force_shut] {
      if (round != 0 && round <= read_count) {
        return seastar::futurize_invoke([this, force_shut] {
          if (force_shut) {
            logger().debug("dispatch_read() done, force shutdown input");
            socket->force_shutdown_in();
          } else {
            logger().debug("dispatch_read() done");
          }
        }).then([] {
          return seastar::make_ready_future<stop_t>(stop_t::yes);
        });
      } else {
        return seastar::futurize_invoke([this] {
          // we want to test both Socket::read() and Socket::read_exactly()
          if (read_count % 2) {
            return socket->read(DATA_SIZE * sizeof(uint64_t)
            ).then([this](ceph::bufferlist bl) {
              uint64_t read_data[DATA_SIZE];
              auto p = bl.cbegin();
              ::ceph::decode_raw(read_data, p);
              verify_data_read(read_data);
            });
          } else {
            return socket->read_exactly(DATA_SIZE * sizeof(uint64_t)
            ).then([this](auto bptr) {
              uint64_t read_data[DATA_SIZE];
              std::memcpy(read_data, bptr.c_str(), DATA_SIZE * sizeof(uint64_t));
              verify_data_read(read_data);
            });
          }
        }).then([this] {
          ++read_count;
          return seastar::make_ready_future<stop_t>(stop_t::no);
        });
      }
    });
  }

  // Read until the peer disappears; read_eof/connection-reset is the
  // expected outcome, anything else is re-thrown.
  future<> dispatch_read_unbounded() {
    return dispatch_read(
    ).then([] {
      ceph_abort();
    }).handle_exception_type([this](const std::system_error& e) {
      if (e.code() != error::read_eof
          && e.code() != std::errc::connection_reset) {
        logger().error("dispatch_read_unbounded(): "
                       "unexpected error {}", e);
        throw;
      }
      // successful
      logger().debug("dispatch_read_unbounded(): "
                     "expected error {}", e);
      shutdown();
    });
  }

  void shutdown() {
    socket->shutdown();
  }

 public:
  // Run `round` concurrent write and read rounds over `socket`.
  static future<> dispatch_rw_bounded(Socket* socket, unsigned round,
                                      bool force_shut = false) {
    logger().debug("dispatch_rw_bounded(round={}, force_shut={})...",
                   round, force_shut);
    return seastar::do_with(Connection{socket},
                            [round, force_shut](auto& conn) {
      ceph_assert(round != 0);
      return seastar::when_all_succeed(
        conn.dispatch_write(round, force_shut),
        conn.dispatch_read(round, force_shut)
      ).then_unpack([] {
        return seastar::now();
      });
    });
  }

  // Read and write until disconnect; with preemptive_shut, shut the socket
  // down locally after 100ms instead of waiting for the peer.
  static future<> dispatch_rw_unbounded(Socket* socket, bool preemptive_shut = false) {
    logger().debug("dispatch_rw_unbounded(preemptive_shut={})...", preemptive_shut);
    return seastar::do_with(Connection{socket}, [preemptive_shut](auto& conn) {
      return seastar::when_all_succeed(
        conn.dispatch_write_unbounded(),
        conn.dispatch_read_unbounded(),
        seastar::futurize_invoke([&conn, preemptive_shut] {
          if (preemptive_shut) {
            return seastar::sleep(100ms).then([&conn] {
              logger().debug("dispatch_rw_unbounded() shutdown socket preemptively(100ms)");
              conn.shutdown();
            });
          } else {
            return seastar::now();
          }
        })
      ).then_unpack([] {
        return seastar::now();
      });
    });
  }
};
// Both sides run 128 bounded read/write rounds against each other; any
// exception aborts the test.
future<> test_read_write(bool is_fixed_cpu) {
  logger().info("test_read_write()...");
  return SocketFactory::dispatch_sockets(
      is_fixed_cpu,
      [](auto cs) { return Connection::dispatch_rw_bounded(cs, 128); },
      [](auto ss) { return Connection::dispatch_rw_bounded(ss, 128); }
  ).then([] {
    logger().info("test_read_write() ok\n");
  }).handle_exception([](auto eptr) {
    logger().error("test_read_write() got unexpeted exception {}", eptr);
    ceph_abort();
  });
}
// Client runs 128 bounded rounds then force-shuts its side; the server
// runs unbounded and must observe the disconnect. The client tolerates a
// read_eof caused by the forced shutdown.
future<> test_unexpected_down(bool is_fixed_cpu) {
  logger().info("test_unexpected_down()...");
  return SocketFactory::dispatch_sockets(
      is_fixed_cpu,
      [](auto cs) {
        return Connection::dispatch_rw_bounded(cs, 128, true
        ).handle_exception_type([](const std::system_error& e) {
          logger().debug("test_unexpected_down(): client get error {}", e);
          ceph_assert(e.code() == error::read_eof);
        });
      },
      [](auto ss) { return Connection::dispatch_rw_unbounded(ss); }
  ).then([] {
    logger().info("test_unexpected_down() ok\n");
  }).handle_exception([](auto eptr) {
    logger().error("test_unexpected_down() got unexpeted exception {}", eptr);
    ceph_abort();
  });
}
// Client shuts its socket down immediately; the server's unbounded loop
// must see the shutdown propagate as a disconnect.
future<> test_shutdown_propagated(bool is_fixed_cpu) {
  logger().info("test_shutdown_propagated()...");
  return SocketFactory::dispatch_sockets(
      is_fixed_cpu,
      [](auto cs) {
        logger().debug("test_shutdown_propagated() shutdown client socket");
        cs->shutdown();
        return seastar::now();
      },
      [](auto ss) { return Connection::dispatch_rw_unbounded(ss); }
  ).then([] {
    logger().info("test_shutdown_propagated() ok\n");
  }).handle_exception([](auto eptr) {
    logger().error("test_shutdown_propagated() got unexpeted exception {}", eptr);
    ceph_abort();
  });
}
// Both sides run unbounded; the client preemptively shuts down after
// 100ms, and both loops must terminate cleanly on the disconnect errors.
future<> test_preemptive_down(bool is_fixed_cpu) {
  logger().info("test_preemptive_down()...");
  return SocketFactory::dispatch_sockets(
      is_fixed_cpu,
      [](auto cs) { return Connection::dispatch_rw_unbounded(cs, true); },
      [](auto ss) { return Connection::dispatch_rw_unbounded(ss); }
  ).then([] {
    logger().info("test_preemptive_down() ok\n");
  }).handle_exception([](auto eptr) {
    logger().error("test_preemptive_down() got unexpeted exception {}", eptr);
    ceph_abort();
  });
}
// Run the full socket test sequence for one dispatching mode
// (fixed-shard or not).
future<> do_test_with_type(bool is_fixed_cpu) {
  return test_bind_same(is_fixed_cpu
  ).then([is_fixed_cpu] {
    return test_accept(is_fixed_cpu);
  }).then([is_fixed_cpu] {
    return test_read_write(is_fixed_cpu);
  }).then([is_fixed_cpu] {
    return test_unexpected_down(is_fixed_cpu);
  }).then([is_fixed_cpu] {
    return test_shutdown_propagated(is_fixed_cpu);
  }).then([is_fixed_cpu] {
    return test_preemptive_down(is_fixed_cpu);
  });
}
}
// Parse early ceph args, load config (disabling internal delay injection),
// then run the refused-connection check followed by the full suite in both
// dispatching modes. Returns the process exit code.
seastar::future<int> do_test(seastar::app_template& app)
{
  std::vector<const char*> args;
  std::string cluster;
  std::string conf_file_list;
  auto init_params = ceph_argparse_early_args(args,
                                              CEPH_ENTITY_TYPE_CLIENT,
                                              &cluster,
                                              &conf_file_list);
  return crimson::common::sharded_conf().start(init_params.name, cluster
  ).then([conf_file_list] {
    return local_conf().parse_config_files(conf_file_list);
  }).then([] {
    return local_conf().set_val("ms_inject_internal_delays", "0");
  }).then([] {
    return test_refused();
  }).then([] {
    return do_test_with_type(true);
  }).then([] {
    return do_test_with_type(false);
  }).then([] {
    logger().info("All tests succeeded");
    // Seastar has bugs to have events undispatched during shutdown,
    // which will result in memory leak and thus fail LeakSanitizer.
    return seastar::sleep(100ms);
  }).then([] {
    return crimson::common::sharded_conf().stop();
  }).then([] {
    return 0;
  }).handle_exception([](auto eptr) {
    logger().error("Test failed: got exception {}", eptr);
    return 1;
  });
}
// Entry point: hand control to do_test() inside the seastar reactor.
int main(int argc, char** argv)
{
  seastar::app_template app;
  return app.run(argc, argv, [&app] {
    return do_test(app);
  });
}
| 18,961 | 33.104317 | 92 | cc |
null | ceph-main/src/test/crimson/cbt/radosbench_4K_read.yaml | meta:
- desc: |
Run radosbench benchmark using cbt.
4K read workload.
tasks:
- cbt:
benchmarks:
radosbench:
concurrent_ops: 16
concurrent_procs: 2
op_size: [4096]
pool_profile: 'replicated'
read_time: 30
read_only: true
readmode: 'rand'
prefill_time: 3
acceptable:
bandwidth: '(or (greater) (near 0.05))'
iops_avg: '(or (greater) (near 0.05))'
iops_stddev: '(or (less) (near 2.00))'
latency_avg: '(or (less) (near 0.05))'
cpu_cycles_per_op: '(or (less) (near 0.05))'
monitoring_profiles:
perf:
nodes:
- osds
args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
cluster:
osds_per_node: 3
iterations: 1
pool_profiles:
replicated:
pg_size: 128
pgp_size: 128
replication: 'replicated'
| 915 | 23.756757 | 59 | yaml |
null | ceph-main/src/test/crimson/cbt/radosbench_4K_write.yaml | meta:
- desc: |
Run radosbench benchmark using cbt.
4K write workload.
tasks:
- cbt:
benchmarks:
radosbench:
concurrent_ops: 16
concurrent_procs: 2
op_size: [4096]
pool_profile: 'replicated'
write_time: 3
write_only: true
acceptable:
bandwidth: '(or (greater) (near 0.05))'
iops_avg: '(or (greater) (near 0.05))'
iops_stddev: '(or (less) (near 2.00))'
latency_avg: '(or (less) (near 0.05))'
cpu_cycles_per_op: '(or (less) (near 0.05))'
monitoring_profiles:
perf:
nodes:
- osds
args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
cluster:
osds_per_node: 3
iterations: 1
pool_profiles:
replicated:
pg_size: 128
pgp_size: 128
replication: 'replicated'
| 868 | 23.828571 | 59 | yaml |
null | ceph-main/src/test/crimson/cbt/t2c.py | #!/usr/bin/env python3
from __future__ import print_function
import argparse
import os
import os.path
import socket
import sys
import yaml
class Translator(object):
    """Translate a teuthology 'cbt' task config into a CBT config dict."""

    def __init__(self, build_dir: str) -> None:
        # directory holding ceph.conf and the bin/ tree of a local build
        self.build_dir = build_dir

    def translate(self, config: dict) -> dict:
        """Return the full CBT config built from a teuthology cbt task.

        Copies 'benchmarks' and 'monitoring_profiles' through unchanged and
        rewrites 'cluster' for a single-node local run.
        """
        cluster = config.get('cluster', {})
        benchmarks = config.get('benchmarks', [])
        monitoring_profiles = config.get('monitoring_profiles', {})
        return dict(cluster=self._create_cluster_config(cluster),
                    benchmarks=benchmarks,
                    monitoring_profiles=monitoring_profiles)

    def _create_cluster_config(self, cluster: dict) -> dict:
        # prepare the "cluster" section consumed by CBT
        localhost = socket.getfqdn()
        num_osds = cluster.get('osds_per_node', 3)
        # only these keys are carried over from the teuthology config
        items_to_copy = ['iterations', 'pool_profiles']
        conf = dict((k, cluster[k]) for k in items_to_copy if k in cluster)
        # everything runs on this host, against the local build tree
        conf.update(dict(
            head=localhost,
            osds=[localhost],
            osds_per_node=num_osds,
            mons=[localhost],
            clients=[localhost],
            rebuild_every_test=False,
            conf_file=os.path.join(self.build_dir, 'ceph.conf'),
            ceph_cmd=os.path.join(self.build_dir, 'bin', 'ceph'),
            rados_cmd=os.path.join(self.build_dir, 'bin', 'rados'),
            pid_dir=os.path.join(self.build_dir, 'out')
        ))
        return conf
def get_cbt_tasks(path):
    """Yield the config dict of every 'cbt' task in a teuthology YAML file.

    :param path: path to the teuthology job description file
    """
    # 'f' instead of 'input' to avoid shadowing the builtin.
    with open(path) as f:
        # safe_load: these are plain config files, and bare yaml.load()
        # without a Loader is unsafe and rejected by PyYAML >= 6.
        teuthology_config = yaml.safe_load(f)
    for task in teuthology_config['tasks']:
        for name, conf in task.items():
            if name == 'cbt':
                yield conf
def main():
    """Parse CLI options, extract the single cbt task, and write CBT yaml."""
    parser = argparse.ArgumentParser(description='translate teuthology yaml to CBT yaml')
    parser.add_argument('--build-dir', default=os.getcwd(), required=False,
                        help='Directory where CMakeCache.txt is located')
    parser.add_argument('--input', required=True,
                        help='The path to the input YAML file')
    parser.add_argument('--output', required=True,
                        help='The path to the output YAML file')
    options = parser.parse_args(sys.argv[1:])
    cbt_tasks = list(get_cbt_tasks(options.input))
    # exactly one cbt task is supported
    if not cbt_tasks:
        print('cbt not found in "tasks" section', file=sys.stderr)
        return sys.exit(1)
    elif len(cbt_tasks) > 1:
        print('more than one cbt task found in "tasks" section', file=sys.stderr)
        return sys.exit(1)
    cbt_config = Translator(options.build_dir).translate(cbt_tasks[0])
    with open(options.output, 'w') as out:
        yaml.dump(cbt_config, out)
# allow running the translator directly as a script
if __name__ == '__main__':
    main()
| 2,843 | 35 | 89 | py |
null | ceph-main/src/test/crimson/seastore/test_block.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/crimson/seastore/test_block.h"
namespace crimson::os::seastore {
// Serialize the mutations recorded by set_contents() into a bufferlist.
ceph::bufferlist TestBlock::get_delta() {
  ceph::bufferlist delta_bl;
  encode(delta, delta_bl);
  return delta_bl;
}
// Replay a serialized list of test_block_delta_t mutations onto this block.
void TestBlock::apply_delta(const ceph::bufferlist &bl) {
  decltype(delta) mutations;
  auto iter = bl.begin();
  decode(mutations, iter);
  for (const auto &m : mutations) {
    set_contents(m.val, m.offset, m.len);
  }
}
// Serialize the mutations recorded by set_contents() into a bufferlist.
ceph::bufferlist TestBlockPhysical::get_delta() {
  ceph::bufferlist delta_bl;
  encode(delta, delta_bl);
  return delta_bl;
}
// Decode a serialized delta list and replay each recorded write onto the
// block's buffer.  The paddr parameter is unused by this test extent.
void TestBlockPhysical::apply_delta_and_adjust_crc(
  paddr_t, const ceph::bufferlist &bl) {
  auto it = bl.begin();
  decltype(delta) decoded;
  decode(decoded, it);
  for (const auto &entry : decoded) {
    set_contents(entry.val, entry.offset, entry.len);
  }
}
}
| 860 | 19.5 | 70 | cc |
null | ceph-main/src/test/crimson/seastore/test_block.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <random>
#include "crimson/os/seastore/transaction_manager.h"
namespace crimson::os::seastore {
// Lightweight summary of a test extent: its length and content checksum.
// Used to compare extent state across transactions.
struct test_extent_desc_t {
  size_t len = 0;
  unsigned checksum = 0;

  // Two descriptors are equal when both length and checksum agree.
  bool operator==(const test_extent_desc_t &other) const {
    return len == other.len && checksum == other.checksum;
  }
  bool operator!=(const test_extent_desc_t &other) const {
    return !operator==(other);
  }
};
// A single recorded mutation of a test block: write `len` bytes of value
// `val` starting at byte `offset`.  Appended by set_contents() and replayed
// by apply_delta() / apply_delta_and_adjust_crc().
struct test_block_delta_t {
  int8_t val = 0;
  uint16_t offset = 0;
  uint16_t len = 0;

  // DENC-generated bounded encode/decode (struct version 1, compat 1);
  // registered below via WRITE_CLASS_DENC_BOUNDED.
  DENC(test_block_delta_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.val, p);
    denc(v.offset, p);
    denc(v.len, p);
    DENC_FINISH(p);
  }
};
inline std::ostream &operator<<(
std::ostream &lhs, const test_extent_desc_t &rhs) {
return lhs << "test_extent_desc_t(len=" << rhs.len
<< ", checksum=" << rhs.checksum << ")";
}
// A 4KiB logical extent used by seastore unit tests.  Every mutation made
// through set_contents() is also recorded in `delta` so it can be encoded
// (get_delta) and replayed (apply_delta) like a real extent's journal delta.
struct TestBlock : crimson::os::seastore::LogicalCachedExtent {
  constexpr static extent_len_t SIZE = 4<<10;
  using Ref = TCachedExtentRef<TestBlock>;

  // Mutations applied since this copy was created.
  std::vector<test_block_delta_t> delta = {};

  TestBlock(ceph::bufferptr &&ptr)
    : LogicalCachedExtent(std::move(ptr)) {}
  TestBlock(const TestBlock &other)
    : LogicalCachedExtent(other) {}

  // Produce the mutable copy the cache writes through.
  CachedExtentRef duplicate_for_write(Transaction&) final {
    return CachedExtentRef(new TestBlock(*this));
  };

  static constexpr extent_types_t TYPE = extent_types_t::TEST_BLOCK;
  extent_types_t get_type() const final {
    return TYPE;
  }

  ceph::bufferlist get_delta() final;

  // Overwrite [offset, offset+len) with byte `c` and record the mutation.
  void set_contents(char c, uint16_t offset, uint16_t len) {
    ::memset(get_bptr().c_str() + offset, c, len);
    delta.push_back({c, offset, len});
  }

  // Fill the whole block with byte `c`.
  void set_contents(char c) {
    set_contents(c, 0, get_length());
  }

  // Snapshot of length + crc32c, for cheap equality checks in tests.
  test_extent_desc_t get_desc() {
    return { get_length(), get_crc32c() };
  }

  void apply_delta(const ceph::bufferlist &bl) final;
};
using TestBlockRef = TCachedExtentRef<TestBlock>;
// Physical (non-logical) counterpart of TestBlock: a 4KiB CachedExtent that
// records every set_contents() call in `delta` for later encode/replay.
struct TestBlockPhysical : crimson::os::seastore::CachedExtent{
  constexpr static extent_len_t SIZE = 4<<10;
  using Ref = TCachedExtentRef<TestBlockPhysical>;

  // Mutations applied since this copy was created.
  std::vector<test_block_delta_t> delta = {};

  TestBlockPhysical(ceph::bufferptr &&ptr)
    : CachedExtent(std::move(ptr)) {}
  TestBlockPhysical(const TestBlockPhysical &other)
    : CachedExtent(other) {}

  // Produce the mutable copy the cache writes through.
  CachedExtentRef duplicate_for_write(Transaction&) final {
    return CachedExtentRef(new TestBlockPhysical(*this));
  };

  static constexpr extent_types_t TYPE = extent_types_t::TEST_BLOCK_PHYSICAL;
  extent_types_t get_type() const final {
    return TYPE;
  }

  // Overwrite [offset, offset+len) with byte `c` and record the mutation.
  void set_contents(char c, uint16_t offset, uint16_t len) {
    ::memset(get_bptr().c_str() + offset, c, len);
    delta.push_back({c, offset, len});
  }

  // Fill the whole block with byte `c`.
  void set_contents(char c) {
    set_contents(c, 0, get_length());
  }

  ceph::bufferlist get_delta() final;

  void apply_delta_and_adjust_crc(paddr_t, const ceph::bufferlist &bl) final;
};
using TestBlockPhysicalRef = TCachedExtentRef<TestBlockPhysical>;
// Generates random in-place mutations for TestBlock instances.
struct test_block_mutator_t {
  // Random byte value to write.  Note: std::uniform_int_distribution is
  // only defined for short/int/long/long long and their unsigned variants
  // ([rand.req.genl]); instantiating it with int8_t is undefined behavior
  // and rejected outright by some standard libraries (e.g. MSVC), so draw
  // an int16_t constrained to the int8_t range and narrow when writing.
  std::uniform_int_distribution<int16_t>
  contents_distribution = std::uniform_int_distribution<int16_t>(
    std::numeric_limits<int8_t>::min(),
    std::numeric_limits<int8_t>::max());

  // Random starting offset within the block.
  std::uniform_int_distribution<uint16_t>
  offset_distribution = std::uniform_int_distribution<uint16_t>(
    0, TestBlock::SIZE - 1);

  // Random write length that keeps [offset, offset+len) inside the block.
  std::uniform_int_distribution<uint16_t> length_distribution(uint16_t offset) {
    return std::uniform_int_distribution<uint16_t>(
      0, TestBlock::SIZE - offset - 1);
  }

  // Overwrite a random in-bounds range of `block` with a random byte.
  template <typename generator_t>
  void mutate(TestBlock &block, generator_t &gen) {
    auto offset = offset_distribution(gen);
    block.set_contents(
      static_cast<char>(contents_distribution(gen)),
      offset,
      length_distribution(offset)(gen));
  }
};
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::test_block_delta_t)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::test_extent_desc_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::TestBlock> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::TestBlockPhysical> : fmt::ostream_formatter {};
#endif
| 4,252 | 26.43871 | 105 | h |
null | ceph-main/src/test/crimson/seastore/test_btree_lba_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/crimson/gtest_seastar.h"
#include "crimson/common/log.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
#include "crimson/os/seastore/lba_manager/btree/btree_lba_manager.h"
#include "test/crimson/seastore/test_block.h"
namespace {

// File-local accessor for the "test" subsystem logger; [[maybe_unused]]
// silences warnings in builds where no debug statement references it.
[[maybe_unused]] seastar::logger& logger() {
  return crimson::get_logger(ceph_subsys_test);
}

}
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
using namespace crimson::os::seastore::lba_manager;
using namespace crimson::os::seastore::lba_manager::btree;
// Shared fixture for btree-based LBA tests.  Builds a minimal seastore
// write path (ephemeral segment manager -> segmented journal -> cache) and
// provides stub SegmentProvider/JournalTrimmer implementations so the
// journal can run without a real cleaner.
struct btree_test_base :
  public seastar_test_suite_t, SegmentProvider, JournalTrimmer {

  segment_manager::EphemeralSegmentManagerRef segment_manager;
  SegmentManagerGroupRef sms;
  JournalRef journal;
  ExtentPlacementManagerRef epm;
  CacheRef cache;

  size_t block_size;

  WritePipeline pipeline;

  // Next segment id to hand out, plus bookkeeping of what was allocated.
  segment_id_t next;
  std::map<segment_id_t, segment_seq_t> segment_seqs;
  std::map<segment_id_t, segment_type_t> segment_types;

  // Fixed sequence returned for all head/tail queries: trimming is a no-op
  // in these tests.
  journal_seq_t dummy_tail;

  // Scratch object returned by get_seg_info(); mutable because that
  // accessor is const.
  mutable segment_info_t tmp_info;

  btree_test_base() = default;

  /*
   * JournalTrimmer interfaces (all stubbed: no trimming in these tests)
   */
  journal_seq_t get_journal_head() const final { return dummy_tail; }

  void set_journal_head(journal_seq_t) final {}

  journal_seq_t get_dirty_tail() const final { return dummy_tail; }

  journal_seq_t get_alloc_tail() const final { return dummy_tail; }

  void update_journal_tails(journal_seq_t, journal_seq_t) final {}

  bool try_reserve_inline_usage(std::size_t) final { return true; }

  void release_inline_usage(std::size_t) final {}

  std::size_t get_trim_size_per_cycle() const final {
    return 0;
  }

  /*
   * SegmentProvider interfaces
   */
  // Rebuilds tmp_info from the recorded seq/type on every call; only valid
  // until the next call.
  const segment_info_t& get_seg_info(segment_id_t id) const final {
    tmp_info = {};
    tmp_info.seq = segment_seqs.at(id);
    tmp_info.type = segment_types.at(id);
    return tmp_info;
  }

  // Hands out device segment ids sequentially and records seq/type so
  // get_seg_info() can answer later.
  segment_id_t allocate_segment(
    segment_seq_t seq,
    segment_type_t type,
    data_category_t,
    rewrite_gen_t
  ) final {
    auto ret = next;
    next = segment_id_t{
      segment_manager->get_device_id(),
      next.device_segment_id() + 1};
    segment_seqs[ret] = seq;
    segment_types[ret] = type;
    return ret;
  }

  void close_segment(segment_id_t) final {}

  void update_segment_avail_bytes(segment_type_t, paddr_t) final {}

  void update_modify_time(segment_id_t, sea_time_point, std::size_t) final {}

  SegmentManagerGroup* get_segment_manager_group() final { return sms.get(); }

  // Hook for subclasses to observe a committed transaction.
  virtual void complete_commit(Transaction &t) {}

  // Prepare the transaction's record, write it through the journal, then
  // let the cache (and the subclass hook) complete the commit.
  seastar::future<> submit_transaction(TransactionRef t)
  {
    auto record = cache->prepare_record(*t, JOURNAL_SEQ_NULL, JOURNAL_SEQ_NULL);
    return journal->submit_record(std::move(record), t->get_handle()).safe_then(
      [this, t=std::move(t)](auto submit_result) mutable {
        cache->complete_commit(
            *t,
            submit_result.record_block_base,
            submit_result.write_result.start_seq);
        complete_commit(*t);
      }).handle_error(crimson::ct_error::assert_all{});
  }

  // Subclass hook: create the on-disk structure under test (runs inside the
  // initial mkfs transaction).
  virtual LBAManager::mkfs_ret test_structure_setup(Transaction &t) = 0;

  // mkfs the ephemeral device, wire up journal/epm/cache, then run
  // cache->mkfs plus the subclass setup in one committed transaction.
  seastar::future<> set_up_fut() final {
    segment_manager = segment_manager::create_test_ephemeral();
    return segment_manager->init(
    ).safe_then([this] {
      return segment_manager->mkfs(
        segment_manager::get_ephemeral_device_config(0, 1, 0));
    }).safe_then([this] {
      sms.reset(new SegmentManagerGroup());
      journal = journal::make_segmented(*this, *this);
      epm.reset(new ExtentPlacementManager());
      cache.reset(new Cache(*epm));

      block_size = segment_manager->get_block_size();
      next = segment_id_t{segment_manager->get_device_id(), 0};
      sms->add_segment_manager(segment_manager.get());
      epm->test_init_no_background(segment_manager.get());
      journal->set_write_pipeline(&pipeline);

      return journal->open_for_mkfs().discard_result();
    }).safe_then([this] {
      // Point the dummy trimmer sequence at the start of segment 0.
      dummy_tail = journal_seq_t{0,
        paddr_t::make_seg_paddr(segment_id_t(segment_manager->get_device_id(), 0), 0)};
      return epm->open_for_write();
    }).safe_then([this] {
      return seastar::do_with(
        cache->create_transaction(
            Transaction::src_t::MUTATE, "test_set_up_fut", false),
        [this](auto &ref_t) {
          return with_trans_intr(*ref_t, [&](auto &t) {
            cache->init();
            return cache->mkfs(t
            ).si_then([this, &t] {
              return test_structure_setup(t);
            });
          }).safe_then([this, &ref_t] {
            return submit_transaction(std::move(ref_t));
          });
        });
    }).handle_error(
      crimson::ct_error::all_same_way([] {
        ceph_assert(0 == "error");
      })
    );
  }

  // Subclass hook: drop references created in test_structure_setup().
  virtual void test_structure_reset() {}

  // Close everything in reverse dependency order, then release all refs.
  seastar::future<> tear_down_fut() final {
    return cache->close(
    ).safe_then([this] {
      return journal->close();
    }).safe_then([this] {
      return epm->close();
    }).safe_then([this] {
      test_structure_reset();
      segment_manager.reset();
      sms.reset();
      journal.reset();
      epm.reset();
      cache.reset();
    }).handle_error(
      crimson::ct_error::all_same_way([] {
        ASSERT_FALSE("Unable to close");
      })
    );
  }
};
// Exercises LBABtree directly (without BtreeLBAManager), mirroring every
// btree operation into a std::map (`check`) and comparing the two.
struct lba_btree_test : btree_test_base {
  // Shadow model of the btree contents: laddr -> mapping value.
  std::map<laddr_t, lba_map_val_t> check;

  auto get_op_context(Transaction &t) {
    return op_context_t<laddr_t>{*cache, t};
  }

  // Create an empty LBABtree root inside the mkfs transaction.
  LBAManager::mkfs_ret test_structure_setup(Transaction &t) final {
    return cache->get_root(
      t
    ).si_then([this, &t](RootBlockRef croot) {
      auto mut_croot = cache->duplicate_for_write(
        t, croot
      )->cast<RootBlock>();
      mut_croot->root.lba_root =
        LBABtree::mkfs(mut_croot, get_op_context(t));
    });
  }

  // Run f(btree, t) inside a MUTATE transaction and commit it.
  // Blocks (unsafe_get0) until the transaction has been submitted.
  template <typename F>
  auto lba_btree_update(F &&f) {
    auto tref = cache->create_transaction(
      Transaction::src_t::MUTATE, "test_btree_update", false);
    auto &t = *tref;
    with_trans_intr(
      t,
      [this, tref=std::move(tref), f=std::forward<F>(f)](auto &t) mutable {
        return cache->get_root(
          t
        ).si_then([f=std::move(f), &t](RootBlockRef croot) {
          return seastar::do_with(
            LBABtree(croot),
            [f=std::move(f), &t](auto &btree) mutable {
              return std::invoke(
                std::move(f), btree, t
              );
            });
        }).si_then([this, tref=std::move(tref)]() mutable {
          return submit_transaction(std::move(tref));
        });
      }).unsafe_get0();
  }

  // Run f(btree, t) inside a READ transaction and return its result.
  template <typename F>
  auto lba_btree_read(F &&f) {
    auto t = cache->create_transaction(
      Transaction::src_t::READ, "test_btree_read", false);
    return with_trans_intr(
      *t,
      [this, f=std::forward<F>(f)](auto &t) mutable {
        return cache->get_root(
          t
        ).si_then([f=std::move(f), &t](RootBlockRef croot) mutable {
          return seastar::do_with(
            LBABtree(croot),
            [f=std::move(f), &t](auto &btree) mutable {
              return std::invoke(
                std::move(f), btree, t
              );
            });
        });
      }).unsafe_get0();
  }

  // Mapping value used for all inserts: only the length varies.
  static auto get_map_val(extent_len_t len) {
    return lba_map_val_t{0, P_ADDR_NULL, len, 0};
  }

  // Monotonically increasing fake physical addresses for new extents.
  device_off_t next_off = 0;
  paddr_t get_paddr() {
    next_off += block_size;
    return make_fake_paddr(next_off);
  }

  // Insert addr->len into both the btree and the shadow map; addr must be
  // new.
  void insert(laddr_t addr, extent_len_t len) {
    ceph_assert(check.count(addr) == 0);
    check.emplace(addr, get_map_val(len));
    lba_btree_update([=, this](auto &btree, auto &t) {
      auto extent = cache->alloc_new_extent<TestBlock>(
        t,
        TestBlock::SIZE,
        placement_hint_t::HOT,
        0,
        get_paddr());
      return btree.insert(
        get_op_context(t), addr, get_map_val(len), extent.get()
      ).si_then([addr, extent](auto p){
        auto& [iter, inserted] = p;
        assert(inserted);
        extent->set_laddr(addr);
      });
    });
  }

  // Remove addr from both structures, verifying the btree agrees with the
  // shadow map before removal.
  void remove(laddr_t addr) {
    auto iter = check.find(addr);
    ceph_assert(iter != check.end());
    auto len = iter->second.len;
    check.erase(iter++);
    lba_btree_update([=, this](auto &btree, auto &t) {
      return btree.lower_bound(
        get_op_context(t), addr
      ).si_then([this, len, addr, &btree, &t](auto iter) {
        EXPECT_FALSE(iter.is_end());
        EXPECT_TRUE(iter.get_key() == addr);
        EXPECT_TRUE(iter.get_val().len == len);
        return btree.remove(
          get_op_context(t), iter
        );
      });
    });
  }

  // Compare btree.lower_bound(addr) against the shadow map's lower_bound.
  void check_lower_bound(laddr_t addr) {
    auto iter = check.lower_bound(addr);
    auto result = lba_btree_read([=, this](auto &btree, auto &t) {
      return btree.lower_bound(
        get_op_context(t), addr
      ).si_then([](auto iter)
                -> std::optional<std::pair<const laddr_t, const lba_map_val_t>> {
        if (iter.is_end()) {
          return std::nullopt;
        } else {
          return std::make_optional(
            std::make_pair(iter.get_key(), iter.get_val()));
        }
      });
    });
    if (iter == check.end()) {
      EXPECT_FALSE(result);
    } else {
      EXPECT_TRUE(result);
      decltype(result) to_check = *iter;
      EXPECT_EQ(to_check, *result);
    }
  }
};
// Populate the btree with evenly spaced mappings, then probe lower_bound
// at, between, and just past each key.
TEST_F(lba_btree_test, basic)
{
  run_async([this] {
    constexpr unsigned total = 16<<10;
    constexpr unsigned stride = 16;
    for (unsigned addr = 0; addr < total; addr += stride) {
      insert(addr, 8);
    }

    for (unsigned addr = 0; addr < total; addr += stride) {
      for (unsigned delta : {0u, 4u, 8u, 12u}) {
        check_lower_bound(addr + delta);
      }
    }
  });
}
// Exercises BtreeLBAManager through its public interface, mirroring every
// alloc/incref/decref into a shadow map (test_lba_mappings) and checking
// that the manager agrees with it.
struct btree_lba_manager_test : btree_test_base {
  BtreeLBAManagerRef lba_manager;

  btree_lba_manager_test() = default;

  void complete_commit(Transaction &t) final {}

  // mkfs the LBA manager inside the fixture's initial transaction.
  LBAManager::mkfs_ret test_structure_setup(Transaction &t) final {
    lba_manager.reset(new BtreeLBAManager(*cache));
    return lba_manager->mkfs(t);
  }

  void test_structure_reset() final {
    lba_manager.reset();
  }

  // Shadow record of one allocated extent.
  struct test_extent_t {
    paddr_t addr;
    size_t len = 0;
    unsigned refcount = 0;
  };
  using test_lba_mapping_t = std::map<laddr_t, test_extent_t>;
  // Committed shadow state; swapped in by submit_test_transaction().
  test_lba_mapping_t test_lba_mappings;

  // A transaction paired with its private copy of the shadow state.
  struct test_transaction_t {
    TransactionRef t;
    test_lba_mapping_t mappings;
  };

  // Open a MUTATE transaction seeded with the committed shadow state.
  // By default also allocates a throwaway physical extent so the record
  // is non-empty.
  auto create_transaction(bool create_fake_extent=true) {
    auto t = test_transaction_t{
      cache->create_transaction(
        Transaction::src_t::MUTATE, "test_mutate_lba", false),
      test_lba_mappings
    };
    if (create_fake_extent) {
      cache->alloc_new_extent<TestBlockPhysical>(
        *t.t,
        TestBlockPhysical::SIZE,
        placement_hint_t::HOT,
        0);
    };
    return t;
  }

  // Open a weak READ transaction seeded with the committed shadow state.
  auto create_weak_transaction() {
    auto t = test_transaction_t{
      cache->create_transaction(
        Transaction::src_t::READ, "test_read_weak", true),
      test_lba_mappings
    };
    return t;
  }

  // Commit the transaction and promote its shadow state to "committed".
  void submit_test_transaction(test_transaction_t t) {
    submit_transaction(std::move(t.t)).get();
    test_lba_mappings.swap(t.mappings);
  }

  // Return [bottom, top) iterators over shadow mappings overlapping
  // [addr, addr+len); equal iterators mean no overlap.
  auto get_overlap(test_transaction_t &t, laddr_t addr, size_t len) {
    auto bottom = t.mappings.upper_bound(addr);
    if (bottom != t.mappings.begin())
      --bottom;
    if (bottom != t.mappings.end() &&
        bottom->first + bottom->second.len <= addr)
      ++bottom;

    auto top = t.mappings.lower_bound(addr + len);
    return std::make_pair(
      bottom,
      top
    );
  }

  // Monotonically increasing fake physical addresses for new extents.
  device_off_t next_off = 0;
  paddr_t get_paddr() {
    next_off += block_size;
    return make_fake_paddr(next_off);
  }

  // Allocate a new mapping near `hint`, record it in the shadow map with
  // refcount 1, and verify it does not overlap anything already mapped.
  auto alloc_mapping(
    test_transaction_t &t,
    laddr_t hint,
    size_t len) {
    auto ret = with_trans_intr(
      *t.t,
      [=, this](auto &t) {
        auto extent = cache->alloc_new_extent<TestBlock>(
          t,
          TestBlock::SIZE,
          placement_hint_t::HOT,
          0,
          get_paddr());
        return lba_manager->alloc_extent(
          t, hint, len, extent->get_paddr(), extent.get());
      }).unsafe_get0();
    logger().debug("alloc'd: {}", *ret);
    EXPECT_EQ(len, ret->get_length());
    auto [b, e] = get_overlap(t, ret->get_key(), len);
    EXPECT_EQ(b, e);
    t.mappings.emplace(
      std::make_pair(
        ret->get_key(),
        test_extent_t{
          ret->get_val(),
          ret->get_length(),
          1
        }
      ));
    return ret;
  }

  auto decref_mapping(
    test_transaction_t &t,
    laddr_t addr) {
    return decref_mapping(t, t.mappings.find(addr));
  }

  // Decrement the refcount in both the manager and the shadow map; when it
  // hits zero, retire the backing extent and drop the shadow entry.
  void decref_mapping(
    test_transaction_t &t,
    test_lba_mapping_t::iterator target) {
    ceph_assert(target != t.mappings.end());
    ceph_assert(target->second.refcount > 0);
    target->second.refcount--;
    (void) with_trans_intr(
      *t.t,
      [=, this](auto &t) {
        return lba_manager->decref_extent(
          t,
          target->first
        ).si_then([this, &t, target](auto result) {
          EXPECT_EQ(result.refcount, target->second.refcount);
          if (result.refcount == 0) {
            return cache->retire_extent_addr(t, result.addr, result.length);
          }
          return Cache::retire_extent_iertr::now();
        });
      }).unsafe_get0();
    if (target->second.refcount == 0) {
      t.mappings.erase(target);
    }
  }

  auto incref_mapping(
    test_transaction_t &t,
    laddr_t addr) {
    return incref_mapping(t, t.mappings.find(addr));
  }

  // Increment the refcount in both the manager and the shadow map and
  // verify they agree.
  void incref_mapping(
    test_transaction_t &t,
    test_lba_mapping_t::iterator target) {
    ceph_assert(target->second.refcount > 0);
    target->second.refcount++;
    auto refcnt = with_trans_intr(
      *t.t,
      [=, this](auto &t) {
        return lba_manager->incref_extent(
          t,
          target->first);
      }).unsafe_get0().refcount;
    EXPECT_EQ(refcnt, target->second.refcount);
  }

  // Snapshot of the committed shadow map's keys.
  std::vector<laddr_t> get_mapped_addresses() {
    std::vector<laddr_t> addresses;
    addresses.reserve(test_lba_mappings.size());
    for (auto &i: test_lba_mappings) {
      addresses.push_back(i.first);
    }
    return addresses;
  }

  // Snapshot of a transaction's in-flight shadow map's keys.
  std::vector<laddr_t> get_mapped_addresses(test_transaction_t &t) {
    std::vector<laddr_t> addresses;
    addresses.reserve(t.mappings.size());
    for (auto &i: t.mappings) {
      addresses.push_back(i.first);
    }
    return addresses;
  }

  // Verify the committed state as seen by a fresh transaction.
  void check_mappings() {
    auto t = create_transaction();
    check_mappings(t);
  }

  // Cross-check every shadow entry against get_mappings()/get_mapping(),
  // then scan the whole range and verify ordering and contents match.
  void check_mappings(test_transaction_t &t) {
    (void)with_trans_intr(
      *t.t,
      [=, this](auto &t) {
        return lba_manager->check_child_trackers(t);
      }).unsafe_get0();
    for (auto &&i: t.mappings) {
      auto laddr = i.first;
      auto len = i.second.len;

      auto ret_list = with_trans_intr(
        *t.t,
        [=, this](auto &t) {
          return lba_manager->get_mappings(
            t, laddr, len);
        }).unsafe_get0();
      EXPECT_EQ(ret_list.size(), 1);
      auto &ret = *ret_list.begin();
      EXPECT_EQ(i.second.addr, ret->get_val());
      EXPECT_EQ(laddr, ret->get_key());
      EXPECT_EQ(len, ret->get_length());

      auto ret_pin = with_trans_intr(
        *t.t,
        [=, this](auto &t) {
          return lba_manager->get_mapping(
            t, laddr);
        }).unsafe_get0();
      EXPECT_EQ(i.second.addr, ret_pin->get_val());
      EXPECT_EQ(laddr, ret_pin->get_key());
      EXPECT_EQ(len, ret_pin->get_length());
    }
    with_trans_intr(
      *t.t,
      [=, &t, this](auto &) {
        return lba_manager->scan_mappings(
          *t.t,
          0,
          L_ADDR_MAX,
          [iter=t.mappings.begin(), &t](auto l, auto p, auto len) mutable {
            EXPECT_NE(iter, t.mappings.end());
            EXPECT_EQ(l, iter->first);
            EXPECT_EQ(p, iter->second.addr);
            EXPECT_EQ(len, iter->second.len);
            ++iter;
          });
      }).unsafe_get();
  }
};
// Allocate a single mapping and verify visibility before and after commit.
TEST_F(btree_lba_manager_test, basic)
{
  run_async([this] {
    laddr_t target = 0x12345678 * block_size;
    {
      auto txn = create_transaction();
      // Both the open transaction and a concurrent one start empty.
      check_mappings(txn);
      check_mappings();
      auto mapping = alloc_mapping(txn, target, block_size);
      submit_test_transaction(std::move(txn));
    }
    // After commit, a fresh transaction must observe the new mapping.
    check_mappings();
  });
}
// Allocate enough mappings (40 transactions x 5 allocations) to force
// btree node splits, verifying consistency periodically and after every
// commit.
TEST_F(btree_lba_manager_test, force_split)
{
  run_async([this] {
    constexpr unsigned num_txns = 40;
    constexpr unsigned allocs_per_txn = 5;
    for (unsigned txn_idx = 0; txn_idx < num_txns; ++txn_idx) {
      auto txn = create_transaction();
      logger().debug("opened transaction");
      for (unsigned alloc_idx = 0; alloc_idx < allocs_per_txn; ++alloc_idx) {
        auto mapping = alloc_mapping(txn, 0, block_size);
        const bool verify = (txn_idx % 10 == 0) && (alloc_idx == 3);
        if (verify) {
          check_mappings(txn);
          check_mappings();
        }
      }
      logger().debug("submitting transaction");
      submit_test_transaction(std::move(txn));
      check_mappings();
    }
  });
}
// Grow the tree enough to split, then decref mappings back out to force
// merges, checking manager/shadow agreement along the way.
TEST_F(btree_lba_manager_test, force_split_merge)
{
  run_async([this] {
    // Phase 1: 80 transactions x 5 allocations, each briefly increffed and
    // decreffed back to 1.
    for (unsigned i = 0; i < 80; ++i) {
      auto t = create_transaction();
      logger().debug("opened transaction");
      for (unsigned j = 0; j < 5; ++j) {
        auto ret = alloc_mapping(t, 0, block_size);
        // just to speed things up a bit
        if ((i % 100 == 0) && (j == 3)) {
          check_mappings(t);
          check_mappings();
        }
        incref_mapping(t, ret->get_key());
        decref_mapping(t, ret->get_key());
      }
      logger().debug("submitting transaction");
      submit_test_transaction(std::move(t));
      if (i % 50 == 0) {
        check_mappings();
      }
    }
    // Phase 2: release every other mapping (incref then decref twice nets
    // -1), committing every 7th step and spot-checking every 13th.
    {
      auto addresses = get_mapped_addresses();
      auto t = create_transaction();
      for (unsigned i = 0; i != addresses.size(); ++i) {
        if (i % 2 == 0) {
          incref_mapping(t, addresses[i]);
          decref_mapping(t, addresses[i]);
          decref_mapping(t, addresses[i]);
        }
        logger().debug("submitting transaction");
        if (i % 7 == 0) {
          submit_test_transaction(std::move(t));
          t = create_transaction();
        }
        if (i % 13 == 0) {
          check_mappings();
          check_mappings(t);
        }
      }
      submit_test_transaction(std::move(t));
    }
    // Phase 3: release everything that remains in a single transaction.
    {
      auto addresses = get_mapped_addresses();
      auto t = create_transaction();
      for (unsigned i = 0; i != addresses.size(); ++i) {
        incref_mapping(t, addresses[i]);
        decref_mapping(t, addresses[i]);
        decref_mapping(t, addresses[i]);
      }
      check_mappings(t);
      submit_test_transaction(std::move(t));
      check_mappings();
    }
  });
}
// Exercise splits and merges that happen entirely within one transaction.
TEST_F(btree_lba_manager_test, single_transaction_split_merge)
{
  run_async([this] {
    // Split: 400 allocations in one transaction.
    {
      auto t = create_transaction();
      for (unsigned i = 0; i < 400; ++i) {
        alloc_mapping(t, 0, block_size);
      }
      check_mappings(t);
      submit_test_transaction(std::move(t));
    }
    check_mappings();

    // Merge: drop three quarters of the mappings in one transaction.
    {
      auto addresses = get_mapped_addresses();
      auto t = create_transaction();
      for (unsigned i = 0; i != addresses.size(); ++i) {
        if (i % 4 != 0) {
          decref_mapping(t, addresses[i]);
        }
      }
      check_mappings(t);
      submit_test_transaction(std::move(t));
    }
    check_mappings();

    // Split then merge within the same transaction: allocate 600 mappings
    // and immediately release everything the transaction can see.
    {
      auto t = create_transaction();
      for (unsigned i = 0; i < 600; ++i) {
        alloc_mapping(t, 0, block_size);
      }
      auto addresses = get_mapped_addresses(t);
      for (unsigned i = 0; i != addresses.size(); ++i) {
        decref_mapping(t, addresses[i]);
      }
      check_mappings(t);
      submit_test_transaction(std::move(t));
    }
    check_mappings();
  });
}
// Drive many small transactions (1024 x 5 operations) through alloc /
// partial-release / re-alloc / full-release cycles at fixed addresses.
TEST_F(btree_lba_manager_test, split_merge_multi)
{
  run_async([this] {
    // Apply f(t, idx) for idx in [0, 5120), five operations per
    // transaction; create_transaction(false) skips the fake extent so the
    // record holds only LBA mutations.
    auto iterate = [&](auto f) {
      for (uint64_t i = 0; i < (1<<10); ++i) {
        auto t = create_transaction(false);
        logger().debug("opened transaction");
        for (unsigned j = 0; j < 5; ++j) {
          f(t, (i * 5) + j);
        }
        logger().debug("submitting transaction");
        submit_test_transaction(std::move(t));
      }
    };
    // Allocate one block-sized mapping per index.
    iterate([&](auto &t, auto idx) {
      alloc_mapping(t, idx * block_size, block_size);
    });
    check_mappings();
    // Release all but every 32nd mapping.
    iterate([&](auto &t, auto idx) {
      if ((idx % 32) > 0) {
        decref_mapping(t, idx * block_size);
      }
    });
    check_mappings();
    // Re-allocate the released mappings.
    iterate([&](auto &t, auto idx) {
      if ((idx % 32) > 0) {
        alloc_mapping(t, idx * block_size, block_size);
      }
    });
    check_mappings();
    // Release everything.
    iterate([&](auto &t, auto idx) {
      decref_mapping(t, idx * block_size);
    });
    check_mappings();
  });
}
| 19,787 | 25.348868 | 87 | cc |
null | ceph-main/src/test/crimson/seastore/test_cbjournal.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/crimson/gtest_seastar.h"
#include <random>
#include "crimson/common/log.h"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/journal/circular_bounded_journal.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
#include "crimson/os/seastore/seastore_types.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
#include "crimson/os/seastore/random_block_manager/block_rb_manager.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
using namespace crimson::os::seastore::journal;
namespace {

// File-local accessor for the "test" subsystem logger; [[maybe_unused]]
// silences warnings in builds where no debug statement references it.
[[maybe_unused]] seastar::logger& logger() {
  return crimson::get_logger(ceph_subsys_test);
}

}
// Decode a serialized record group back into a record_t: first the group
// header, then all deltas from the metadata area, then the extent payloads
// that follow at offset mdlength.
std::optional<record_t> decode_record(
  bufferlist& bl)
{
  record_t record;
  record_group_header_t r_header;
  auto bliter = bl.cbegin();
  decode(r_header, bliter);
  logger().debug(" decode_record mdlength {} records {}",
                 r_header.mdlength, r_header.records);
  // Fabricated device id with only the top bit set; the base paddr is only
  // needed to satisfy try_decode_deltas' interface here.
  device_id_t d_id = 1 << (std::numeric_limits<device_id_t>::digits - 1);

  auto del_infos = try_decode_deltas(r_header, bl,
    paddr_t::make_blk_paddr(d_id, 0));
  for (auto &iter : *del_infos) {
    for (auto r : iter.deltas) {
      record.deltas.push_back(r.second);
    }
  }
  auto ex_infos = try_decode_extent_infos(r_header, bl);
  // Extent data starts right after the metadata area.
  auto bliter_ex = bl.cbegin();
  bliter_ex += r_header.mdlength;
  for (auto &iter: *ex_infos) {
    for (auto e : iter.extent_infos) {
      extent_t ex;
      auto bptr = bufferptr(ceph::buffer::create_page_aligned(e.len));
      logger().debug(" exten len {} remaining {} ", e.len, bliter_ex.get_remaining());
      bliter_ex.copy(e.len, bptr.c_str());
      ex.bl.append(bptr);
      record.extents.push_back(ex);
    }
  }
  return record;
}
// Remembers a submitted record (and where it was written) so its content
// can be checked against what is later read back from the journal.
struct entry_validator_t {
  bufferlist bl;
  int entries;           // number of on-disk record groups at `addr`
  journal_seq_t last_seq;
  record_t record;       // the record as originally submitted
  rbm_abs_addr addr = 0; // absolute address the record was written to

  template <typename... T>
  entry_validator_t(T&&... entry) : record(std::forward<T>(entry)...) {}

  // Compare a decoded record against the submitted one: every extent and
  // delta must match in length and crc32c.
  void validate(record_t read) {
    auto iter = read.extents.begin();
    for (auto &&block : record.extents) {
      ASSERT_EQ(
        iter->bl.length(),
        block.bl.length());
      ASSERT_EQ(
        iter->bl.begin().crc32c(iter->bl.length(), 1),
        block.bl.begin().crc32c(block.bl.length(), 1));
      ++iter;
    }
    auto iter_delta = read.deltas.begin();
    for (auto &&block : record.deltas) {
      ASSERT_EQ(
        iter_delta->bl.length(),
        block.bl.length());
      ASSERT_EQ(
        iter_delta->bl.begin().crc32c(iter_delta->bl.length(), 1),
        block.bl.begin().crc32c(block.bl.length(), 1));
      ++iter_delta;
    }
  }

  // Read `entries` consecutive record groups starting at `addr` back from
  // the journal, decode each, and validate it against the original.
  void validate(CircularBoundedJournal &cbj) {
    rbm_abs_addr offset = 0;
    for (int i = 0; i < entries; i++) {
      paddr_t paddr = convert_abs_addr_to_paddr(
        addr + offset,
        cbj.get_device_id());
      auto [header, buf] = *(cbj.read_record(paddr, NULL_SEG_SEQ).unsafe_get0());
      auto record = decode_record(buf);
      validate(*record);
      offset += header.mdlength + header.dlength;
    }
  }

  // True if `bl` matches (by crc32c) any delta of the submitted record.
  bool validate_delta(bufferlist bl) {
    for (auto &&block : record.deltas) {
      if (bl.begin().crc32c(bl.length(), 1) ==
          block.bl.begin().crc32c(block.bl.length(), 1)) {
        return true;
      }
    }
    return false;
  }
};
struct cbjournal_test_t : public seastar_test_suite_t, JournalTrimmer
{
std::vector<entry_validator_t> entries;
std::unique_ptr<CircularBoundedJournal> cbj;
random_block_device::EphemeralRBMDeviceRef device;
std::default_random_engine generator;
uint64_t block_size;
WritePipeline pipeline;
cbjournal_test_t() = default;
/*
* JournalTrimmer interfaces
*/
journal_seq_t get_journal_head() const {
return JOURNAL_SEQ_NULL;
}
journal_seq_t get_dirty_tail() const final {
return JOURNAL_SEQ_NULL;
}
journal_seq_t get_alloc_tail() const final {
return JOURNAL_SEQ_NULL;
}
void set_journal_head(journal_seq_t head) final {}
void update_journal_tails(
journal_seq_t dirty_tail,
journal_seq_t alloc_tail) final {}
bool try_reserve_inline_usage(std::size_t) final { return true; }
void release_inline_usage(std::size_t) final {}
std::size_t get_trim_size_per_cycle() const final {
return 0;
}
auto submit_record(record_t&& record) {
entries.push_back(record);
OrderingHandle handle = get_dummy_ordering_handle();
auto [addr, w_result] = cbj->submit_record(
std::move(record),
handle).unsafe_get0();
entries.back().addr =
convert_paddr_to_abs_addr(w_result.start_seq.offset);
entries.back().entries = 1;
logger().debug("submit entry to addr {}", entries.back().addr);
return entries.back().addr;
}
seastar::future<> tear_down_fut() final {
return close();
}
extent_t generate_extent(size_t blocks) {
std::uniform_int_distribution<char> distribution(
std::numeric_limits<char>::min(),
std::numeric_limits<char>::max()
);
char contents = distribution(generator);
bufferlist bl;
bl.append(buffer::ptr(buffer::create(blocks * block_size, contents)));
return extent_t{extent_types_t::TEST_BLOCK, L_ADDR_NULL, bl};
}
delta_info_t generate_delta(size_t bytes) {
std::uniform_int_distribution<char> distribution(
std::numeric_limits<char>::min(),
std::numeric_limits<char>::max()
);
char contents = distribution(generator);
bufferlist bl;
bl.append(buffer::ptr(buffer::create(bytes, contents)));
return delta_info_t{
extent_types_t::TEST_BLOCK,
paddr_t{},
L_ADDR_NULL,
0, 0,
device->get_block_size(),
1,
0,
segment_type_t::JOURNAL,
bl
};
}
auto replay_and_check() {
for (auto &i : entries) {
i.validate(*(cbj.get()));
}
}
auto replay() {
return cbj->replay(
[this](const auto &offsets,
const auto &e,
auto &dirty_seq,
auto &alloc_seq,
auto last_modified) {
bool found = false;
for (auto &i : entries) {
paddr_t base = offsets.write_result.start_seq.offset;
rbm_abs_addr addr = convert_paddr_to_abs_addr(base);
if (addr == i.addr) {
logger().debug(" compare addr: {} and i.addr {} ", base, i.addr);
found = i.validate_delta(e.bl);
break;
}
}
assert(found == true);
return Journal::replay_ertr::make_ready_future<bool>(true);
});
}
auto mkfs() {
device_config_t config = get_rbm_ephemeral_device_config(0, 1);
return device->mkfs(config
).safe_then([this]() {
return device->mount(
).safe_then([this]() {
return cbj->open_for_mkfs(
).safe_then([](auto q) {
return seastar::now();
});
});
}).safe_then([this] {
return cbj->close();
});
}
auto open() {
return cbj->open_for_mount(
).safe_then([](auto q) {
return seastar::now();
});
}
seastar::future<> close() {
return cbj->close().handle_error(crimson::ct_error::assert_all{});
}
auto get_records_available_size() {
return cbj->get_cjs().get_records_available_size();
}
auto get_records_total_size() {
return cbj->get_cjs().get_records_total_size();
}
auto get_block_size() {
return device->get_block_size();
}
auto get_written_to_rbm_addr() {
return cbj->get_rbm_addr(cbj->get_cjs().get_written_to());
}
auto get_written_to() {
return cbj->get_cjs().get_written_to();
}
auto get_journal_tail() {
return cbj->get_dirty_tail();
}
auto get_records_used_size() {
return cbj->get_cjs().get_records_used_size();
}
bool is_available_size(uint64_t size) {
return cbj->get_cjs().is_available_size(size);
}
void update_journal_tail(rbm_abs_addr addr, uint32_t len) {
paddr_t paddr =
convert_abs_addr_to_paddr(
addr + len,
cbj->get_device_id());
journal_seq_t seq = {0, paddr};
cbj->update_journal_tail(
seq,
seq
).get0();
}
void set_written_to(journal_seq_t seq) {
cbj->set_written_to(seq);
}
seastar::future<> set_up_fut() final {
device = random_block_device::create_test_ephemeral(
random_block_device::DEFAULT_TEST_CBJOURNAL_SIZE, 0);
cbj.reset(new CircularBoundedJournal(*this, device.get(), std::string()));
block_size = device->get_block_size();
cbj->set_write_pipeline(&pipeline);
return mkfs(
).safe_then([this] {
return replay(
).safe_then([this] {
return open();
});
}).handle_error(crimson::ct_error::assert_all{});
}
};
// Write a single record and confirm it reads back intact.
TEST_F(cbjournal_test_t, submit_one_record)
{
  run_async([this] {
    record_t rec{
      { generate_extent(1), generate_extent(2) },
      { generate_delta(3), generate_delta(4) }
    };
    submit_record(std::move(rec));
    replay_and_check();
  });
}
// Write three records of varying extent/delta sizes and confirm they all
// read back intact.
TEST_F(cbjournal_test_t, submit_three_records)
{
  run_async([this] {
    const size_t extent_a[] = {1, 8, 5};
    const size_t extent_b[] = {2, 9, 6};
    const size_t delta_a[]  = {3, 20, 200};
    const size_t delta_b[]  = {4, 21, 210};
    for (int i = 0; i < 3; ++i) {
      submit_record(
        record_t{
          { generate_extent(extent_a[i]), generate_extent(extent_b[i]) },
          { generate_delta(delta_a[i]), generate_delta(delta_b[i]) }
        });
    }
    replay_and_check();
  });
}
// Fill the journal completely, free all of it by advancing the tail, then
// fill it again to exercise wrap-around.
TEST_F(cbjournal_test_t, submit_full_records)
{
  run_async([this] {
    record_t rec {
      { generate_extent(1), generate_extent(2) },
      { generate_delta(20), generate_delta(21) }
    };
    auto r_size = record_group_size_t(rec.size, block_size);
    auto record_total_size = r_size.get_encoded_length();

    // Fill until no room remains for another record of this size.
    submit_record(std::move(rec));
    while (is_available_size(record_total_size)) {
      submit_record(
        record_t {
          { generate_extent(1), generate_extent(2) },
          { generate_delta(20), generate_delta(21) }
        });
    }

    // Advancing the tail past the last record frees the whole space.
    update_journal_tail(entries.back().addr, record_total_size);
    ASSERT_EQ(get_records_total_size(),
              get_records_available_size());

    // will be appended at the beginning of log
    submit_record(
      record_t {
        { generate_extent(1), generate_extent(2) },
        { generate_delta(20), generate_delta(21) }
      });

    while (is_available_size(record_total_size)) {
      submit_record(
        record_t {
          { generate_extent(1), generate_extent(2) },
          { generate_delta(20), generate_delta(21) }
        });
    }
    // Journal must be full again.
    ASSERT_TRUE(record_total_size > get_records_available_size());
  });
}
// Verify space accounting at the wrap-around boundary: free two records at
// the head, write one more (which wraps), then replay everything.
// NOTE(review): the test name "boudary" is a typo for "boundary", but it is
// the test's external identity (e.g. for --gtest_filter), so it is kept.
TEST_F(cbjournal_test_t, boudary_check_verify)
{
  run_async([this] {
    record_t rec {
      { generate_extent(1), generate_extent(2) },
      { generate_delta(20), generate_delta(21) }
    };
    auto r_size = record_group_size_t(rec.size, block_size);
    auto record_total_size = r_size.get_encoded_length();
    submit_record(std::move(rec));
    // Fill the journal completely.
    while (is_available_size(record_total_size)) {
      submit_record(
        record_t {
          { generate_extent(1), generate_extent(2) },
          { generate_delta(20), generate_delta(21) }
        });
    }

    uint64_t avail = get_records_available_size();
    // forward 2 record sizes here because 1 block is reserved between head and tail
    update_journal_tail(entries.front().addr, record_total_size * 2);
    entries.erase(entries.begin());
    entries.erase(entries.begin());
    ASSERT_EQ(avail + (record_total_size * 2), get_records_available_size());
    avail = get_records_available_size();
    // will be appended at the beginning of WAL
    submit_record(
      record_t {
        { generate_extent(1), generate_extent(2) },
        { generate_delta(20), generate_delta(21) }
      });
    ASSERT_TRUE(avail - record_total_size >= get_records_available_size());
    replay_and_check();
  });
}
// Verify that an explicitly rewritten header survives close() + replay().
TEST_F(cbjournal_test_t, update_header)
{
  run_async([this] {
    // snapshot the header before any mutation
    auto [header, _buf] = *(cbj->get_cjs().read_header().unsafe_get0());
    record_t rec {
      { generate_extent(1), generate_extent(2) },
      { generate_delta(20), generate_delta(21) }
    };
    auto r_size = record_group_size_t(rec.size, block_size);
    auto record_total_size = r_size.get_encoded_length();
    submit_record(std::move(rec));
    update_journal_tail(entries.front().addr, record_total_size);
    cbj->get_cjs().write_header().unsafe_get0();
    auto [update_header, update_buf2] = *(cbj->get_cjs().read_header().unsafe_get0());
    cbj->close().unsafe_get0();
    replay().unsafe_get0();
    // NOTE(review): this assertion compares a value with itself and can
    // never fail; it was presumably meant to compare the pre-update
    // `header` (or the post-replay state) against `update_header` --
    // confirm the intended operands before relying on this test.
    ASSERT_EQ(update_header.dirty_tail.offset, update_header.dirty_tail.offset);
  });
}
// Fill the journal, release and reuse space across the wrap-around
// point, then verify the journal replays after a clean close.
TEST_F(cbjournal_test_t, replay)
{
  run_async([this] {
    record_t rec {
      { generate_extent(1), generate_extent(2) },
      { generate_delta(20), generate_delta(21) }
    };
    auto r_size = record_group_size_t(rec.size, block_size);
    auto record_total_size = r_size.get_encoded_length();
    submit_record(std::move(rec));
    // fill the journal with identically sized records
    while (is_available_size(record_total_size)) {
      submit_record(
        record_t {
          { generate_extent(1), generate_extent(2) },
          { generate_delta(20), generate_delta(21) }
        });
    }
    // will be appended at the beginning of WAL
    uint64_t avail = get_records_available_size();
    update_journal_tail(entries.front().addr, record_total_size * 2);
    entries.erase(entries.begin());
    entries.erase(entries.begin());
    ASSERT_EQ(avail + (record_total_size * 2), get_records_available_size());
    avail = get_records_available_size();
    submit_record(
      record_t {
        { generate_extent(1), generate_extent(2) },
        { generate_delta(20), generate_delta(21) }
      });
    ASSERT_TRUE(avail - record_total_size >= get_records_available_size());
    cbj->close().unsafe_get0();
    replay().unsafe_get0();
  });
}
// Rewind written_to to the start of the record area and verify that
// replay reconstructs the pre-reset write position and used size.
TEST_F(cbjournal_test_t, replay_after_reset)
{
  run_async([this] {
    record_t rec {
      { generate_extent(1), generate_extent(2) },
      { generate_delta(20), generate_delta(21) }
    };
    auto r_size = record_group_size_t(rec.size, block_size);
    auto record_total_size = r_size.get_encoded_length();
    submit_record(std::move(rec));
    while (is_available_size(record_total_size)) {
      submit_record(
        record_t {
          { generate_extent(1), generate_extent(2) },
          { generate_delta(20), generate_delta(21) }
        });
    }
    auto old_written_to = get_written_to();
    auto old_used_size = get_records_used_size();
    // point written_to back at the first record position
    set_written_to(
      journal_seq_t{0,
        convert_abs_addr_to_paddr(
          cbj->get_records_start(),
          cbj->get_device_id())});
    cbj->close().unsafe_get0();
    replay().unsafe_get0();
    ASSERT_EQ(old_written_to, get_written_to());
    ASSERT_EQ(old_used_size,
      get_records_used_size());
  });
}
| 14,649 | 27.391473 | 86 | cc |
null | ceph-main/src/test/crimson/seastore/test_collection_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "os/ObjectStore.h"
#include "test/crimson/gtest_seastar.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/collection_manager.h"
#include "test/crimson/seastore/test_block.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
// File-local accessor for the test subsystem logger.
namespace {
  [[maybe_unused]] seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_test);
  }
}
// Test-only convenience: defines a member METHOD(root, t, args...) that
// forwards to collection_manager->METHOD inside a with_trans_intr
// context and blocks for the result via unsafe_get0().
#define TEST_COLL_FORWARD(METHOD) \
  template <typename... Args> \
  auto METHOD(coll_root_t &root, Transaction &t, Args&&... args) const { \
    return with_trans_intr( \
      t, \
      [this](auto &t, auto &root, auto&&... args) { \
	return collection_manager->METHOD( \
	  root, \
	  t, \
	  std::forward<decltype(args)>(args)...); \
      }, \
      root, \
      std::forward<Args>(args)...).unsafe_get0(); \
  }
// Fixture wiring a CollectionManager on top of the shared
// transaction-manager test state; every mutation is mirrored in
// test_coll_mappings so on-disk state can be cross-checked.
struct collection_manager_test_t :
  public seastar_test_suite_t,
  TMTestState {
  CollectionManagerRef collection_manager;
  collection_manager_test_t() {}
  seastar::future<> set_up_fut() final {
    return tm_setup().then([this] {
      collection_manager = collection_manager::create_coll_manager(*tm);
      return seastar::now();
    });
  }
  seastar::future<> tear_down_fut() final {
    return tm_teardown().then([this] {
      collection_manager.reset();
      return seastar::now();
    });
  }
  // in-memory model of the collections expected to be on disk
  using test_collection_t = std::map<coll_t, coll_info_t>;
  test_collection_t test_coll_mappings;
  // simulate a restart: rebuild the TM and a fresh collection manager
  void replay() {
    restart();
    collection_manager = collection_manager::create_coll_manager(*tm);
  }
  // mkfs a fresh collection root in its own transaction
  auto get_root() {
    auto tref = create_mutate_transaction();
    auto coll_root = with_trans_intr(
      *tref,
      [this](auto &t) {
	return collection_manager->mkfs(t);
      }).unsafe_get0();
    submit_transaction(std::move(tref));
    return coll_root;
  }
  TEST_COLL_FORWARD(remove)
  TEST_COLL_FORWARD(list)
  TEST_COLL_FORWARD(create)
  TEST_COLL_FORWARD(update)
  // assert that the on-disk listing matches test_coll_mappings exactly
  void checking_mappings(coll_root_t &coll_root, Transaction &t) {
    auto coll_list = list(coll_root, t);
    EXPECT_EQ(test_coll_mappings.size(), coll_list.size());
    for (std::pair<coll_t, coll_info_t> p : test_coll_mappings) {
      EXPECT_NE(
	std::find(coll_list.begin(), coll_list.end(), p),
	coll_list.end());
    }
  }
  void checking_mappings(coll_root_t &coll_root) {
    auto t = create_read_transaction();
    checking_mappings(coll_root, *t);
  }
};
// Create 20 collections, verify them across a replay, remove them all,
// and verify the listing is empty afterwards.
TEST_F(collection_manager_test_t, basic)
{
  run_async([this] {
    coll_root_t coll_root = get_root();
    {
      auto t = create_mutate_transaction();
      for (int i = 0; i < 20; i++) {
	coll_t cid(spg_t(pg_t(i+1,i+2), shard_id_t::NO_SHARD));
	create(coll_root, *t, cid, coll_info_t(i));
	test_coll_mappings.emplace(cid, coll_info_t(i));
      }
      checking_mappings(coll_root, *t);
      submit_transaction(std::move(t));
      EXPECT_EQ(test_coll_mappings.size(), 20);
    }
    replay();
    checking_mappings(coll_root);
    {
      auto t = create_mutate_transaction();
      for (auto iter = test_coll_mappings.begin();
	   iter != test_coll_mappings.end();) {
	remove(coll_root, *t, iter->first);
	iter = test_coll_mappings.erase(iter);
      }
      submit_transaction(std::move(t));
    }
    replay();
    {
      auto t = create_mutate_transaction();
      auto list_ret = list(coll_root, *t);
      submit_transaction(std::move(t));
      EXPECT_EQ(list_ret.size(), test_coll_mappings.size());
    }
  });
}
// Insert enough collections to force the root node to overflow and be
// relocated; the root's location must change and contents must survive
// a replay.
TEST_F(collection_manager_test_t, overflow)
{
  run_async([this] {
    coll_root_t coll_root = get_root();
    auto old_location = coll_root.get_location();
    auto t = create_mutate_transaction();
    for (int i = 0; i < 412; i++) {
      coll_t cid(spg_t(pg_t(i+1,i+2), shard_id_t::NO_SHARD));
      create(coll_root, *t, cid, coll_info_t(i));
      test_coll_mappings.emplace(cid, coll_info_t(i));
    }
    submit_transaction(std::move(t));
    EXPECT_NE(old_location, coll_root.get_location());
    checking_mappings(coll_root);
    replay();
    checking_mappings(coll_root);
  });
}
// Update one collection's info to match another's and verify the change
// persists across a replay.
TEST_F(collection_manager_test_t, update)
{
  run_async([this] {
    coll_root_t coll_root = get_root();
    {
      auto t = create_mutate_transaction();
      for (int i = 0; i < 2; i++) {
	coll_t cid(spg_t(pg_t(1,i+1), shard_id_t::NO_SHARD));
	create(coll_root, *t, cid, coll_info_t(i));
	test_coll_mappings.emplace(cid, coll_info_t(i));
      }
      submit_transaction(std::move(t));
    }
    {
      auto iter1= test_coll_mappings.begin();
      auto iter2 = std::next(test_coll_mappings.begin(), 1);
      // the two collections start with distinct split_bits (0 and 1)
      EXPECT_NE(iter1->second.split_bits, iter2->second.split_bits);
      auto t = create_mutate_transaction();
      update(coll_root, *t, iter1->first, iter2->second);
      submit_transaction(std::move(t));
      // keep the in-memory model in sync with the on-disk update
      iter1->second.split_bits = iter2->second.split_bits;
    }
    replay();
    checking_mappings(coll_root);
  });
}
null | ceph-main/src/test/crimson/seastore/test_extent_allocator.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <random>
#include <boost/iterator/counting_iterator.hpp>
#include "test/crimson/gtest_seastar.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/extent_allocator.h"
#include "crimson/os/seastore/random_block_manager/avlallocator.h"
#include "include/interval_set.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
// File-local accessor for the test subsystem logger.
namespace {
  [[maybe_unused]] seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_test);
  }
}
// Fixture parameterized over the allocator backend name; currently only
// "avl" is implemented.  Provides thin wrappers around the
// ExtentAllocator under test.
struct allocator_test_t :
  public seastar_test_suite_t,
  ::testing::WithParamInterface<const char*> {
  std::random_device rd;
  std::mt19937 gen;
  ExtentAllocatorRef allocator;
  allocator_test_t()
    : gen(rd()) {}
  seastar::future<> set_up_fut() final {
    std::string a_type = GetParam();
    if (a_type == "avl") {
      allocator.reset(new AvlAllocator(false));
      return seastar::now();
    }
    // unknown backend name: abort the test run
    ceph_assert(0 == "no support");
  }
  seastar::future<> tear_down_fut() final {
    if (allocator) {
      allocator->close();
    }
    return seastar::now();
  }
  void init_alloc(uint64_t block_size, uint64_t total_size) {
    assert(allocator);
    allocator->init(0, total_size, block_size);
  }
  void close() {
    assert(allocator);
    allocator->close();
  }
  auto allocate(size_t size) {
    return allocator->alloc_extent(size);
  }
  void free(uint64_t start, uint64_t length) {
    allocator->free_extent(start, length);
  }
  // random block-aligned address in [0, capacity)
  rbm_abs_addr get_random_addr(size_t block_size, size_t capacity) {
    return block_size *
      std::uniform_int_distribution<>(0, (capacity / block_size) - 1)(gen);
  }
};
// init/close the allocator with several geometries and check the
// available size accounting after an allocation.
TEST_P(allocator_test_t, test_alloc_init)
{
  init_alloc(4096, 4096 * 64);
  ASSERT_EQ((4096 * 64), allocator->get_available_size());
  close();
  init_alloc(8192, 8192 * 32);
  allocate(8192);
  ASSERT_EQ(8192 * 32 - 8192, allocator->get_available_size());
  close();
  init_alloc(4096, 4096 * 128);
  allocate(8192);
  ASSERT_EQ(4096 * 128 - 8192, allocator->get_available_size());
}
// Allocate the maximum single extent, free it, and check the available
// size returns to its initial value.
TEST_P(allocator_test_t, test_init_alloc_free)
{
  uint64_t block_size = 4096;
  uint64_t capacity = 4 * 1024 * block_size;
  {
    init_alloc(block_size, capacity);
    auto free_length = allocator->get_available_size();
    allocate(allocator->get_max_alloc_size());
    ASSERT_EQ(free_length - allocator->get_max_alloc_size(),
	      allocator->get_available_size());
    free(0, allocator->get_max_alloc_size());
    ASSERT_EQ(free_length, allocator->get_available_size());
  }
}
// Fragment the space so that no single contiguous run can satisfy the
// request; alloc_extent must return an empty optional.
TEST_P(allocator_test_t, test_alloc_failure)
{
  uint64_t block_size = 8192;
  uint64_t capacity = 1024 * block_size;
  {
    init_alloc(block_size, capacity);
    // occupy the first and third quarters, leaving two disjoint halves
    allocator->mark_extent_used(0, block_size * 256);
    allocator->mark_extent_used(block_size * 512, block_size * 256);
    auto result = allocate(block_size * 512);
    ASSERT_EQ(false, result.has_value());
    free(0, block_size * 256);
    allocator->mark_extent_used(0, block_size * 512);
    result = allocate(block_size * 512);
    ASSERT_EQ(false, result.has_value());
  }
}
// Randomly mark extents used, free them all, then interleave frees and
// new marks, cross-checking available-size accounting against an
// interval_set model at every step.
TEST_P(allocator_test_t, test_random_alloc_verify)
{
  uint64_t block_size = 4096;
  uint64_t capacity = 64 * 1024 * block_size;
  uint64_t avail = capacity;
  interval_set<rbm_abs_addr> alloc_map;
  init_alloc(block_size, capacity);
  {
    // mark random, non-overlapping extents used and mirror them in alloc_map
    for (int i = 0; i < 256; i++) {
      auto addr = get_random_addr(block_size, capacity);
      auto size = get_random_addr(block_size, capacity) % (4 << 20);
      if (addr + size > capacity || size == 0 ||
	  alloc_map.intersects(addr, size) ) continue;
      allocator->mark_extent_used(addr, size);
      alloc_map.insert(addr, size);
      avail -= size;
    }
    ASSERT_EQ(avail, allocator->get_available_size());
    // Free everything back.  Erasing from alloc_map invalidates its
    // iterators, so pop from the front instead of range-iterating the
    // container while mutating it (the previous range-for + erase was
    // undefined behavior).
    while (!alloc_map.empty()) {
      auto addr = (*alloc_map.begin()).first;
      auto size = (*alloc_map.begin()).second;
      free(addr, size);
      avail += size;
      alloc_map.erase(addr, size);
      ASSERT_EQ(avail, allocator->get_available_size());
    }
    ASSERT_EQ(capacity, allocator->get_available_size());
    // repopulate with another batch of random extents
    for (int i = 0; i < 100; i++) {
      auto addr = get_random_addr(block_size, capacity);
      auto size = get_random_addr(block_size, capacity) % (4 << 20);
      if (addr + size > capacity || size == 0 ||
	  alloc_map.intersects(addr, size) ) continue;
      allocator->mark_extent_used(addr, size);
      alloc_map.insert(addr, size);
      avail -= size;
    }
    // interleave: free the front extent, then (maybe) mark a new one
    for (int i = 0; i < 50; i++) {
      free((*alloc_map.begin()).first, (*alloc_map.begin()).second);
      avail += (*alloc_map.begin()).second;
      alloc_map.erase((*alloc_map.begin()).first, (*alloc_map.begin()).second);
      ASSERT_EQ(avail, allocator->get_available_size());
      auto addr = get_random_addr(block_size, capacity);
      auto size = get_random_addr(block_size, capacity) % (4 << 20);
      if (addr + size > capacity || size == 0 ||
	  alloc_map.intersects(addr, size) ) continue;
      allocator->mark_extent_used(addr, size);
      alloc_map.insert(addr, size);
      avail -= size;
    }
    ASSERT_EQ(avail, allocator->get_available_size());
  }
}
// Run every allocator_test_t case once per backend; only "avl" exists.
INSTANTIATE_TEST_SUITE_P(
  allocator_test,
  allocator_test_t,
  ::testing::Values("avl"));
| 5,261 | 27.912088 | 79 | cc |
null | ceph-main/src/test/crimson/seastore/test_object_data_handler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/crimson/gtest_seastar.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
#include "crimson/os/seastore/onode.h"
#include "crimson/os/seastore/object_data_handler.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
#define MAX_OBJECT_SIZE (16<<20)
#define DEFAULT_OBJECT_DATA_RESERVATION (16<<20)
#define DEFAULT_OBJECT_METADATA_RESERVATION (16<<20)
// File-local accessor for the test subsystem logger.
namespace {
  [[maybe_unused]] seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_test);
  }
}
// Minimal in-memory Onode: no persistence, just an onode_layout_t plus
// a dirty flag flipped by any mutable access.
class TestOnode final : public Onode {
  onode_layout_t layout;
  bool dirty = false;
public:
  TestOnode(uint32_t ddr, uint32_t dmr) : Onode(ddr, dmr) {}
  const onode_layout_t &get_layout() const final {
    return layout;
  }
  // any caller requesting mutable access is assumed to mutate
  onode_layout_t &get_mutable_layout(Transaction &t) final {
    dirty = true;
    return layout;
  }
  bool is_dirty() const { return dirty; }
  laddr_t get_hint() const final {return L_ADDR_MIN; }
  ~TestOnode() final = default;
};
// Fixture for ObjectDataHandler tests.  Mirrors every write/truncate in
// `known_contents` (a 4MB shadow buffer) so reads can be verified
// against an in-memory model.
struct object_data_handler_test_t:
  public seastar_test_suite_t,
  TMTestState {
  OnodeRef onode;
  // shadow copy of the object's expected contents
  bufferptr known_contents;
  // highest offset written so far (object logical size)
  extent_len_t size = 0;
  object_data_handler_test_t() {}
  // write `len` bytes of `fill` at `offset` through the handler and
  // into the shadow buffer
  void write(Transaction &t, objaddr_t offset, extent_len_t len, char fill) {
    ceph_assert(offset + len <= known_contents.length());
    size = std::max<extent_len_t>(size, offset + len);
    memset(
      known_contents.c_str() + offset,
      fill,
      len);
    bufferlist bl;
    bl.append(
      bufferptr(
	known_contents,
	offset,
	len));
    with_trans_intr(t, [&](auto &t) {
      return ObjectDataHandler(MAX_OBJECT_SIZE).write(
	ObjectDataHandler::context_t{
	  *tm,
	  t,
	  *onode,
	},
	offset,
	bl);
    }).unsafe_get0();
  }
  // single-transaction write convenience wrapper
  void write(objaddr_t offset, extent_len_t len, char fill) {
    auto t = create_mutate_transaction();
    write(*t, offset, len, fill);
    return submit_transaction(std::move(t));
  }
  // truncate to `offset`, zeroing the shadow buffer's tail to match
  void truncate(Transaction &t, objaddr_t offset) {
    if (size > offset) {
      memset(
	known_contents.c_str() + offset,
	0,
	size - offset);
      with_trans_intr(t, [&](auto &t) {
	return ObjectDataHandler(MAX_OBJECT_SIZE).truncate(
	  ObjectDataHandler::context_t{
	    *tm,
	    t,
	    *onode
	  },
	  offset);
      }).unsafe_get0();
    }
    size = offset;
  }
  void truncate(objaddr_t offset) {
    auto t = create_mutate_transaction();
    truncate(*t, offset);
    return submit_transaction(std::move(t));
  }
  // read through the handler and compare against the shadow buffer
  void read(Transaction &t, objaddr_t offset, extent_len_t len) {
    bufferlist bl = with_trans_intr(t, [&](auto &t) {
      return ObjectDataHandler(MAX_OBJECT_SIZE).read(
	ObjectDataHandler::context_t{
	  *tm,
	  t,
	  *onode
	},
	offset,
	len);
    }).unsafe_get0();
    bufferlist known;
    known.append(
      bufferptr(
	known_contents,
	offset,
	len));
    EXPECT_EQ(bl.length(), known.length());
    EXPECT_EQ(bl, known);
  }
  void read(objaddr_t offset, extent_len_t len) {
    auto t = create_read_transaction();
    read(*t, offset, len);
  }
  // read the range, then re-read with each edge nudged by +/- fuzz
  void read_near(objaddr_t offset, extent_len_t len, extent_len_t fuzz) {
    auto fuzzes = std::vector<int32_t>{-1 * (int32_t)fuzz, 0, (int32_t)fuzz};
    for (auto left_fuzz : fuzzes) {
      for (auto right_fuzz : fuzzes) {
	read(offset + left_fuzz, len - left_fuzz + right_fuzz);
      }
    }
  }
  // fetch the LBA mappings covering [offset, offset+length)
  std::list<LBAMappingRef> get_mappings(objaddr_t offset, extent_len_t length) {
    auto t = create_mutate_transaction();
    auto ret = with_trans_intr(*t, [&](auto &t) {
      return tm->get_pins(t, offset, length);
    }).unsafe_get0();
    return ret;
  }
  seastar::future<> set_up_fut() final {
    onode = new TestOnode(
      DEFAULT_OBJECT_DATA_RESERVATION,
      DEFAULT_OBJECT_METADATA_RESERVATION);
    known_contents = buffer::create(4<<20 /* 4MB */);
    memset(known_contents.c_str(), 0, known_contents.length());
    size = 0;
    return tm_setup();
  }
  seastar::future<> tear_down_fut() final {
    onode.reset();
    size = 0;
    return tm_teardown();
  }
};
// One 8KB write at 1MB, verified with edge fuzzing.
TEST_F(object_data_handler_test_t, single_write)
{
  run_async([this] {
    write(1<<20, 8<<10, 'c');
    read_near(1<<20, 8<<10, 1);
    read_near(1<<20, 8<<10, 512);
  });
}
// Three adjacent 4KB writes; verify each and the combined 12KB span.
TEST_F(object_data_handler_test_t, multi_write)
{
  run_async([this] {
    write((1<<20) - (4<<10), 4<<10, 'a');
    write(1<<20, 4<<10, 'b');
    write((1<<20) + (4<<10), 4<<10, 'c');
    read_near(1<<20, 4<<10, 1);
    read_near(1<<20, 4<<10, 512);
    read_near((1<<20)-(4<<10), 12<<10, 1);
    read_near((1<<20)-(4<<10), 12<<10, 512);
  });
}
// Two writes separated by an unwritten 4KB hole; reads across the hole
// must return zeros there.
TEST_F(object_data_handler_test_t, write_hole)
{
  run_async([this] {
    write((1<<20) - (4<<10), 4<<10, 'a');
    // hole at 1<<20
    write((1<<20) + (4<<10), 4<<10, 'c');
    read_near(1<<20, 4<<10, 1);
    read_near(1<<20, 4<<10, 512);
    read_near((1<<20)-(4<<10), 12<<10, 1);
    read_near((1<<20)-(4<<10), 12<<10, 512);
  });
}
// Overwrite one extent in place with new data.
TEST_F(object_data_handler_test_t, overwrite_single)
{
  run_async([this] {
    write((1<<20), 4<<10, 'a');
    write((1<<20), 4<<10, 'c');
    read_near(1<<20, 4<<10, 1);
    read_near(1<<20, 4<<10, 512);
  });
}
// One 8KB write overwriting two adjacent 4KB extents at once.
TEST_F(object_data_handler_test_t, overwrite_double)
{
  run_async([this] {
    write((1<<20), 4<<10, 'a');
    write((1<<20)+(4<<10), 4<<10, 'c');
    write((1<<20), 8<<10, 'b');
    read_near(1<<20, 8<<10, 1);
    read_near(1<<20, 8<<10, 512);
    read_near(1<<20, 4<<10, 1);
    read_near(1<<20, 4<<10, 512);
    read_near((1<<20) + (4<<10), 4<<10, 1);
    read_near((1<<20) + (4<<10), 4<<10, 512);
  });
}
// Overwrite a 12KB extent piecewise, back to front, verifying after
// each step.
TEST_F(object_data_handler_test_t, overwrite_partial)
{
  run_async([this] {
    write((1<<20), 12<<10, 'a');
    read_near(1<<20, 12<<10, 1);
    write((1<<20)+(8<<10), 4<<10, 'b');
    read_near(1<<20, 12<<10, 1);
    write((1<<20)+(4<<10), 4<<10, 'c');
    read_near(1<<20, 12<<10, 1);
    write((1<<20), 4<<10, 'd');
    read_near(1<<20, 12<<10, 1);
    read_near(1<<20, 12<<10, 512);
    read_near(1<<20, 4<<10, 1);
    read_near(1<<20, 4<<10, 512);
    read_near((1<<20) + (4<<10), 4<<10, 1);
    read_near((1<<20) + (4<<10), 4<<10, 512);
  });
}
// Writes whose offset and/or length are not block-aligned.
TEST_F(object_data_handler_test_t, unaligned_write)
{
  run_async([this] {
    objaddr_t base = 1<<20;
    write(base, (4<<10)+(1<<10), 'a');
    read_near(base-(4<<10), 12<<10, 512);
    base = (1<<20) + (64<<10);
    write(base+(1<<10), (4<<10)+(1<<10), 'b');
    read_near(base-(4<<10), 12<<10, 512);
    base = (1<<20) + (128<<10);
    write(base-(1<<10), (4<<10)+(2<<20), 'c');
    read_near(base-(4<<10), 12<<10, 512);
  });
}
// Unaligned writes landing on top of previously written data.
TEST_F(object_data_handler_test_t, unaligned_overwrite)
{
  run_async([this] {
    objaddr_t base = 1<<20;
    // pre-fill the whole region being overwritten below
    write(base, (128<<10) + (16<<10), 'x');
    write(base, (4<<10)+(1<<10), 'a');
    read_near(base-(4<<10), 12<<10, 2<<10);
    base = (1<<20) + (64<<10);
    write(base+(1<<10), (4<<10)+(1<<10), 'b');
    read_near(base-(4<<10), 12<<10, 2<<10);
    base = (1<<20) + (128<<10);
    write(base-(1<<10), (4<<10)+(2<<20), 'c');
    read_near(base-(4<<10), 12<<10, 2<<10);
    read(base, (128<<10) + (16<<10));
  });
}
// Truncate past the end, mid-extent, and before the data; reads past
// the truncation point must return zeros.
TEST_F(object_data_handler_test_t, truncate)
{
  run_async([this] {
    objaddr_t base = 1<<20;
    write(base, 8<<10, 'a');
    write(base+(8<<10), 8<<10, 'b');
    write(base+(16<<10), 8<<10, 'c');
    truncate(base + (32<<10));
    read(base, 64<<10);
    truncate(base + (24<<10));
    read(base, 64<<10);
    truncate(base + (12<<10));
    read(base, 64<<10);
    truncate(base - (12<<10));
    read(base, 64<<10);
  });
}
// A full overwrite of an extent must not split the mapping.
TEST_F(object_data_handler_test_t, no_split) {
  run_async([this] {
    write(0, 8<<10, 'x');
    write(0, 8<<10, 'a');
    auto pins = get_mappings(0, 8<<10);
    EXPECT_EQ(pins.size(), 1);
    read(0, 8<<10);
  });
}
// Overwriting the tail of a 128KB extent must split it into two
// mappings at offsets 0 and 64KB.
TEST_F(object_data_handler_test_t, split_left) {
  run_async([this] {
    write(0, 128<<10, 'x');
    write(64<<10, 60<<10, 'a');
    auto pins = get_mappings(0, 128<<10);
    // ASSERT (not EXPECT): if the pin count is wrong, the loop below
    // would index res[] out of bounds.
    ASSERT_EQ(pins.size(), 2);
    size_t res[2] = {0, 64<<10};
    auto base = pins.front()->get_key();
    int i = 0;
    for (auto &pin : pins) {
      EXPECT_EQ(pin->get_key() - base, res[i]);
      i++;
    }
    read(0, 128<<10);
  });
}
// Overwriting the head of a 128KB extent must split it into two
// mappings at offsets 0 and 64KB.
TEST_F(object_data_handler_test_t, split_right) {
  run_async([this] {
    write(0, 128<<10, 'x');
    write(4<<10, 60<<10, 'a');
    auto pins = get_mappings(0, 128<<10);
    // ASSERT (not EXPECT): if the pin count is wrong, the loop below
    // would index res[] out of bounds.
    ASSERT_EQ(pins.size(), 2);
    size_t res[2] = {0, 64<<10};
    auto base = pins.front()->get_key();
    int i = 0;
    for (auto &pin : pins) {
      EXPECT_EQ(pin->get_key() - base, res[i]);
      i++;
    }
    read(0, 128<<10);
  });
}
// Overwriting the middle of a 128KB extent must split it into three
// mappings: left remainder, new data, right remainder.
TEST_F(object_data_handler_test_t, split_left_right) {
  run_async([this] {
    write(0, 128<<10, 'x');
    write(48<<10, 32<<10, 'a');
    auto pins = get_mappings(0, 128<<10);
    // ASSERT (not EXPECT): if the pin count is wrong, the loop below
    // would index res[] out of bounds.
    ASSERT_EQ(pins.size(), 3);
    size_t res[3] = {0, 48<<10, 80<<10};
    auto base = pins.front()->get_key();
    int i = 0;
    for (auto &pin : pins) {
      EXPECT_EQ(pin->get_key() - base, res[i]);
      i++;
    }
  });
}
// Several overlapping overwrites in one transaction must produce the
// expected ten mappings.
TEST_F(object_data_handler_test_t, multiple_split) {
  run_async([this] {
    write(0, 128<<10, 'x');
    auto t = create_mutate_transaction();
    // normal split
    write(*t, 120<<10, 4<<10, 'a');
    // not aligned right
    write(*t, 4<<10, 5<<10, 'b');
    // split right extent of last split result
    write(*t, 32<<10, 4<<10, 'c');
    // non aligned overwrite
    write(*t, 13<<10, 4<<10, 'd');

    write(*t, 64<<10, 32<<10, 'e');
    // not split right
    write(*t, 60<<10, 8<<10, 'f');
    submit_transaction(std::move(t));

    auto pins = get_mappings(0, 128<<10);
    // ASSERT (not EXPECT): if the pin count is wrong, the loop below
    // would index res[] out of bounds.
    ASSERT_EQ(pins.size(), 10);
    size_t res[10] = {0, 4<<10, 12<<10, 20<<10, 32<<10,
		      36<<10, 60<<10, 96<<10, 120<<10, 124<<10};
    auto base = pins.front()->get_key();
    int i = 0;
    for (auto &pin : pins) {
      EXPECT_EQ(pin->get_key() - base, res[i]);
      i++;
    }
    read(0, 128<<10);
  });
}
| 10,080 | 23.117225 | 80 | cc |
null | ceph-main/src/test/crimson/seastore/test_omap_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/crimson/gtest_seastar.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/omap_manager.h"
#include "test/crimson/seastore/test_block.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
using namespace std;
// File-local accessor for the test subsystem logger.
namespace {
  [[maybe_unused]] seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_test);
  }
}
const int STR_LEN = 50;
// Build a pseudo-random name of `len` characters drawn from the ASCII
// range ['0', 'z') -- note this includes punctuation, not just
// alphanumerics.  Uses the global rand() stream.
std::string rand_name(const int len)
{
  std::string out;
  out.reserve(len);
  while (static_cast<int>(out.size()) < len) {
    out.push_back(static_cast<char>('0' + rand() % ('z' - '0')));
  }
  return out;
}
// Fill a single bufferptr of `len` bytes with pseudo-random data (from
// the global rand() stream) and wrap it in a bufferlist.
bufferlist rand_buffer(const int len) {
  bufferptr ptr(len);
  char *data = ptr.c_str();
  for (int i = 0; i < len; ++i) {
    data[i] = (char)rand();
  }
  bufferlist bl;
  bl.append(ptr);
  return bl;
}
// Fixture for OMapManager tests.  Every mutation is mirrored in
// test_omap_mappings (a std::map model) so on-disk results can be
// cross-checked key-by-key and range-by-range.
struct omap_manager_test_t :
  public seastar_test_suite_t,
  TMTestState {
  OMapManagerRef omap_manager;
  omap_manager_test_t() {}
  seastar::future<> set_up_fut() final {
    return tm_setup().then([this] {
      omap_manager = omap_manager::create_omap_manager(*tm);
      return seastar::now();
    });
  }
  seastar::future<> tear_down_fut() final {
    return tm_teardown().then([this] {
      omap_manager.reset();
      return seastar::now();
    });
  }
  // in-memory model of the omap expected to be on disk
  using test_omap_t = std::map<std::string, ceph::bufferlist>;
  test_omap_t test_omap_mappings;
  // set key on disk and in the model
  void set_key(
    omap_root_t &omap_root,
    Transaction &t,
    const string &key,
    const bufferlist &val) {
    with_trans_intr(
      t,
      [&, this](auto &t) {
	return omap_manager->omap_set_key(omap_root, t, key, val);
      }).unsafe_get0();
    test_omap_mappings[key] = val;
  }
  void set_key(
    omap_root_t &omap_root,
    Transaction &t,
    const string &key,
    const string &val) {
    bufferlist bl;
    bl.append(val);
    set_key(omap_root, t, key, bl);
  }
  // insert a random key/value pair; returns the key for later use
  std::string set_random_key(
    omap_root_t &omap_root,
    Transaction &t) {
    auto key = rand_name(STR_LEN);
    set_key(
      omap_root,
      t,
      key,
      rand_buffer(STR_LEN));
    return key;
  }
  // look up key on disk and check presence/value against the model
  void get_value(
    omap_root_t &omap_root,
    Transaction &t,
    const string &key) {
    auto ret = with_trans_intr(
      t,
      [&, this](auto &t) {
	return omap_manager->omap_get_value(omap_root, t, key);
      }).unsafe_get0();
    auto iter = test_omap_mappings.find(key);
    if (iter == test_omap_mappings.end()) {
      EXPECT_FALSE(ret);
    } else {
      EXPECT_TRUE(ret);
      if (ret) {
	EXPECT_TRUE(*ret == iter->second);
      }
    }
  }
  // remove key on disk and from the model
  void rm_key(
    omap_root_t &omap_root,
    Transaction &t,
    const string &key) {
    with_trans_intr(
      t,
      [&, this](auto &t) {
	return omap_manager->omap_rm_key(omap_root, t, key);
      }).unsafe_get0();
    test_omap_mappings.erase(test_omap_mappings.find(key));
  }
  // remove keys in [first, last) on disk (capped at 3000 results) and
  // mirror the same removal in the model; returns the removed keys
  std::vector<std::string> rm_key_range(
    omap_root_t &omap_root,
    Transaction &t,
    const std::string &first,
    const std::string &last) {
    logger().debug("rm keys in range {} ~ {}", first, last);
    auto config = OMapManager::omap_list_config_t()
      .with_max(3000)
      .with_inclusive(true, false);
    with_trans_intr(
      t,
      [&, this](auto &t) {
	return omap_manager->omap_rm_key_range(
	  omap_root, t, first, last, config);
      }).unsafe_get0();
    std::vector<std::string> keys;
    size_t count = 0;
    for (auto iter = test_omap_mappings.begin();
	 iter != test_omap_mappings.end(); ) {
      if (iter->first >= first && iter->first < last) {
	keys.push_back(iter->first);
	iter = test_omap_mappings.erase(iter);
	count++;
      } else {
	iter++;
      }
      // stop once we hit the same cap the on-disk operation used
      if (count == config.max_result_size) {
	break;
      }
    }
    return keys;
  }
  // list [first, last) on disk and verify order, values, and the
  // completeness flag against the model
  void list(
    const omap_root_t &omap_root,
    Transaction &t,
    const std::optional<std::string> &first,
    const std::optional<std::string> &last,
    size_t max = 128,
    bool inclusive = false) {
    if (first && last) {
      logger().debug("list on {} ~ {}", *first, *last);
    } else if (first) {
      logger().debug("list on {} ~ end", *first);
    } else if (last) {
      logger().debug("list on start ~ {}", *last);
    } else {
      logger().debug("list on start ~ end");
    }
    auto config = OMapManager::omap_list_config_t()
      .with_max(max)
      .with_inclusive(inclusive, false);
    auto [complete, results] = with_trans_intr(
      t,
      [&, this](auto &t) {
	return omap_manager->omap_list(omap_root, t, first, last, config);
      }).unsafe_get0();
    test_omap_t::iterator it, lit;
    // compute the model's begin/end of the requested range, honoring
    // the inclusivity flags
    if (first) {
      it = config.first_inclusive ?
	test_omap_mappings.lower_bound(*first) :
	test_omap_mappings.upper_bound(*first);
    } else {
      it = test_omap_mappings.begin();
    }
    if (last) {
      lit = config.last_inclusive ?
	test_omap_mappings.upper_bound(*last) :
	test_omap_mappings.lower_bound(*last);
    } else {
      lit = test_omap_mappings.end();
    }
    for (auto &&[k, v]: results) {
      EXPECT_NE(it, test_omap_mappings.end());
      if (it == test_omap_mappings.end()) {
	return;
      }
      EXPECT_EQ(k, it->first);
      EXPECT_EQ(v, it->second);
      it++;
    }
    if (it == lit) {
      EXPECT_TRUE(complete);
    } else {
      // truncated listing: must have returned exactly `max` entries
      EXPECT_EQ(results.size(), max);
    }
  }
  // clear the whole omap; the root must become a null address
  void clear(
    omap_root_t &omap_root,
    Transaction &t) {
    with_trans_intr(
      t,
      [&, this](auto &t) {
	return omap_manager->omap_clear(omap_root, t);
      }).unsafe_get0();
    EXPECT_EQ(omap_root.get_location(), L_ADDR_NULL);
  }
  // verify every key in the model resolves to the same value on disk
  void check_mappings(omap_root_t &omap_root, Transaction &t) {
    for (const auto &i: test_omap_mappings){
      get_value(omap_root, t, i.first);
    }
  }
  void check_mappings(omap_root_t &omap_root) {
    auto t = create_read_transaction();
    check_mappings(omap_root, *t);
  }
  std::vector<std::string> get_mapped_keys() {
    std::vector<std::string> mkeys;
    mkeys.reserve(test_omap_mappings.size());
    for (auto &k: test_omap_mappings) {
      mkeys.push_back(k.first);
    }
    return mkeys;
  }
  // simulate a restart: rebuild the TM and a fresh omap manager
  void replay() {
    restart();
    omap_manager = omap_manager::create_omap_manager(*tm);
  }
  // create a fresh omap root in its own transaction
  auto initialize() {
    auto t = create_mutate_transaction();
    omap_root_t omap_root = with_trans_intr(
      *t,
      [this](auto &t) {
	return omap_manager->initialize_omap(t, L_ADDR_MIN);
      }).unsafe_get0();
    submit_transaction(std::move(t));
    return omap_root;
  }
};
// Set, read, and remove a single key across three transactions.
TEST_F(omap_manager_test_t, basic)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    string key = "owner";
    string val = "test";
    {
      auto t = create_mutate_transaction();
      logger().debug("first transaction");
      set_key(omap_root, *t, key, val);
      get_value(omap_root, *t, key);
      submit_transaction(std::move(t));
    }
    {
      auto t = create_mutate_transaction();
      logger().debug("second transaction");
      get_value(omap_root, *t, key);
      rm_key(omap_root, *t, key);
      get_value(omap_root, *t, key);
      submit_transaction(std::move(t));
    }
    {
      auto t = create_mutate_transaction();
      logger().debug("third transaction");
      get_value(omap_root, *t, key);
      submit_transaction(std::move(t));
    }
  });
}
// Insert 400 random keys (40 transactions x 10 keys) to force leaf node
// splits, verifying all mappings after every transaction.
TEST_F(omap_manager_test_t, force_leafnode_split)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    for (unsigned i = 0; i < 40; i++) {
      auto t = create_mutate_transaction();
      logger().debug("opened transaction");
      for (unsigned j = 0; j < 10; ++j) {
	set_random_key(omap_root, *t);
	if ((i % 20 == 0) && (j == 5)) {
	  check_mappings(omap_root, *t);
	}
      }
      logger().debug("force split submit transaction i = {}", i);
      submit_transaction(std::move(t));
      check_mappings(omap_root);
    }
  });
}
// Force leaf splits by insertion, then remove two thirds of the keys to
// force merges, spot-checking mappings along the way.
TEST_F(omap_manager_test_t, force_leafnode_split_merge)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    for (unsigned i = 0; i < 80; i++) {
      auto t = create_mutate_transaction();
      logger().debug("opened split_merge transaction");
      for (unsigned j = 0; j < 5; ++j) {
	set_random_key(omap_root, *t);
	if ((i % 10 == 0) && (j == 3)) {
	  check_mappings(omap_root, *t);
	}
      }
      logger().debug("submitting transaction");
      submit_transaction(std::move(t));
      if (i % 50 == 0) {
	check_mappings(omap_root);
      }
    }
    auto mkeys = get_mapped_keys();
    auto t = create_mutate_transaction();
    for (unsigned i = 0; i < mkeys.size(); i++) {
      // keep every third key, remove the rest
      if (i % 3 != 0) {
	rm_key(omap_root, *t, mkeys[i]);
      }
      if (i % 10 == 0) {
	logger().debug("submitting transaction i= {}", i);
	submit_transaction(std::move(t));
	t = create_mutate_transaction();
      }
      if (i % 100 == 0) {
	logger().debug("check_mappings i= {}", i);
	check_mappings(omap_root, *t);
	check_mappings(omap_root);
      }
    }
    logger().debug("finally submitting transaction ");
    submit_transaction(std::move(t));
  });
}
// Force splits, then remove a contiguous run of keys (indices 31..99)
// to exercise the full-and-balanced merge path.
TEST_F(omap_manager_test_t, force_leafnode_split_merge_fullandbalanced)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    for (unsigned i = 0; i < 50; i++) {
      auto t = create_mutate_transaction();
      logger().debug("opened split_merge transaction");
      for (unsigned j = 0; j < 5; ++j) {
	set_random_key(omap_root, *t);
	if ((i % 10 == 0) && (j == 3)) {
	  check_mappings(omap_root, *t);
	}
      }
      logger().debug("submitting transaction");
      submit_transaction(std::move(t));
      if (i % 50 == 0) {
	check_mappings(omap_root);
      }
    }
    auto mkeys = get_mapped_keys();
    auto t = create_mutate_transaction();
    for (unsigned i = 0; i < mkeys.size(); i++) {
      // remove only the middle run of keys
      if (30 < i && i < 100) {
	rm_key(omap_root, *t, mkeys[i]);
      }
      if (i % 10 == 0) {
	logger().debug("submitting transaction i= {}", i);
	submit_transaction(std::move(t));
	t = create_mutate_transaction();
      }
      if (i % 50 == 0) {
	logger().debug("check_mappings i= {}", i);
	check_mappings(omap_root, *t);
	check_mappings(omap_root);
      }
      if (i == 100) {
	break;
      }
    }
    logger().debug("finally submitting transaction ");
    submit_transaction(std::move(t));
    check_mappings(omap_root);
  });
}
// Force splits, then exercise omap_list with every combination of
// open/closed range bounds, remove a key range, and finally clear.
TEST_F(omap_manager_test_t, force_split_listkeys_list_rmkey_range_clear)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    string first, last;
    for (unsigned i = 0; i < 40; i++) {
      auto t = create_mutate_transaction();
      logger().debug("opened transaction");
      for (unsigned j = 0; j < 10; ++j) {
	auto key = set_random_key(omap_root, *t);
	// remember two inserted keys to use as range bounds later
	if (i == 10) {
	  first = key;
	}
	if (i == 30) {
	  last = key;
	  if (first > last) {
	    std::swap(first, last);
	  }
	}
	if ((i % 20 == 0) && (j == 5)) {
	  check_mappings(omap_root, *t);
	}
      }
      logger().debug("force split submit transaction i = {}", i);
      submit_transaction(std::move(t));
      check_mappings(omap_root);
    }
    std::optional<std::string> first_temp;
    std::optional<std::string> last_temp;
    {
      // unbounded listing
      auto t = create_read_transaction();
      first_temp = std::nullopt;
      last_temp = std::nullopt;
      list(omap_root, *t, first_temp, last_temp);
    }
    {
      // lower-bounded, exclusive start
      auto t = create_read_transaction();
      first_temp = first;
      last_temp = std::nullopt;
      list(omap_root, *t, first_temp, last_temp, 100);
    }
    {
      // lower-bounded, inclusive start
      auto t = create_read_transaction();
      first_temp = first;
      last_temp = std::nullopt;
      list(omap_root, *t, first_temp, last_temp, 100, true);
    }
    {
      // upper-bounded only
      auto t = create_read_transaction();
      first_temp = std::nullopt;
      last_temp = last;
      list(omap_root, *t, first_temp, last_temp, 10240);
    }
    {
      // fully bounded, inclusive start
      auto t = create_read_transaction();
      first_temp = first;
      last_temp = last;
      list(omap_root, *t, first_temp, last_temp, 10240, true);
    }
    {
      auto t = create_read_transaction();
      list(omap_root, *t, first, last, 10240, true);
    }
    {
      // removed keys must no longer resolve
      auto t = create_mutate_transaction();
      auto keys = rm_key_range(omap_root, *t, first, last);
      for (const auto& key : keys) {
	get_value(omap_root, *t, key);
      }
      submit_transaction(std::move(t));
    }
    {
      auto t = create_mutate_transaction();
      clear(omap_root, *t);
      submit_transaction(std::move(t));
    }
  });
}
// Keep inserting keys until the omap tree reaches depth 3 (i.e. inner
// nodes have split), then exercise bounded list() and rm_key_range().
TEST_F(omap_manager_test_t, force_inner_node_split_list_rmkey_range)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    // `first` tracks the smallest key inserted so far; `last` is the last
    // key inserted during transaction 10 of each outer pass.
    string first = "";
    string last;
    while (cache->get_omap_tree_depth() < 3) {
      for (unsigned i = 0; i < 40; i++) {
        auto t = create_mutate_transaction();
        logger().debug("opened transaction");
        for (unsigned j = 0; j < 10; ++j) {
          auto key = set_random_key(omap_root, *t);
          if (key.compare(first) < 0 || !first.length()) {
            first = key;
          }
          if (i == 10) {
            last = key;
          }
        }
        logger().debug("force split submit transaction i = {}", i);
        submit_transaction(std::move(t));
      }
    }
    std::optional<std::string> first_temp;
    std::optional<std::string> last_temp;
    // Lower bound only.
    {
      auto t = create_read_transaction();
      first_temp = first;
      last_temp = std::nullopt;
      list(omap_root, *t, first_temp, last_temp, 10240);
    }
    {
      auto t = create_read_transaction();
      first_temp = first;
      last_temp = std::nullopt;
      list(omap_root, *t, first_temp, last_temp, 10240, true);
    }
    // Upper bound only.
    {
      auto t = create_read_transaction();
      first_temp = std::nullopt;
      last_temp = last;
      list(omap_root, *t, first_temp, last_temp, 10240);
    }
    // Both bounds.
    {
      auto t = create_read_transaction();
      first_temp = first;
      last_temp = last;
      list(omap_root, *t, first_temp, last_temp, 10240, true);
    }
    // Remove the whole [first, last] range and verify the keys are gone.
    {
      auto t = create_mutate_transaction();
      auto keys = rm_key_range(omap_root, *t, first, last);
      for (const auto& key : keys) {
        get_value(omap_root, *t, key);
      }
      submit_transaction(std::move(t));
    }
    {
      auto t = create_mutate_transaction();
      clear(omap_root, *t);
      submit_transaction(std::move(t));
    }
  });
}
// Drive internal-node splits by inserting 10 transactions of 80 random
// keys each, spot-checking pending mappings during even transactions.
TEST_F(omap_manager_test_t, internal_force_split)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    for (unsigned txn = 0; txn < 10; ++txn) {
      logger().debug("opened split transaction");
      auto t = create_mutate_transaction();
      for (unsigned nth = 0; nth < 80; ++nth) {
        set_random_key(omap_root, *t);
        const bool spot_check = (txn % 2 == 0) && (nth % 50 == 0);
        if (spot_check) {
          check_mappings(omap_root, *t);
        }
      }
      logger().debug("submitting transaction i = {}", txn);
      submit_transaction(std::move(t));
    }
    // Final validation against the committed tree.
    check_mappings(omap_root);
  });
}
// Grow the tree with 8 x 80 random keys, then delete every key in small
// batches so that merges/rebalances are exercised on the way back down.
TEST_F(omap_manager_test_t, internal_force_merge_fullandbalanced)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    for (unsigned i = 0; i < 8; i++) {
      logger().debug("opened split transaction");
      auto t = create_mutate_transaction();
      for (unsigned j = 0; j < 80; ++j) {
        set_random_key(omap_root, *t);
        if ((i % 2 == 0) && (j % 50 == 0)) {
          check_mappings(omap_root, *t);
        }
      }
      logger().debug("submitting transaction");
      submit_transaction(std::move(t));
    }
    // Delete everything, committing every 10 removals and validating
    // (both pending and committed state) every 50.
    auto mkeys = get_mapped_keys();
    auto t = create_mutate_transaction();
    for (unsigned i = 0; i < mkeys.size(); i++) {
      rm_key(omap_root, *t, mkeys[i]);
      if (i % 10 == 0) {
        logger().debug("submitting transaction i= {}", i);
        submit_transaction(std::move(t));
        t = create_mutate_transaction();
      }
      if (i % 50 == 0) {
        logger().debug("check_mappings i= {}", i);
        check_mappings(omap_root, *t);
        check_mappings(omap_root);
      }
    }
    logger().debug("finally submitting transaction ");
    submit_transaction(std::move(t));
    check_mappings(omap_root);
  });
}
// Same insert/remove workload as the merge test, but interleaved with
// journal replay()s to verify the tree survives recovery at every stage.
TEST_F(omap_manager_test_t, replay)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    for (unsigned i = 0; i < 8; i++) {
      logger().debug("opened split transaction");
      auto t = create_mutate_transaction();
      for (unsigned j = 0; j < 80; ++j) {
        set_random_key(omap_root, *t);
        if ((i % 2 == 0) && (j % 50 == 0)) {
          check_mappings(omap_root, *t);
        }
      }
      logger().debug("submitting transaction i = {}", i);
      submit_transaction(std::move(t));
    }
    // Replay after the insert phase and validate.
    replay();
    check_mappings(omap_root);
    auto mkeys = get_mapped_keys();
    auto t = create_mutate_transaction();
    for (unsigned i = 0; i < mkeys.size(); i++) {
      rm_key(omap_root, *t, mkeys[i]);
      if (i % 10 == 0) {
        logger().debug("submitting transaction i= {}", i);
        submit_transaction(std::move(t));
        // Replay after each committed batch of removals.
        replay();
        t = create_mutate_transaction();
      }
      if (i % 50 == 0) {
        logger().debug("check_mappings i= {}", i);
        check_mappings(omap_root, *t);
        check_mappings(omap_root);
      }
    }
    logger().debug("finally submitting transaction ");
    submit_transaction(std::move(t));
    replay();
    check_mappings(omap_root);
  });
}
// Insert enough keys (53 + 100 transactions of 8 keys each) to force
// splits all the way up to the root, then validate the committed tree.
TEST_F(omap_manager_test_t, internal_force_split_to_root)
{
  run_async([this] {
    omap_root_t omap_root = initialize();
    logger().debug("set big keys");
    for (unsigned i = 0; i < 53; i++) {
      auto t = create_mutate_transaction();
      for (unsigned j = 0; j < 8; ++j) {
        set_random_key(omap_root, *t);
      }
      logger().debug("submitting transaction i = {}", i);
      submit_transaction(std::move(t));
    }
    logger().debug("set small keys");
    for (unsigned i = 0; i < 100; i++) {
      auto t = create_mutate_transaction();
      for (unsigned j = 0; j < 8; ++j) {
        set_random_key(omap_root, *t);
      }
      // Fixed copy-pasted log message: this loop used to claim
      // "submitting transaction last" on every iteration; log the
      // index like the first loop does.
      logger().debug("submitting transaction i = {}", i);
      submit_transaction(std::move(t));
    }
    check_mappings(omap_root);
  });
}
| 18,451 | 24.556787 | 72 | cc |
null | ceph-main/src/test/crimson/seastore/test_randomblock_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/crimson/gtest_seastar.h"
#include <random>
#include "crimson/common/log.h"
#include "crimson/os/seastore/random_block_manager/block_rb_manager.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
namespace {
// File-local accessor for the test subsystem logger.
[[maybe_unused]] seastar::logger& logger() {
  return crimson::get_logger(ceph_subsys_test);
}
}
constexpr uint64_t DEFAULT_TEST_SIZE = 1 << 20;
/// Fixture for BlockRBManager tests: creates an ephemeral random-block
/// device, mkfs()es and mounts it, and exposes thin synchronous wrappers
/// around the async device/manager API for use inside run_async().
struct rbm_test_t :
  public seastar_test_suite_t {
  std::unique_ptr<BlockRBManager> rbm_manager;
  std::unique_ptr<random_block_device::RBMDevice> device;
  // Minimal transaction stand-in: just accumulates allocation deltas.
  struct rbm_transaction {
    void add_rbm_allocated_blocks(alloc_delta_t &d) {
      allocated_blocks.push_back(d);
    }
    void clear_rbm_allocated_blocks() {
      if (!allocated_blocks.empty()) {
        allocated_blocks.clear();
      }
    }
    const auto &get_rbm_allocated_blocks() {
      return allocated_blocks;
    }
    std::vector<alloc_delta_t> allocated_blocks;
  };
  std::default_random_engine generator;
  uint64_t block_size = 0;   // device block size, read at set-up
  uint64_t size = 0;         // device available size, read at set-up
  device_config_t config;
  rbm_test_t() = default;
  seastar::future<> set_up_fut() final {
    device = random_block_device::create_test_ephemeral(
     random_block_device::DEFAULT_TEST_CBJOURNAL_SIZE, DEFAULT_TEST_SIZE);
    block_size = device->get_block_size();
    size = device->get_available_size();
    rbm_manager.reset(new BlockRBManager(device.get(), std::string(), false));
    config = get_rbm_ephemeral_device_config(0, 1);
    return device->mkfs(config).handle_error(crimson::ct_error::assert_all{}
    ).then([this] {
      return device->mount().handle_error(crimson::ct_error::assert_all{}
      ).then([this] {
        return rbm_manager->open().handle_error(crimson::ct_error::assert_all{});
      });
    });
  }
  seastar::future<> tear_down_fut() final {
    rbm_manager->close().unsafe_get0();
    device->close().unsafe_get0();
    rbm_manager.reset();
    device.reset();
    return seastar::now();
  }
  // --- synchronous helpers (call only from a seastar thread) ---
  auto mkfs() {
    return device->mkfs(config).unsafe_get0();
  }
  auto read_rbm_header() {
    return device->read_rbm_header(RBM_START_ADDRESS).unsafe_get0();
  }
  auto open() {
    device->mount().unsafe_get0();
    return rbm_manager->open().unsafe_get0();
  }
  auto write(uint64_t addr, bufferptr &ptr) {
    paddr_t paddr = convert_abs_addr_to_paddr(
      addr,
      rbm_manager->get_device_id());
    return rbm_manager->write(paddr, ptr).unsafe_get0();
  }
  auto read(uint64_t addr, bufferptr &ptr) {
    paddr_t paddr = convert_abs_addr_to_paddr(
      addr,
      rbm_manager->get_device_id());
    return rbm_manager->read(paddr, ptr).unsafe_get0();
  }
  /// Return a buffer of `blocks` blocks filled with one random byte.
  bufferptr generate_extent(size_t blocks) {
    // std::uniform_int_distribution<char> is undefined behavior: the
    // standard only allows short/int/long/long long and their unsigned
    // counterparts as IntType. Draw an int and narrow it instead.
    std::uniform_int_distribution<int> distribution(
      std::numeric_limits<char>::min(),
      std::numeric_limits<char>::max()
    );
    char contents = static_cast<char>(distribution(generator));
    return buffer::ptr(buffer::create(blocks * block_size, contents));
  }
  void close() {
    rbm_manager->close().unsafe_get0();
    return;
  }
};
// Verify the superblock written by mkfs(), then re-mkfs with a NULL
// device id and check the header reflects the new config.
TEST_F(rbm_test_t, mkfs_test)
{
  run_async([this] {
    auto super = read_rbm_header();
    ASSERT_TRUE(
      super.block_size == block_size &&
      super.size == size
    );
    // Re-run mkfs with a sentinel device id and confirm it is persisted.
    config.spec.id = DEVICE_ID_NULL;
    mkfs();
    super = read_rbm_header();
    ASSERT_TRUE(
      super.config.spec.id == DEVICE_ID_NULL &&
      super.size == size
    );
  });
}
// Write one random block, verify it reads back identically, then remount
// the device and verify the data survived. The identical read-and-compare
// sequence used to be duplicated inline; it is now a single local lambda.
TEST_F(rbm_test_t, open_read_write_test)
{
  run_async([this] {
    auto content = generate_extent(1);
    // Read one block back at offset `block_size` and compare its CRC
    // against `content`.
    auto verify = [this, &content] {
      auto bp = bufferptr(ceph::buffer::create_page_aligned(block_size));
      read(
        block_size,
        bp
      );
      bufferlist bl;
      bufferlist block;
      bl.append(bp);
      block.append(content);
      ASSERT_EQ(
        bl.begin().crc32c(bl.length(), 1),
        block.begin().crc32c(block.length(), 1));
    };
    write(
      block_size,
      content
    );
    verify();
    // Remount and make sure the data is still there.
    close();
    open();
    verify();
  });
}
| 4,355 | 23.335196 | 78 | cc |
null | ceph-main/src/test/crimson/seastore/test_seastore.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <string>
#include <iostream>
#include <sstream>
#include "test/crimson/gtest_seastar.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/seastore/seastore.h"
#include "crimson/os/seastore/onode.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
using SeaStoreShard = FuturizedStore::Shard;
using CTransaction = ceph::os::Transaction;
using namespace std;
namespace {
// File-local accessor for the test subsystem logger.
[[maybe_unused]] seastar::logger& logger() {
  return crimson::get_logger(ceph_subsys_test);
}
}
// Build a deterministic "normal" test object id for index `i`: name
// "object_<i>" in namespace "asdf", pool 0, bitwise keys 100 apart.
ghobject_t make_oid(int i) {
  std::ostringstream name;
  name << "object_" << i;
  ghobject_t oid(
    hobject_t(
      sobject_t(name.str(), CEPH_NOSNAP)));
  oid.set_shard(shard_id_t(shard_id_t::NO_SHARD));
  oid.hobj.nspace = "asdf";
  oid.hobj.pool = 0;
  const uint32_t reverse_hash = hobject_t::_reverse_bits(0);
  oid.hobj.set_bitwise_key_u32(reverse_hash + i * 100);
  return oid;
}
// Build a deterministic "temp" test object id for index `i`: name
// "temp_object_<i>" in namespace "hjkl" with a negative (temp) pool.
ghobject_t make_temp_oid(int i) {
  std::ostringstream name;
  name << "temp_object_" << i;
  ghobject_t oid(
    hobject_t(
      sobject_t(name.str(), CEPH_NOSNAP)));
  oid.set_shard(shard_id_t(shard_id_t::NO_SHARD));
  oid.hobj.nspace = "hjkl";
  oid.hobj.pool = -2ll;
  const uint32_t reverse_hash = hobject_t::_reverse_bits(0);
  oid.hobj.set_bitwise_key_u32(reverse_hash + i * 100);
  return oid;
}
/// SeaStore integration-test fixture, parameterized on journal type
/// ("segmented" or "circularbounded"). Maintains a shadow model of every
/// object it creates (object_state_t) so reads can be validated against
/// expected contents/omap without trusting the store itself.
struct seastore_test_t :
  public seastar_test_suite_t,
  SeaStoreTestState,
  ::testing::WithParamInterface<const char*> {
  coll_t coll_name{spg_t{pg_t{0, 0}}};
  CollectionRef coll;
  seastore_test_t() {}
  // Set up the TM for the selected journal type and create the test
  // collection both in-memory and on disk.
  seastar::future<> set_up_fut() final {
    std::string j_type = GetParam();
    journal_type_t journal;
    if (j_type == "segmented") {
      journal = journal_type_t::SEGMENTED;
    } else if (j_type == "circularbounded") {
      journal = journal_type_t::RANDOM_BLOCK;
    } else {
      ceph_assert(0 == "no support");
    }
    return tm_setup(journal
    ).then([this] {
      return sharded_seastore->create_new_collection(coll_name);
    }).then([this](auto coll_ref) {
      coll = coll_ref;
      CTransaction t;
      t.create_collection(coll_name, 0);
      return sharded_seastore->do_transaction(
        coll,
        std::move(t));
    });
  }
  seastar::future<> tear_down_fut() final {
    coll.reset();
    return tm_teardown();
  }
  // Synchronously apply a transaction to the test collection.
  void do_transaction(CTransaction &&t) {
    return sharded_seastore->do_transaction(
      coll,
      std::move(t)).get0();
  }
  void set_meta(
    const std::string& key,
    const std::string& value) {
    return seastore->write_meta(key, value).get0();
  }
  std::tuple<int, std::string> get_meta(
    const std::string& key) {
    return seastore->read_meta(key).get();
  }
  // Shadow model of one object plus synchronous helpers that mutate the
  // store and the model in lockstep, so reads can be cross-checked.
  struct object_state_t {
    const coll_t cid;
    const CollectionRef coll;
    const ghobject_t oid;
    // Expected omap contents, mirrored on every set_omap().
    std::map<string, bufferlist> omap;
    // Expected object data, mirrored on every write()/zero().
    bufferlist contents;
    void touch(
      CTransaction &t) {
      t.touch(cid, oid);
    }
    void touch(
      SeaStoreShard &sharded_seastore) {
      CTransaction t;
      touch(t);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    void truncate(
      CTransaction &t,
      uint64_t off) {
      t.truncate(cid, oid, off);
    }
    void truncate(
      SeaStoreShard &sharded_seastore,
      uint64_t off) {
      CTransaction t;
      truncate(t, off);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    std::map<uint64_t, uint64_t> fiemap(
      SeaStoreShard &sharded_seastore,
      uint64_t off,
      uint64_t len) {
      return sharded_seastore.fiemap(coll, oid, off, len).unsafe_get0();
    }
    bufferlist readv(
      SeaStoreShard &sharded_seastore,
      interval_set<uint64_t>&m) {
      return sharded_seastore.readv(coll, oid, m).unsafe_get0();
    }
    // NOTE: removes the object AND the collection in one transaction.
    void remove(
      CTransaction &t) {
      t.remove(cid, oid);
      t.remove_collection(cid);
    }
    void remove(
      SeaStoreShard &sharded_seastore) {
      CTransaction t;
      remove(t);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    void set_omap(
      CTransaction &t,
      const string &key,
      const bufferlist &val) {
      omap[key] = val;
      std::map<string, bufferlist> arg;
      arg[key] = val;
      t.omap_setkeys(
        cid,
        oid,
        arg);
    }
    void set_omap(
      SeaStoreShard &sharded_seastore,
      const string &key,
      const bufferlist &val) {
      CTransaction t;
      set_omap(t, key, val);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    // Splice `bl` into the shadow `contents` at `offset` (zero-filling
    // any gap, preserving any tail beyond the write) and queue the write.
    // The sharded_seastore parameter is unused in this overload.
    void write(
      SeaStoreShard &sharded_seastore,
      CTransaction &t,
      uint64_t offset,
      bufferlist bl)  {
      bufferlist new_contents;
      if (offset > 0 && contents.length()) {
        new_contents.substr_of(
          contents,
          0,
          std::min<size_t>(offset, contents.length())
        );
      }
      new_contents.append_zero(offset - new_contents.length());
      new_contents.append(bl);
      auto tail_offset = offset + bl.length();
      if (contents.length() > tail_offset) {
        bufferlist tail;
        tail.substr_of(
          contents,
          tail_offset,
          contents.length() - tail_offset);
        new_contents.append(tail);
      }
      contents.swap(new_contents);
      t.write(
        cid,
        oid,
        offset,
        bl.length(),
        bl);
    }
    void write(
      SeaStoreShard &sharded_seastore,
      uint64_t offset,
      bufferlist bl) {
      CTransaction t;
      write(sharded_seastore, t, offset, bl);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    // Convenience: write `len` bytes of `fill` at `offset`.
    void write(
      SeaStoreShard &sharded_seastore,
      uint64_t offset,
      size_t len,
      char fill) {
      auto buffer = bufferptr(buffer::create(len));
      ::memset(buffer.c_str(), fill, len);
      bufferlist bl;
      bl.append(buffer);
      write(sharded_seastore, offset, bl);
    }
    // Same shadow-splicing as write(), but queues a zero op; the shadow
    // is updated with an explicit zero-filled bufferlist.
    void zero(
      SeaStoreShard &sharded_seastore,
      CTransaction &t,
      uint64_t offset,
      size_t len) {
      ceph::buffer::list bl;
      bl.append_zero(len);
      bufferlist new_contents;
      if (offset > 0 && contents.length()) {
        new_contents.substr_of(
          contents,
          0,
          std::min<size_t>(offset, contents.length())
        );
      }
      new_contents.append_zero(offset - new_contents.length());
      new_contents.append(bl);
      auto tail_offset = offset + bl.length();
      if (contents.length() > tail_offset) {
        bufferlist tail;
        tail.substr_of(
          contents,
          tail_offset,
          contents.length() - tail_offset);
        new_contents.append(tail);
      }
      contents.swap(new_contents);
      t.zero(
        cid,
        oid,
        offset,
        len);
    }
    void zero(
      SeaStoreShard &sharded_seastore,
      uint64_t offset,
      size_t len) {
      CTransaction t;
      zero(sharded_seastore, t, offset, len);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    // Read [offset, offset+len) from the store and compare against the
    // shadow contents.
    void read(
      SeaStoreShard &sharded_seastore,
      uint64_t offset,
      uint64_t len) {
      bufferlist to_check;
      to_check.substr_of(
        contents,
        offset,
        len);
      auto ret = sharded_seastore.read(
        coll,
        oid,
        offset,
        len).unsafe_get0();
      EXPECT_EQ(ret.length(), to_check.length());
      EXPECT_EQ(ret, to_check);
    }
    void check_size(SeaStoreShard &sharded_seastore) {
      auto st = sharded_seastore.stat(
        coll,
        oid).get0();
      EXPECT_EQ(contents.length(), st.st_size);
    }
    void set_attr(
      SeaStoreShard &sharded_seastore,
      std::string key,
      bufferlist& val) {
      CTransaction t;
      t.setattr(cid, oid, key, val);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    void rm_attr(
      SeaStoreShard &sharded_seastore,
      std::string key) {
      CTransaction t;
      t.rmattr(cid, oid, key);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    void rm_attrs(
      SeaStoreShard &sharded_seastore) {
      CTransaction t;
      t.rmattrs(cid, oid);
      sharded_seastore.do_transaction(
        coll,
        std::move(t)).get0();
    }
    SeaStoreShard::attrs_t get_attrs(
      SeaStoreShard &sharded_seastore) {
      return sharded_seastore.get_attrs(coll, oid)
        .handle_error(SeaStoreShard::get_attrs_ertr::discard_all{})
        .get();
    }
    ceph::bufferlist get_attr(
      SeaStoreShard& sharded_seastore,
      std::string_view name) {
      return sharded_seastore.get_attr(coll, oid, name)
        .handle_error(
          SeaStoreShard::get_attr_errorator::discard_all{})
        .get();
    }
    // Look up one omap key in the store and cross-check presence and
    // value against the shadow map.
    void check_omap_key(
      SeaStoreShard &sharded_seastore,
      const string &key) {
      std::set<string> to_check;
      to_check.insert(key);
      auto result = sharded_seastore.omap_get_values(
        coll,
        oid,
        to_check).unsafe_get0();
      if (result.empty()) {
        EXPECT_EQ(omap.find(key), omap.end());
      } else {
        auto iter = omap.find(key);
        EXPECT_NE(iter, omap.end());
        if (iter != omap.end()) {
          EXPECT_EQ(result.size(), 1);
          EXPECT_EQ(iter->second, result.begin()->second);
        }
      }
    }
    // Walk the store's omap in pages (resuming from the last key seen)
    // and merge-compare it against the shadow map, flagging both missing
    // and extra keys.
    void check_omap(SeaStoreShard &sharded_seastore) {
      auto refiter = omap.begin();
      std::optional<std::string> start;
      while(true) {
        auto [done, kvs] = sharded_seastore.omap_get_values(
          coll,
          oid,
          start).unsafe_get0();
        auto iter = kvs.begin();
        while (true) {
          if ((done && iter == kvs.end()) && refiter == omap.end()) {
            return; // finished
          } else if (!done && iter == kvs.end()) {
            break; // reload kvs
          }
          if (iter == kvs.end() || refiter->first < iter->first) {
            logger().debug(
              "check_omap: missing omap key {}",
              refiter->first);
            GTEST_FAIL() << "missing omap key " << refiter->first;
            ++refiter;
          } else if (refiter == omap.end() || refiter->first > iter->first) {
            logger().debug(
              "check_omap: extra omap key {}",
              iter->first);
            GTEST_FAIL() << "extra omap key " << iter->first;
            ++iter;
          } else {
            EXPECT_EQ(iter->second, refiter->second);
            ++iter;
            ++refiter;
          }
        }
        // NOTE(review): assumes an unfinished page is non-empty; an empty
        // kvs with !done would dereference rbegin() on an empty map.
        if (!done) {
          start = kvs.rbegin()->first;
        }
      }
    }
  };
  map<ghobject_t, object_state_t> test_objects;
  // Fetch (creating on first use) the shadow state for `oid`.
  object_state_t &get_object(
    const ghobject_t &oid) {
    return test_objects.emplace(
      std::make_pair(
        oid,
        object_state_t{coll_name, coll, oid})).first->second;
  }
  void remove_object(
    object_state_t &sobj) {
    sobj.remove(*sharded_seastore);
    auto erased = test_objects.erase(sobj.oid);
    ceph_assert(erased == 1);
  }
  // Full-range list_objects() must return exactly the tracked objects.
  void validate_objects() const {
    std::vector<ghobject_t> oids;
    for (auto& [oid, obj] : test_objects) {
      oids.emplace_back(oid);
    }
    auto ret = sharded_seastore->list_objects(
      coll,
      ghobject_t(),
      ghobject_t::get_max(),
      std::numeric_limits<uint64_t>::max()).get0();
    EXPECT_EQ(std::get<1>(ret), ghobject_t::get_max());
    EXPECT_EQ(std::get<0>(ret), oids);
  }
  // create temp objects
  // Symbolic list bound, resolved to a concrete ghobject_t on demand
  // (some bounds require querying the store's temp/normal boundary).
  struct bound_t {
    enum class type_t {
      MIN,
      MAX,
      TEMP,
      TEMP_END,
      NORMAL_BEGIN,
      NORMAL,
    } type = type_t::MIN;
    unsigned index = 0;
    static bound_t get_temp(unsigned index) {
      return bound_t{type_t::TEMP, index};
    }
    static bound_t get_normal(unsigned index) {
      return bound_t{type_t::NORMAL, index};
    }
    static bound_t get_min() { return bound_t{type_t::MIN}; }
    static bound_t get_max() { return bound_t{type_t::MAX}; }
    static bound_t get_temp_end() { return bound_t{type_t::TEMP_END}; }
    static bound_t get_normal_begin() {
      return bound_t{type_t::NORMAL_BEGIN};
    }
    ghobject_t get_oid(SeaStore &seastore, CollectionRef &coll) const {
      switch (type) {
      case type_t::MIN:
        return ghobject_t();
      case type_t::MAX:
        return ghobject_t::get_max();
      case type_t::TEMP:
        return make_temp_oid(index);
      case type_t::TEMP_END:
        return seastore.get_objs_range(coll, 0).temp_end;
      case type_t::NORMAL_BEGIN:
        return seastore.get_objs_range(coll, 0).obj_begin;
      case type_t::NORMAL:
        return make_oid(index);
      default:
        assert(0 == "impossible");
        return ghobject_t();
      }
    }
  };
  struct list_test_case_t {
    bound_t left;
    bound_t right;
    unsigned limit;
  };
  // list_test_cases_t :: [<limit, left_bound, right_bound>]
  using list_test_cases_t = std::list<std::tuple<unsigned, bound_t, bound_t>>;
  // Create temp/normal objects, run list_objects() for each case, and
  // check both the returned page and the continuation token against a
  // model computed from the sorted object vector.
  void test_list(
    unsigned temp_to_create,   /// create temp 0..temp_to_create-1
    unsigned normal_to_create, /// create normal 0..normal_to_create-1
    list_test_cases_t cases    /// cases to test
  ) {
    std::vector<ghobject_t> objs;
    // setup
    auto create = [this, &objs](ghobject_t hoid) {
      objs.emplace_back(std::move(hoid));
      auto &obj = get_object(objs.back());
      obj.touch(*sharded_seastore);
      obj.check_size(*sharded_seastore);
    };
    for (unsigned i = 0; i < temp_to_create; ++i) {
      create(make_temp_oid(i));
    }
    for (unsigned i = 0; i < normal_to_create; ++i) {
      create(make_oid(i));
    }
    // list and validate each case
    for (auto [limit, in_left_bound, in_right_bound] : cases) {
      auto left_bound = in_left_bound.get_oid(*seastore, coll);
      auto right_bound = in_right_bound.get_oid(*seastore, coll);
      // get results from seastore
      auto [listed, next] = sharded_seastore->list_objects(
        coll, left_bound, right_bound, limit).get0();
      // compute correct answer
      auto correct_begin = std::find_if(
        objs.begin(), objs.end(),
        [&left_bound](const auto &in) {
          return in >= left_bound;
        });
      unsigned count = 0;
      auto correct_end = correct_begin;
      for (; count < limit &&
             correct_end != objs.end() &&
             *correct_end < right_bound;
           ++correct_end, ++count);
      // validate return -- [correct_begin, correct_end) should match listed
      decltype(objs) correct_listed(correct_begin, correct_end);
      EXPECT_EQ(listed, correct_listed);
      if (count < limit) {
        if (correct_end == objs.end()) {
          // if listed extends to end of range, next should be >= right_bound
          EXPECT_GE(next, right_bound);
        } else {
          // next <= *correct_end since *correct_end is the next object to list
          EXPECT_LE(next, *correct_end);
          // next > *(correct_end - 1) since we already listed it
          EXPECT_GT(next, *(correct_end - 1));
        }
      } else {
        // we listed exactly limit objects
        EXPECT_EQ(limit, listed.size());
        EXPECT_GE(next, left_bound);
        if (limit == 0) {
          if (correct_end != objs.end()) {
            // next <= *correct_end since *correct_end is the next object to list
            EXPECT_LE(next, *correct_end);
          }
        } else {
          // next > *(correct_end - 1) since we already listed it
          EXPECT_GT(next, *(correct_end - 1));
        }
      }
    }
    // teardown
    for (auto &&hoid : objs) { get_object(hoid).remove(*sharded_seastore); }
  }
};
/// Return true iff container `t` holds an element comparing equal to `v`.
template <typename T, typename V>
auto contains(const T &t, const V &v) {
  for (const auto &elem : t) {
    if (elem == v) {
      return true;
    }
  }
  return false;
}
// Create a second collection, verify both appear in list_collections(),
// then remove it and verify only the fixture's collection remains.
TEST_P(seastore_test_t, collection_create_list_remove)
{
  run_async([this] {
    coll_t test_coll{spg_t{pg_t{1, 0}}};
    {
      sharded_seastore->create_new_collection(test_coll).get0();
      {
        CTransaction t;
        t.create_collection(test_coll, 4);
        do_transaction(std::move(t));
      }
      // list_collections() returns (coll, core) pairs; keep just the colls.
      auto colls_cores = seastore->list_collections().get0();
      std::vector<coll_t> colls;
      colls.resize(colls_cores.size());
      std::transform(
        colls_cores.begin(), colls_cores.end(), colls.begin(),
        [](auto p) { return p.first; });
      EXPECT_EQ(colls.size(), 2);
      EXPECT_TRUE(contains(colls, coll_name));
      EXPECT_TRUE(contains(colls,  test_coll));
    }
    {
      {
        CTransaction t;
        t.remove_collection(test_coll);
        do_transaction(std::move(t));
      }
      auto colls_cores = seastore->list_collections().get0();
      std::vector<coll_t> colls;
      colls.resize(colls_cores.size());
      std::transform(
        colls_cores.begin(), colls_cores.end(), colls.begin(),
        [](auto p) { return p.first; });
      EXPECT_EQ(colls.size(), 1);
      EXPECT_TRUE(contains(colls, coll_name));
    }
  });
}
// Round-trip two key/value pairs through the store's metadata API.
TEST_P(seastore_test_t, meta) {
  run_async([this] {
    set_meta("key1", "value1");
    set_meta("key2", "value2");
    {
      const auto [ret, value] = get_meta("key1");
      EXPECT_EQ(ret, 0);
      EXPECT_EQ(value, "value1");
    }
    {
      const auto [ret, value] = get_meta("key2");
      EXPECT_EQ(ret, 0);
      EXPECT_EQ(value, "value2");
    }
  });
}
// Smoke test: touch an object, stat it, see it in listing, remove it,
// and confirm the listing is empty again.
TEST_P(seastore_test_t, touch_stat_list_remove)
{
  run_async([this] {
    auto &test_obj = get_object(make_oid(0));
    test_obj.touch(*sharded_seastore);
    test_obj.check_size(*sharded_seastore);
    validate_objects();
    remove_object(test_obj);
    validate_objects();
  });
}
// Shorthand for the fixture's symbolic list bound, and an "unlimited"
// page-size sentinel for the list test tables below.
using bound_t = seastore_test_t::bound_t;
constexpr unsigned MAX_LIMIT = std::numeric_limits<unsigned>::max();
// {limit, left bound, right bound} cases exercising listing within (and
// just past) the temp-object range.
static const seastore_test_t::list_test_cases_t temp_list_cases{
  // list all temp, maybe overlap to normal on right
  {MAX_LIMIT, bound_t::get_min()     , bound_t::get_max()     },
  {        5, bound_t::get_min()     , bound_t::get_temp_end()},
  {        6, bound_t::get_min()     , bound_t::get_temp_end()},
  {        6, bound_t::get_min()     , bound_t::get_max()     },
  // list temp starting at min up to but not past boundary
  {        3, bound_t::get_min()     , bound_t::get_temp(3)   },
  {        3, bound_t::get_min()     , bound_t::get_temp(4)   },
  {        3, bound_t::get_min()     , bound_t::get_temp(2)   },
  // list temp starting > min up to or past boundary
  {        3, bound_t::get_temp(2)   , bound_t::get_temp_end()},
  {        3, bound_t::get_temp(2)   , bound_t::get_max()     },
  {        3, bound_t::get_temp(3)   , bound_t::get_max()     },
  {        3, bound_t::get_temp(1)   , bound_t::get_max()     },
  // 0 limit
  {        0, bound_t::get_min()     , bound_t::get_max()     },
  {        0, bound_t::get_temp(1)   , bound_t::get_max()     },
  {        0, bound_t::get_temp_end(), bound_t::get_max()     },
};
// Temp-range listing with only temp objects present.
TEST_P(seastore_test_t, list_objects_temp_only)
{
  run_async([this] { test_list(5, 0, temp_list_cases); });
}
// Temp-range listing with normal objects present past the boundary.
TEST_P(seastore_test_t, list_objects_temp_overlap)
{
  run_async([this] { test_list(5, 5, temp_list_cases); });
}
// {limit, left bound, right bound} cases exercising listing within (and
// just before) the normal-object range.
static const seastore_test_t::list_test_cases_t normal_list_cases{
  // list all normal, maybe overlap to temp on left
  {MAX_LIMIT, bound_t::get_min()         , bound_t::get_max()     },
  {        5, bound_t::get_normal_begin(), bound_t::get_max()     },
  {        6, bound_t::get_normal_begin(), bound_t::get_max()     },
  {        6, bound_t::get_temp(4)       , bound_t::get_max()     },
  // list normal starting <= normal_begin < end
  {        3, bound_t::get_normal_begin(), bound_t::get_normal(3)},
  {        3, bound_t::get_normal_begin(), bound_t::get_normal(4)},
  {        3, bound_t::get_normal_begin(), bound_t::get_normal(2)},
  {        3, bound_t::get_temp(5)       , bound_t::get_normal(2)},
  {        3, bound_t::get_temp(4)       , bound_t::get_normal(2)},
  // list normal starting > min up to end
  // NOTE(review): the next two cases are identical -- apparently a
  // copy-paste from temp_list_cases, where the first of the pair used
  // get_temp_end(); redundant but harmless.
  {        3, bound_t::get_normal(2)     , bound_t::get_max()     },
  {        3, bound_t::get_normal(2)     , bound_t::get_max()     },
  {        3, bound_t::get_normal(3)     , bound_t::get_max()     },
  {        3, bound_t::get_normal(1)     , bound_t::get_max()     },
  // 0 limit
  {        0, bound_t::get_min()         , bound_t::get_max()     },
  {        0, bound_t::get_normal(1)     , bound_t::get_max()     },
  {        0, bound_t::get_normal_begin(), bound_t::get_max()     },
};
// Normal-range listing with only temp objects created first.
TEST_P(seastore_test_t, list_objects_normal_only)
{
  run_async([this] { test_list(5, 0, normal_list_cases); });
}
// Normal-range listing with temp objects present before the boundary.
TEST_P(seastore_test_t, list_objects_normal_overlap)
{
  run_async([this] { test_list(5, 5, normal_list_cases); });
}
// Build a bufferlist holding `len` bytes (contents unspecified; tests
// only care about the length).
bufferlist make_bufferlist(size_t len) {
  bufferlist bl;
  bl.append(bufferptr(len));
  return bl;
}
// Set a single omap key and verify it reads back via omap_get_values().
TEST_P(seastore_test_t, omap_test_simple)
{
  run_async([this] {
    auto &test_obj = get_object(make_oid(0));
    test_obj.touch(*sharded_seastore);
    test_obj.set_omap(
      *sharded_seastore,
      "asdf",
      make_bufferlist(128));
    test_obj.check_omap_key(
      *sharded_seastore,
      "asdf");
  });
}
// Exercise object xattrs, with special attention to OI_ATTR/SS_ATTR:
// onode_layout_t::MAX_OI_LENGTH / MAX_SS_LENGTH bound an inline storage
// path for those two attrs, so values on both sides of the bound (and
// transitions across it) are covered, along with rmattr/rmattrs.
TEST_P(seastore_test_t, attr)
{
  run_async([this] {
    auto& test_obj = get_object(make_oid(0));
    test_obj.touch(*sharded_seastore);
    {
      // Small OI/SS plus a regular attr: set, read back, then rmattrs().
      std::string oi("asdfasdfasdf");
      bufferlist bl;
      encode(oi, bl);
      test_obj.set_attr(*sharded_seastore, OI_ATTR, bl);
      std::string ss("fdsfdsfs");
      bl.clear();
      encode(ss, bl);
      test_obj.set_attr(*sharded_seastore, SS_ATTR, bl);
      std::string test_val("ssssssssssss");
      bl.clear();
      encode(test_val, bl);
      test_obj.set_attr(*sharded_seastore, "test_key", bl);
      auto attrs = test_obj.get_attrs(*sharded_seastore);
      std::string oi2;
      bufferlist bl2 = attrs[OI_ATTR];
      decode(oi2, bl2);
      bl2.clear();
      bl2 = attrs[SS_ATTR];
      std::string ss2;
      decode(ss2, bl2);
      std::string test_val2;
      bl2.clear();
      bl2 = attrs["test_key"];
      decode(test_val2, bl2);
      EXPECT_EQ(ss, ss2);
      EXPECT_EQ(oi, oi2);
      EXPECT_EQ(test_val, test_val2);
      bl2.clear();
      bl2 = test_obj.get_attr(*sharded_seastore, "test_key");
      test_val2.clear();
      decode(test_val2, bl2);
      EXPECT_EQ(test_val, test_val2);
      //test rm_attrs
      test_obj.rm_attrs(*sharded_seastore);
      attrs = test_obj.get_attrs(*sharded_seastore);
      EXPECT_EQ(attrs.find(OI_ATTR), attrs.end());
      EXPECT_EQ(attrs.find(SS_ATTR), attrs.end());
      EXPECT_EQ(attrs.find("test_key"), attrs.end());
      std::cout << "test_key passed" << std::endl;
      //create OI_ATTR with len > onode_layout_t::MAX_OI_LENGTH, rm OI_ATTR
      //create SS_ATTR with len > onode_layout_t::MAX_SS_LENGTH, rm SS_ATTR
      // Note: `= {'a'}` initializes only element 0; the rest are
      // zero-filled -- only the length matters for these cases.
      char oi_array[onode_layout_t::MAX_OI_LENGTH + 1] = {'a'};
      std::string oi_str(&oi_array[0], sizeof(oi_array));
      bl.clear();
      encode(oi_str, bl);
      test_obj.set_attr(*sharded_seastore, OI_ATTR, bl);
      char ss_array[onode_layout_t::MAX_SS_LENGTH + 1] = {'b'};
      std::string ss_str(&ss_array[0], sizeof(ss_array));
      bl.clear();
      encode(ss_str, bl);
      test_obj.set_attr(*sharded_seastore, SS_ATTR, bl);
      attrs = test_obj.get_attrs(*sharded_seastore);
      bl2.clear();
      bl2 = attrs[OI_ATTR];
      std::string oi_str2;
      decode(oi_str2, bl2);
      EXPECT_EQ(oi_str, oi_str2);
      bl2.clear();
      bl2 = attrs[SS_ATTR];
      std::string ss_str2;
      decode(ss_str2, bl2);
      EXPECT_EQ(ss_str, ss_str2);
      bl2.clear();
      ss_str2.clear();
      bl2 = test_obj.get_attr(*sharded_seastore, SS_ATTR);
      decode(ss_str2, bl2);
      EXPECT_EQ(ss_str, ss_str2);
      bl2.clear();
      oi_str2.clear();
      bl2 = test_obj.get_attr(*sharded_seastore, OI_ATTR);
      decode(oi_str2, bl2);
      EXPECT_EQ(oi_str, oi_str2);
      test_obj.rm_attr(*sharded_seastore, OI_ATTR);
      test_obj.rm_attr(*sharded_seastore, SS_ATTR);
      attrs = test_obj.get_attrs(*sharded_seastore);
      EXPECT_EQ(attrs.find(OI_ATTR), attrs.end());
      EXPECT_EQ(attrs.find(SS_ATTR), attrs.end());
    }
    {
      //create OI_ATTR with len <= onode_layout_t::MAX_OI_LENGTH, rm OI_ATTR
      //create SS_ATTR with len <= onode_layout_t::MAX_SS_LENGTH, rm SS_ATTR
      std::string oi("asdfasdfasdf");
      bufferlist bl;
      encode(oi, bl);
      test_obj.set_attr(*sharded_seastore, OI_ATTR, bl);
      std::string ss("f");
      bl.clear();
      encode(ss, bl);
      test_obj.set_attr(*sharded_seastore, SS_ATTR, bl);
      std::string test_val("ssssssssssss");
      bl.clear();
      encode(test_val, bl);
      test_obj.set_attr(*sharded_seastore, "test_key", bl);
      auto attrs = test_obj.get_attrs(*sharded_seastore);
      std::string oi2;
      bufferlist bl2 = attrs[OI_ATTR];
      decode(oi2, bl2);
      bl2.clear();
      bl2 = attrs[SS_ATTR];
      std::string ss2;
      decode(ss2, bl2);
      std::string test_val2;
      bl2.clear();
      bl2 = attrs["test_key"];
      decode(test_val2, bl2);
      EXPECT_EQ(ss, ss2);
      EXPECT_EQ(oi, oi2);
      EXPECT_EQ(test_val, test_val2);
      test_obj.rm_attr(*sharded_seastore, OI_ATTR);
      test_obj.rm_attr(*sharded_seastore, SS_ATTR);
      test_obj.rm_attr(*sharded_seastore, "test_key");
      attrs = test_obj.get_attrs(*sharded_seastore);
      EXPECT_EQ(attrs.find(OI_ATTR), attrs.end());
      EXPECT_EQ(attrs.find(SS_ATTR), attrs.end());
      EXPECT_EQ(attrs.find("test_key"), attrs.end());
    }
    {
      // create OI_ATTR with len > onode_layout_t::MAX_OI_LENGTH, then
      // overwrite it with another OI_ATTR len of which < onode_layout_t::MAX_OI_LENGTH
      // create SS_ATTR with len > onode_layout_t::MAX_SS_LENGTH, then
      // overwrite it with another SS_ATTR len of which < onode_layout_t::MAX_SS_LENGTH
      char oi_array[onode_layout_t::MAX_OI_LENGTH + 1] = {'a'};
      std::string oi(&oi_array[0], sizeof(oi_array));
      bufferlist bl;
      encode(oi, bl);
      test_obj.set_attr(*sharded_seastore, OI_ATTR, bl);
      oi = "asdfasdfasdf";
      bl.clear();
      encode(oi, bl);
      test_obj.set_attr(*sharded_seastore, OI_ATTR, bl);
      char ss_array[onode_layout_t::MAX_SS_LENGTH + 1] = {'b'};
      std::string ss(&ss_array[0], sizeof(ss_array));
      bl.clear();
      encode(ss, bl);
      test_obj.set_attr(*sharded_seastore, SS_ATTR, bl);
      ss = "f";
      bl.clear();
      encode(ss, bl);
      test_obj.set_attr(*sharded_seastore, SS_ATTR, bl);
      auto attrs = test_obj.get_attrs(*sharded_seastore);
      std::string oi2, ss2;
      bufferlist bl2 = attrs[OI_ATTR];
      decode(oi2, bl2);
      bl2.clear();
      bl2 = attrs[SS_ATTR];
      decode(ss2, bl2);
      EXPECT_EQ(oi, oi2);
      EXPECT_EQ(ss, ss2);
    }
  });
}
// Populate 20 omap entries and verify paged iteration sees exactly the
// keys that were written.
TEST_P(seastore_test_t, omap_test_iterator)
{
  run_async([this] {
    auto &test_obj = get_object(make_oid(0));
    test_obj.touch(*sharded_seastore);
    for (unsigned idx = 0; idx < 20; ++idx) {
      const auto key = "key" + std::to_string(idx);
      test_obj.set_omap(
        *sharded_seastore,
        key,
        make_bufferlist(128));
    }
    test_obj.check_omap(*sharded_seastore);
  });
}
// Give an object both a large omap (1024 keys) and object data (16 x 4K
// writes), then remove it -- exercising teardown of both structures.
TEST_P(seastore_test_t, object_data_omap_remove)
{
  run_async([this] {
    auto make_key = [](unsigned i) {
      std::stringstream ss;
      ss << "key" << i;
      return ss.str();
    };
    auto &test_obj = get_object(make_oid(0));
    test_obj.touch(*sharded_seastore);
    for (unsigned i = 0; i < 1024; ++i) {
      test_obj.set_omap(
        *sharded_seastore,
        make_key(i),
        make_bufferlist(128));
    }
    test_obj.check_omap(*sharded_seastore);
    // Add object data so removal has to clean up extents too.
    for (uint64_t i = 0; i < 16; i++) {
      test_obj.write(
        *sharded_seastore,
        4096 * i,
        4096,
        'a');
    }
    test_obj.remove(*sharded_seastore);
  });
}
// Smoke test: one write, one read-back, and a size check.
TEST_P(seastore_test_t, simple_extent_test)
{
  run_async([this] {
    constexpr uint64_t OFFSET = 1024;
    constexpr uint64_t LENGTH = 1024;
    auto &obj = get_object(make_oid(0));
    obj.write(*sharded_seastore, OFFSET, LENGTH, 'a');
    obj.read(*sharded_seastore, OFFSET, LENGTH);
    obj.check_size(*sharded_seastore);
  });
}
// fiemap over a truncated-but-never-written object reports no mapped
// extents.
TEST_P(seastore_test_t, fiemap_empty)
{
  run_async([this] {
    auto &obj = get_object(make_oid(0));
    obj.touch(*sharded_seastore);
    obj.truncate(*sharded_seastore, 100000);
    auto mapped = obj.fiemap(*sharded_seastore, 0, 100000);
    EXPECT_TRUE(mapped.empty());
    obj.remove(*sharded_seastore);
  });
}
// Write MAX_EXTENTS tiny extents spaced SKIP_STEP apart (leaving holes
// between them) and verify that fiemap reports exactly the mapped
// ranges for a full-range query, an interior query, and a query that
// partially overlaps two extents.
TEST_P(seastore_test_t, fiemap_holes)
{
  run_async([this] {
    const uint64_t MAX_EXTENTS = 100;
    // large enough to ensure that seastore will allocate each write separately
    const uint64_t SKIP_STEP = 16 << 10;
    auto &test_obj = get_object(make_oid(0));
    bufferlist bl;
    bl.append("foo");
    test_obj.touch(*sharded_seastore);
    for (uint64_t i = 0; i < MAX_EXTENTS; i++) {
      test_obj.write(*sharded_seastore, SKIP_STEP * i, bl);
    }
    { // fiemap test from 0 to SKIP_STEP * (MAX_EXTENTS - 1) + 3
      auto m = test_obj.fiemap(
	*sharded_seastore, 0, SKIP_STEP * (MAX_EXTENTS - 1) + 3);
      ASSERT_EQ(m.size(), MAX_EXTENTS);
      for (uint64_t i = 0; i < MAX_EXTENTS; i++) {
	ASSERT_TRUE(m.count(SKIP_STEP * i));
	// reported lengths may be rounded up past the 3 written bytes
	ASSERT_GE(m[SKIP_STEP * i], bl.length());
      }
    }
    { // fiemap test from SKIP_STEP to SKIP_STEP * (MAX_EXTENTS - 2) + 3
      // interior query: first and last extents fall outside the range
      auto m = test_obj.fiemap(
	*sharded_seastore, SKIP_STEP, SKIP_STEP * (MAX_EXTENTS - 3) + 3);
      ASSERT_EQ(m.size(), MAX_EXTENTS - 2);
      for (uint64_t i = 1; i < MAX_EXTENTS - 1; i++) {
	ASSERT_TRUE(m.count(SKIP_STEP * i));
	ASSERT_GE(m[SKIP_STEP * i], bl.length());
      }
    }
    { // fiemap test SKIP_STEP + 1 to 2 * SKIP_STEP + 1 (partial overlap)
      // range clips the tail of extent 1 and the head of extent 2
      auto m = test_obj.fiemap(
	*sharded_seastore, SKIP_STEP + 1, SKIP_STEP + 1);
      ASSERT_EQ(m.size(), 2);
      ASSERT_EQ(m.begin()->first, SKIP_STEP + 1);
      ASSERT_GE(m.begin()->second, bl.length());
      ASSERT_LE(m.rbegin()->first, (2 * SKIP_STEP) + 1);
      ASSERT_EQ(m.rbegin()->first + m.rbegin()->second, 2 * SKIP_STEP + 2);
    }
    test_obj.remove(*sharded_seastore);
  });
}
// Write many small, widely spaced extents, then read back exactly the
// mapped ranges via readv() and verify each piece matches the source
// data.
TEST_P(seastore_test_t, sparse_read)
{
  run_async([this] {
    const uint64_t MAX_EXTENTS = 100;
    const uint64_t SKIP_STEP = 16 << 10;
    auto &test_obj = get_object(make_oid(0));
    bufferlist wbl;
    wbl.append("foo");
    test_obj.touch(*sharded_seastore);
    for (uint64_t i = 0; i < MAX_EXTENTS; i++) {
      test_obj.write(*sharded_seastore, SKIP_STEP * i, wbl);
    }
    // Derive the set of mapped ranges from fiemap.
    interval_set<uint64_t> m;
    m = interval_set<uint64_t>(
      test_obj.fiemap(*sharded_seastore, 0, SKIP_STEP * (MAX_EXTENTS - 1) + 3));
    ASSERT_TRUE(!m.empty());
    uint64_t off = 0;
    auto rbl = test_obj.readv(*sharded_seastore, m);
    // readv concatenates the requested ranges; walk them in order.
    // Each mapped range may be longer than the 3 written bytes, so only
    // the leading wbl.length() bytes are compared.
    for (auto &&miter : m) {
      bufferlist subl;
      subl.substr_of(rbl, off, std::min(miter.second, uint64_t(wbl.length())));
      ASSERT_TRUE(subl.contents_equal(wbl));
      off += miter.second;
    }
    test_obj.remove(*sharded_seastore);
  });
}
// Exercise zero() across block-boundary permutations (interior of a
// block, partial/aligned on either side) and verify size and contents
// survive via read/check_size before and after zeroing.
TEST_P(seastore_test_t, zero)
{
  run_async([this] {
    auto test_zero = [this](
      // [(off, len, repeat)]
      std::vector<std::tuple<uint64_t, uint64_t, uint64_t>> writes,
      uint64_t zero_off, uint64_t zero_len) {
      // Test zero within a block
      auto &test_obj = get_object(make_oid(0));
      uint64_t size = 0;
      for (auto &[off, len, repeat]: writes) {
	for (decltype(repeat) i = 0; i < repeat; ++i) {
	  // NOTE(review): loop index `i` is unused -- every iteration
	  // writes the same range at off + len * repeat.  `off + (len * i)`
	  // (consecutive blocks) looks like the intent; confirm before
	  // changing, since the `size` computation below matches the
	  // current single-offset behavior.
	  test_obj.write(*sharded_seastore, off + (len * repeat), len, 'a');
	}
	size = off + (len * (repeat + 1));
      }
      test_obj.read(
	*sharded_seastore,
	0,
	size);
      test_obj.check_size(*sharded_seastore);
      test_obj.zero(*sharded_seastore, zero_off, zero_len);
      test_obj.read(
	*sharded_seastore,
	0,
	size);
      test_obj.check_size(*sharded_seastore);
      remove_object(test_obj);
    };
    const uint64_t BS = 4<<10;
    // Test zero within a block
    test_zero(
      {{1<<10, 1<<10, 1}},
      1124, 200);
    // Multiple writes, partial on left, partial on right.
    test_zero(
      {{BS, BS, 10}},
      BS + 128,
      BS * 4);
    // Single large write, block boundary on right, partial on left.
    test_zero(
      {{BS, BS * 10, 1}},
      BS + 128,
      (BS * 4) - 128);
    // Multiple writes, block boundary on left, partial on right.
    test_zero(
      {{BS, BS, 10}},
      BS,
      (BS * 4) + 128);
  });
}
// Run every seastore_test_t case against both backends: the segmented
// (journaled SegmentManager) layout and the circular-bounded
// (RandomBlockManager) layout.
INSTANTIATE_TEST_SUITE_P(
  seastore_test,
  seastore_test_t,
  ::testing::Values (
    "segmented",
    "circularbounded"
  )
);
| 31,870 | 26.10119 | 85 | cc |
null | ceph-main/src/test/crimson/seastore/test_seastore_cache.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/crimson/gtest_seastar.h"
#include "crimson/common/log.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
#include "test/crimson/seastore/test_block.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
namespace {
  // File-local convenience accessor for the test subsystem logger.
  [[maybe_unused]] seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_test);
  }
}
/**
 * Drives Cache directly against an ephemeral SegmentManager: records
 * are serialized via Cache::prepare_record() and written by hand at
 * `current`, standing in for a real journal.
 */
struct cache_test_t : public seastar_test_suite_t {
  segment_manager::EphemeralSegmentManagerRef segment_manager;
  ExtentPlacementManagerRef epm;
  CacheRef cache;
  // next write position (fake journal head)
  paddr_t current;
  journal_seq_t seq = JOURNAL_SEQ_MIN;
  cache_test_t() = default;
  // Serialize t's record, write it at `current` (rolling to the next
  // segment if it would not fit), then complete the commit against the
  // cache.  Resolves to the record's final on-disk address.
  seastar::future<paddr_t> submit_transaction(
    TransactionRef t) {
    auto record = cache->prepare_record(*t, JOURNAL_SEQ_NULL, JOURNAL_SEQ_NULL);
    bufferlist bl;
    for (auto &&block : record.extents) {
      bl.append(block.bl);
    }
    ceph_assert((segment_off_t)bl.length() <
		segment_manager->get_segment_size());
    // roll over to the next segment if the record would not fit
    if (current.as_seg_paddr().get_segment_off() + (segment_off_t)bl.length() >
	segment_manager->get_segment_size())
      current = paddr_t::make_seg_paddr(
	segment_id_t(
	  current.as_seg_paddr().get_segment_id().device_id(),
	  current.as_seg_paddr().get_segment_id().device_segment_id() + 1),
	0);
    auto prev = current;
    current.as_seg_paddr().set_segment_off(
      current.as_seg_paddr().get_segment_off()
      + bl.length());
    return segment_manager->segment_write(
      prev,
      std::move(bl),
      true
    ).safe_then(
      [this, prev, t=std::move(t)]() mutable {
	cache->complete_commit(*t, prev, seq /* TODO */);
	return prev;
      },
      crimson::ct_error::all_same_way([](auto e) {
	ASSERT_FALSE("failed to submit");
      })
    );
  }
  auto get_transaction() {
    return cache->create_transaction(
      Transaction::src_t::MUTATE, "test_cache", false);
  }
  // Interruptible wrapper around Cache::get_extent.
  template <typename T, typename... Args>
  auto get_extent(Transaction &t, Args&&... args) {
    return with_trans_intr(
      t,
      [this](auto &&... args) {
	return cache->get_extent<T>(args...);
      },
      std::forward<Args>(args)...);
  }
  // mkfs the ephemeral device, wire up epm + cache, and commit the
  // initial (root) transaction.
  seastar::future<> set_up_fut() final {
    segment_manager = segment_manager::create_test_ephemeral();
    return segment_manager->init(
    ).safe_then([this] {
      return segment_manager->mkfs(
        segment_manager::get_ephemeral_device_config(0, 1, 0));
    }).safe_then([this] {
      epm.reset(new ExtentPlacementManager());
      cache.reset(new Cache(*epm));
      current = paddr_t::make_seg_paddr(segment_id_t(segment_manager->get_device_id(), 0), 0);
      epm->test_init_no_background(segment_manager.get());
      return seastar::do_with(
	  get_transaction(),
	  [this](auto &ref_t) {
	    cache->init();
	    return with_trans_intr(*ref_t, [&](auto &t) {
	      return cache->mkfs(t);
	    }).safe_then([this, &ref_t] {
	      return submit_transaction(std::move(ref_t)
	      ).then([](auto p) {});
	    });
	  });
    }).handle_error(
      crimson::ct_error::all_same_way([](auto e) {
	ASSERT_FALSE("failed to submit");
      })
    );
  }
  seastar::future<> tear_down_fut() final {
    return cache->close(
    ).safe_then([this] {
      segment_manager.reset();
      epm.reset();
      cache.reset();
    }).handle_error(
      Cache::close_ertr::assert_all{}
    );
  }
};
// Allocate and commit a physical test block, then ensure a fresh
// transaction can read it back at its final (fixed-up) address with a
// matching checksum.
TEST_F(cache_test_t, test_addr_fixup)
{
  run_async([this] {
    paddr_t final_addr;
    int expected_crc = 0;
    {
      auto txn = get_transaction();
      auto blk = cache->alloc_new_extent<TestBlockPhysical>(
	*txn,
	TestBlockPhysical::SIZE,
	placement_hint_t::HOT,
	0);
      blk->set_contents('c');
      expected_crc = blk->get_crc32c();
      submit_transaction(std::move(txn)).get0();
      // the relative paddr is rewritten to an absolute one on commit
      final_addr = blk->get_paddr();
    }
    {
      auto txn = get_transaction();
      auto blk = get_extent<TestBlockPhysical>(
	*txn,
	final_addr,
	TestBlockPhysical::SIZE).unsafe_get0();
      ASSERT_EQ(blk->get_paddr(), final_addr);
      ASSERT_EQ(blk->get_crc32c(), expected_crc);
    }
  });
}
// Exercise the clean -> dirty lifecycle of a physical extent:
// visibility of uncommitted state within a transaction, isolation from
// concurrent transactions, and version/CRC tracking across
// duplicate_for_write and commit.
TEST_F(cache_test_t, test_dirty_extent)
{
  run_async([this] {
    paddr_t addr;
    int csum = 0;
    int csum2 = 0;
    {
      // write out initial test block
      auto t = get_transaction();
      auto extent = cache->alloc_new_extent<TestBlockPhysical>(
	*t,
	TestBlockPhysical::SIZE,
	placement_hint_t::HOT,
	0);
      extent->set_contents('c');
      csum = extent->get_crc32c();
      auto reladdr = extent->get_paddr();
      ASSERT_TRUE(reladdr.is_relative());
      {
	// test that read with same transaction sees new block though
	// uncommitted
	auto extent = get_extent<TestBlockPhysical>(
	  *t,
	  reladdr,
	  TestBlockPhysical::SIZE).unsafe_get0();
	ASSERT_TRUE(extent->is_clean());
	ASSERT_TRUE(extent->is_pending());
	ASSERT_TRUE(extent->get_paddr().is_relative());
	ASSERT_EQ(extent->get_version(), 0);
	ASSERT_EQ(csum, extent->get_crc32c());
      }
      submit_transaction(std::move(t)).get0();
      // commit rewrites the relative paddr to the final absolute one
      addr = extent->get_paddr();
    }
    {
      // test that consecutive reads on the same extent get the same ref
      auto t = get_transaction();
      auto extent = get_extent<TestBlockPhysical>(
	*t,
	addr,
	TestBlockPhysical::SIZE).unsafe_get0();
      auto t2 = get_transaction();
      auto extent2 = get_extent<TestBlockPhysical>(
	*t2,
	addr,
	TestBlockPhysical::SIZE).unsafe_get0();
      ASSERT_EQ(&*extent, &*extent2);
    }
    {
      // read back test block
      auto t = get_transaction();
      auto extent = get_extent<TestBlockPhysical>(
	*t,
	addr,
	TestBlockPhysical::SIZE).unsafe_get0();
      // duplicate and reset contents
      extent = cache->duplicate_for_write(*t, extent)->cast<TestBlockPhysical>();
      extent->set_contents('c');
      csum2 = extent->get_crc32c();
      ASSERT_EQ(extent->get_paddr(), addr);
      {
	// test that concurrent read with fresh transaction sees old
	// block
	auto t2 = get_transaction();
	auto extent = get_extent<TestBlockPhysical>(
	  *t2,
	  addr,
	  TestBlockPhysical::SIZE).unsafe_get0();
	ASSERT_TRUE(extent->is_clean());
	ASSERT_FALSE(extent->is_pending());
	ASSERT_EQ(addr, extent->get_paddr());
	ASSERT_EQ(extent->get_version(), 0);
	ASSERT_EQ(csum, extent->get_crc32c());
      }
      {
	// test that read with same transaction sees new block
	auto extent = get_extent<TestBlockPhysical>(
	  *t,
	  addr,
	  TestBlockPhysical::SIZE).unsafe_get0();
	ASSERT_TRUE(extent->is_dirty());
	ASSERT_TRUE(extent->is_pending());
	ASSERT_EQ(addr, extent->get_paddr());
	ASSERT_EQ(extent->get_version(), 1);
	ASSERT_EQ(csum2, extent->get_crc32c());
      }
      // submit transaction
      submit_transaction(std::move(t)).get0();
      // after commit the mutated extent stays dirty at version 1
      ASSERT_TRUE(extent->is_dirty());
      ASSERT_EQ(addr, extent->get_paddr());
      ASSERT_EQ(extent->get_version(), 1);
      ASSERT_EQ(extent->get_crc32c(), csum2);
    }
    {
      // test that fresh transaction now sees newly dirty block
      auto t = get_transaction();
      auto extent = get_extent<TestBlockPhysical>(
	*t,
	addr,
	TestBlockPhysical::SIZE).unsafe_get0();
      ASSERT_TRUE(extent->is_dirty());
      ASSERT_EQ(addr, extent->get_paddr());
      ASSERT_EQ(extent->get_version(), 1);
      ASSERT_EQ(csum2, extent->get_crc32c());
    }
  });
}
| 7,390 | 27.318008 | 94 | cc |
null | ceph-main/src/test/crimson/seastore/test_seastore_journal.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/crimson/gtest_seastar.h"
#include <random>
#include "crimson/common/log.h"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
namespace {
  // File-local convenience accessor for the test subsystem logger.
  [[maybe_unused]] seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_test);
  }
}
/**
 * Remembers a submitted record and where it landed so both the on-disk
 * extent payloads and the replayed deltas can be validated later.
 */
struct record_validator_t {
  record_t record;
  // final on-disk base address, filled in after submit
  paddr_t record_final_offset;
  template <typename... T>
  record_validator_t(T&&... record) : record(std::forward<T>(record)...) {}
  // Read each extent back from the segment manager at its
  // record-relative offset and compare lengths and CRCs with what was
  // submitted.
  void validate(SegmentManager &manager) {
    paddr_t addr = make_record_relative_paddr(0);
    for (auto &&block : record.extents) {
      auto test = manager.read(
	record_final_offset.add_relative(addr),
	block.bl.length()).unsafe_get0();
      addr = addr.add_offset(block.bl.length());
      bufferlist bl;
      bl.push_back(test);
      ASSERT_EQ(
	bl.length(),
	block.bl.length());
      ASSERT_EQ(
	bl.begin().crc32c(bl.length(), 1),
	block.bl.begin().crc32c(block.bl.length(), 1));
    }
  }
  // Returns a callable which checks replayed deltas against this
  // record's deltas in submission order (false once the last delta has
  // been consumed), or nullopt if the record carries no deltas.
  auto get_replay_handler() {
    auto checker = [this, iter=record.deltas.begin()] (
      paddr_t base,
      const delta_info_t &di) mutable {
      EXPECT_EQ(base, record_final_offset);
      ceph_assert(iter != record.deltas.end());
      EXPECT_EQ(di, *iter++);
      EXPECT_EQ(base, record_final_offset);
      return iter != record.deltas.end();
    };
    if (record.deltas.size()) {
      return std::make_optional(std::move(checker));
    } else {
      return std::optional<decltype(checker)>();
    }
  }
};
/**
 * Tests SegmentedJournal in isolation: implements just enough of
 * SegmentProvider / JournalTrimmer for the journal to allocate
 * segments, submits records, and validates both on-disk payloads and
 * replay via record_validator_t.
 */
struct journal_test_t : seastar_test_suite_t, SegmentProvider, JournalTrimmer {
  segment_manager::EphemeralSegmentManagerRef segment_manager;
  WritePipeline pipeline;
  JournalRef journal;
  // every record ever submitted, in order, for replay validation
  std::vector<record_validator_t> records;
  std::default_random_engine generator;
  extent_len_t block_size;
  SegmentManagerGroupRef sms;
  // next segment id to hand out from allocate_segment()
  segment_id_t next;
  std::map<segment_id_t, segment_seq_t> segment_seqs;
  std::map<segment_id_t, segment_type_t> segment_types;
  journal_seq_t dummy_tail;
  mutable segment_info_t tmp_info;
  journal_test_t() = default;
  /*
   * JournalTrimmer interfaces -- trimming is irrelevant for these
   * tests, so head/tails all report dummy_tail and updates are no-ops.
   */
  journal_seq_t get_journal_head() const final { return dummy_tail; }
  void set_journal_head(journal_seq_t) final {}
  journal_seq_t get_dirty_tail() const final { return dummy_tail; }
  journal_seq_t get_alloc_tail() const final { return dummy_tail; }
  void update_journal_tails(journal_seq_t, journal_seq_t) final {}
  bool try_reserve_inline_usage(std::size_t) final { return true; }
  void release_inline_usage(std::size_t) final {}
  std::size_t get_trim_size_per_cycle() const final {
    return 0;
  }
  /*
   * SegmentProvider interfaces -- segments are handed out sequentially
   * and their seq/type remembered for get_seg_info().
   */
  const segment_info_t& get_seg_info(segment_id_t id) const final {
    tmp_info = {};
    tmp_info.seq = segment_seqs.at(id);
    tmp_info.type = segment_types.at(id);
    return tmp_info;
  }
  segment_id_t allocate_segment(
    segment_seq_t seq,
    segment_type_t type,
    data_category_t,
    rewrite_gen_t
  ) final {
    auto ret = next;
    next = segment_id_t{
      segment_manager->get_device_id(),
      next.device_segment_id() + 1};
    segment_seqs[ret] = seq;
    segment_types[ret] = type;
    return ret;
  }
  void close_segment(segment_id_t) final {}
  void update_segment_avail_bytes(segment_type_t, paddr_t) final {}
  void update_modify_time(segment_id_t, sea_time_point, std::size_t) final {}
  SegmentManagerGroup* get_segment_manager_group() final { return sms.get(); }
  // mkfs the ephemeral device and open a fresh journal on it.
  seastar::future<> set_up_fut() final {
    segment_manager = segment_manager::create_test_ephemeral();
    return segment_manager->init(
    ).safe_then([this] {
      return segment_manager->mkfs(
        segment_manager::get_ephemeral_device_config(0, 1, 0));
    }).safe_then([this] {
      block_size = segment_manager->get_block_size();
      sms.reset(new SegmentManagerGroup());
      next = segment_id_t(segment_manager->get_device_id(), 0);
      journal = journal::make_segmented(*this, *this);
      journal->set_write_pipeline(&pipeline);
      sms->add_segment_manager(segment_manager.get());
      return journal->open_for_mkfs();
    }).safe_then([this](auto) {
      dummy_tail = journal_seq_t{0,
	paddr_t::make_seg_paddr(segment_id_t(segment_manager->get_device_id(), 0), 0)};
    }, crimson::ct_error::all_same_way([] {
      ASSERT_FALSE("Unable to mount");
    }));
  }
  seastar::future<> tear_down_fut() final {
    return journal->close(
    ).safe_then([this] {
      segment_manager.reset();
      sms.reset();
      journal.reset();
    }).handle_error(
      crimson::ct_error::all_same_way([](auto e) {
	ASSERT_FALSE("Unable to close");
      })
    );
  }
  // Close the current journal, construct a fresh one, replay into f,
  // then reopen for mount.
  template <typename T>
  auto replay(T &&f) {
    return journal->close(
    ).safe_then([this, f=std::move(f)]() mutable {
      journal = journal::make_segmented(*this, *this);
      journal->set_write_pipeline(&pipeline);
      return journal->replay(std::forward<T>(std::move(f)));
    }).safe_then([this] {
      return journal->open_for_mount();
    });
  }
  // Replay the journal, checking every delta of every submitted record
  // in order, then validate all extent payloads on disk.
  auto replay_and_check() {
    auto record_iter = records.begin();
    decltype(record_iter->get_replay_handler()) delta_checker = std::nullopt;
    // advance to the next record that actually has deltas to check
    auto advance = [this, &record_iter, &delta_checker] {
      ceph_assert(!delta_checker);
      while (record_iter != records.end()) {
	auto checker = record_iter->get_replay_handler();
	record_iter++;
	if (checker) {
	  delta_checker.emplace(std::move(*checker));
	  break;
	}
      }
    };
    advance();
    replay(
      [&advance,
       &delta_checker]
      (const auto &offsets,
       const auto &di,
       const journal_seq_t &,
       const journal_seq_t &,
       auto t) mutable {
	if (!delta_checker) {
	  EXPECT_FALSE("No Deltas Left");
	}
	if (!(*delta_checker)(offsets.record_block_base, di)) {
	  delta_checker = std::nullopt;
	  advance();
	}
	return Journal::replay_ertr::make_ready_future<bool>(true);
      }).unsafe_get0();
    ASSERT_EQ(record_iter, records.end());
    for (auto &i : records) {
      i.validate(*segment_manager);
    }
  }
  // Submit a record through the journal and remember where it landed.
  // Returns the record's final base address.
  template <typename... T>
  auto submit_record(T&&... _record) {
    auto record{std::forward<T>(_record)...};
    records.push_back(record);
    OrderingHandle handle = get_dummy_ordering_handle();
    auto [addr, _] = journal->submit_record(
      std::move(record),
      handle).unsafe_get0();
    records.back().record_final_offset = addr;
    return addr;
  }
  // Fresh extent of `blocks` blocks filled with one random byte.
  extent_t generate_extent(size_t blocks) {
    // std::uniform_int_distribution<char> is undefined behavior: the
    // standard only allows short/int/long/long long and their unsigned
    // counterparts as the result type (it fails to compile on MSVC).
    // Draw an int within char's range and narrow explicitly instead.
    std::uniform_int_distribution<int> distribution(
      std::numeric_limits<char>::min(),
      std::numeric_limits<char>::max()
    );
    char contents = static_cast<char>(distribution(generator));
    bufferlist bl;
    bl.append(buffer::ptr(buffer::create(blocks * block_size, contents)));
    return extent_t{
      extent_types_t::TEST_BLOCK,
      L_ADDR_NULL,
      bl};
  }
  // Fresh delta of `bytes` bytes filled with one random byte.
  delta_info_t generate_delta(size_t bytes) {
    // See generate_extent() for why the distribution is over int.
    std::uniform_int_distribution<int> distribution(
      std::numeric_limits<char>::min(),
      std::numeric_limits<char>::max()
    );
    char contents = static_cast<char>(distribution(generator));
    bufferlist bl;
    bl.append(buffer::ptr(buffer::create(bytes, contents)));
    return delta_info_t{
      extent_types_t::TEST_BLOCK,
      paddr_t{},
      L_ADDR_NULL,
      0, 0,
      block_size,
      1,
      MAX_SEG_SEQ,
      segment_type_t::NULL_SEG,
      bl
    };
  }
};
// A single record's deltas and extent payloads must survive replay.
TEST_F(journal_test_t, replay_one_journal_segment)
{
  run_async([this] {
    record_t rec{
      { generate_extent(1), generate_extent(2) },
      { generate_delta(23), generate_delta(30) }
    };
    submit_record(std::move(rec));
    replay_and_check();
  });
}
// Two back-to-back records must both be reconstructed by replay.
TEST_F(journal_test_t, replay_two_records)
{
  run_async([this] {
    record_t first{
      { generate_extent(1), generate_extent(2) },
      { generate_delta(23), generate_delta(30) }
    };
    record_t second{
      { generate_extent(4), generate_extent(1) },
      { generate_delta(23), generate_delta(400) }
    };
    submit_record(std::move(first));
    submit_record(std::move(second));
    replay_and_check();
  });
}
// Replaying, then submitting more records, then replaying again must
// still reconstruct the full history.
TEST_F(journal_test_t, replay_twice)
{
  run_async([this] {
    record_t first{
      { generate_extent(1), generate_extent(2) },
      { generate_delta(23), generate_delta(30) }
    };
    record_t second{
      { generate_extent(4), generate_extent(1) },
      { generate_delta(23), generate_delta(400) }
    };
    submit_record(std::move(first));
    submit_record(std::move(second));
    replay_and_check();
    record_t third{
      { generate_extent(2), generate_extent(5) },
      { generate_delta(230), generate_delta(40) }
    };
    submit_record(std::move(third));
    replay_and_check();
  });
}
// Keep submitting large records until the journal rolls into a new
// segment, then make sure replay still reconstructs everything across
// the segment boundary.
TEST_F(journal_test_t, roll_journal_and_replay)
{
  run_async([this] {
    paddr_t current = submit_record(
      record_t{
	{ generate_extent(1), generate_extent(2) },
	{ generate_delta(23), generate_delta(30) }
      });
    auto starting_segment = current.as_seg_paddr().get_segment_id();
    unsigned so_far = 0;
    while (current.as_seg_paddr().get_segment_id() == starting_segment) {
      current = submit_record(record_t{
	{ generate_extent(512), generate_extent(512) },
	{ generate_delta(23), generate_delta(400) }
      });
      ++so_far;
      // sanity bound: the segment should roll well within 10 records
      ASSERT_FALSE(so_far > 10);
    }
    replay_and_check();
  });
}
| 9,357 | 26.203488 | 87 | cc |
null | ceph-main/src/test/crimson/seastore/test_transaction_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <random>
#include <boost/iterator/counting_iterator.hpp>
#include "test/crimson/gtest_seastar.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
#include "crimson/os/seastore/segment_manager.h"
#include "test/crimson/seastore/test_block.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
namespace {
  // File-local convenience accessor for the test subsystem logger.
  [[maybe_unused]] seastar::logger& logger() {
    return crimson::get_logger(ceph_subsys_test);
  }
}
// Tracks the expected descriptor and reference count for one logical
// extent in the test's shadow mapping.
struct test_extent_record_t {
  test_extent_desc_t desc;
  unsigned refcount = 0;
  test_extent_record_t() = default;
  test_extent_record_t(
    const test_extent_desc_t &d,
    unsigned count) : desc(d), refcount(count) {}
  // Replace the tracked descriptor.
  void update(const test_extent_desc_t &to) {
    desc = to;
  }
  // Compare against a descriptor (refcount is not part of equality).
  bool operator==(const test_extent_desc_t &other) const {
    return desc == other;
  }
  bool operator!=(const test_extent_desc_t &other) const {
    return desc != other;
  }
};
// fmt support so test_extent_record_t can be interpolated into the
// test's log/debug output.
template<>
struct fmt::formatter<test_extent_record_t> : fmt::formatter<std::string_view> {
  template <typename FormatContext>
  auto format(const test_extent_record_t& r, FormatContext& ctx) const {
    return fmt::format_to(ctx.out(), "test_extent_record_t({}, refcount={})",
			  r.desc, r.refcount);
  }
};
/**
 * Exercises TransactionManager against an in-memory shadow model
 * (test_mappings) of the expected laddr -> extent state, parameterized
 * on the journal backend ("segmented" / "circularbounded").
 */
struct transaction_manager_test_t :
  public seastar_test_suite_t,
  TMTestState,
  ::testing::WithParamInterface<const char*> {
  std::random_device rd;
  // all randomness in the tests flows through this engine
  std::mt19937 gen;
  transaction_manager_test_t(std::size_t num_main_devices, std::size_t num_cold_devices)
    : TMTestState(num_main_devices, num_cold_devices), gen(rd()) {
  }
laddr_t get_random_laddr(size_t block_size, laddr_t limit) {
return block_size *
std::uniform_int_distribution<>(0, (limit / block_size) - 1)(gen);
}
char get_random_contents() {
return static_cast<char>(std::uniform_int_distribution<>(0, 255)(gen));
}
  // Select the journal backend from the test parameter and bring up
  // the transaction manager stack.
  seastar::future<> set_up_fut() final {
    std::string j_type = GetParam();
    if (j_type == "segmented") {
      return tm_setup(journal_type_t::SEGMENTED);
    } else if (j_type == "circularbounded") {
      return tm_setup(journal_type_t::RANDOM_BLOCK);
    } else {
      // unrecognized parameter value -- fail hard
      ceph_assert(0 == "no support");
    }
  }
  seastar::future<> tear_down_fut() final {
    return tm_teardown();
  }
  /**
   * Shadow model of the LBA mappings: laddr -> (descriptor, refcount).
   * Uncommitted changes are staged per-transaction in a delta_t and
   * only folded into this base map via consume() on commit.
   */
  struct test_extents_t : std::map<laddr_t, test_extent_record_t> {
    // laddr -> staged record; nullopt marks a deletion
    using delta_t = std::map<laddr_t, std::optional<test_extent_record_t>>;
    // commit sequence that last wrote each laddr; consume() uses this
    // to avoid applying stale deltas out of order
    std::map<laddr_t, uint64_t> laddr_write_seq;
    // Read-only view of the base map with a transaction's delta applied
    // on top (delta entries win on key collision; nullopt hides).
    struct delta_overlay_t {
      const test_extents_t &extents;
      const delta_t &delta;
      delta_overlay_t(
	const test_extents_t &extents,
	const delta_t &delta)
	: extents(extents), delta(delta) {}
      // Merging iterator over base (biter) and delta (oiter).
      class iterator {
	friend class test_extents_t;
	const delta_overlay_t &parent;
	test_extents_t::const_iterator biter;
	delta_t::const_iterator oiter;
	// cached current key/value, nullopt at end
	std::optional<std::pair<laddr_t, test_extent_record_t>> cur;
	iterator(
	  const delta_overlay_t &parent,
	  test_extents_t::const_iterator biter,
	  delta_t::const_iterator oiter)
	  : parent(parent), biter(biter), oiter(oiter) {}
	laddr_t get_bkey() {
	  return biter == parent.extents.end() ? L_ADDR_MAX : biter->first;
	}
	laddr_t get_okey() {
	  return oiter == parent.delta.end() ? L_ADDR_MAX : oiter->first;
	}
	bool is_end() {
	  return oiter == parent.delta.end() && biter == parent.extents.end();
	}
	// valid when not on a deleted delta entry and not on a base
	// entry that the delta overrides
	bool is_valid() {
	  return is_end() ||
	    ((get_okey() < get_bkey()) && (oiter->second)) ||
	    (get_okey() > get_bkey());
	}
	auto get_pair() {
	  assert(is_valid());
	  assert(!is_end());
	  auto okey = get_okey();
	  auto bkey = get_bkey();
	  return (
	    bkey < okey ?
	    std::pair<laddr_t, test_extent_record_t>(*biter) :
	    std::make_pair(okey, *(oiter->second)));
	}
	// skip overridden/deleted positions and refresh cur
	void adjust() {
	  while (!is_valid()) {
	    if (get_okey() < get_bkey()) {
	      assert(!oiter->second);
	      ++oiter;
	    } else {
	      assert(get_okey() == get_bkey());
	      ++biter;
	    }
	  }
	  assert(is_valid());
	  if (!is_end()) {
	    cur = get_pair();
	  } else {
	    cur = std::nullopt;
	  }
	}
      public:
	iterator(const iterator &) = default;
	iterator(iterator &&) = default;
	iterator &operator++() {
	  assert(is_valid());
	  assert(!is_end());
	  if (get_bkey() < get_okey()) {
	    ++biter;
	  } else {
	    ++oiter;
	  }
	  adjust();
	  return *this;
	}
	bool operator==(const iterator &o) const {
	  return o.biter == biter && o.oiter == oiter;
	}
	bool operator!=(const iterator &o) const {
	  return !(*this == o);
	}
	auto operator*() {
	  assert(!is_end());
	  return *cur;
	}
	auto operator->() {
	  assert(!is_end());
	  return &*cur;
	}
      };
      iterator begin() {
	auto ret = iterator{*this, extents.begin(), delta.begin()};
	ret.adjust();
	return ret;
      }
      iterator end() {
	auto ret = iterator{*this, extents.end(), delta.end()};
	// adjust unnecessary
	return ret;
      }
      iterator lower_bound(laddr_t l) {
	auto ret = iterator{*this, extents.lower_bound(l), delta.lower_bound(l)};
	ret.adjust();
	return ret;
      }
      iterator upper_bound(laddr_t l) {
	auto ret = iterator{*this, extents.upper_bound(l), delta.upper_bound(l)};
	ret.adjust();
	return ret;
      }
      iterator find(laddr_t l) {
	auto ret = lower_bound(l);
	if (ret == end() || ret->first != l) {
	  return end();
	} else {
	  return ret;
	}
      }
    };
  private:
    // Assert that [addr, addr + len) intersects no extent visible
    // through the overlay.
    void check_available(
      laddr_t addr, extent_len_t len, const delta_t &delta
    ) const {
      delta_overlay_t overlay(*this, delta);
      for (const auto &i: overlay) {
	if (i.first < addr) {
	  EXPECT_FALSE(i.first + i.second.desc.len > addr);
	} else {
	  EXPECT_FALSE(addr + len > i.first);
	}
      }
    }
    // Assert the allocator honored the hint: between hint and the
    // allocated addr there is no free gap large enough to hold len.
    void check_hint(
      laddr_t hint,
      laddr_t addr,
      extent_len_t len,
      delta_t &delta) const {
      delta_overlay_t overlay(*this, delta);
      auto iter = overlay.lower_bound(hint);
      laddr_t last = hint;
      while (true) {
	if (iter == overlay.end() || iter->first > addr) {
	  EXPECT_EQ(addr, last);
	  break;
	}
	EXPECT_FALSE(iter->first - last > len);
	last = iter->first + iter->second.desc.len;
	++iter;
      }
    }
    // Get (creating from *desc when absent everywhere) the delta entry
    // for addr, seeding it from the base map when present there.
    std::optional<test_extent_record_t> &populate_delta(
      laddr_t addr, delta_t &delta, const test_extent_desc_t *desc) const {
      auto diter = delta.find(addr);
      if (diter != delta.end())
	return diter->second;
      auto iter = find(addr);
      if (iter == end()) {
	assert(desc);
	auto ret = delta.emplace(
	  std::make_pair(addr, test_extent_record_t{*desc, 0}));
	assert(ret.second);
	return ret.first->second;
      } else {
	auto ret = delta.emplace(*iter);
	assert(ret.second);
	return ret.first->second;
      }
    }
  public:
    delta_overlay_t get_overlay(const delta_t &delta) const {
      return delta_overlay_t{*this, delta};
    }
    // Record a freshly allocated extent (refcount 1) in the delta.
    void insert(TestBlock &extent, delta_t &delta) const {
      check_available(extent.get_laddr(), extent.get_length(), delta);
      delta[extent.get_laddr()] =
	test_extent_record_t{extent.get_desc(), 1};
    }
    // insert() plus validation that the allocation respected hint.
    void alloced(laddr_t hint, TestBlock &extent, delta_t &delta) const {
      check_hint(hint, extent.get_laddr(), extent.get_length(), delta);
      insert(extent, delta);
    }
    bool contains(laddr_t addr, const delta_t &delta) const {
      delta_overlay_t overlay(*this, delta);
      return overlay.find(addr) != overlay.end();
    }
    test_extent_record_t get(laddr_t addr, const delta_t &delta) const {
      delta_overlay_t overlay(*this, delta);
      auto iter = overlay.find(addr);
      assert(iter != overlay.end());
      return iter->second;
    }
    void update(
      laddr_t addr,
      const test_extent_desc_t &desc,
      delta_t &delta) const {
      auto &rec = populate_delta(addr, delta, &desc);
      assert(rec);
      rec->desc = desc;
    }
    // Returns the new refcount.
    int inc_ref(
      laddr_t addr,
      delta_t &delta) const {
      auto &rec = populate_delta(addr, delta, nullptr);
      assert(rec);
      return ++rec->refcount;
    }
    // Returns the new refcount; reaching 0 stages a deletion (nullopt).
    int dec_ref(
      laddr_t addr,
      delta_t &delta) const {
      auto &rec = populate_delta(addr, delta, nullptr);
      assert(rec);
      assert(rec->refcount > 0);
      rec->refcount--;
      if (rec->refcount == 0) {
	delta[addr] = std::nullopt;
	return 0;
      } else {
	return rec->refcount;
      }
    }
    // Fold a committed transaction's delta into the base map; an entry
    // is skipped when a later write_seq already updated that laddr.
    void consume(const delta_t &delta, const uint64_t write_seq = 0) {
      for (const auto &i : delta) {
	if (i.second) {
	  if (laddr_write_seq.find(i.first) == laddr_write_seq.end() ||
	      laddr_write_seq[i.first] <= write_seq) {
	    (*this)[i.first] = *i.second;
	    laddr_write_seq[i.first] = write_seq;
	  }
	} else {
	  erase(i.first);
	}
      }
    }
  } test_mappings;
  /**
   * A real transaction plus the shadow delta describing what it is
   * expected to change in test_mappings.
   */
  struct test_transaction_t {
    TransactionRef t;
    test_extents_t::delta_t mapping_delta;
  };
  test_transaction_t create_transaction() {
    return { create_mutate_transaction(), {} };
  }
  test_transaction_t create_read_test_transaction() {
    return {create_read_transaction(), {} };
  }
  test_transaction_t create_weak_test_transaction() {
    return { create_weak_transaction(), {} };
  }
  // Allocate an extent at/after hint, fill it with contents, and mirror
  // the expected mapping into the transaction's delta.
  TestBlockRef alloc_extent(
    test_transaction_t &t,
    laddr_t hint,
    extent_len_t len,
    char contents) {
    auto extent = with_trans_intr(*(t.t), [&](auto& trans) {
      return tm->alloc_extent<TestBlock>(trans, hint, len);
    }).unsafe_get0();
    extent->set_contents(contents);
    EXPECT_FALSE(test_mappings.contains(extent->get_laddr(), t.mapping_delta));
    EXPECT_EQ(len, extent->get_length());
    test_mappings.alloced(hint, *extent, t.mapping_delta);
    return extent;
  }
  // Overload using random contents.
  TestBlockRef alloc_extent(
    test_transaction_t &t,
    laddr_t hint,
    extent_len_t len) {
    return alloc_extent(
      t,
      hint,
      len,
      get_random_contents());
  }
bool check_usage() {
return epm->check_usage();
}
void replay() {
EXPECT_TRUE(check_usage());
restart();
}
void check() {
check_mappings();
check_usage();
}
void check_mappings() {
auto t = create_weak_test_transaction();
check_mappings(t);
}
TestBlockRef get_extent(
test_transaction_t &t,
laddr_t addr,
extent_len_t len) {
ceph_assert(test_mappings.contains(addr, t.mapping_delta));
ceph_assert(test_mappings.get(addr, t.mapping_delta).desc.len == len);
auto ext = with_trans_intr(*(t.t), [&](auto& trans) {
return tm->read_extent<TestBlock>(trans, addr, len);
}).unsafe_get0();
EXPECT_EQ(addr, ext->get_laddr());
return ext;
}
  // Like get_extent but tolerant of transaction conflict: returns
  // nullptr on eagain instead of propagating; any other error aborts.
  TestBlockRef try_get_extent(
    test_transaction_t &t,
    laddr_t addr) {
    ceph_assert(test_mappings.contains(addr, t.mapping_delta));
    using ertr = with_trans_ertr<TransactionManager::read_extent_iertr>;
    using ret = ertr::future<TestBlockRef>;
    auto ext = with_trans_intr(*(t.t), [&](auto& trans) {
      return tm->read_extent<TestBlock>(trans, addr);
    }).safe_then([](auto ext) -> ret {
      return ertr::make_ready_future<TestBlockRef>(ext);
    }).handle_error(
      [](const crimson::ct_error::eagain &e) {
	return seastar::make_ready_future<TestBlockRef>();
      },
      crimson::ct_error::assert_all{
	"get_extent got invalid error"
      }
    ).get0();
    if (ext) {
      EXPECT_EQ(addr, ext->get_laddr());
    }
    return ext;
  }
  // As above, additionally pinning the expected length.
  TestBlockRef try_get_extent(
    test_transaction_t &t,
    laddr_t addr,
    extent_len_t len) {
    ceph_assert(test_mappings.contains(addr, t.mapping_delta));
    ceph_assert(test_mappings.get(addr, t.mapping_delta).desc.len == len);
    using ertr = with_trans_ertr<TransactionManager::read_extent_iertr>;
    using ret = ertr::future<TestBlockRef>;
    auto ext = with_trans_intr(*(t.t), [&](auto& trans) {
      return tm->read_extent<TestBlock>(trans, addr, len);
    }).safe_then([](auto ext) -> ret {
      return ertr::make_ready_future<TestBlockRef>(ext);
    }).handle_error(
      [](const crimson::ct_error::eagain &e) {
	return seastar::make_ready_future<TestBlockRef>();
      },
      crimson::ct_error::assert_all{
	"get_extent got invalid error"
      }
    ).get0();
    if (ext) {
      EXPECT_EQ(addr, ext->get_laddr());
    }
    return ext;
  }
  // Read through an LBA pin; returns nullptr if the transaction hit
  // eagain or ended up marked conflicted.
  TestBlockRef try_read_pin(
    test_transaction_t &t,
    LBAMappingRef &&pin) {
    using ertr = with_trans_ertr<TransactionManager::base_iertr>;
    using ret = ertr::future<TestBlockRef>;
    auto addr = pin->get_key();
    auto ext = with_trans_intr(*(t.t), [&](auto& trans) {
      return tm->read_pin<TestBlock>(trans, std::move(pin));
    }).safe_then([](auto ext) -> ret {
      return ertr::make_ready_future<TestBlockRef>(ext);
    }).handle_error(
      [](const crimson::ct_error::eagain &e) {
	return seastar::make_ready_future<TestBlockRef>();
      },
      crimson::ct_error::assert_all{
	"read_pin got invalid error"
      }
    ).get0();
    if (ext) {
      EXPECT_EQ(addr, ext->get_laddr());
    }
    if (t.t->is_conflicted()) {
      return nullptr;
    }
    return ext;
  }
  // Applies random mutations to TestBlock payloads.
  test_block_mutator_t mutator;
  // Make ref mutable under t, randomly mutate its payload, and mirror
  // the new descriptor into the transaction's shadow delta.
  TestBlockRef mutate_extent(
    test_transaction_t &t,
    TestBlockRef ref) {
    ceph_assert(test_mappings.contains(ref->get_laddr(), t.mapping_delta));
    ceph_assert(
      test_mappings.get(ref->get_laddr(), t.mapping_delta).desc.len ==
      ref->get_length());
    auto ext = tm->get_mutable_extent(*t.t, ref)->cast<TestBlock>();
    EXPECT_EQ(ext->get_laddr(), ref->get_laddr());
    EXPECT_EQ(ext->get_desc(), ref->get_desc());
    mutator.mutate(*ext, gen);
    test_mappings.update(ext->get_laddr(), ext->get_desc(), t.mapping_delta);
    return ext;
  }
  // Convenience: look up by address, then mutate.
  TestBlockRef mutate_addr(
    test_transaction_t &t,
    laddr_t offset,
    size_t length) {
    auto ext = get_extent(t, offset, length);
    mutate_extent(t, ext);
    return ext;
  }
// Fetch the LBA mapping at `offset`; the mapping must already exist in
// test_mappings and the lookup must not fail (unsafe_get0 asserts).
LBAMappingRef get_pin(
  test_transaction_t &t,
  laddr_t offset) {
  ceph_assert(test_mappings.contains(offset, t.mapping_delta));
  auto mapping = with_trans_intr(*(t.t), [&](auto& tr) {
    return tm->get_pin(tr, offset);
  }).unsafe_get0();
  EXPECT_EQ(offset, mapping->get_key());
  return mapping;
}
// Attempt to fetch the LBA mapping at `offset`, tolerating transaction
// conflicts: a conflict (eagain) yields nullptr instead of failing the
// test; any other error asserts.
LBAMappingRef try_get_pin(
  test_transaction_t &t,
  laddr_t offset) {
  ceph_assert(test_mappings.contains(offset, t.mapping_delta));
  using ertr = with_trans_ertr<TransactionManager::get_pin_iertr>;
  using ret = ertr::future<LBAMappingRef>;
  auto pin = with_trans_intr(*(t.t), [&](auto& trans) {
    return tm->get_pin(trans, offset);
  }).safe_then([](auto pin) -> ret {
    return ertr::make_ready_future<LBAMappingRef>(std::move(pin));
  }).handle_error(
    // conflict surfaces as eagain -> return a null mapping
    [](const crimson::ct_error::eagain &e) {
      return seastar::make_ready_future<LBAMappingRef>();
    },
    crimson::ct_error::assert_all{
      // fixed: message previously said "get_extent" (copy-paste)
      "get_pin got invalid error"
    }
  ).get0();
  if (pin) {
    EXPECT_EQ(offset, pin->get_key());
  }
  return pin;
}
// Increment the refcount of the mapping at `offset` through the TM and
// verify it matches the refcount tracked by test_mappings.
void inc_ref(test_transaction_t &t, laddr_t offset) {
  ceph_assert(test_mappings.contains(offset, t.mapping_delta));
  ceph_assert(test_mappings.get(offset, t.mapping_delta).refcount > 0);
  auto actual = with_trans_intr(*(t.t), [&](auto& tr) {
    return tm->inc_ref(tr, offset);
  }).unsafe_get0();
  auto expected = test_mappings.inc_ref(offset, t.mapping_delta);
  EXPECT_EQ(actual, expected);
}
// Decrement the refcount of the mapping at `offset` through the TM and
// verify it matches the refcount tracked by test_mappings.
void dec_ref(test_transaction_t &t, laddr_t offset) {
  ceph_assert(test_mappings.contains(offset, t.mapping_delta));
  ceph_assert(test_mappings.get(offset, t.mapping_delta).refcount > 0);
  auto actual = with_trans_intr(*(t.t), [&](auto& tr) {
    return tm->dec_ref(tr, offset);
  }).unsafe_get0();
  auto expected = test_mappings.dec_ref(offset, t.mapping_delta);
  EXPECT_EQ(actual, expected);
  if (actual == 0) {
    logger().debug("dec_ref: {} at refcount 0", offset);
  }
}
// Verify the on-disk state against test_mappings (with this transaction's
// pending delta overlaid): every expected mapping must be readable with a
// matching descriptor, an LBA scan must visit exactly the expected keys in
// order, and the LBA tree's child trackers must be consistent.
void check_mappings(test_transaction_t &t) {
  auto overlay = test_mappings.get_overlay(t.mapping_delta);
  for (const auto &i: overlay) {
    logger().debug("check_mappings: {}->{}", i.first, i.second);
    auto ext = get_extent(t, i.first, i.second.desc.len);
    EXPECT_EQ(i.second, ext->get_desc());
  }
  // full-range scan must enumerate the overlay entries in key order
  with_trans_intr(
    *t.t,
    [this, &overlay](auto &t) {
      return lba_manager->scan_mappings(
        t,
        0,
        L_ADDR_MAX,
        [iter=overlay.begin(), &overlay](auto l, auto p, auto len) mutable {
          EXPECT_NE(iter, overlay.end());
          logger().debug(
            "check_mappings: scan {}",
            l);
          EXPECT_EQ(l, iter->first);
          ++iter;
        });
    }).unsafe_get0();
  (void)with_trans_intr(
    *t.t,
    [=, this](auto &t) {
      return lba_manager->check_child_trackers(t);
    }).unsafe_get0();
}
// Attempt to submit `t`.  Returns true on success, false if submission
// conflicted (eagain).  Background work is always run to completion
// afterwards.  On success the transaction's mapping delta is folded into
// test_mappings tagged with the commit sequence.
bool try_submit_transaction(test_transaction_t t) {
  using ertr = with_trans_ertr<TransactionManager::submit_transaction_iertr>;
  using ret = ertr::future<bool>;
  uint64_t write_seq = 0;
  bool success = submit_transaction_fut_with_seq(*t.t
  ).safe_then([&write_seq](auto seq) -> ret {
    write_seq = seq;
    return ertr::make_ready_future<bool>(true);
  }).handle_error(
    // conflict -> report failure rather than asserting
    [](const crimson::ct_error::eagain &e) {
      return seastar::make_ready_future<bool>(false);
    },
    crimson::ct_error::assert_all{
      "try_submit_transaction hit invalid error"
    }
  ).then([this](auto ret) {
    return epm->run_background_work_until_halt(
    ).then([ret] { return ret; });
  }).get0();
  if (success) {
    test_mappings.consume(t.mapping_delta, write_seq);
  }
  return success;
}
// Submit `t` and require that it commits without conflict.
void submit_transaction(test_transaction_t &&t) {
  EXPECT_TRUE(try_submit_transaction(std::move(t)));
}
// Submit `t` and require that it fails with a conflict.
void submit_transaction_expect_conflict(test_transaction_t &&t) {
  EXPECT_FALSE(try_submit_transaction(std::move(t)));
}
// Allocate `num` extents of `size` bytes each in one transaction (hinted
// at L_ADDR_MIN), retrying the whole transaction on conflict, then run
// cleaning.
//
// @param size      length of each extent
// @param num       number of extents to allocate
// @param run_clean if true run all background work to completion,
//                  otherwise only trim the journal
auto allocate_sequentially(const size_t size, const int num, bool run_clean = true) {
  return repeat_eagain([this, size, num] {
    return seastar::do_with(
      create_transaction(),
      [this, size, num](auto &t) {
        return with_trans_intr(
          *t.t,
          [&t, this, size, num](auto &) {
            return trans_intr::do_for_each(
              boost::make_counting_iterator(0),
              boost::make_counting_iterator(num),
              [&t, this, size](auto) {
                return tm->alloc_extent<TestBlock>(
                  *(t.t), L_ADDR_MIN, size
                ).si_then([&t, this, size](auto extent) {
                  extent->set_contents(get_random_contents());
                  EXPECT_FALSE(
                    test_mappings.contains(extent->get_laddr(), t.mapping_delta));
                  EXPECT_EQ(size, extent->get_length());
                  test_mappings.alloced(extent->get_laddr(), *extent, t.mapping_delta);
                  return seastar::now();
                });
              }).si_then([&t, this] {
                return tm->submit_transaction(*t.t);
              });
          }).safe_then([&t, this] {
            test_mappings.consume(t.mapping_delta);
          });
      });
  }).safe_then([this, run_clean]() {
    if (run_clean) {
      return epm->run_background_work_until_halt();
    } else {
      return epm->background_process.trimmer->trim();
    }
  }).handle_error(
    crimson::ct_error::assert_all{
      // fixed: message previously referenced SeaStore::list_collections
      // (copy-paste from SeaStore code)
      "Invalid error in allocate_sequentially"
    }
  );
}
// Write BLOCKS extents of BSIZE bytes each (one transaction per extent),
// then read them all back in parallel within a single read transaction to
// exercise concurrent extent reads.
void test_parallel_extent_read() {
  constexpr size_t TOTAL = 4<<20;
  constexpr size_t BSIZE = 4<<10;
  constexpr size_t BLOCKS = TOTAL / BSIZE;
  run_async([this] {
    for (unsigned i = 0; i < BLOCKS; ++i) {
      auto t = create_transaction();
      auto extent = alloc_extent(
        t,
        i * BSIZE,
        BSIZE);
      ASSERT_EQ(i * BSIZE, extent->get_laddr());
      submit_transaction(std::move(t));
    }
    seastar::do_with(
      create_read_test_transaction(),
      [this](auto &t) {
        return with_trans_intr(*(t.t), [this](auto &t) {
          return trans_intr::parallel_for_each(
            boost::make_counting_iterator(0lu),
            boost::make_counting_iterator(BLOCKS),
            [this, &t](auto i) {
              return tm->read_extent<TestBlock>(t, i * BSIZE, BSIZE
              ).si_then([](auto) {
                return seastar::now();
              });
            });
        });
      }).unsafe_get0();
  });
}
// Fill BLOCKS extents using WRITE_STREAMS interleaved writers (retrying
// each allocation until it commits), then run WRITE_STREAMS concurrent
// fibers that each repeatedly pick a random extent, mutate it and try to
// commit, counting successes and conflicts.  Finally replay the journal
// and verify state.
void test_random_writes_concurrent() {
  constexpr unsigned WRITE_STREAMS = 256;
  constexpr size_t TOTAL = 4<<20;
  constexpr size_t BSIZE = 4<<10;
  constexpr size_t BLOCKS = TOTAL / BSIZE;
  run_async([this] {
    std::for_each(
      boost::make_counting_iterator(0u),
      boost::make_counting_iterator(WRITE_STREAMS),
      [&](auto idx) {
        for (unsigned i = idx; i < BLOCKS; i += WRITE_STREAMS) {
          while (true) {
            auto t = create_transaction();
            auto extent = alloc_extent(
              t,
              i * BSIZE,
              BSIZE);
            ASSERT_EQ(i * BSIZE, extent->get_laddr());
            if (try_submit_transaction(std::move(t)))
              break;
          }
        }
      });
    // shared counters are safe: all fibers run on the same reactor thread
    int writes = 0;
    unsigned failures = 0;
    seastar::parallel_for_each(
      boost::make_counting_iterator(0u),
      boost::make_counting_iterator(WRITE_STREAMS),
      [&](auto) {
        return seastar::async([&] {
          while (writes < 300) {
            auto t = create_transaction();
            auto ext = try_get_extent(
              t,
              get_random_laddr(BSIZE, TOTAL),
              BSIZE);
            if (!ext){
              failures++;
              continue;
            }
            auto mut = mutate_extent(t, ext);
            auto success = try_submit_transaction(std::move(t));
            writes += success;
            failures += !success;
          }
        });
      }).get0();
    replay();
    logger().info("random_writes_concurrent: checking");
    check();
    logger().info(
      "random_writes_concurrent: {} suceeded, {} failed",
      writes,
      failures
    );
  });
}
// Exercise the cold-tier eviction state machine: drive stored-data size
// through the stop/default/fast eviction thresholds and verify at each
// stage that EPM::adjust_generation maps (category, type, hint, input
// generation) to the expected rewrite generation, and that data actually
// migrates to the cold tier once fast mode is reached.
void test_evict() {
  // only support segmented backend currently
  ASSERT_EQ(epm->get_main_backend_type(), backend_type_t::SEGMENTED);
  ASSERT_TRUE(epm->background_process.has_cold_tier());
  constexpr size_t device_size =
    segment_manager::DEFAULT_TEST_EPHEMERAL.size;
  constexpr size_t block_size =
    segment_manager::DEFAULT_TEST_EPHEMERAL.block_size;
  constexpr size_t segment_size =
    segment_manager::DEFAULT_TEST_EPHEMERAL.segment_size;
  ASSERT_GE(segment_size, block_size * 20);
  run_async([this] {
    // indicates there is no available segments to reclaim
    double stop_ratio = (double)segment_size / (double)device_size / 2;
    // 1 segment
    double default_ratio = stop_ratio * 2;
    // 1.25 segment
    double fast_ratio = stop_ratio * 2.5;
    epm->background_process
      .eviction_state
      .init(stop_ratio, default_ratio, fast_ratio);
    // these variables are described in
    // EPM::BackgroundProcess::eviction_state_t::maybe_update_eviction_mode
    size_t ratio_A_size = segment_size / 2 - block_size * 10;
    size_t ratio_B_size = segment_size / 2 + block_size * 10;
    size_t ratio_C_size = segment_size + block_size;
    size_t ratio_D_size = segment_size * 1.25 + block_size;
    // keep allocating single blocks until the main cleaner reports at
    // least `size` bytes stored
    auto run_until = [this](size_t size) -> seastar::future<> {
      return seastar::repeat([this, size] {
        size_t current_size = epm->background_process
          .main_cleaner->get_stat().data_stored;
        if (current_size >= size) {
          return seastar::futurize_invoke([] {
            return seastar::stop_iteration::yes;
          });
        } else {
          int num = (size - current_size) / block_size;
          return seastar::do_for_each(
            boost::make_counting_iterator(0),
            boost::make_counting_iterator(num),
            [this](auto) {
              // don't start background process to test the behavior
              // of generation changes during alloc new extents
              return allocate_sequentially(block_size, 1, false);
            }).then([] {
              return seastar::stop_iteration::no;
            });
        }
      });
    };
    std::vector<extent_types_t> all_extent_types{
      extent_types_t::ROOT,
      extent_types_t::LADDR_INTERNAL,
      extent_types_t::LADDR_LEAF,
      extent_types_t::OMAP_INNER,
      extent_types_t::OMAP_LEAF,
      extent_types_t::ONODE_BLOCK_STAGED,
      extent_types_t::COLL_BLOCK,
      extent_types_t::OBJECT_DATA_BLOCK,
      extent_types_t::RETIRED_PLACEHOLDER,
      extent_types_t::ALLOC_INFO,
      extent_types_t::JOURNAL_TAIL,
      extent_types_t::TEST_BLOCK,
      extent_types_t::TEST_BLOCK_PHYSICAL,
      extent_types_t::BACKREF_INTERNAL,
      extent_types_t::BACKREF_LEAF
    };
    std::vector<rewrite_gen_t> all_generations;
    for (auto i = INIT_GENERATION; i < REWRITE_GENERATIONS; i++) {
      all_generations.push_back(i);
    }
    // input target-generation -> expected generation after the adjustment
    using generation_mapping_t = std::map<rewrite_gen_t, rewrite_gen_t>;
    std::map<extent_types_t, generation_mapping_t> expected_generations;
    // this loop should be consistent with EPM::adjust_generation
    for (auto t : all_extent_types) {
      expected_generations[t] = {};
      if (!is_logical_type(t)) {
        for (auto gen : all_generations) {
          expected_generations[t][gen] = INLINE_GENERATION;
        }
      } else {
        if (get_extent_category(t) == data_category_t::METADATA) {
          expected_generations[t][INIT_GENERATION] = INLINE_GENERATION;
        } else {
          expected_generations[t][INIT_GENERATION] = OOL_GENERATION;
        }
        for (auto i = INIT_GENERATION + 1; i < REWRITE_GENERATIONS; i++) {
          expected_generations[t][i] = i;
        }
      }
    }
    // rewrite the expected mapping for logical (data) extents and refresh
    // the eviction mode to match the current stored size
    auto update_data_gen_mapping = [&](std::function<rewrite_gen_t(rewrite_gen_t)> func) {
      for (auto t : all_extent_types) {
        if (!is_logical_type(t)) {
          continue;
        }
        for (auto i = INIT_GENERATION + 1; i < REWRITE_GENERATIONS; i++) {
          expected_generations[t][i] = func(i);
        }
      }
      // since background process didn't start in allocate_sequentially
      // we update eviction mode manually.
      epm->background_process.maybe_update_eviction_mode();
    };
    // compare EPM's adjustment against the expected table for every
    // (type, generation) pair; `caller` labels failures in the log
    auto test_gen = [&](const char *caller) {
      for (auto t : all_extent_types) {
        for (auto gen : all_generations) {
          auto epm_gen = epm->adjust_generation(
            get_extent_category(t),
            t,
            placement_hint_t::HOT,
            gen);
          if (expected_generations[t][gen] != epm_gen) {
            logger().error("caller: {}, extent type: {}, input generation: {}, "
                           "expected generation : {}, adjust result from EPM: {}",
                           caller, t, gen, expected_generations[t][gen], epm_gen);
          }
          EXPECT_EQ(expected_generations[t][gen], epm_gen);
        }
      }
    };
    // verify that no data should go to the cold tier
    update_data_gen_mapping([](rewrite_gen_t gen) -> rewrite_gen_t {
      if (gen == MIN_COLD_GENERATION) {
        return MIN_COLD_GENERATION - 1;
      } else {
        return gen;
      }
    });
    test_gen("init");
    run_until(ratio_A_size).get();
    EXPECT_TRUE(epm->background_process.eviction_state.is_stop_mode());
    test_gen("exceed ratio A");
    epm->run_background_work_until_halt().get();
    run_until(ratio_B_size).get();
    EXPECT_TRUE(epm->background_process.eviction_state.is_stop_mode());
    test_gen("exceed ratio B");
    epm->run_background_work_until_halt().get();
    // verify that data may go to the cold tier
    run_until(ratio_C_size).get();
    update_data_gen_mapping([](rewrite_gen_t gen) { return gen; });
    EXPECT_TRUE(epm->background_process.eviction_state.is_default_mode());
    test_gen("exceed ratio C");
    epm->run_background_work_until_halt().get();
    // verify that data must go to the cold tier
    run_until(ratio_D_size).get();
    update_data_gen_mapping([](rewrite_gen_t gen) {
      if (gen >= MIN_REWRITE_GENERATION && gen < MIN_COLD_GENERATION) {
        return MIN_COLD_GENERATION;
      } else {
        return gen;
      }
    });
    EXPECT_TRUE(epm->background_process.eviction_state.is_fast_mode());
    test_gen("exceed ratio D");
    auto main_size = epm->background_process.main_cleaner->get_stat().data_stored;
    auto cold_size = epm->background_process.cold_cleaner->get_stat().data_stored;
    EXPECT_EQ(cold_size, 0);
    epm->run_background_work_until_halt().get();
    // fast mode must have moved data from the main tier to the cold tier
    auto new_main_size = epm->background_process.main_cleaner->get_stat().data_stored;
    auto new_cold_size = epm->background_process.cold_cleaner->get_stat().data_stored;
    EXPECT_GE(main_size, new_main_size);
    EXPECT_NE(new_cold_size, 0);
    update_data_gen_mapping([](rewrite_gen_t gen) { return gen; });
    EXPECT_TRUE(epm->background_process.eviction_state.is_default_mode());
    test_gen("finish evict");
  });
}
using remap_entry = TransactionManager::remap_entry;
// Remap `opin` down to its sub-range [new_offset, new_offset + new_len),
// releasing the original mapping and updating test_mappings accordingly.
// Returns the new mapping, or nullptr if the transaction conflicted at
// any step.
LBAMappingRef remap_pin(
  test_transaction_t &t,
  LBAMappingRef &&opin,
  extent_len_t new_offset,
  extent_len_t new_len) {
  if (t.t->is_conflicted()) {
    return nullptr;
  }
  // remember the original key before `opin` is moved into remap_pin below
  auto o_laddr = opin->get_key();
  auto pin = with_trans_intr(*(t.t), [&](auto& trans) {
    return tm->remap_pin<TestBlock>(
      trans, std::move(opin), std::array{
        remap_entry(new_offset, new_len)}
    ).si_then([](auto ret) {
      return std::move(ret[0]);
    });
  }).handle_error(crimson::ct_error::eagain::handle([] {
    // conflict -> null mapping; caller re-checks is_conflicted() below
    LBAMappingRef t = nullptr;
    return t;
  }), crimson::ct_error::pass_further_all{}).unsafe_get0();
  if (t.t->is_conflicted()) {
    return nullptr;
  }
  test_mappings.dec_ref(o_laddr, t.mapping_delta);
  EXPECT_FALSE(test_mappings.contains(o_laddr, t.mapping_delta));
  EXPECT_TRUE(pin);
  EXPECT_EQ(pin->get_length(), new_len);
  EXPECT_EQ(pin->get_key(), o_laddr + new_offset);
  // read the remapped extent back and record it as a fresh mapping
  auto extent = try_read_pin(t, pin->duplicate());
  if (extent) {
    test_mappings.alloced(pin->get_key(), *extent, t.mapping_delta);
    EXPECT_TRUE(extent->is_exist_clean());
  } else {
    ceph_assert(t.t->is_conflicted());
    return nullptr;
  }
  return pin;
}
using _overwrite_pin_iertr = TransactionManager::get_pin_iertr;
using _overwrite_pin_ret = _overwrite_pin_iertr::future<
  std::tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>>;
// Overwrite the middle of `opin` with `bl`: split the original mapping
// around [new_offset, new_offset + new_len), allocate a new extent for
// the overwritten range and fill it from `bl`.  Returns (left mapping,
// new extent, right mapping); the left/right mapping is nullptr when the
// overwrite touches the corresponding edge of the original mapping.
// Requires 0 < new_len and the overwrite to be a strict sub-range.
_overwrite_pin_ret _overwrite_pin(
  Transaction &t,
  LBAMappingRef &&opin,
  extent_len_t new_offset,
  extent_len_t new_len,
  ceph::bufferlist &bl) {
  auto o_laddr = opin->get_key();
  auto o_len = opin->get_length();
  if (new_offset != 0 && o_len != new_offset + new_len) {
    // case 1: overwrite strictly inside -> keep both edges (2 remaps)
    return tm->remap_pin<TestBlock, 2>(
      t,
      std::move(opin),
      std::array{
        remap_entry(
          0,
          new_offset),
        remap_entry(
          new_offset + new_len,
          o_len - new_offset - new_len)
      }
    ).si_then([this, new_offset, new_len, o_laddr, &t, &bl](auto ret) {
      return tm->alloc_extent<TestBlock>(t, o_laddr + new_offset, new_len
      ).si_then([this, ret = std::move(ret), new_len,
                 new_offset, o_laddr, &t, &bl](auto ext) mutable {
        ceph_assert(ret.size() == 2);
        auto iter = bl.cbegin();
        iter.copy(new_len, ext->get_bptr().c_str());
        auto r_laddr = o_laddr + new_offset + new_len;
        // old pins expired after alloc new extent, need to get it.
        return tm->get_pin(t, o_laddr
        ).si_then([this, &t, ext = std::move(ext), r_laddr](auto lpin) mutable {
          return tm->get_pin(t, r_laddr
          ).si_then([lpin = std::move(lpin), ext = std::move(ext)]
            (auto rpin) mutable {
            return _overwrite_pin_iertr::make_ready_future<
              std::tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>>(
              std::make_tuple(
                std::move(lpin), std::move(ext), std::move(rpin)));
          });
        });
      });
    });
  } else if (new_offset == 0 && o_len != new_offset + new_len) {
    // case 2: overwrite a prefix -> only the right remainder survives
    return tm->remap_pin<TestBlock, 1>(
      t,
      std::move(opin),
      std::array{
        remap_entry(
          new_offset + new_len,
          o_len - new_offset - new_len)
      }
    ).si_then([this, new_offset, new_len, o_laddr, &t, &bl](auto ret) {
      return tm->alloc_extent<TestBlock>(t, o_laddr + new_offset, new_len
      ).si_then([this, ret = std::move(ret), new_offset, new_len,
                 o_laddr, &t, &bl](auto ext) mutable {
        ceph_assert(ret.size() == 1);
        auto iter = bl.cbegin();
        iter.copy(new_len, ext->get_bptr().c_str());
        auto r_laddr = o_laddr + new_offset + new_len;
        return tm->get_pin(t, r_laddr
        ).si_then([ext = std::move(ext)](auto rpin) mutable {
          return _overwrite_pin_iertr::make_ready_future<
            std::tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>>(
            std::make_tuple(
              nullptr, std::move(ext), std::move(rpin)));
        });
      });
    });
  } else if (new_offset != 0 && o_len == new_offset + new_len) {
    // case 3: overwrite a suffix -> only the left remainder survives
    return tm->remap_pin<TestBlock, 1>(
      t,
      std::move(opin),
      std::array{
        remap_entry(
          0,
          new_offset)
      }
    ).si_then([this, new_offset, new_len, o_laddr, &t, &bl](auto ret) {
      return tm->alloc_extent<TestBlock>(t, o_laddr + new_offset, new_len
      ).si_then([this, ret = std::move(ret), new_len, o_laddr, &t, &bl]
        (auto ext) mutable {
        ceph_assert(ret.size() == 1);
        auto iter = bl.cbegin();
        iter.copy(new_len, ext->get_bptr().c_str());
        return tm->get_pin(t, o_laddr
        ).si_then([ext = std::move(ext)](auto lpin) mutable {
          return _overwrite_pin_iertr::make_ready_future<
            std::tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>>(
            std::make_tuple(
              std::move(lpin), std::move(ext), nullptr));
        });
      });
    });
  } else {
    // full overwrite is not supported by this helper
    ceph_abort("impossible");
    return _overwrite_pin_iertr::make_ready_future<
      std::tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>>(
      std::make_tuple(nullptr, nullptr, nullptr));
  }
}
using overwrite_pin_ret = std::tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>;
// Synchronous wrapper around _overwrite_pin(): performs the overwrite,
// updates test_mappings for the released original and the surviving
// left/new/right pieces, and validates keys/lengths/paddrs.  Returns
// (nullptr, nullptr, nullptr) if the transaction conflicted.
overwrite_pin_ret overwrite_pin(
  test_transaction_t &t,
  LBAMappingRef &&opin,
  extent_len_t new_offset,
  extent_len_t new_len,
  ceph::bufferlist &bl) {
  if (t.t->is_conflicted()) {
    return std::make_tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>(
      nullptr, nullptr, nullptr);
  }
  // capture original key/paddr/len before `opin` is moved away
  auto o_laddr = opin->get_key();
  auto o_paddr = opin->get_val();
  auto o_len = opin->get_length();
  auto res = with_trans_intr(*(t.t), [&](auto& trans) {
    return _overwrite_pin(
      trans, std::move(opin), new_offset, new_len, bl);
  }).handle_error(crimson::ct_error::eagain::handle([] {
    return std::make_tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>(
      nullptr, nullptr, nullptr);
  }), crimson::ct_error::pass_further_all{}).unsafe_get0();
  if (t.t->is_conflicted()) {
    return std::make_tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>(
      nullptr, nullptr, nullptr);
  }
  test_mappings.dec_ref(o_laddr, t.mapping_delta);
  EXPECT_FALSE(test_mappings.contains(o_laddr, t.mapping_delta));
  auto &[lpin, ext, rpin] = res;
  EXPECT_TRUE(ext);
  EXPECT_TRUE(lpin || rpin);
  EXPECT_TRUE(o_len > ext->get_length());
  if (lpin) {
    // left remainder keeps the original key and physical address
    EXPECT_EQ(lpin->get_key(), o_laddr);
    EXPECT_EQ(lpin->get_val(), o_paddr);
    EXPECT_EQ(lpin->get_length(), new_offset);
    auto lext = try_read_pin(t, lpin->duplicate());
    if (lext) {
      test_mappings.alloced(lpin->get_key(), *lext, t.mapping_delta);
      EXPECT_TRUE(lext->is_exist_clean());
    } else {
      ceph_assert(t.t->is_conflicted());
      return std::make_tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>(
        nullptr, nullptr, nullptr);
    }
  }
  EXPECT_EQ(ext->get_laddr(), o_laddr + new_offset);
  EXPECT_EQ(ext->get_length(), new_len);
  test_mappings.alloced(ext->get_laddr(), *ext, t.mapping_delta);
  if (rpin) {
    // right remainder sits immediately after the overwritten range
    EXPECT_EQ(rpin->get_key(), o_laddr + new_offset + new_len);
    EXPECT_EQ(rpin->get_val(), o_paddr.add_offset(new_offset)
      .add_offset(new_len));
    EXPECT_EQ(rpin->get_length(), o_len - new_offset - new_len);
    auto rext = try_read_pin(t, rpin->duplicate());
    if (rext) {
      test_mappings.alloced(rpin->get_key(), *rext, t.mapping_delta);
      EXPECT_TRUE(rext->is_exist_clean());
    } else {
      ceph_assert(t.t->is_conflicted());
      return std::make_tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>(
        nullptr, nullptr, nullptr);
    }
  }
  return std::make_tuple<LBAMappingRef, TestBlockRef, LBAMappingRef>(
    std::move(lpin), std::move(ext), std::move(rpin));
}
// Repeatedly split two extents via remap_pin (shrinking from the right on
// the left extent, from the left on the right extent), verify the
// surviving contents, mutate the results and check after replay.
void test_remap_pin() {
  run_async([this] {
    constexpr size_t l_offset = 32 << 10;
    constexpr size_t l_len = 32 << 10;
    constexpr size_t r_offset = 64 << 10;
    constexpr size_t r_len = 32 << 10;
    {
      auto t = create_transaction();
      auto lext = alloc_extent(t, l_offset, l_len);
      lext->set_contents('l', 0, 16 << 10);
      auto rext = alloc_extent(t, r_offset, r_len);
      rext->set_contents('r', 16 << 10, 16 << 10);
      submit_transaction(std::move(t));
    }
    {
      auto t = create_transaction();
      auto lpin = get_pin(t, l_offset);
      auto rpin = get_pin(t, r_offset);
      //split left
      auto pin1 = remap_pin(t, std::move(lpin), 0, 16 << 10);
      ASSERT_TRUE(pin1);
      auto pin2 = remap_pin(t, std::move(pin1), 0, 8 << 10);
      ASSERT_TRUE(pin2);
      auto pin3 = remap_pin(t, std::move(pin2), 0, 4 << 10);
      ASSERT_TRUE(pin3);
      auto lext = get_extent(t, pin3->get_key(), pin3->get_length());
      EXPECT_EQ('l', lext->get_bptr().c_str()[0]);
      auto mlext = mutate_extent(t, lext);
      ASSERT_TRUE(mlext->is_exist_mutation_pending());
      ASSERT_TRUE(mlext.get() == lext.get());
      //split right
      auto pin4 = remap_pin(t, std::move(rpin), 16 << 10, 16 << 10);
      ASSERT_TRUE(pin4);
      auto pin5 = remap_pin(t, std::move(pin4), 8 << 10, 8 << 10);
      ASSERT_TRUE(pin5);
      auto pin6 = remap_pin(t, std::move(pin5), 4 << 10, 4 << 10);
      ASSERT_TRUE(pin6);
      auto rext = get_extent(t, pin6->get_key(), pin6->get_length());
      EXPECT_EQ('r', rext->get_bptr().c_str()[0]);
      auto mrext = mutate_extent(t, rext);
      ASSERT_TRUE(mrext->is_exist_mutation_pending());
      ASSERT_TRUE(mrext.get() == rext.get());
      submit_transaction(std::move(t));
      check();
    }
    replay();
    check();
  });
}
// Exercise overwrite_pin() on middle, prefix and suffix overwrites:
// verify the surviving left/right pieces keep their marker bytes, that
// the pieces can be mutated, and that state survives replay.
void test_overwrite_pin() {
  run_async([this] {
    constexpr size_t m_offset = 8 << 10;
    constexpr size_t m_len = 56 << 10;
    constexpr size_t l_offset = 64 << 10;
    constexpr size_t l_len = 64 << 10;
    constexpr size_t r_offset = 128 << 10;
    constexpr size_t r_len = 64 << 10;
    {
      auto t = create_transaction();
      auto m_ext = alloc_extent(t, m_offset, m_len);
      // marker bytes at the start of each piece left by the overwrites
      m_ext->set_contents('a', 0 << 10, 8 << 10);
      m_ext->set_contents('b', 16 << 10, 4 << 10);
      m_ext->set_contents('c', 36 << 10, 4 << 10);
      m_ext->set_contents('d', 52 << 10, 4 << 10);
      auto l_ext = alloc_extent(t, l_offset, l_len);
      auto r_ext = alloc_extent(t, r_offset, r_len);
      submit_transaction(std::move(t));
    }
    {
      auto t = create_transaction();
      auto mpin = get_pin(t, m_offset);
      auto lpin = get_pin(t, l_offset);
      auto rpin = get_pin(t, r_offset);
      bufferlist mbl1, mbl2, mbl3;
      mbl1.append(ceph::bufferptr(ceph::buffer::create(8 << 10, 0)));
      mbl2.append(ceph::bufferptr(ceph::buffer::create(16 << 10, 0)));
      mbl3.append(ceph::bufferptr(ceph::buffer::create(12 << 10, 0)));
      // chained middle overwrites, each applied to the previous right piece
      auto [mlp1, mext1, mrp1] = overwrite_pin(
        t, std::move(mpin), 8 << 10 , 8 << 10, mbl1);
      auto [mlp2, mext2, mrp2] = overwrite_pin(
        t, std::move(mrp1), 4 << 10 , 16 << 10, mbl2);
      auto [mlpin3, me3, mrpin3] = overwrite_pin(
        t, std::move(mrp2), 4 << 10 , 12 << 10, mbl3);
      auto mlext1 = get_extent(t, mlp1->get_key(), mlp1->get_length());
      auto mlext2 = get_extent(t, mlp2->get_key(), mlp2->get_length());
      auto mlext3 = get_extent(t, mlpin3->get_key(), mlpin3->get_length());
      auto mrext3 = get_extent(t, mrpin3->get_key(), mrpin3->get_length());
      EXPECT_EQ('a', mlext1->get_bptr().c_str()[0]);
      EXPECT_EQ('b', mlext2->get_bptr().c_str()[0]);
      EXPECT_EQ('c', mlext3->get_bptr().c_str()[0]);
      EXPECT_EQ('d', mrext3->get_bptr().c_str()[0]);
      auto mutate_mlext1 = mutate_extent(t, mlext1);
      auto mutate_mlext2 = mutate_extent(t, mlext2);
      auto mutate_mlext3 = mutate_extent(t, mlext3);
      auto mutate_mrext3 = mutate_extent(t, mrext3);
      ASSERT_TRUE(mutate_mlext1->is_exist_mutation_pending());
      ASSERT_TRUE(mutate_mlext2->is_exist_mutation_pending());
      ASSERT_TRUE(mutate_mlext3->is_exist_mutation_pending());
      ASSERT_TRUE(mutate_mrext3->is_exist_mutation_pending());
      ASSERT_TRUE(mutate_mlext1.get() == mlext1.get());
      ASSERT_TRUE(mutate_mlext2.get() == mlext2.get());
      ASSERT_TRUE(mutate_mlext3.get() == mlext3.get());
      ASSERT_TRUE(mutate_mrext3.get() == mrext3.get());
      bufferlist lbl1, rbl1;
      // prefix overwrite: no left remainder expected
      lbl1.append(ceph::bufferptr(ceph::buffer::create(32 << 10, 0)));
      auto [llp1, lext1, lrp1] = overwrite_pin(
        t, std::move(lpin), 0 , 32 << 10, lbl1);
      EXPECT_FALSE(llp1);
      EXPECT_TRUE(lrp1);
      EXPECT_TRUE(lext1);
      // suffix overwrite: no right remainder expected
      rbl1.append(ceph::bufferptr(ceph::buffer::create(32 << 10, 0)));
      auto [rlp1, rext1, rrp1] = overwrite_pin(
        t, std::move(rpin), 32 << 10 , 32 << 10, rbl1);
      EXPECT_TRUE(rlp1);
      EXPECT_TRUE(rext1);
      EXPECT_FALSE(rrp1);
      submit_transaction(std::move(t));
      check();
    }
    replay();
    check();
  });
}
void test_remap_pin_concurrent() {
run_async([this] {
constexpr unsigned REMAP_NUM = 32;
constexpr size_t offset = 0;
constexpr size_t length = 256 << 10;
{
auto t = create_transaction();
auto extent = alloc_extent(t, offset, length);
ASSERT_EQ(length, extent->get_length());
submit_transaction(std::move(t));
}
int success = 0;
int early_exit = 0;
int conflicted = 0;
seastar::parallel_for_each(
boost::make_counting_iterator(0u),
boost::make_counting_iterator(REMAP_NUM),
[&](auto) {
return seastar::async([&] {
uint32_t pieces = std::uniform_int_distribution<>(6, 31)(gen);
std::set<uint32_t> split_points;
for (uint32_t i = 0; i < pieces; i++) {
auto p = std::uniform_int_distribution<>(1, 256)(gen);
split_points.insert(p - p % 4);
}
auto t = create_transaction();
auto pin0 = try_get_pin(t, offset);
if (!pin0 || pin0->get_length() != length) {
early_exit++;
return;
}
auto last_pin = pin0->duplicate();
ASSERT_TRUE(!split_points.empty());
for (auto off : split_points) {
if (off == 0 || off >= 255) {
continue;
}
auto new_off = (off << 10) - last_pin->get_key();
auto new_len = last_pin->get_length() - new_off;
//always remap right extent at new split_point
auto pin = remap_pin(t, std::move(last_pin), new_off, new_len);
if (!pin) {
conflicted++;
return;
}
last_pin = pin->duplicate();
}
auto last_ext = try_get_extent(t, last_pin->get_key());
if (last_ext) {
auto last_ext1 = mutate_extent(t, last_ext);
ASSERT_TRUE(last_ext1->is_exist_mutation_pending());
} else {
conflicted++;
return;
}
if (try_submit_transaction(std::move(t))) {
success++;
logger().info("transaction {} submit the transction",
static_cast<void*>(t.t.get()));
} else {
conflicted++;
}
});
}).handle_exception([](std::exception_ptr e) {
logger().info("{}", e);
}).get0();
logger().info("test_remap_pin_concurrent: "
"early_exit {} conflicted {} success {}",
early_exit, conflicted, success);
ASSERT_TRUE(success == 1);
ASSERT_EQ(success + conflicted + early_exit, REMAP_NUM);
replay();
check();
});
}
void test_overwrite_pin_concurrent() {
run_async([this] {
constexpr unsigned REMAP_NUM = 32;
constexpr size_t offset = 0;
constexpr size_t length = 256 << 10;
{
auto t = create_transaction();
auto extent = alloc_extent(t, offset, length);
ASSERT_EQ(length, extent->get_length());
submit_transaction(std::move(t));
}
int success = 0;
int early_exit = 0;
int conflicted = 0;
seastar::parallel_for_each(
boost::make_counting_iterator(0u),
boost::make_counting_iterator(REMAP_NUM),
[&](auto) {
return seastar::async([&] {
uint32_t pieces = std::uniform_int_distribution<>(6, 31)(gen);
if (pieces % 2 == 1) {
pieces++;
}
std::list<uint32_t> split_points;
for (uint32_t i = 0; i < pieces; i++) {
auto p = std::uniform_int_distribution<>(1, 120)(gen);
split_points.push_back(p - p % 4);
}
split_points.sort();
auto t = create_transaction();
auto pin0 = try_get_pin(t, offset);
if (!pin0 || pin0->get_length() != length) {
early_exit++;
return;
}
auto empty_transaction = true;
auto last_rpin = pin0->duplicate();
ASSERT_TRUE(!split_points.empty());
while(!split_points.empty()) {
// new overwrite area: start_off ~ end_off
auto start_off = split_points.front();
split_points.pop_front();
auto end_off = split_points.front();
split_points.pop_front();
ASSERT_TRUE(start_off <= end_off);
if (((end_off << 10) == pin0->get_key() + pin0->get_length())
|| (start_off == end_off)) {
if (split_points.empty() && empty_transaction) {
early_exit++;
return;
}
continue;
}
empty_transaction = false;
auto new_off = (start_off << 10) - last_rpin->get_key();
auto new_len = (end_off - start_off) << 10;
bufferlist bl;
bl.append(ceph::bufferptr(ceph::buffer::create(new_len, 0)));
auto [lpin, ext, rpin] = overwrite_pin(
t, last_rpin->duplicate(), new_off, new_len, bl);
if (!ext) {
conflicted++;
return;
}
// lpin is nullptr might not cause by confliction,
// it might just not exist.
if (lpin) {
auto lext = try_get_extent(t, lpin->get_key());
if (!lext) {
conflicted++;
return;
}
if (get_random_contents() % 2 == 0) {
auto lext1 = mutate_extent(t, lext);
ASSERT_TRUE(lext1->is_exist_mutation_pending());
}
}
ASSERT_TRUE(rpin);
last_rpin = rpin->duplicate();
}
auto last_rext = try_get_extent(t, last_rpin->get_key());
if (!last_rext) {
conflicted++;
return;
}
if (get_random_contents() % 2 == 0) {
auto last_rext1 = mutate_extent(t, last_rext);
ASSERT_TRUE(last_rext1->is_exist_mutation_pending());
}
if (try_submit_transaction(std::move(t))) {
success++;
logger().info("transaction {} submit the transction",
static_cast<void*>(t.t.get()));
} else {
conflicted++;
}
});
}).handle_exception([](std::exception_ptr e) {
logger().info("{}", e);
}).get0();
logger().info("test_overwrite_pin_concurrent: "
"early_exit {} conflicted {} success {}",
early_exit, conflicted, success);
ASSERT_TRUE(success == 1 || early_exit == REMAP_NUM);
ASSERT_EQ(success + conflicted + early_exit, REMAP_NUM);
replay();
check();
});
}
};
// Fixture with one device and no cold tier; ctor args appear to be
// (main devices, cold devices) — TODO confirm against
// transaction_manager_test_t's constructor.
struct tm_single_device_test_t :
  public transaction_manager_test_t {
  tm_single_device_test_t() : transaction_manager_test_t(1, 0) {}
};
// Fixture with three devices and no cold tier; ctor args appear to be
// (main devices, cold devices) — TODO confirm.
struct tm_multi_device_test_t :
  public transaction_manager_test_t {
  tm_multi_device_test_t() : transaction_manager_test_t(3, 0) {}
};
// Fixture with one main device plus two cold-tier devices (test_evict
// requires has_cold_tier()); ctor args appear to be
// (main devices, cold devices) — TODO confirm.
struct tm_multi_tier_device_test_t :
  public transaction_manager_test_t {
  tm_multi_tier_device_test_t() : transaction_manager_test_t(1, 2) {}
};
// Allocate a single 4K extent at a fixed address and verify mappings
// before and after commit.
TEST_P(tm_single_device_test_t, basic)
{
  constexpr laddr_t SIZE = 4096;
  run_async([this] {
    constexpr laddr_t ADDR = 0xFF * SIZE;
    {
      auto t = create_transaction();
      auto extent = alloc_extent(
        t,
        ADDR,
        SIZE,
        'a');
      ASSERT_EQ(ADDR, extent->get_laddr());
      check_mappings(t);
      check();
      submit_transaction(std::move(t));
      check();
    }
  });
}
// Allocate an extent, replay, mutate it in a second transaction, then
// replay again — mappings and usage must stay consistent throughout.
TEST_P(tm_single_device_test_t, mutate)
{
  constexpr laddr_t SIZE = 4096;
  run_async([this] {
    constexpr laddr_t ADDR = 0xFF * SIZE;
    {
      auto t = create_transaction();
      auto extent = alloc_extent(
        t,
        ADDR,
        SIZE,
        'a');
      ASSERT_EQ(ADDR, extent->get_laddr());
      check_mappings(t);
      check();
      submit_transaction(std::move(t));
      check();
    }
    ASSERT_TRUE(check_usage());
    replay();
    {
      auto t = create_transaction();
      auto ext = get_extent(
        t,
        ADDR,
        SIZE);
      auto mut = mutate_extent(t, ext);
      check_mappings(t);
      check();
      submit_transaction(std::move(t));
      check();
    }
    ASSERT_TRUE(check_usage());
    replay();
    check();
  });
}
// Two concurrent transactions that each allocate a new extent both touch
// the LBA root; the one submitted second must conflict.
TEST_P(tm_single_device_test_t, allocate_lba_conflict)
{
  constexpr laddr_t SIZE = 4096;
  run_async([this] {
    constexpr laddr_t ADDR = 0xFF * SIZE;
    constexpr laddr_t ADDR2 = 0xFE * SIZE;
    auto t = create_transaction();
    auto t2 = create_transaction();
    // These should conflict as they should both modify the lba root
    auto extent = alloc_extent(
      t,
      ADDR,
      SIZE,
      'a');
    ASSERT_EQ(ADDR, extent->get_laddr());
    check_mappings(t);
    check();
    auto extent2 = alloc_extent(
      t2,
      ADDR2,
      SIZE,
      'a');
    ASSERT_EQ(ADDR2, extent2->get_laddr());
    check_mappings(t2);
    extent2.reset();
    submit_transaction(std::move(t2));
    submit_transaction_expect_conflict(std::move(t));
  });
}
// Two transactions mutating the same address conflict; a later
// single-transaction mutation of that address succeeds.
TEST_P(tm_single_device_test_t, mutate_lba_conflict)
{
  constexpr laddr_t SIZE = 4096;
  run_async([this] {
    {
      // enough extents to force the LBA tree past a single node
      auto t = create_transaction();
      for (unsigned i = 0; i < 300; ++i) {
        auto extent = alloc_extent(
          t,
          laddr_t(i * SIZE),
          SIZE);
      }
      check_mappings(t);
      submit_transaction(std::move(t));
      check();
    }
    constexpr laddr_t ADDR = 150 * SIZE;
    {
      auto t = create_transaction();
      auto t2 = create_transaction();
      mutate_addr(t, ADDR, SIZE);
      mutate_addr(t2, ADDR, SIZE);
      submit_transaction(std::move(t));
      submit_transaction_expect_conflict(std::move(t2));
    }
    check();
    {
      auto t = create_transaction();
      mutate_addr(t, ADDR, SIZE);
      submit_transaction(std::move(t));
    }
    check();
  });
}
// Two transactions mutating addresses far apart (first and last of 500
// extents) touch disjoint LBA leaves and must both commit.
TEST_P(tm_single_device_test_t, concurrent_mutate_lba_no_conflict)
{
  constexpr laddr_t SIZE = 4096;
  constexpr size_t NUM = 500;
  constexpr laddr_t addr = 0;
  constexpr laddr_t addr2 = SIZE * (NUM - 1);
  run_async([this] {
    {
      auto t = create_transaction();
      for (unsigned i = 0; i < NUM; ++i) {
        auto extent = alloc_extent(
          t,
          laddr_t(i * SIZE),
          SIZE);
      }
      submit_transaction(std::move(t));
    }
    {
      auto t = create_transaction();
      auto t2 = create_transaction();
      mutate_addr(t, addr, SIZE);
      mutate_addr(t2, addr2, SIZE);
      submit_transaction(std::move(t));
      submit_transaction(std::move(t2));
    }
    check();
  });
}
// Allocate, remove and re-allocate the same address within one
// transaction; state must be consistent after commit and replay.
TEST_P(tm_single_device_test_t, create_remove_same_transaction)
{
  constexpr laddr_t SIZE = 4096;
  run_async([this] {
    constexpr laddr_t ADDR = 0xFF * SIZE;
    {
      auto t = create_transaction();
      auto extent = alloc_extent(
        t,
        ADDR,
        SIZE,
        'a');
      ASSERT_EQ(ADDR, extent->get_laddr());
      check_mappings(t);
      dec_ref(t, ADDR);
      check_mappings(t);
      extent = alloc_extent(
        t,
        ADDR,
        SIZE,
        'a');
      submit_transaction(std::move(t));
      check();
    }
    replay();
    check();
  });
}
// Grow the LBA tree with 300 allocations (forcing splits), then remove 240
// of them in one transaction (forcing merges); mappings must stay valid
// throughout.
TEST_P(tm_single_device_test_t, split_merge_read_same_transaction)
{
  constexpr laddr_t SIZE = 4096;
  run_async([this] {
    {
      auto t = create_transaction();
      for (unsigned i = 0; i < 300; ++i) {
	auto extent = alloc_extent(
	  t,
	  laddr_t(i * SIZE),
	  SIZE);
      }
      check_mappings(t);
      submit_transaction(std::move(t));
      check();
    }
    {
      // Bulk removal in a single transaction exercises merge paths.
      auto t = create_transaction();
      for (unsigned i = 0; i < 240; ++i) {
	dec_ref(
	  t,
	  laddr_t(i * SIZE));
      }
      check_mappings(t);
      submit_transaction(std::move(t));
      check();
    }
  });
}
// Reference-count lifecycle for a single mapping: alloc (ref=1), inc_ref
// (ref=2), then two dec_refs to release it — interleaved with journal
// replays to confirm refcounts are durable.
TEST_P(tm_single_device_test_t, inc_dec_ref)
{
  constexpr laddr_t SIZE = 4096;
  run_async([this] {
    constexpr laddr_t ADDR = 0xFF * SIZE;
    {
      auto t = create_transaction();
      auto extent = alloc_extent(
	t,
	ADDR,
	SIZE,
	'a');
      ASSERT_EQ(ADDR, extent->get_laddr());
      check_mappings(t);
      check();
      submit_transaction(std::move(t));
      check();
    }
    replay();
    {
      // Bump the refcount to 2.
      auto t = create_transaction();
      inc_ref(t, ADDR);
      check_mappings(t);
      check();
      submit_transaction(std::move(t));
      check();
    }
    {
      // First dec: mapping must still exist.
      auto t = create_transaction();
      dec_ref(t, ADDR);
      check_mappings(t);
      check();
      submit_transaction(std::move(t));
      check();
    }
    replay();
    {
      // Second dec: mapping is released.
      auto t = create_transaction();
      dec_ref(t, ADDR);
      check_mappings(t);
      check();
      submit_transaction(std::move(t));
      check();
    }
  });
}
// Allocate enough distinct mappings, one transaction each, to force the
// LBA tree to split beyond its root; verify all mappings afterwards.
TEST_P(tm_single_device_test_t, cause_lba_split)
{
  constexpr laddr_t SIZE = 4096;
  run_async([this] {
    constexpr unsigned NUM_EXTENTS = 200;
    for (unsigned idx = 0; idx < NUM_EXTENTS; ++idx) {
      auto txn = create_transaction();
      const laddr_t offset = idx * SIZE;
      // Fill each extent with a per-index byte pattern.
      auto extent = alloc_extent(
        txn,
        offset,
        SIZE,
        (char)(idx & 0xFF));
      ASSERT_EQ(offset, extent->get_laddr());
      submit_transaction(std::move(txn));
    }
    check();
  });
}
// Fill 4MiB with 4KiB blocks, then repeatedly mutate random blocks while
// padding each transaction with a large alloc+free, replaying and checking
// after each round.
TEST_P(tm_single_device_test_t, random_writes)
{
  constexpr size_t TOTAL = 4<<20;
  constexpr size_t BSIZE = 4<<10;
  constexpr size_t PADDING_SIZE = 256<<10;
  constexpr size_t BLOCKS = TOTAL / BSIZE;
  run_async([this] {
    for (unsigned i = 0; i < BLOCKS; ++i) {
      auto t = create_transaction();
      auto extent = alloc_extent(
	t,
	i * BSIZE,
	BSIZE);
      ASSERT_EQ(i * BSIZE, extent->get_laddr());
      submit_transaction(std::move(t));
    }
    for (unsigned i = 0; i < 4; ++i) {
      for (unsigned j = 0; j < 65; ++j) {
	auto t = create_transaction();
	for (unsigned k = 0; k < 2; ++k) {
	  auto ext = get_extent(
	    t,
	    get_random_laddr(BSIZE, TOTAL),
	    BSIZE);
	  // The returned mutable handle is intentionally unused; the
	  // mutation itself is recorded in the transaction.
	  auto mut = mutate_extent(t, ext);
	  // pad out transaction
	  auto padding = alloc_extent(
	    t,
	    TOTAL + (k * PADDING_SIZE),
	    PADDING_SIZE);
	  dec_ref(t, padding->get_laddr());
	}
	submit_transaction(std::move(t));
      }
      replay();
      logger().info("random_writes: {} checking", i);
      check();
      logger().info("random_writes: {} done replaying/checking", i);
    }
  });
}
// Drive `max` concurrent sequential-allocation streams; historically this
// pattern triggered an assert in the find-hole path, so the test passes if
// it completes cleanly.
TEST_P(tm_single_device_test_t, find_hole_assert_trigger)
{
  constexpr unsigned max = 10;
  constexpr size_t BSIZE = 4<<10;
  int num = 40;
  run([&, this] {
    return seastar::parallel_for_each(
      boost::make_counting_iterator(0u),
      boost::make_counting_iterator(max),
      [&, this](auto idx) {
        // idx only fans out parallelism; each stream does identical work.
        return allocate_sequentially(BSIZE, num);
      });
  });
}
// Remap and overwrite a pin on an extent that has not been read in (lazy),
// with journal replays between steps to confirm durability.
TEST_P(tm_single_device_test_t, remap_lazy_read)
{
  constexpr laddr_t offset = 0;
  constexpr size_t length = 256 << 10;
  run_async([this, offset] {
    {
      auto t = create_transaction();
      auto extent = alloc_extent(
	t,
	offset,
	length,
	'a');
      ASSERT_EQ(offset, extent->get_laddr());
      check_mappings(t);
      submit_transaction(std::move(t));
      check();
    }
    replay();
    {
      // Shrink the mapping to the first 128KiB via remap.
      auto t = create_transaction();
      auto pin = get_pin(t, offset);
      auto rpin = remap_pin(t, std::move(pin), 0, 128 << 10);
      check_mappings(t);
      submit_transaction(std::move(t));
      check();
    }
    replay();
    {
      // Overwrite 64KiB at offset 4KiB, splitting the pin into
      // left / new / right pieces.
      auto t = create_transaction();
      auto pin = get_pin(t, offset);
      bufferlist bl;
      bl.append(ceph::bufferptr(ceph::buffer::create(64 << 10, 0)));
      auto [lpin, ext, rpin] = overwrite_pin(
        t, std::move(pin), 4 << 10 , 64 << 10, bl);
      check_mappings(t);
      submit_transaction(std::move(t));
      check();
    }
    replay();
  });
}
// The remaining cases delegate to shared helpers on the fixture so the same
// scenarios run against every instantiated device configuration.
TEST_P(tm_single_device_test_t, random_writes_concurrent)
{
  test_random_writes_concurrent();
}
TEST_P(tm_multi_device_test_t, random_writes_concurrent)
{
  test_random_writes_concurrent();
}
// Multi-tier only: data should migrate off the cold tier.
TEST_P(tm_multi_tier_device_test_t, evict)
{
  test_evict();
}
TEST_P(tm_single_device_test_t, parallel_extent_read)
{
  test_parallel_extent_read();
}
TEST_P(tm_single_device_test_t, test_remap_pin)
{
  test_remap_pin();
}
TEST_P(tm_single_device_test_t, test_overwrite_pin)
{
  test_overwrite_pin();
}
TEST_P(tm_single_device_test_t, test_remap_pin_concurrent)
{
  test_remap_pin_concurrent();
}
TEST_P(tm_single_device_test_t, test_overwrite_pin_concurrent)
{
  test_overwrite_pin_concurrent();
}
// Single-device tests run against both journal backends; the multi-device
// and multi-tier suites are segmented-journal only.
INSTANTIATE_TEST_SUITE_P(
  transaction_manager_test,
  tm_single_device_test_t,
  ::testing::Values (
    "segmented",
    "circularbounded"
  )
);
INSTANTIATE_TEST_SUITE_P(
  transaction_manager_test,
  tm_multi_device_test_t,
  ::testing::Values (
    "segmented"
  )
);
INSTANTIATE_TEST_SUITE_P(
  transaction_manager_test,
  tm_multi_tier_device_test_t,
  ::testing::Values (
    "segmented"
  )
);
| 58,950 | 28.416667 | 92 | cc |
null | ceph-main/src/test/crimson/seastore/transaction_manager_test_state.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <random>
#include <boost/iterator/counting_iterator.hpp>
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/extent_placement_manager.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
#include "crimson/os/seastore/seastore.h"
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/collection_manager/flat_collection_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
#include "crimson/os/seastore/journal/circular_bounded_journal.h"
#include "crimson/os/seastore/random_block_manager/block_rb_manager.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
// Abstract handle over the in-memory (ephemeral) backing devices used by
// the test harness; concrete subclasses provide segmented or random-block
// media. One primary device plus zero or more secondaries.
class EphemeralDevices {
public:
  virtual seastar::future<> setup() = 0;       // create + init + mkfs all devices
  virtual void remount() = 0;                  // simulate remount across restart
  virtual std::size_t get_num_devices() const = 0;
  virtual void reset() = 0;                    // drop all device refs
  virtual std::vector<Device*> get_secondary_devices() = 0;
  virtual ~EphemeralDevices() {}
  virtual Device* get_primary_device() = 0;
  // Transfer / restore ownership of the primary device (see out-of-line
  // definitions further down in this file).
  virtual DeviceRef get_primary_device_ref() = 0;
  virtual void set_primary_device_ref(DeviceRef) = 0;
};
using EphemeralDevicesRef = std::unique_ptr<EphemeralDevices>;
// Segmented flavor: one primary segment manager plus optional secondaries,
// split between "main" and "cold" tiers by count.
class EphemeralSegmentedDevices : public EphemeralDevices {
  segment_manager::EphemeralSegmentManagerRef segment_manager;
  std::list<segment_manager::EphemeralSegmentManagerRef> secondary_segment_managers;
  std::size_t num_main_device_managers;
  std::size_t num_cold_device_managers;
public:
  EphemeralSegmentedDevices(std::size_t num_main_devices,
                            std::size_t num_cold_devices)
    : num_main_device_managers(num_main_devices),
      num_cold_device_managers(num_cold_devices)
  {
    auto num_device_managers = num_main_device_managers + num_cold_device_managers;
    assert(num_device_managers > 0);
    // The first device is the primary; the rest are secondaries.
    secondary_segment_managers.resize(num_device_managers - 1);
  }
  // Create all managers, init them, then mkfs each one with its device id
  // (primary is id 0, secondaries take 1..N in order).
  seastar::future<> setup() final {
    segment_manager = segment_manager::create_test_ephemeral();
    for (auto &sec_sm : secondary_segment_managers) {
      sec_sm = segment_manager::create_test_ephemeral();
    }
    return segment_manager->init(
    ).safe_then([this] {
      return crimson::do_for_each(
        secondary_segment_managers.begin(),
        secondary_segment_managers.end(),
        [](auto &sec_sm)
      {
        return sec_sm->init();
      });
    }).safe_then([this] {
      return segment_manager->mkfs(
        segment_manager::get_ephemeral_device_config(
          0, num_main_device_managers, num_cold_device_managers));
    }).safe_then([this] {
      // cnt numbers the secondary devices starting from 1.
      return seastar::do_with(std::size_t(0), [this](auto &cnt) {
        return crimson::do_for_each(
          secondary_segment_managers.begin(),
          secondary_segment_managers.end(),
          [this, &cnt](auto &sec_sm)
        {
          ++cnt;
          return sec_sm->mkfs(
            segment_manager::get_ephemeral_device_config(
              cnt, num_main_device_managers, num_cold_device_managers));
        });
      });
    }).handle_error(
      crimson::ct_error::assert_all{}
    );
  }
  void remount() final {
    segment_manager->remount();
    for (auto &sec_sm : secondary_segment_managers) {
      sec_sm->remount();
    }
  }
  std::size_t get_num_devices() const final {
    return secondary_segment_managers.size() + 1;
  }
  void reset() final {
    segment_manager.reset();
    for (auto &sec_sm : secondary_segment_managers) {
      sec_sm.reset();
    }
  }
  std::vector<Device*> get_secondary_devices() final {
    std::vector<Device*> sec_devices;
    for (auto &sec_sm : secondary_segment_managers) {
      sec_devices.emplace_back(sec_sm.get());
    }
    return sec_devices;
  }
  Device* get_primary_device() final {
    return segment_manager.get();
  }
  DeviceRef get_primary_device_ref() final;
  void set_primary_device_ref(DeviceRef) final;
};
// Random-block flavor. NOTE(review): setup()/remount() only handle the
// primary device; secondary devices are sized by the constructor but never
// initialized — callers currently only pass num_device_managers == 1.
class EphemeralRandomBlockDevices : public EphemeralDevices {
  random_block_device::RBMDeviceRef rb_device;
  std::list<random_block_device::RBMDeviceRef> secondary_rb_devices;
public:
  EphemeralRandomBlockDevices(std::size_t num_device_managers) {
    assert(num_device_managers > 0);
    secondary_rb_devices.resize(num_device_managers - 1);
  }
  seastar::future<> setup() final {
    rb_device = random_block_device::create_test_ephemeral();
    device_config_t config = get_rbm_ephemeral_device_config(0, 1);
    return rb_device->mkfs(config).handle_error(crimson::ct_error::assert_all{});
  }
  // Ephemeral RBM devices need no remount work.
  void remount() final {}
  std::size_t get_num_devices() const final {
    return secondary_rb_devices.size() + 1;
  }
  void reset() final {
    rb_device.reset();
    for (auto &sec_rb : secondary_rb_devices) {
      sec_rb.reset();
    }
  }
  std::vector<Device*> get_secondary_devices() final {
    std::vector<Device*> sec_devices;
    for (auto &sec_rb : secondary_rb_devices) {
      sec_devices.emplace_back(sec_rb.get());
    }
    return sec_devices;
  }
  Device* get_primary_device() final {
    return rb_device.get();
  }
  DeviceRef get_primary_device_ref() final;
  void set_primary_device_ref(DeviceRef) final;
};
// Base class driving the setup/teardown lifecycle of an ephemeral store:
// subclasses plug in _init/_destroy/_teardown/_mkfs/_mount; this class
// sequences them (tm_setup -> mkfs -> restart; tm_teardown at the end).
class EphemeralTestState {
protected:
  journal_type_t journal_type;
  size_t num_main_device_managers = 0;
  size_t num_cold_device_managers = 0;
  EphemeralDevicesRef devices;
  // NOTE(review): appears unused in this file — confirm before removing.
  bool secondary_is_cold;
  EphemeralTestState(std::size_t num_main_device_managers,
                     std::size_t num_cold_device_managers) :
    num_main_device_managers(num_main_device_managers),
    num_cold_device_managers(num_cold_device_managers) {}
  virtual seastar::future<> _init() = 0;
  virtual seastar::future<> _destroy() = 0;
  virtual seastar::future<> _teardown() = 0;
  seastar::future<> teardown() {
    return _teardown().then([this] {
      return _destroy();
    });
  }
  virtual FuturizedStore::mkfs_ertr::future<> _mkfs() = 0;
  virtual FuturizedStore::mount_ertr::future<> _mount() = 0;
  // Tear down, remount the devices and mount again — simulates a restart
  // without losing the ephemeral device contents.
  seastar::future<> restart_fut() {
    LOG_PREFIX(EphemeralTestState::restart_fut);
    SUBINFO(test, "begin ...");
    return teardown().then([this] {
      devices->remount();
      return _init().then([this] {
        return _mount().handle_error(crimson::ct_error::assert_all{});
      });
    }).then([FNAME] {
      SUBINFO(test, "finish");
    });
  }
  void restart() {
    restart_fut().get0();
  }
  // Full bring-up: pick the device flavor from the journal type, create the
  // devices, mkfs, then restart to reach a freshly-mounted state.
  seastar::future<> tm_setup(
    journal_type_t type = journal_type_t::SEGMENTED) {
    LOG_PREFIX(EphemeralTestState::tm_setup);
    journal_type = type;
    if (journal_type == journal_type_t::SEGMENTED) {
      devices.reset(new
        EphemeralSegmentedDevices(
          num_main_device_managers, num_cold_device_managers));
    } else {
      assert(journal_type == journal_type_t::RANDOM_BLOCK);
      //TODO: multiple devices
      ceph_assert(num_main_device_managers == 1);
      ceph_assert(num_cold_device_managers == 0);
      devices.reset(new EphemeralRandomBlockDevices(1));
    }
    SUBINFO(test, "begin with {} devices ...", devices->get_num_devices());
    return devices->setup(
    ).then([this] {
      return _init();
    }).then([this, FNAME] {
      return _mkfs(
      ).safe_then([this] {
        return restart_fut();
      }).handle_error(
        crimson::ct_error::assert_all{}
      ).then([FNAME] {
        SUBINFO(test, "finish");
      });
    });
  }
  seastar::future<> tm_teardown() {
    LOG_PREFIX(EphemeralTestState::tm_teardown);
    SUBINFO(test, "begin");
    return teardown().then([this, FNAME] {
      devices->reset();
      SUBINFO(test, "finish");
    });
  }
};
// Test state built around a TransactionManager; exposes the TM's internal
// managers (EPM/LBA/cache) as raw observer pointers plus helpers for
// creating and submitting transactions.
class TMTestState : public EphemeralTestState {
protected:
  TransactionManagerRef tm;
  // Non-owning views into tm; reset alongside it in _destroy().
  LBAManager *lba_manager;
  Cache* cache;
  ExtentPlacementManager *epm;
  uint64_t seq = 0;
  TMTestState() : EphemeralTestState(1, 0) {}
  TMTestState(std::size_t num_main_devices, std::size_t num_cold_devices)
    : EphemeralTestState(num_main_devices, num_cold_devices) {}
  virtual seastar::future<> _init() override {
    auto sec_devices = devices->get_secondary_devices();
    auto p_dev = devices->get_primary_device();
    tm = make_transaction_manager(p_dev, sec_devices, true);
    epm = tm->get_epm();
    lba_manager = tm->get_lba_manager();
    cache = tm->get_cache();
    return seastar::now();
  }
  virtual seastar::future<> _destroy() override {
    epm = nullptr;
    lba_manager = nullptr;
    cache = nullptr;
    tm.reset();
    return seastar::now();
  }
  virtual seastar::future<> _teardown() {
    return tm->close().handle_error(
      crimson::ct_error::assert_all{"Error in teardown"}
    );
  }
  // Mount, then halt background work so tests run deterministically.
  virtual FuturizedStore::mount_ertr::future<> _mount() {
    return tm->mount(
    ).handle_error(
      crimson::ct_error::assert_all{"Error in mount"}
    ).then([this] {
      return epm->stop_background();
    }).then([this] {
      return epm->run_background_work_until_halt();
    });
  }
  virtual FuturizedStore::mkfs_ertr::future<> _mkfs() {
    return tm->mkfs(
    ).handle_error(
      crimson::ct_error::assert_all{"Error in mkfs"}
    );
  }
  auto create_mutate_transaction() {
    return tm->create_transaction(
      Transaction::src_t::MUTATE, "test_mutate");
  }
  auto create_read_transaction() {
    return tm->create_transaction(
      Transaction::src_t::READ, "test_read");
  }
  auto create_weak_transaction() {
    return tm->create_transaction(
      Transaction::src_t::READ, "test_read_weak", true);
  }
  // Caller is already inside an interruptible context.
  auto submit_transaction_fut2(Transaction& t) {
    return tm->submit_transaction(t);
  }
  // Wraps the submit in with_trans_intr for plain-future callers.
  auto submit_transaction_fut(Transaction &t) {
    return with_trans_intr(
      t,
      [this](auto &t) {
	return tm->submit_transaction(t);
      });
  }
  // As above, but also yields a monotonically increasing sequence number.
  auto submit_transaction_fut_with_seq(Transaction &t) {
    using ertr = TransactionManager::base_iertr;
    return with_trans_intr(
      t,
      [this](auto &t) {
	return tm->submit_transaction(t
	).si_then([this] {
	  return ertr::make_ready_future<uint64_t>(seq++);
	});
      });
  }
  // Synchronous submit followed by draining background work.
  void submit_transaction(TransactionRef t) {
    submit_transaction_fut(*t).unsafe_get0();
    epm->run_background_work_until_halt().get0();
  }
};
// Out-of-line: hand ownership of the primary device to the caller...
DeviceRef EphemeralSegmentedDevices::get_primary_device_ref() {
  return std::move(segment_manager);
}
DeviceRef EphemeralRandomBlockDevices::get_primary_device_ref() {
  return std::move(rb_device);
}
// ...and take it back, downcasting to the concrete type. The static_cast
// assumes the DeviceRef actually holds the matching implementation.
void EphemeralSegmentedDevices::set_primary_device_ref(DeviceRef dev) {
  segment_manager =
    segment_manager::EphemeralSegmentManagerRef(
      static_cast<segment_manager::EphemeralSegmentManager*>(dev.release()));
}
void EphemeralRandomBlockDevices::set_primary_device_ref(DeviceRef dev) {
  rb_device =
    random_block_device::RBMDeviceRef(
      static_cast<random_block_device::RBMDevice*>(dev.release()));
}
// Test state wrapping a full SeaStore instance, with metadata kept in a
// simple in-memory map instead of a real MDStore.
class SeaStoreTestState : public EphemeralTestState {
  class TestMDStoreState {
    std::map<std::string, std::string> md;
  public:
    // In-memory MDStore: writes go into the parent map, reads come back
    // as optional values (nullopt when the key is absent).
    class Store final : public SeaStore::MDStore {
      TestMDStoreState &parent;
    public:
      Store(TestMDStoreState &parent) : parent(parent) {}
      write_meta_ret write_meta(
	const std::string& key, const std::string& value) final {
	parent.md[key] = value;
	return seastar::now();
      }
      read_meta_ret read_meta(const std::string& key) final {
	auto iter = parent.md.find(key);
	if (iter != parent.md.end()) {
	  return read_meta_ret(
	    read_meta_ertr::ready_future_marker{},
	    iter->second);
	} else {
	  return read_meta_ret(
	    read_meta_ertr::ready_future_marker{},
	    std::nullopt);
	}
      }
    };
    Store get_mdstore() {
      return Store(*this);
    }
  } mdstore_state;
protected:
  std::unique_ptr<SeaStore> seastore;
  FuturizedStore::Shard *sharded_seastore;
  SeaStoreTestState() : EphemeralTestState(1, 0) {}
  virtual seastar::future<> _init() final {
    seastore = make_test_seastore(
      std::make_unique<TestMDStoreState::Store>(mdstore_state.get_mdstore()));
    return seastore->test_start(devices->get_primary_device_ref()
    ).then([this] {
      sharded_seastore = &(seastore->get_sharded_store());
    });
  }
  virtual seastar::future<> _destroy() final {
    // Return the primary device to the harness before tearing down.
    devices->set_primary_device_ref(seastore->get_primary_device_ref());
    return seastore->stop().then([this] {
      seastore.reset();
    });
  }
  virtual seastar::future<> _teardown() final {
    return seastore->umount();
  }
  virtual FuturizedStore::mount_ertr::future<> _mount() final {
    return seastore->test_mount();
  }
  virtual FuturizedStore::mkfs_ertr::future<> _mkfs() final {
    return seastore->test_mkfs(uuid_d{});
  }
};
| 12,787 | 28.063636 | 84 | h |
null | ceph-main/src/test/crimson/seastore/nvmedevice/test_nvmedevice.cc | //-*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/buffer.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
#include "crimson/os/seastore/random_block_manager/nvme_block_device.h"
#include "test/crimson/gtest_seastar.h"
#include "include/stringify.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
using namespace random_block_device;
using namespace random_block_device::nvme;
// Fixture creating a 1GiB sparse backing file that stands in for an NVMe
// block device; the file is removed again on destruction.
struct nvdev_test_t : seastar_test_suite_t {
  std::unique_ptr<RBMDevice> device;
  std::string dev_path;   // per-process path so parallel runs don't collide

  static const uint64_t DEV_SIZE = 1024 * 1024 * 1024;

  nvdev_test_t() :
    device(nullptr),
    dev_path("randomblock_manager.test_nvmedevice" + stringify(getpid())) {
    int fd = ::open(dev_path.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0644);
    ceph_assert(fd >= 0);
    // Fail fast if the backing file cannot be sized (e.g. ENOSPC); an
    // ignored failure here would surface later as confusing short I/O.
    int r = ::ftruncate(fd, DEV_SIZE);
    ceph_assert(r == 0);
    ::close(fd);
  }
  ~nvdev_test_t() {
    ::unlink(dev_path.c_str());
  }
};
static const uint64_t BUF_SIZE = 1024;
static const uint64_t BLK_SIZE = 4096;
// A fixed-size payload with a DENC encoder so it can be round-tripped
// through the device as a bufferlist.
struct nvdev_test_block_t {
  uint8_t data[BUF_SIZE];

  DENC(nvdev_test_block_t, v, p) {
    DENC_START(1, 1, p);
    // Encode byte-by-byte; the encoded size is therefore bounded.
    for (uint64_t i = 0 ; i < BUF_SIZE; i++)
    {
      denc(v.data[i], p);
    }
    DENC_FINISH(p);
  }
};
WRITE_CLASS_DENC_BOUNDED(
  nvdev_test_block_t
)
using crimson::common::local_conf;
// mkfs + mount the device, write one encoded block at offset 0, read it
// back and verify the payload is byte-identical.
TEST_F(nvdev_test_t, write_and_verify_test)
{
  run_async([this] {
    device.reset(new random_block_device::nvme::NVMeBlockDevice(dev_path));
    local_conf().set_val("seastore_cbjournal_size", "1048576").get();
    device->start().get();
    device->mkfs(
      device_config_t{
	true,
	device_spec_t{
	  (magic_t)std::rand(),
	  device_type_t::RANDOM_BLOCK_SSD,
	  static_cast<device_id_t>(DEVICE_ID_RANDOM_BLOCK_MIN)},
	seastore_meta_t{uuid_d()},
	secondary_device_set_t()}
    ).unsafe_get();
    device->mount().unsafe_get();
    // Fill the payload with a pseudo-random (but fixed per run) byte.
    nvdev_test_block_t original_data;
    std::minstd_rand0 generator;
    uint8_t value = generator();
    memset(original_data.data, value, BUF_SIZE);
    uint64_t bl_length = 0;
    Device& d = device->get_sharded_device();
    {
      // Encode into a page-aligned buffer and write it at offset 0.
      bufferlist bl;
      encode(original_data, bl);
      bl_length = bl.length();
      auto write_buf = ceph::bufferptr(buffer::create_page_aligned(BLK_SIZE));
      bl.begin().copy(bl_length, write_buf.c_str());
      ((RBMDevice*)&d)->write(0, std::move(write_buf)).unsafe_get();
    }
    nvdev_test_block_t read_data;
    {
      // Read the block back and decode it.
      auto read_buf = ceph::bufferptr(buffer::create_page_aligned(BLK_SIZE));
      ((RBMDevice*)&d)->read(0, read_buf).unsafe_get();
      bufferlist bl;
      bl.push_back(read_buf);
      auto bliter = bl.cbegin();
      decode(read_data, bliter);
    }
    int ret = memcmp(original_data.data, read_data.data, BUF_SIZE);
    // Close the device before asserting so a failure doesn't leak it.
    ((RBMDevice*)&d)->close().unsafe_get();
    device->stop().get();
    ASSERT_TRUE(ret == 0);
    device.reset(nullptr);
  });
}
| 2,936 | 26.707547 | 78 | cc |
null | ceph-main/src/test/crimson/seastore/onode_tree/test_fltree_onode_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include <boost/range/combine.hpp>
#include "test/crimson/gtest_seastar.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/tree_utils.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
using namespace crimson::os::seastore::onode;
using CTransaction = ceph::os::Transaction;
using namespace std;
namespace {
// File-local test logger (ceph_subsys_test).
[[maybe_unused]] seastar::logger& logger() {
  return crimson::get_logger(ceph_subsys_test);
}
}
// Value descriptor for KVPool-driven onode tests: writes a recognizable
// (size, id, modify-count) triple into the onode layout and can verify it.
struct onode_item_t {
  uint32_t size;
  uint64_t id;
  uint64_t block_size;
  uint32_t cnt_modify = 0;    // bumped on each modify(); stored as omap depth

  void initialize(Transaction& t, Onode& value) const {
    auto& layout = value.get_mutable_layout(t);
    layout.size = size;
    // Encode id/cnt_modify into the omap root so validate() can read them back.
    layout.omap_root.update(omap_root_t(id, cnt_modify,
	value.get_metadata_hint(block_size)));
    validate(value);
  }

  void validate(Onode& value) const {
    auto& layout = value.get_layout();
    ceph_assert(laddr_t(layout.size) == laddr_t{size});
    ceph_assert(layout.omap_root.get(value.get_metadata_hint(block_size)).addr == id);
    ceph_assert(layout.omap_root.get(value.get_metadata_hint(block_size)).depth == cnt_modify);
  }

  // Re-stamp the onode with an incremented modify count.
  void modify(Transaction& t, Onode& value) {
    validate(value);
    ++cnt_modify;
    initialize(t, value);
  }

  static onode_item_t create(std::size_t size, std::size_t id, uint64_t block_size) {
    ceph_assert(size <= std::numeric_limits<uint32_t>::max());
    return {(uint32_t)size, id, block_size};
  }
};
// Fixture wiring an FLTreeOnodeManager on top of TMTestState, plus helpers
// that wrap get/create/modify/erase/list of onodes in transactions.
struct fltree_onode_manager_test_t
  : public seastar_test_suite_t, TMTestState {
  using iterator_t = typename KVPool<onode_item_t>::iterator_t;
  FLTreeOnodeManagerRef manager;
  seastar::future<> set_up_fut() final {
    return tm_setup();
  }
  seastar::future<> tear_down_fut() final {
    return tm_teardown();
  }
  virtual seastar::future<> _init() final {
    return TMTestState::_init().then([this] {
      manager.reset(new FLTreeOnodeManager(*tm));
    });
  }
  virtual seastar::future<> _destroy() final {
    manager.reset();
    return TMTestState::_destroy();
  }
  // mkfs the TM, restart, then mkfs the onode tree itself (retrying on
  // transaction conflicts via repeat_eagain).
  virtual FuturizedStore::mkfs_ertr::future<> _mkfs() final {
    return TMTestState::_mkfs(
    ).safe_then([this] {
      return restart_fut();
    }).safe_then([this] {
      return repeat_eagain([this] {
	return seastar::do_with(
	  create_mutate_transaction(),
	  [this](auto &ref_t)
	{
	  return with_trans_intr(*ref_t, [&](auto &t) {
	    return manager->mkfs(t
	    ).si_then([this, &t] {
	      return submit_transaction_fut2(t);
	    });
	  });
	});
      });
    }).handle_error(
      crimson::ct_error::assert_all{"Invalid error in _mkfs"}
    );
  }
  // Run f inside a fresh mutate transaction and submit it.
  template <typename F>
  void with_transaction(F&& f) {
    auto t = create_mutate_transaction();
    std::invoke(f, *t);
    submit_transaction(std::move(t));
  }
  // get-or-create the onode at *it, run f(t, onode, item), write it back.
  template <typename F>
  void with_onode_write(iterator_t& it, F&& f) {
    with_transaction([this, &it, f=std::move(f)] (auto& t) {
      auto p_kv = *it;
      auto onode = with_trans_intr(t, [&](auto &t) {
	return manager->get_or_create_onode(t, p_kv->key);
      }).unsafe_get0();
      std::invoke(f, t, *onode, p_kv->value);
      with_trans_intr(t, [&](auto &t) {
	return manager->write_dirty(t, {onode});
      }).unsafe_get0();
    });
  }
  void validate_onode(iterator_t& it) {
    with_transaction([this, &it] (auto& t) {
      auto p_kv = *it;
      auto onode = with_trans_intr(t, [&](auto &t) {
	return manager->get_onode(t, p_kv->key);
      }).unsafe_get0();
      p_kv->value.validate(*onode);
    });
  }
  void validate_erased(iterator_t& it) {
    with_transaction([this, &it] (auto& t) {
      auto p_kv = *it;
      auto exist = with_trans_intr(t, [&](auto &t) {
	return manager->contains_onode(t, p_kv->key);
      }).unsafe_get0();
      ceph_assert(exist == false);
    });
  }
  // Collect the [start, end) keys/items and run f(t, oids, items) in a
  // single transaction.
  template <typename F>
  void with_onodes_process(
    const iterator_t& start, const iterator_t& end, F&& f) {
    std::vector<ghobject_t> oids;
    std::vector<onode_item_t*> items;
    auto it = start;
    while(it != end) {
      auto p_kv = *it;
      oids.emplace_back(p_kv->key);
      items.emplace_back(&p_kv->value);
      ++it;
    }
    with_transaction([&oids, &items, f=std::move(f)] (auto& t) mutable {
      std::invoke(f, t, oids, items);
    });
  }
  // Batched get-or-create + per-onode f + write_dirty for [start, end).
  template <typename F>
  void with_onodes_write(
    const iterator_t& start, const iterator_t& end, F&& f) {
    with_onodes_process(start, end,
      [this, f=std::move(f)] (auto& t, auto& oids, auto& items) {
      auto onodes = with_trans_intr(t, [&](auto &t) {
	return manager->get_or_create_onodes(t, oids);
      }).unsafe_get0();
      for (auto tup : boost::combine(onodes, items)) {
	OnodeRef onode;
	onode_item_t* p_item;
	boost::tie(onode, p_item) = tup;
	std::invoke(f, t, *onode, *p_item);
      }
      with_trans_intr(t, [&](auto &t) {
	return manager->write_dirty(t, onodes);
      }).unsafe_get0();
    });
  }
  void validate_onodes(
    const iterator_t& start, const iterator_t& end) {
    with_onodes_process(start, end,
      [this] (auto& t, auto& oids, auto& items) {
      for (auto tup : boost::combine(oids, items)) {
	ghobject_t oid;
	onode_item_t* p_item;
	boost::tie(oid, p_item) = tup;
	auto onode = with_trans_intr(t, [&](auto &t) {
	  return manager->get_onode(t, oid);
	}).unsafe_get0();
	p_item->validate(*onode);
      }
    });
  }
  void validate_erased(
    const iterator_t& start, const iterator_t& end) {
    with_onodes_process(start, end,
      [this] (auto& t, auto& oids, auto& items) {
      for (auto& oid : oids) {
	auto exist = with_trans_intr(t, [&](auto &t) {
	  return manager->contains_onode(t, oid);
	}).unsafe_get0();
	ceph_assert(exist == false);
      }
    });
  }
  static constexpr uint64_t LIST_LIMIT = 10;
  // Page through list_onodes in LIST_LIMIT-sized chunks and check the
  // total count matches the pool.
  void validate_list_onodes(KVPool<onode_item_t>& pool) {
    with_onodes_process(pool.begin(), pool.end(),
      [this] (auto& t, auto& oids, auto& items) {
      std::vector<ghobject_t> listed_oids;
      auto start = ghobject_t();
      auto end = ghobject_t::get_max();
      assert(start < end);
      assert(start < oids[0]);
      assert(oids[0] < end);
      while (start != end) {
	auto [list_ret, list_end] = with_trans_intr(t, [&](auto &t) {
	  return manager->list_onodes(t, start, end, LIST_LIMIT);
	}).unsafe_get0();
	listed_oids.insert(listed_oids.end(), list_ret.begin(), list_ret.end());
	start = list_end;
      }
      ceph_assert(oids.size() == listed_oids.size());
    });
  }
  fltree_onode_manager_test_t() {}
};
// Single-onode lifecycle: create/initialize, validate, modify, list, erase.
TEST_F(fltree_onode_manager_test_t, 1_single)
{
  run_async([this] {
    uint64_t block_size = tm->get_block_size();
    auto pool = KVPool<onode_item_t>::create_range({0, 1}, {128, 256}, block_size);
    auto iter = pool.begin();
    with_onode_write(iter, [](auto& t, auto& onode, auto& item) {
      item.initialize(t, onode);
    });
    validate_onode(iter);
    with_onode_write(iter, [](auto& t, auto& onode, auto& item) {
      item.modify(t, onode);
    });
    validate_onode(iter);
    validate_list_onodes(pool);
    with_onode_write(iter, [this](auto& t, auto& onode, auto& item) {
      OnodeRef onode_ref = &onode;
      with_trans_intr(t, [&](auto &t) {
	return manager->erase_onode(t, onode_ref);
      }).unsafe_get0();
    });
    validate_erased(iter);
  });
}
// Synthetic workload over ~100 onodes: bulk create, then randomized rounds
// of modification, a randomized erase round, and final validation.
TEST_F(fltree_onode_manager_test_t, 2_synthetic)
{
  run_async([this] {
    uint64_t block_size = tm->get_block_size();
    auto pool = KVPool<onode_item_t>::create_range(
        {0, 100}, {32, 64, 128, 256, 512}, block_size);
    auto start = pool.begin();
    auto end = pool.end();
    with_onodes_write(start, end,
        [](auto& t, auto& onode, auto& item) {
      item.initialize(t, onode);
    });
    validate_onodes(start, end);
    validate_list_onodes(pool);
    // Three rounds of modifying 50 random onodes each.
    auto rd_start = pool.random_begin();
    auto rd_end = rd_start + 50;
    with_onodes_write(rd_start, rd_end,
        [](auto& t, auto& onode, auto& item) {
      item.modify(t, onode);
    });
    validate_onodes(start, end);
    pool.shuffle();
    rd_start = pool.random_begin();
    rd_end = rd_start + 50;
    with_onodes_write(rd_start, rd_end,
        [](auto& t, auto& onode, auto& item) {
      item.modify(t, onode);
    });
    validate_onodes(start, end);
    // Erase 50 random onodes and confirm both the erased and surviving sets.
    pool.shuffle();
    rd_start = pool.random_begin();
    rd_end = rd_start + 50;
    with_onodes_write(rd_start, rd_end,
        [this](auto& t, auto& onode, auto& item) {
      OnodeRef onode_ref = &onode;
      with_trans_intr(t, [&](auto &t) {
	return manager->erase_onode(t, onode_ref);
      }).unsafe_get0();
    });
    validate_erased(rd_start, rd_end);
    pool.erase_from_random(rd_start, rd_end);
    start = pool.begin();
    end = pool.end();
    validate_onodes(start, end);
    validate_list_onodes(pool);
  });
}
| 9,287 | 28.207547 | 95 | cc |
null | ceph-main/src/test/crimson/seastore/onode_tree/test_staged_fltree.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include <array>
#include <cstring>
#include <memory>
#include <set>
#include <sstream>
#include <vector>
#include "crimson/common/log.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/dummy.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/seastore.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_layout.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/tree.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/tree_utils.h"
#include "test/crimson/gtest_seastar.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
#include "test_value.h"
using namespace crimson::os::seastore::onode;
// Helpers wrapping a call in an interruptible-transaction context.
// INTR: call fun(t) with no extra arguments.
#define INTR(fun, t)            \
  with_trans_intr(              \
    t,                          \
    [&] (auto &tr) {            \
      return fun(tr);           \
    }                           \
  )
// INTR_R: like INTR, but forwards extra arguments after the transaction.
#define INTR_R(fun, t, args...)       \
  with_trans_intr(                    \
    t,                                \
    [&] (auto &tr) {                  \
      return fun(tr, args);           \
    }                                 \
  )
// INTR_WITH_PARAM: for allocators taking (context, hint, bool, value).
#define INTR_WITH_PARAM(fun, c, b, v)   \
  with_trans_intr(                      \
    c.t,                                \
    [=] (auto &t) {                     \
      return fun(c, L_ADDR_MIN, b, v);  \
    }                                   \
  )
namespace {
constexpr bool IS_DUMMY_SYNC = false;
using DummyManager = DummyNodeExtentManager<IS_DUMMY_SYNC>;
using UnboundedBtree = Btree<UnboundedValue>;

[[maybe_unused]] seastar::logger& logger() {
  return crimson::get_logger(ceph_subsys_test);
}

// Convenience constructor for a fully-specified ghobject_t.
ghobject_t make_ghobj(
    shard_t shard, pool_t pool, crush_hash_t crush,
    std::string ns, std::string oid, snap_t snap, gen_t gen) {
  return ghobject_t{shard_id_t{shard}, pool, crush, ns, oid, snap, gen};
}

// return a key_view_t and its underlying memory buffer.
// the buffer needs to be freed manually.
// Layout is filled back-to-front: shard/pool/crush, then ns/oid, then
// snap/gen, so p_fill must land exactly at the start of the buffer.
std::pair<key_view_t, void*> build_key_view(const ghobject_t& hobj) {
  key_hobj_t key_hobj(hobj);
  size_t key_size = sizeof(shard_pool_crush_t) + sizeof(snap_gen_t) +
                    ns_oid_view_t::estimate_size(key_hobj);
  void* p_mem = std::malloc(key_size);

  key_view_t key_view;
  char* p_fill = (char*)p_mem + key_size;

  auto spc = shard_pool_crush_t::from_key(key_hobj);
  p_fill -= sizeof(shard_pool_crush_t);
  std::memcpy(p_fill, &spc, sizeof(shard_pool_crush_t));
  key_view.set(*reinterpret_cast<const shard_pool_crush_t*>(p_fill));

  auto p_ns_oid = p_fill;
  ns_oid_view_t::test_append(key_hobj, p_fill);
  ns_oid_view_t ns_oid_view(p_ns_oid);
  key_view.set(ns_oid_view);

  auto sg = snap_gen_t::from_key(key_hobj);
  p_fill -= sizeof(snap_gen_t);
  // Sanity: all bytes of the buffer must now be accounted for.
  ceph_assert(p_fill == (char*)p_mem);
  std::memcpy(p_fill, &sg, sizeof(snap_gen_t));
  key_view.set(*reinterpret_cast<const snap_gen_t*>(p_fill));

  return {key_view, p_mem};
}
}
// Stateless suite fixture for the standalone size/layout tests below.
struct a_basic_test_t : public seastar_test_suite_t {};
// Log the sizeof() of every on-disk layout struct plus the per-stage
// insert sizes for a sample key/value — informational, no assertions.
TEST_F(a_basic_test_t, 1_basic_sizes)
{
  logger().info("\n"
    "Bytes of struct:\n"
    "  node_header_t: {}\n"
    "  shard_pool_t: {}\n"
    "  shard_pool_crush_t: {}\n"
    "  crush_t: {}\n"
    "  snap_gen_t: {}\n"
    "  slot_0_t: {}\n"
    "  slot_1_t: {}\n"
    "  slot_3_t: {}\n"
    "  node_fields_0_t: {}\n"
    "  node_fields_1_t: {}\n"
    "  node_fields_2_t: {}\n"
    "  internal_fields_3_t: {}\n"
    "  leaf_fields_3_t: {}\n"
    "  internal_sub_item_t: {}",
    sizeof(node_header_t), sizeof(shard_pool_t),
    sizeof(shard_pool_crush_t), sizeof(crush_t), sizeof(snap_gen_t),
    sizeof(slot_0_t), sizeof(slot_1_t), sizeof(slot_3_t),
    sizeof(node_fields_0_t), sizeof(node_fields_1_t), sizeof(node_fields_2_t),
    sizeof(internal_fields_3_t), sizeof(leaf_fields_3_t), sizeof(internal_sub_item_t)
  );

  auto hobj = make_ghobj(0, 0, 0, "n", "o", 0, 0);
  key_hobj_t key(hobj);
  auto [key_view, p_mem] = build_key_view(hobj);
  value_config_t value;
  value.payload_size = 8;
// Shorthands to reach a node type's staged layout and its nested stages.
#define _STAGE_T(NodeType) node_to_stage_t<typename NodeType::node_stage_t>
#define NXT_T(StageType)  staged<typename StageType::next_param_t>
  laddr_t i_value{0};
  logger().info("\n"
    "Bytes of a key-value insertion (full-string):\n"
    "  s-p-c, 'n'-'o', s-g => value_payload(8): typically internal 43B, leaf 59B\n"
    "  InternalNode0: {} {} {}\n"
    "  InternalNode1: {} {} {}\n"
    "  InternalNode2: {} {}\n"
    "  InternalNode3: {}\n"
    "  LeafNode0: {} {} {}\n"
    "  LeafNode1: {} {} {}\n"
    "  LeafNode2: {} {}\n"
    "  LeafNode3: {}",
    _STAGE_T(InternalNode0)::insert_size(key_view, i_value),
    NXT_T(_STAGE_T(InternalNode0))::insert_size(key_view, i_value),
    NXT_T(NXT_T(_STAGE_T(InternalNode0)))::insert_size(key_view, i_value),
    _STAGE_T(InternalNode1)::insert_size(key_view, i_value),
    NXT_T(_STAGE_T(InternalNode1))::insert_size(key_view, i_value),
    NXT_T(NXT_T(_STAGE_T(InternalNode1)))::insert_size(key_view, i_value),
    _STAGE_T(InternalNode2)::insert_size(key_view, i_value),
    NXT_T(_STAGE_T(InternalNode2))::insert_size(key_view, i_value),
    _STAGE_T(InternalNode3)::insert_size(key_view, i_value),
    _STAGE_T(LeafNode0)::insert_size(key, value),
    NXT_T(_STAGE_T(LeafNode0))::insert_size(key, value),
    NXT_T(NXT_T(_STAGE_T(LeafNode0)))::insert_size(key, value),
    _STAGE_T(LeafNode1)::insert_size(key, value),
    NXT_T(_STAGE_T(LeafNode1))::insert_size(key, value),
    NXT_T(NXT_T(_STAGE_T(LeafNode1)))::insert_size(key, value),
    _STAGE_T(LeafNode2)::insert_size(key, value),
    NXT_T(_STAGE_T(LeafNode2))::insert_size(key, value),
    _STAGE_T(LeafNode3)::insert_size(key, value)
  );
  // build_key_view hands out a manually-managed buffer; free it here.
  std::free(p_mem);
}
// Allocate every node type (internal/leaf, levels 0-3, root and non-root)
// against a dummy extent manager and log their brief dumps.
TEST_F(a_basic_test_t, 2_node_sizes)
{
  run_async([] {
    auto nm = NodeExtentManager::create_dummy(IS_DUMMY_SYNC);
    auto t = make_test_transaction();
    ValueBuilderImpl<UnboundedValue> vb;
    context_t c{*nm, vb, *t};
    // 2nd bool argument: whether the node is allocated as a root.
    std::array<std::pair<NodeImplURef, NodeExtentMutable>, 16> nodes = {
      INTR_WITH_PARAM(InternalNode0::allocate, c, false, 1u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(InternalNode1::allocate, c, false, 1u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(InternalNode2::allocate, c, false, 1u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(InternalNode3::allocate, c, false, 1u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(InternalNode0::allocate, c, true, 1u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(InternalNode1::allocate, c, true, 1u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(InternalNode2::allocate, c, true, 1u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(InternalNode3::allocate, c, true, 1u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(LeafNode0::allocate, c, false, 0u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(LeafNode1::allocate, c, false, 0u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(LeafNode2::allocate, c, false, 0u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(LeafNode3::allocate, c, false, 0u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(LeafNode0::allocate, c, true, 0u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(LeafNode1::allocate, c, true, 0u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(LeafNode2::allocate, c, true, 0u).unsafe_get0().make_pair(),
      INTR_WITH_PARAM(LeafNode3::allocate, c, true, 0u).unsafe_get0().make_pair()
    };
    std::ostringstream oss;
    oss << "\nallocated nodes:";
    for (auto iter = nodes.begin(); iter != nodes.end(); ++iter) {
      oss << "\n  ";
      auto& ref_node = iter->first;
      ref_node->dump_brief(oss);
    }
    logger().info("{}", oss.str());
  });
}
// Fixture that rebuilds a fresh dummy-backed tree plus transaction for
// every test case.
struct b_dummy_tree_test_t : public seastar_test_suite_t {
  TransactionRef ref_t;
  std::unique_ptr<UnboundedBtree> tree;

  b_dummy_tree_test_t() = default;

  // Create the transaction and an empty tree on dummy extents, then mkfs.
  seastar::future<> set_up_fut() override final {
    ref_t = make_test_transaction();
    tree = std::make_unique<UnboundedBtree>(
        NodeExtentManager::create_dummy(IS_DUMMY_SYNC));
    return INTR(tree->mkfs, *ref_t).handle_error(
      crimson::ct_error::all_same_way([] {
        ASSERT_FALSE("Unable to mkfs");
      })
    );
  }

  // Release the transaction before the tree it refers to.
  seastar::future<> tear_down_fut() final {
    ref_t.reset();
    tree.reset();
    return seastar::now();
  }
};
// Exercises insert/erase on a single leaf node at every key stage
// (pool/shard/crush, namespace/oid strings, snap/gen), then validates
// lookup, ordering, full range iteration, and randomized erase to empty.
TEST_F(b_dummy_tree_test_t, 3_random_insert_erase_leaf_node)
{
  run_async([this] {
    logger().info("\n---------------------------------------------"
                  "\nrandomized leaf node insert:\n");
    auto key_s = ghobject_t();
    auto key_e = ghobject_t::get_max();
    // tree is freshly mkfs-ed: all lookups must return end().
    ASSERT_TRUE(INTR_R(tree->find, *ref_t, key_s).unsafe_get0().is_end());
    ASSERT_TRUE(INTR(tree->begin, *ref_t).unsafe_get0().is_end());
    ASSERT_TRUE(INTR(tree->last, *ref_t).unsafe_get0().is_end());
    std::map<ghobject_t,
             std::tuple<test_item_t, UnboundedBtree::Cursor>> insert_history;
    // Insert a brand-new key, verify it becomes immediately findable, and
    // record {value, cursor} so later stages can re-validate both.
    auto f_validate_insert_new = [this, &insert_history] (
        const ghobject_t& key, const test_item_t& value) {
      auto conf = UnboundedBtree::tree_value_config_t{value.get_payload_size()};
      auto [cursor, success] = INTR_R(tree->insert,
                                      *ref_t, key, conf).unsafe_get0();
      initialize_cursor_from_item(*ref_t, key, value, cursor, success);
      insert_history.emplace(key, std::make_tuple(value, cursor));
      auto cursor_ = INTR_R(tree->find, *ref_t, key).unsafe_get0();
      ceph_assert(cursor_ != tree->end());
      ceph_assert(cursor_.value() == cursor.value());
      validate_cursor_from_item(key, value, cursor_);
      return cursor.value();
    };
    // Erase a key and verify: the erased cursor invalidates, the returned
    // cursor equals the next element, and lower_bound agrees.
    auto f_validate_erase = [this, &insert_history] (const ghobject_t& key) {
      auto cursor_erase = INTR_R(tree->find, *ref_t, key).unsafe_get0();
      auto cursor_next = INTR(cursor_erase.get_next, *ref_t).unsafe_get0();
      auto cursor_ret = INTR_R(tree->erase, *ref_t, cursor_erase).unsafe_get0();
      ceph_assert(cursor_erase.is_end());
      ceph_assert(cursor_ret == cursor_next);
      auto cursor_lb = INTR_R(tree->lower_bound, *ref_t, key).unsafe_get0();
      ceph_assert(cursor_lb == cursor_next);
      auto it = insert_history.find(key);
      ceph_assert(std::get<1>(it->second).is_end());
      insert_history.erase(it);
    };
    // Each key is inserted, erased, and re-inserted to cover both paths.
    auto f_insert_erase_insert = [&f_validate_insert_new, &f_validate_erase] (
        const ghobject_t& key, const test_item_t& value) {
      f_validate_insert_new(key, value);
      f_validate_erase(key);
      return f_validate_insert_new(key, value);
    };
    auto values = Values<test_item_t>(15);
    // insert key1, value1 at STAGE_LEFT
    auto key1 = make_ghobj(3, 3, 3, "ns3", "oid3", 3, 3);
    auto value1 = values.pick();
    auto test_value1 = f_insert_erase_insert(key1, value1);
    // validate lookup
    {
      auto cursor1_s = INTR_R(tree->lower_bound, *ref_t, key_s).unsafe_get0();
      ASSERT_EQ(cursor1_s.get_ghobj(), key1);
      ASSERT_EQ(cursor1_s.value(), test_value1);
      auto cursor1_e = INTR_R(tree->lower_bound, *ref_t, key_e).unsafe_get0();
      ASSERT_TRUE(cursor1_e.is_end());
    }
    // insert the same key1 with a different value
    {
      auto value1_dup = values.pick();
      auto conf = UnboundedBtree::tree_value_config_t{value1_dup.get_payload_size()};
      auto [cursor1_dup, ret1_dup] = INTR_R(tree->insert,
                                            *ref_t, key1, conf).unsafe_get0();
      // duplicate insert must fail and return the original cursor/value
      ASSERT_FALSE(ret1_dup);
      validate_cursor_from_item(key1, value1, cursor1_dup);
    }
    // insert key2, value2 to key1's left at STAGE_LEFT
    // insert node front at STAGE_LEFT
    auto key2 = make_ghobj(2, 2, 2, "ns3", "oid3", 3, 3);
    auto value2 = values.pick();
    f_insert_erase_insert(key2, value2);
    // insert key3, value3 to key1's right at STAGE_LEFT
    // insert node last at STAGE_LEFT
    auto key3 = make_ghobj(4, 4, 4, "ns3", "oid3", 3, 3);
    auto value3 = values.pick();
    f_insert_erase_insert(key3, value3);
    // insert key4, value4 to key1's left at STAGE_STRING (collision)
    auto key4 = make_ghobj(3, 3, 3, "ns2", "oid2", 3, 3);
    auto value4 = values.pick();
    f_insert_erase_insert(key4, value4);
    // insert key5, value5 to key1's right at STAGE_STRING (collision)
    auto key5 = make_ghobj(3, 3, 3, "ns4", "oid4", 3, 3);
    auto value5 = values.pick();
    f_insert_erase_insert(key5, value5);
    // insert key6, value6 to key1's left at STAGE_RIGHT
    auto key6 = make_ghobj(3, 3, 3, "ns3", "oid3", 2, 2);
    auto value6 = values.pick();
    f_insert_erase_insert(key6, value6);
    // insert key7, value7 to key1's right at STAGE_RIGHT
    auto key7 = make_ghobj(3, 3, 3, "ns3", "oid3", 4, 4);
    auto value7 = values.pick();
    f_insert_erase_insert(key7, value7);
    // insert node front at STAGE_RIGHT
    auto key8 = make_ghobj(2, 2, 2, "ns3", "oid3", 2, 2);
    auto value8 = values.pick();
    f_insert_erase_insert(key8, value8);
    // insert node front at STAGE_STRING (collision)
    auto key9 = make_ghobj(2, 2, 2, "ns2", "oid2", 3, 3);
    auto value9 = values.pick();
    f_insert_erase_insert(key9, value9);
    // insert node last at STAGE_RIGHT
    auto key10 = make_ghobj(4, 4, 4, "ns3", "oid3", 4, 4);
    auto value10 = values.pick();
    f_insert_erase_insert(key10, value10);
    // insert node last at STAGE_STRING (collision)
    auto key11 = make_ghobj(4, 4, 4, "ns4", "oid4", 3, 3);
    auto value11 = values.pick();
    f_insert_erase_insert(key11, value11);
    // insert key, value randomly until a perfect 3-ary tree is formed
    std::vector<std::pair<ghobject_t, test_item_t>> kvs{
      {make_ghobj(2, 2, 2, "ns2", "oid2", 2, 2), values.pick()},
      {make_ghobj(2, 2, 2, "ns2", "oid2", 4, 4), values.pick()},
      {make_ghobj(2, 2, 2, "ns3", "oid3", 4, 4), values.pick()},
      {make_ghobj(2, 2, 2, "ns4", "oid4", 2, 2), values.pick()},
      {make_ghobj(2, 2, 2, "ns4", "oid4", 3, 3), values.pick()},
      {make_ghobj(2, 2, 2, "ns4", "oid4", 4, 4), values.pick()},
      {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2), values.pick()},
      {make_ghobj(3, 3, 3, "ns2", "oid2", 4, 4), values.pick()},
      {make_ghobj(3, 3, 3, "ns4", "oid4", 2, 2), values.pick()},
      {make_ghobj(3, 3, 3, "ns4", "oid4", 4, 4), values.pick()},
      {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2), values.pick()},
      {make_ghobj(4, 4, 4, "ns2", "oid2", 3, 3), values.pick()},
      {make_ghobj(4, 4, 4, "ns2", "oid2", 4, 4), values.pick()},
      {make_ghobj(4, 4, 4, "ns3", "oid3", 2, 2), values.pick()},
      {make_ghobj(4, 4, 4, "ns4", "oid4", 2, 2), values.pick()},
      {make_ghobj(4, 4, 4, "ns4", "oid4", 4, 4), values.pick()}};
    // remember the extremes before shuffling destroys the order
    auto [smallest_key, smallest_value] = kvs[0];
    auto [largest_key, largest_value] = kvs[kvs.size() - 1];
    std::shuffle(kvs.begin(), kvs.end(), std::default_random_engine{});
    std::for_each(kvs.begin(), kvs.end(), [&f_insert_erase_insert] (auto& kv) {
      f_insert_erase_insert(kv.first, kv.second);
    });
    // everything above must still fit into one leaf (height 1)
    ASSERT_EQ(INTR(tree->height, *ref_t).unsafe_get0(), 1);
    ASSERT_FALSE(tree->test_is_clean());
    for (auto& [k, val] : insert_history) {
      auto& [v, c] = val;
      // validate values in tree keep intact
      auto cursor = with_trans_intr(*ref_t, [this, &k=k](auto& tr) {
        return tree->find(tr, k);
      }).unsafe_get0();
      EXPECT_NE(cursor, tree->end());
      validate_cursor_from_item(k, v, cursor);
      // validate values in cursors keep intact
      validate_cursor_from_item(k, v, c);
    }
    {
      auto cursor = INTR_R(tree->lower_bound, *ref_t, key_s).unsafe_get0();
      validate_cursor_from_item(smallest_key, smallest_value, cursor);
    }
    {
      auto cursor = INTR(tree->begin, *ref_t).unsafe_get0();
      validate_cursor_from_item(smallest_key, smallest_value, cursor);
    }
    {
      auto cursor = INTR(tree->last, *ref_t).unsafe_get0();
      validate_cursor_from_item(largest_key, largest_value, cursor);
    }
    // validate range query
    {
      kvs.clear();
      for (auto& [k, val] : insert_history) {
        auto& [v, c] = val;
        kvs.emplace_back(k, v);
      }
      insert_history.clear();
      std::sort(kvs.begin(), kvs.end(), [](auto& l, auto& r) {
        return l.first < r.first;
      });
      // forward iteration must visit exactly the sorted kvs, then end()
      auto cursor = INTR(tree->begin, *ref_t).unsafe_get0();
      for (auto& [k, v] : kvs) {
        ASSERT_FALSE(cursor.is_end());
        validate_cursor_from_item(k, v, cursor);
        cursor = INTR(cursor.get_next, *ref_t).unsafe_get0();
      }
      ASSERT_TRUE(cursor.is_end());
    }
    std::ostringstream oss;
    tree->dump(*ref_t, oss);
    logger().info("\n{}\n", oss.str());
    // randomized erase until empty
    std::shuffle(kvs.begin(), kvs.end(), std::default_random_engine{});
    for (auto& [k, v] : kvs) {
      auto e_size = with_trans_intr(*ref_t, [this, &k=k](auto& tr) {
        return tree->erase(tr, k);
      }).unsafe_get0();
      ASSERT_EQ(e_size, 1);
    }
    // the tree is empty again but keeps its (single) root node
    auto cursor = INTR(tree->begin, *ref_t).unsafe_get0();
    ASSERT_TRUE(cursor.is_end());
    ASSERT_EQ(INTR(tree->height, *ref_t).unsafe_get0(), 1);
  });
}
/**
 * Build a set of synthetic ghobject_t keys covering the cartesian product
 * of the three onode-key stages:
 *  - range_2: pool/shard/crush ids      in [first, second)
 *  - range_1: "ns<j>" / "oid<j>" names  in [first, second)
 *  - range_0: snap/gen ids              in [first, second)
 *
 * @param padding     suffix appended to every oid string; taken by
 *                    const-ref to avoid copying the string per call
 * @param is_internal when true, append one extra sentinel key so the set
 *                    can seed an internal (non-leaf) node layout
 *
 * ns and oid are both derived from the same index j on purpose, so keys
 * differ in the string stage only when j differs.
 */
static std::set<ghobject_t> build_key_set(
    std::pair<unsigned, unsigned> range_2,
    std::pair<unsigned, unsigned> range_1,
    std::pair<unsigned, unsigned> range_0,
    const std::string& padding = "",
    bool is_internal = false) {
  ceph_assert(range_1.second <= 10);
  std::set<ghobject_t> ret;
  for (unsigned i = range_2.first; i < range_2.second; ++i) {
    for (unsigned j = range_1.first; j < range_1.second; ++j) {
      // hoisted out of the innermost loop: the strings depend only on j
      const std::string ns = "ns" + std::to_string(j);
      const std::string oid = "oid" + std::to_string(j) + padding;
      for (unsigned k = range_0.first; k < range_0.second; ++k) {
        ret.insert(make_ghobj(i, i, i, ns, oid, k, k));
      }
    }
  }
  if (is_internal) {
    ret.insert(make_ghobj(9, 9, 9, "ns~last", "oid~last", 9, 9));
  }
  return ret;
}
// Helper harness for split/merge tests: owns a tree over dummy extents,
// a transaction, and the insert history used to re-validate all entries
// after structural changes.
class TestTree {
 public:
  // NOTE: member declaration order matters here — `c` captures *moved_nm
  // by reference before `tree` takes ownership of the manager.
  TestTree()
    : moved_nm{NodeExtentManager::create_dummy(IS_DUMMY_SYNC)},
      ref_t{make_test_transaction()},
      t{*ref_t},
      c{*moved_nm, vb, t},
      tree{std::move(moved_nm)},
      values{0} {}

  // mkfs and fill the tree with the cartesian key set from build_key_set();
  // the result must still be a single leaf (height 1).
  seastar::future<> build_tree(
      std::pair<unsigned, unsigned> range_2,
      std::pair<unsigned, unsigned> range_1,
      std::pair<unsigned, unsigned> range_0,
      size_t value_size) {
    return seastar::async([this, range_2, range_1, range_0, value_size] {
      INTR(tree.mkfs, t).unsafe_get0();
      //logger().info("\n---------------------------------------------"
      //              "\nbefore leaf node split:\n");
      auto keys = build_key_set(range_2, range_1, range_0);
      for (auto& key : keys) {
        auto value = values.create(value_size);
        insert_tree(key, value).get0();
      }
      ASSERT_EQ(INTR(tree.height, t).unsafe_get0(), 1);
      ASSERT_FALSE(tree.test_is_clean());
      //std::ostringstream oss;
      //tree.dump(t, oss);
      //logger().info("\n{}\n", oss.str());
    });
  }

  // mkfs and fill the tree with explicit, position-aligned key/value lists.
  seastar::future<> build_tree(
      const std::vector<ghobject_t>& keys, const std::vector<test_item_t>& values) {
    return seastar::async([this, keys, values] {
      INTR(tree.mkfs, t).unsafe_get0();
      //logger().info("\n---------------------------------------------"
      //              "\nbefore leaf node split:\n");
      ASSERT_EQ(keys.size(), values.size());
      auto key_iter = keys.begin();
      auto value_iter = values.begin();
      while (key_iter != keys.end()) {
        insert_tree(*key_iter, *value_iter).get0();
        ++key_iter;
        ++value_iter;
      }
      ASSERT_EQ(INTR(tree.height, t).unsafe_get0(), 1);
      ASSERT_FALSE(tree.test_is_clean());
      //std::ostringstream oss;
      //tree.dump(t, oss);
      //logger().info("\n{}\n", oss.str());
    });
  }

  // Clone the tree, insert `key` so the leaf splits (checked against
  // `expected`), then erase it again so the nodes merge back; validates
  // that every historical entry survives both transitions, and that the
  // erase-returned cursor points at `next_key` (or end()).
  seastar::future<> split_merge(
      const ghobject_t& key,
      const test_item_t& value,
      const split_expectation_t& expected,
      std::optional<ghobject_t> next_key) {
    return seastar::async([this, key, value, expected, next_key] {
      // clone
      auto ref_dummy = NodeExtentManager::create_dummy(IS_DUMMY_SYNC);
      auto p_dummy = static_cast<DummyManager*>(ref_dummy.get());
      UnboundedBtree tree_clone(std::move(ref_dummy));
      auto ref_t_clone = make_test_transaction();
      Transaction& t_clone = *ref_t_clone;
      INTR_R(tree_clone.test_clone_from, t_clone, t, tree).unsafe_get0();
      // insert and split
      logger().info("\n\nINSERT-SPLIT {}:", key_hobj_t(key));
      auto conf = UnboundedBtree::tree_value_config_t{value.get_payload_size()};
      auto [cursor, success] = INTR_R(tree_clone.insert,
                                      t_clone, key, conf).unsafe_get0();
      initialize_cursor_from_item(t, key, value, cursor, success);
      {
        std::ostringstream oss;
        tree_clone.dump(t_clone, oss);
        logger().info("dump new root:\n{}", oss.str());
      }
      // a split of the single leaf must yield height 2 and 3 extents
      EXPECT_EQ(INTR(tree_clone.height, t_clone).unsafe_get0(), 2);
      for (auto& [k, val] : insert_history) {
        auto& [v, c] = val;
        auto result = with_trans_intr(t_clone, [&tree_clone, &k=k] (auto& tr) {
          return tree_clone.find(tr, k);
        }).unsafe_get0();
        EXPECT_NE(result, tree_clone.end());
        validate_cursor_from_item(k, v, result);
      }
      auto result = INTR_R(tree_clone.find, t_clone, key).unsafe_get0();
      EXPECT_NE(result, tree_clone.end());
      validate_cursor_from_item(key, value, result);
      EXPECT_TRUE(last_split.match(expected));
      EXPECT_EQ(p_dummy->size(), 3);
      // erase and merge
      logger().info("\n\nERASE-MERGE {}:", key_hobj_t(key));
      auto nxt_cursor = with_trans_intr(t_clone, [&cursor=cursor](auto& tr) {
        return cursor.erase<true>(tr);
      }).unsafe_get0();
      {
        // track root again to dump
        auto begin = INTR(tree_clone.begin, t_clone).unsafe_get0();
        std::ignore = begin;
        std::ostringstream oss;
        tree_clone.dump(t_clone, oss);
        logger().info("dump root:\n{}", oss.str());
      }
      if (next_key.has_value()) {
        auto found = insert_history.find(*next_key);
        ceph_assert(found != insert_history.end());
        validate_cursor_from_item(
            *next_key, std::get<0>(found->second), nxt_cursor);
      } else {
        EXPECT_TRUE(nxt_cursor.is_end());
      }
      for (auto& [k, val] : insert_history) {
        auto& [v, c] = val;
        auto result = with_trans_intr(t_clone, [&tree_clone, &k=k](auto& tr) {
          return tree_clone.find(tr, k);
        }).unsafe_get0();
        EXPECT_NE(result, tree_clone.end());
        validate_cursor_from_item(k, v, result);
      }
      // the merge must collapse back to a single leaf/extent
      EXPECT_EQ(INTR(tree_clone.height, t_clone).unsafe_get0(), 1);
      EXPECT_EQ(p_dummy->size(), 1);
    });
  }

  test_item_t create_value(size_t size) {
    return values.create(size);
  }

 private:
  // Insert one key/value and record it in insert_history for later checks.
  seastar::future<> insert_tree(const ghobject_t& key, const test_item_t& value) {
    return seastar::async([this, &key, &value] {
      auto conf = UnboundedBtree::tree_value_config_t{value.get_payload_size()};
      auto [cursor, success] = INTR_R(tree.insert,
                                      t, key, conf).unsafe_get0();
      initialize_cursor_from_item(t, key, value, cursor, success);
      insert_history.emplace(key, std::make_tuple(value, cursor));
    });
  }

  NodeExtentManagerURef moved_nm;
  TransactionRef ref_t;
  Transaction& t;
  ValueBuilderImpl<UnboundedValue> vb;
  context_t c;
  UnboundedBtree tree;
  Values<test_item_t> values;
  // all live entries, mapped to {inserted value, tracking cursor}
  std::map<ghobject_t,
           std::tuple<test_item_t, UnboundedBtree::Cursor>> insert_history;
};
struct c_dummy_test_t : public seastar_test_suite_t {};
// Drives leaf-node split (and the reverse merge) at every combination of
// split stage (2/1/0), insert stage, left/right half, and insert position
// (BEGIN/MID/LAST); the expectation tuples are {split_stage, insert_stage,
// insert_to_left, insert_position} checked against last_split.
TEST_F(c_dummy_test_t, 4_split_merge_leaf_node)
{
  run_async([] {
    {
      // a 3x3x3 key cube with mid-size values: general split coverage
      TestTree test;
      test.build_tree({2, 5}, {2, 5}, {2, 5}, 120).get0();
      auto value = test.create_value(1144);
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to left front at stage 2, 1, 0\n");
      test.split_merge(make_ghobj(1, 1, 1, "ns3", "oid3", 3, 3), value,
                       {2u, 2u, true, InsertType::BEGIN},
                       {make_ghobj(2, 2, 2, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(2, 2, 2, "ns1", "oid1", 3, 3), value,
                       {2u, 1u, true, InsertType::BEGIN},
                       {make_ghobj(2, 2, 2, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(2, 2, 2, "ns2", "oid2", 1, 1), value,
                       {2u, 0u, true, InsertType::BEGIN},
                       {make_ghobj(2, 2, 2, "ns2", "oid2", 2, 2)}).get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to left back at stage 0, 1, 2, 1, 0\n");
      test.split_merge(make_ghobj(2, 2, 2, "ns4", "oid4", 5, 5), value,
                       {2u, 0u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(2, 2, 2, "ns5", "oid5", 3, 3), value,
                       {2u, 1u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(2, 3, 3, "ns3", "oid3", 3, 3), value,
                       {2u, 2u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns1", "oid1", 3, 3), value,
                       {2u, 1u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns2", "oid2", 1, 1), value,
                       {2u, 0u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      auto value0 = test.create_value(1416);
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to right front at stage 0, 1, 2, 1, 0\n");
      test.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4", 5, 5), value0,
                       {2u, 0u, false, InsertType::BEGIN},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), value0,
                       {2u, 1u, false, InsertType::BEGIN},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 4, 4, "ns3", "oid3", 3, 3), value0,
                       {2u, 2u, false, InsertType::BEGIN},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(4, 4, 4, "ns1", "oid1", 3, 3), value0,
                       {2u, 1u, false, InsertType::BEGIN},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(4, 4, 4, "ns2", "oid2", 1, 1), value0,
                       {2u, 0u, false, InsertType::BEGIN},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to right back at stage 0, 1, 2\n");
      // the inserted key is past the last element: next cursor is end()
      test.split_merge(make_ghobj(4, 4, 4, "ns4", "oid4", 5, 5), value0,
                       {2u, 0u, false, InsertType::LAST},
                       std::nullopt).get0();
      test.split_merge(make_ghobj(4, 4, 4, "ns5", "oid5", 3, 3), value0,
                       {2u, 1u, false, InsertType::LAST},
                       std::nullopt).get0();
      test.split_merge(make_ghobj(5, 5, 5, "ns3", "oid3", 3, 3), value0,
                       {2u, 2u, false, InsertType::LAST},
                       std::nullopt).get0();
      auto value1 = test.create_value(316);
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 1; insert to left middle at stage 0, 1, 2, 1, 0\n");
      test.split_merge(make_ghobj(2, 2, 2, "ns4", "oid4", 5, 5), value1,
                       {1u, 0u, true, InsertType::MID},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(2, 2, 2, "ns5", "oid5", 3, 3), value1,
                       {1u, 1u, true, InsertType::MID},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(2, 2, 3, "ns3", "oid3", 3, 3), value1,
                       {1u, 2u, true, InsertType::MID},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns1", "oid1", 3, 3), value1,
                       {1u, 1u, true, InsertType::MID},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns2", "oid2", 1, 1), value1,
                       {1u, 0u, true, InsertType::MID},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 2, 2)}).get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 1; insert to left back at stage 0, 1, 0\n");
      test.split_merge(make_ghobj(3, 3, 3, "ns2", "oid2", 5, 5), value1,
                       {1u, 0u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns3", "oid3", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns2", "oid3", 3, 3), value1,
                       {1u, 1u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns3", "oid3", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns3", "oid3", 1, 1), value1,
                       {1u, 0u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns3", "oid3", 2, 2)}).get0();
      auto value2 = test.create_value(452);
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 1; insert to right front at stage 0, 1, 0\n");
      test.split_merge(make_ghobj(3, 3, 3, "ns3", "oid3", 5, 5), value2,
                       {1u, 0u, false, InsertType::BEGIN},
                       {make_ghobj(3, 3, 3, "ns4", "oid4", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns3", "oid4", 3, 3), value2,
                       {1u, 1u, false, InsertType::BEGIN},
                       {make_ghobj(3, 3, 3, "ns4", "oid4", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4", 1, 1), value2,
                       {1u, 0u, false, InsertType::BEGIN},
                       {make_ghobj(3, 3, 3, "ns4", "oid4", 2, 2)}).get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 1; insert to right middle at stage 0, 1, 2, 1, 0\n");
      test.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4", 5, 5), value2,
                       {1u, 0u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), value2,
                       {1u, 1u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 4, "ns3", "oid3", 3, 3), value2,
                       {1u, 2u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(4, 4, 4, "ns1", "oid1", 3, 3), value2,
                       {1u, 1u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(4, 4, 4, "ns2", "oid2", 1, 1), value2,
                       {1u, 0u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      auto value3 = test.create_value(834);
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 0; insert to right middle at stage 0, 1, 2, 1, 0\n");
      test.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4", 5, 5), value3,
                       {0u, 0u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), value3,
                       {0u, 1u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(3, 3, 4, "ns3", "oid3", 3, 3), value3,
                       {0u, 2u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(4, 4, 4, "ns1", "oid1", 3, 3), value3,
                       {0u, 1u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      test.split_merge(make_ghobj(4, 4, 4, "ns2", "oid2", 1, 1), value3,
                       {0u, 0u, false, InsertType::MID},
                       {make_ghobj(4, 4, 4, "ns2", "oid2", 2, 2)}).get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 0; insert to right front at stage 0\n");
      test.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4", 2, 3), value3,
                       {0u, 0u, false, InsertType::BEGIN},
                       {make_ghobj(3, 3, 3, "ns4", "oid4", 3, 3)}).get0();
      auto value4 = test.create_value(572);
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 0; insert to left back at stage 0\n");
      test.split_merge(make_ghobj(3, 3, 3, "ns2", "oid2", 3, 4), value4,
                       {0u, 0u, true, InsertType::LAST},
                       {make_ghobj(3, 3, 3, "ns2", "oid2", 4, 4)}).get0();
    }
    {
      // a 2x2x2 cube with large values: force the split point to [0, 0, 0]
      TestTree test;
      test.build_tree({2, 4}, {2, 4}, {2, 4}, 232).get0();
      auto value = test.create_value(1996);
      logger().info("\n---------------------------------------------"
                    "\nsplit at [0, 0, 0]; insert to left front at stage 2, 1, 0\n");
      test.split_merge(make_ghobj(1, 1, 1, "ns3", "oid3", 3, 3), value,
                       {2u, 2u, true, InsertType::BEGIN},
                       {make_ghobj(2, 2, 2, "ns2", "oid2", 2, 2)}).get0();
      EXPECT_TRUE(last_split.match_split_pos({0, {0, {0}}}));
      test.split_merge(make_ghobj(2, 2, 2, "ns1", "oid1", 3, 3), value,
                       {2u, 1u, true, InsertType::BEGIN},
                       {make_ghobj(2, 2, 2, "ns2", "oid2", 2, 2)}).get0();
      EXPECT_TRUE(last_split.match_split_pos({0, {0, {0}}}));
      test.split_merge(make_ghobj(2, 2, 2, "ns2", "oid2", 1, 1), value,
                       {2u, 0u, true, InsertType::BEGIN},
                       {make_ghobj(2, 2, 2, "ns2", "oid2", 2, 2)}).get0();
      EXPECT_TRUE(last_split.match_split_pos({0, {0, {0}}}));
    }
    {
      // a two-key tree: force the split point to [END, END, END]
      TestTree test;
      std::vector<ghobject_t> keys = {
        make_ghobj(2, 2, 2, "ns3", "oid3", 3, 3),
        make_ghobj(3, 3, 3, "ns3", "oid3", 3, 3)};
      std::vector<test_item_t> values = {
        test.create_value(1360),
        test.create_value(1632)};
      test.build_tree(keys, values).get0();
      auto value = test.create_value(1640);
      logger().info("\n---------------------------------------------"
                    "\nsplit at [END, END, END]; insert to right at stage 0, 1, 2\n");
      test.split_merge(make_ghobj(3, 3, 3, "ns3", "oid3", 4, 4), value,
                       {0u, 0u, false, InsertType::BEGIN},
                       std::nullopt).get0();
      EXPECT_TRUE(last_split.match_split_pos({1, {0, {1}}}));
      test.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4", 3, 3), value,
                       {1u, 1u, false, InsertType::BEGIN},
                       std::nullopt).get0();
      EXPECT_TRUE(last_split.match_split_pos({1, {1, {0}}}));
      test.split_merge(make_ghobj(4, 4, 4, "ns3", "oid3", 3, 3), value,
                       {2u, 2u, false, InsertType::BEGIN},
                       std::nullopt).get0();
      EXPECT_TRUE(last_split.match_split_pos({2, {0, {0}}}));
    }
  });
}
namespace crimson::os::seastore::onode {
class DummyChildPool {
  // A stub NodeImpl that carries only a key set and a laddr: just enough
  // state to act as a leaf child when testing internal-node split/merge,
  // while every layout-related operation aborts as unreachable.
  class DummyChildImpl final : public NodeImpl {
   public:
    using URef = std::unique_ptr<DummyChildImpl>;
    DummyChildImpl(const std::set<ghobject_t>& keys, bool is_level_tail, laddr_t laddr)
        : keys{keys}, _is_level_tail{is_level_tail}, _laddr{laddr} {
      // pivot is the largest key; build_key_view allocates p_mem_key_view
      std::tie(key_view, p_mem_key_view) = build_key_view(*keys.crbegin());
      build_name();
    }
    ~DummyChildImpl() override {
      std::free(p_mem_key_view);
    }

    const std::set<ghobject_t>& get_keys() const { return keys; }

    // Replace the key set (e.g. after a simulated split) and rebuild the
    // pivot view and debug name accordingly.
    void reset(const std::set<ghobject_t>& _keys, bool level_tail) {
      keys = _keys;
      _is_level_tail = level_tail;
      std::free(p_mem_key_view);
      std::tie(key_view, p_mem_key_view) = build_key_view(*keys.crbegin());
      build_name();
    }

   public:
    laddr_t laddr() const override { return _laddr; }
    bool is_level_tail() const override { return _is_level_tail; }
    std::optional<key_view_t> get_pivot_index() const override { return {key_view}; }
    bool is_extent_retired() const override { return _is_extent_retired; }
    const std::string& get_name() const override { return name; }
    search_position_t make_tail() override {
      _is_level_tail = true;
      build_name();
      return search_position_t::end();
    }
    // No real extent to retire; just record the state flip once.
    eagain_ifuture<> retire_extent(context_t) override {
      assert(!_is_extent_retired);
      _is_extent_retired = true;
      return eagain_iertr::now();
    }

   protected:
    node_type_t node_type() const override { return node_type_t::LEAF; }
    field_type_t field_type() const override { return field_type_t::N0; }
    // Everything below touches the on-disk layout, which a dummy child
    // does not have — reaching any of these is a test bug.
    const char* read() const override {
      ceph_abort("impossible path"); }
    extent_len_t get_node_size() const override {
      ceph_abort("impossible path"); }
    nextent_state_t get_extent_state() const override {
      ceph_abort("impossible path"); }
    level_t level() const override { return 0u; }
    void prepare_mutate(context_t) override {
      ceph_abort("impossible path"); }
    void validate_non_empty() const override {
      ceph_abort("impossible path"); }
    bool is_keys_empty() const override {
      ceph_abort("impossible path"); }
    bool has_single_value() const override {
      ceph_abort("impossible path"); }
    node_offset_t free_size() const override {
      ceph_abort("impossible path"); }
    extent_len_t total_size() const override {
      ceph_abort("impossible path"); }
    bool is_size_underflow() const override {
      ceph_abort("impossible path"); }
    std::tuple<match_stage_t, search_position_t> erase(const search_position_t&) override {
      ceph_abort("impossible path"); }
    std::tuple<match_stage_t, std::size_t> evaluate_merge(NodeImpl&) override {
      ceph_abort("impossible path"); }
    search_position_t merge(NodeExtentMutable&, NodeImpl&, match_stage_t, extent_len_t) override {
      ceph_abort("impossible path"); }
    eagain_ifuture<NodeExtentMutable> rebuild_extent(context_t) override {
      ceph_abort("impossible path"); }
    node_stats_t get_stats() const override {
      ceph_abort("impossible path"); }
    std::ostream& dump(std::ostream&) const override {
      ceph_abort("impossible path"); }
    std::ostream& dump_brief(std::ostream&) const override {
      ceph_abort("impossible path"); }
    void validate_layout() const override {
      ceph_abort("impossible path"); }
    void test_copy_to(NodeExtentMutable&) const override {
      ceph_abort("impossible path"); }
    void test_set_tail(NodeExtentMutable&) override {
      ceph_abort("impossible path"); }

   private:
    // Compose the human-readable debug name from laddr/level/tail/pivot.
    void build_name() {
      std::ostringstream sos;
      sos << "DummyNode"
          << "@0x" << std::hex << laddr() << std::dec
          << "Lv" << (unsigned)level()
          << (is_level_tail() ? "$" : "")
          << "(" << key_view << ")";
      name = sos.str();
    }

    std::set<ghobject_t> keys;
    bool _is_level_tail;
    laddr_t _laddr;
    std::string name;
    bool _is_extent_retired = false;
    key_view_t key_view;
    // raw buffer backing key_view, owned here and freed in dtor/reset
    void* p_mem_key_view;
  };
class DummyChild final : public Node {
public:
    ~DummyChild() override = default;
    // The key that indexes this child from its parent (its largest key).
    key_view_t get_pivot_key() const { return *impl->get_pivot_index(); }
    // Split this child's key set at a random interior index: the left part
    // stays in-place (never level-tail), the right part becomes a new
    // sibling that inherits the tail flag, and the split is propagated to
    // the parent. `splitable_nodes` is kept in sync with which of the two
    // halves can still be split further.
    eagain_ifuture<> populate_split(
        context_t c, std::set<Ref<DummyChild>>& splitable_nodes) {
      ceph_assert(can_split());
      ceph_assert(splitable_nodes.find(this) != splitable_nodes.end());

      size_t index;
      const auto& keys = impl->get_keys();
      if (keys.size() == 2) {
        index = 1;
      } else {
        // any interior split point: 1 .. keys.size()-2
        index = rd() % (keys.size() - 2) + 1;
      }
      auto iter = keys.begin();
      std::advance(iter, index);

      std::set<ghobject_t> left_keys(keys.begin(), iter);
      std::set<ghobject_t> right_keys(iter, keys.end());
      bool right_is_tail = impl->is_level_tail();
      impl->reset(left_keys, false);
      auto right_child = DummyChild::create_new(right_keys, right_is_tail, pool);
      if (!can_split()) {
        splitable_nodes.erase(this);
      }
      if (right_child->can_split()) {
        splitable_nodes.insert(right_child);
      }
      Ref<Node> this_ref = this;
      return apply_split_to_parent(
          c, std::move(this_ref), std::move(right_child), false);
    }
    // Insert a smaller key into a single-key child and immediately split.
    // NOTE: the assert after populate_split() relies on the dummy backend
    // executing synchronously (IS_DUMMY_SYNC), so the split has already
    // completed when the future is returned.
    eagain_ifuture<> insert_and_split(
        context_t c, const ghobject_t& insert_key,
        std::set<Ref<DummyChild>>& splitable_nodes) {
      const auto& keys = impl->get_keys();
      ceph_assert(keys.size() == 1);
      auto& key = *keys.begin();
      ceph_assert(insert_key < key);

      std::set<ghobject_t> new_keys;
      new_keys.insert(insert_key);
      new_keys.insert(key);
      impl->reset(new_keys, impl->is_level_tail());

      splitable_nodes.clear();
      splitable_nodes.insert(this);
      auto fut = populate_split(c, splitable_nodes);
      // both halves end up single-keyed, so nothing is splitable anymore
      ceph_assert(splitable_nodes.size() == 0);
      return fut;
    }
    // Merge this child with a sibling: prefer merging the right peer into
    // this node; otherwise merge this node into the left peer. The node
    // that disappears is untracked from the pool first, and must be solely
    // owned by the local Ref at that point.
    eagain_ifuture<> merge(context_t c, Ref<DummyChild>&& this_ref) {
      return parent_info().ptr->get_child_peers(c, parent_info().position
      ).si_then([c, this_ref = std::move(this_ref), this] (auto lr_nodes) mutable {
        auto& [lnode, rnode] = lr_nodes;
        if (rnode) {
          lnode.reset();
          Ref<DummyChild> r_dummy(static_cast<DummyChild*>(rnode.get()));
          rnode.reset();
          pool.untrack_node(r_dummy);
          assert(r_dummy->use_count() == 1);
          return do_merge(c, std::move(this_ref), std::move(r_dummy), true);
        } else {
          ceph_assert(lnode);
          Ref<DummyChild> l_dummy(static_cast<DummyChild*>(lnode.get()));
          pool.untrack_node(this_ref);
          assert(this_ref->use_count() == 1);
          return do_merge(c, std::move(l_dummy), std::move(this_ref), false);
        }
      });
    }
    // Replace the single key of a non-tail child and propagate the changed
    // pivot to the parent index.
    eagain_ifuture<> fix_key(context_t c, const ghobject_t& new_key) {
      const auto& keys = impl->get_keys();
      ceph_assert(keys.size() == 1);
      assert(impl->is_level_tail() == false);

      std::set<ghobject_t> new_keys;
      new_keys.insert(new_key);
      impl->reset(new_keys, impl->is_level_tail());
      Ref<Node> this_ref = this;
      return fix_parent_index<true>(c, std::move(this_ref), false);
    }
bool match_pos(const search_position_t& pos) const {
ceph_assert(!is_root());
return pos == parent_info().position;
}
static Ref<DummyChild> create(
const std::set<ghobject_t>& keys, bool is_level_tail,
laddr_t addr, DummyChildPool& pool) {
auto ref_impl = std::make_unique<DummyChildImpl>(keys, is_level_tail, addr);
return new DummyChild(ref_impl.get(), std::move(ref_impl), pool);
}
static Ref<DummyChild> create_new(
const std::set<ghobject_t>& keys, bool is_level_tail, DummyChildPool& pool) {
static laddr_t seed = 0;
return create(keys, is_level_tail, seed++, pool);
}
    // Create the initial child holding all of `keys`, install it as the new
    // root recorded in root_tracker, then upgrade the root (adding an
    // internal node above it) before returning the child.
    static eagain_ifuture<Ref<DummyChild>> create_initial(
        context_t c, const std::set<ghobject_t>& keys,
        DummyChildPool& pool, RootNodeTracker& root_tracker) {
      auto initial = create_new(keys, true, pool);
      return c.nm.get_super(c.t, root_tracker
      ).handle_error_interruptible(
        // eagain is retried by the caller; any other error is fatal here
        eagain_iertr::pass_further{},
        crimson::ct_error::assert_all{"Invalid error during create_initial()"}
      ).si_then([c, initial](auto super) {
        initial->make_root_new(c, std::move(super));
        return initial->upgrade_root(c, L_ADDR_MIN).si_then([initial] {
          return initial;
        });
      });
    }
protected:
    // Clone hook used by DummyChildPool::clone_to(): recreate this child
    // (same keys, tail flag and laddr) in the pool currently being cloned
    // into, and attach the clone to new_parent at the same position.
    eagain_ifuture<> test_clone_non_root(
        context_t, Ref<InternalNode> new_parent) const override {
      ceph_assert(!is_root());
      auto p_pool_clone = pool.pool_clone_in_progress;
      ceph_assert(p_pool_clone != nullptr);
      auto clone = create(
          impl->get_keys(), impl->is_level_tail(), impl->laddr(), *p_pool_clone);
      clone->as_child(parent_info().position, new_parent);
      return eagain_iertr::now();
    }
    // Node interface paths that these tests never exercise on a DummyChild;
    // hitting any of them indicates a test bug.
    eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) override {
      ceph_abort("impossible path"); }
    eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) override {
      ceph_abort("impossible path"); }
    eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const override {
      ceph_abort("impossible path"); }
    eagain_ifuture<search_result_t> lower_bound_tracked(
        context_t, const key_hobj_t&, MatchHistory&) override {
      ceph_abort("impossible path"); }
    eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) override {
      ceph_abort("impossible path"); }
    // a DummyChild never tracks grandchildren
    bool is_tracking() const override { return false; }
    void track_merge(Ref<Node>, match_stage_t, search_position_t&) override {
      ceph_abort("impossible path"); }
private:
    // Takes ownership of the impl (via the Node base) and registers itself
    // in the pool; `impl` is the same object `ref` owns, kept as a typed
    // back-pointer for direct access.
    DummyChild(DummyChildImpl* impl, DummyChildImpl::URef&& ref, DummyChildPool& pool)
      : Node(std::move(ref)), impl{impl}, pool{pool} {
      pool.track_node(this);
    }
bool can_split() const { return impl->get_keys().size() > 1; }
    // Apply a merge of `right` into `left` at the parent. Both nodes must
    // hold exactly one key; the surviving left node keeps right's key when
    // stole_key is true, otherwise its own, and inherits right's tail flag.
    static eagain_ifuture<> do_merge(
        context_t c, Ref<DummyChild>&& left, Ref<DummyChild>&& right, bool stole_key) {
      assert(right->use_count() == 1);
      assert(left->impl->get_keys().size() == 1);
      assert(right->impl->get_keys().size() == 1);
      bool left_is_tail = right->impl->is_level_tail();
      const std::set<ghobject_t>* p_keys;
      if (stole_key) {
        p_keys = &right->impl->get_keys();
      } else {
        p_keys = &left->impl->get_keys();
      }
      left->impl->reset(*p_keys, left_is_tail);
      // read the laddr before `left` is moved into the call below
      auto left_addr = left->impl->laddr();
      return left->parent_info().ptr->apply_children_merge<true>(
          c, std::move(left), left_addr, std::move(right), !stole_key);
    }
DummyChildImpl* impl;
DummyChildPool& pool;
mutable std::random_device rd;
};
public:
  DummyChildPool() = default;
  // reset() asserts all tracked nodes and the btree are released cleanly.
  ~DummyChildPool() { reset(); }
  // Build a fresh 2-level tree over `keys`: create the initial child
  // holding all keys, then repeatedly split a randomly chosen splitable
  // node until none remains. Asserts the resulting tree height is 2.
  auto build_tree(const std::set<ghobject_t>& keys) {
    reset();
    // create tree
    auto ref_dummy = NodeExtentManager::create_dummy(IS_DUMMY_SYNC);
    p_dummy = static_cast<DummyManager*>(ref_dummy.get());
    p_btree.emplace(std::move(ref_dummy));
    return with_trans_intr(get_context().t, [this, &keys] (auto &tr) {
      return DummyChild::create_initial(get_context(), keys, *this, *p_btree->root_tracker
      ).si_then([this](auto initial_child) {
        // split until no node can be split any further
        splitable_nodes.insert(initial_child);
        return trans_intr::repeat([this] ()
            -> eagain_ifuture<seastar::stop_iteration> {
          if (splitable_nodes.empty()) {
            return seastar::make_ready_future<seastar::stop_iteration>(
                seastar::stop_iteration::yes);
          }
          // pick a random splitable node each round
          auto index = rd() % splitable_nodes.size();
          auto iter = splitable_nodes.begin();
          std::advance(iter, index);
          Ref<DummyChild> child = *iter;
          return child->populate_split(get_context(), splitable_nodes
          ).si_then([] {
            return seastar::stop_iteration::no;
          });
        });
      }).si_then([this] {
        //std::ostringstream oss;
        //p_btree->dump(t(), oss);
        //logger().info("\n{}\n", oss.str());
        return p_btree->height(t());
      }).si_then([](auto height) {
        ceph_assert(height == 2);
      });
    });
  }
  // Clone the prebuilt tree, insert `key` at the node located by `pos` to
  // force a split (validated against `expected`), then erase/merge back and
  // check the tree returns to height 2 with a single extent.
  seastar::future<> split_merge(ghobject_t key, search_position_t pos,
                                const split_expectation_t& expected) {
    return seastar::async([this, key, pos, expected] {
      DummyChildPool pool_clone;
      clone_to(pool_clone);
      // insert and split
      logger().info("\n\nINSERT-SPLIT {} at pos({}):", key_hobj_t(key), pos);
      auto node_to_split = pool_clone.get_node_by_pos(pos);
      with_trans_intr(pool_clone.get_context().t, [&] (auto &t) {
        return node_to_split->insert_and_split(
            pool_clone.get_context(), key, pool_clone.splitable_nodes);
      }).unsafe_get0();
      {
        std::ostringstream oss;
        pool_clone.p_btree->dump(pool_clone.t(), oss);
        logger().info("dump new root:\n{}", oss.str());
      }
      auto &pt = pool_clone.t();
      EXPECT_EQ(INTR(pool_clone.p_btree->height, pt).unsafe_get0(), 3);
      EXPECT_TRUE(last_split.match(expected));
      EXPECT_EQ(pool_clone.p_dummy->size(), 3);
      // erase and merge
      // pivot_key is only read by the assert below (compiled out in release)
      [[maybe_unused]] auto pivot_key = node_to_split->get_pivot_key();
      logger().info("\n\nERASE-MERGE {}:", node_to_split->get_name());
      assert(pivot_key == key_hobj_t(key));
      with_trans_intr(pool_clone.get_context().t, [&] (auto &t) {
        return node_to_split->merge(
            pool_clone.get_context(), std::move(node_to_split));
      }).unsafe_get0();
      auto &pt2 = pool_clone.t();
      EXPECT_EQ(INTR(pool_clone.p_btree->height ,pt2).unsafe_get0(), 2);
      EXPECT_EQ(pool_clone.p_dummy->size(), 1);
    });
  }
  // Clone the prebuilt tree, rewrite the key of the node at `pos` to
  // `new_key` (which may or may not force a parent split, per
  // expect_split), verify the resulting shape, then fix the key back and
  // verify the tree returns to its original shape.
  seastar::future<> fix_index(
      ghobject_t new_key, search_position_t pos, bool expect_split) {
    return seastar::async([this, new_key, pos, expect_split] {
      DummyChildPool pool_clone;
      clone_to(pool_clone);
      // fix
      auto node_to_fix = pool_clone.get_node_by_pos(pos);
      auto old_key = node_to_fix->get_pivot_key().to_ghobj();
      logger().info("\n\nFIX pos({}) from {} to {}, expect_split={}:",
                    pos, node_to_fix->get_name(), key_hobj_t(new_key), expect_split);
      with_trans_intr(pool_clone.get_context().t, [&] (auto &t) {
        return node_to_fix->fix_key(pool_clone.get_context(), new_key);
      }).unsafe_get0();
      if (expect_split) {
        std::ostringstream oss;
        pool_clone.p_btree->dump(pool_clone.t(), oss);
        logger().info("dump new root:\n{}", oss.str());
        auto &pt = pool_clone.t();
        EXPECT_EQ(INTR(pool_clone.p_btree->height, pt).unsafe_get0(), 3);
        EXPECT_EQ(pool_clone.p_dummy->size(), 3);
      } else {
        auto &pt = pool_clone.t();
        EXPECT_EQ(INTR(pool_clone.p_btree->height, pt).unsafe_get0(), 2);
        EXPECT_EQ(pool_clone.p_dummy->size(), 1);
      }
      // fix back
      logger().info("\n\nFIX pos({}) from {} back to {}:",
                    pos, node_to_fix->get_name(), key_hobj_t(old_key));
      with_trans_intr(pool_clone.get_context().t, [&] (auto &t) {
        return node_to_fix->fix_key(pool_clone.get_context(), old_key);
      }).unsafe_get0();
      auto &pt = pool_clone.t();
      EXPECT_EQ(INTR(pool_clone.p_btree->height, pt).unsafe_get0(), 2);
      EXPECT_EQ(pool_clone.p_dummy->size(), 1);
    });
  }
private:
  // Deep-copy this pool's btree (and, via test_clone_non_root(), every
  // tracked DummyChild) into pool_clone. pool_clone_in_progress is set for
  // the duration so the per-node clone callbacks can find the target pool.
  void clone_to(DummyChildPool& pool_clone) {
    pool_clone_in_progress = &pool_clone;
    auto ref_dummy = NodeExtentManager::create_dummy(IS_DUMMY_SYNC);
    pool_clone.p_dummy = static_cast<DummyManager*>(ref_dummy.get());
    pool_clone.p_btree.emplace(std::move(ref_dummy));
    auto &pt = pool_clone.t();
    [[maybe_unused]] auto &tr = t();
    INTR_R(pool_clone.p_btree->test_clone_from,
           pt, tr, *p_btree).unsafe_get0();
    pool_clone_in_progress = nullptr;
  }
  // Drop all tracked nodes and the btree. The asserts check that releasing
  // the tracked refs is exactly what makes the btree clean; an empty
  // tracking set implies the tree was never built.
  void reset() {
    ceph_assert(pool_clone_in_progress == nullptr);
    if (tracked_children.size()) {
      ceph_assert(!p_btree->test_is_clean());
      tracked_children.clear();
      ceph_assert(p_btree->test_is_clean());
      p_dummy = nullptr;
      p_btree.reset();
    } else {
      ceph_assert(!p_btree.has_value());
    }
    splitable_nodes.clear();
  }
void track_node(Ref<DummyChild> node) {
ceph_assert(tracked_children.find(node) == tracked_children.end());
tracked_children.insert(node);
}
void untrack_node(Ref<DummyChild> node) {
auto ret = tracked_children.erase(node);
ceph_assert(ret == 1);
}
Ref<DummyChild> get_node_by_pos(const search_position_t& pos) const {
auto iter = std::find_if(
tracked_children.begin(), tracked_children.end(), [&pos](auto& child) {
return child->match_pos(pos);
});
ceph_assert(iter != tracked_children.end());
return *iter;
}
  // Build a tree context over the dummy extent manager and the pool's
  // long-lived test transaction.
  context_t get_context() {
    ceph_assert(p_dummy != nullptr);
    return {*p_dummy, vb, t()};
  }
  // The pool-wide test transaction.
  Transaction& t() const { return *ref_t; }
std::set<Ref<DummyChild>> tracked_children;
std::optional<UnboundedBtree> p_btree;
DummyManager* p_dummy = nullptr;
ValueBuilderImpl<UnboundedValue> vb;
TransactionRef ref_t = make_test_transaction();
std::random_device rd;
std::set<Ref<DummyChild>> splitable_nodes;
DummyChildPool* pool_clone_in_progress = nullptr;
};
}
// Exercise internal-node split/merge and index-fix paths: each scenario
// builds a fresh 2-level tree with carefully padded keys, then drives
// insert-split / erase-merge (or fix_index) at specific positions and
// verifies the split stage/expectations.
TEST_F(c_dummy_test_t, 5_split_merge_internal_node)
{
  run_async([] {
    DummyChildPool pool;
    {
      logger().info("\n---------------------------------------------"
                    "\nbefore internal node insert:\n");
      auto padding = std::string(250, '_');
      auto keys = build_key_set({2, 6}, {2, 5}, {2, 5}, padding, true);
      keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 2, 2));
      keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 3, 3));
      keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 4, 4));
      keys.erase(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 2, 2));
      keys.erase(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 3, 3));
      keys.erase(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 4, 4));
      auto padding_s = std::string(257, '_');
      keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 2, 2));
      keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 3, 3));
      keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 4, 4));
      auto padding_e = std::string(247, '_');
      keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding_e, 2, 2));
      keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding_e, 3, 3));
      keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding_e, 4, 4));
      pool.build_tree(keys).unsafe_get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to right front at stage 0, 1, 2, 1, 0\n");
      pool.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4" + padding, 5, 5), {2, {0, {0}}},
                       {2u, 0u, false, InsertType::BEGIN}).get();
      pool.split_merge(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), {2, {0, {0}}},
                       {2u, 1u, false, InsertType::BEGIN}).get();
      pool.split_merge(make_ghobj(3, 4, 4, "ns3", "oid3", 3, 3), {2, {0, {0}}},
                       {2u, 2u, false, InsertType::BEGIN}).get();
      pool.split_merge(make_ghobj(4, 4, 4, "ns1", "oid1", 3, 3), {2, {0, {0}}},
                       {2u, 1u, false, InsertType::BEGIN}).get();
      pool.split_merge(make_ghobj(4, 4, 4, "ns2", "oid2" + padding, 1, 1), {2, {0, {0}}},
                       {2u, 0u, false, InsertType::BEGIN}).get();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to right middle at stage 0, 1, 2, 1, 0\n");
      pool.split_merge(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 5, 5), {3, {0, {0}}},
                       {2u, 0u, false, InsertType::MID}).get();
      pool.split_merge(make_ghobj(4, 4, 4, "ns5", "oid5", 3, 3), {3, {0, {0}}},
                       {2u, 1u, false, InsertType::MID}).get();
      pool.split_merge(make_ghobj(4, 4, 5, "ns3", "oid3", 3, 3), {3, {0, {0}}},
                       {2u, 2u, false, InsertType::MID}).get();
      pool.split_merge(make_ghobj(5, 5, 5, "ns1", "oid1", 3, 3), {3, {0, {0}}},
                       {2u, 1u, false, InsertType::MID}).get();
      pool.split_merge(make_ghobj(5, 5, 5, "ns2", "oid2" + padding, 1, 1), {3, {0, {0}}},
                       {2u, 0u, false, InsertType::MID}).get();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to right back at stage 0, 1, 2\n");
      pool.split_merge(make_ghobj(5, 5, 5, "ns4", "oid4" + padding_e, 5, 5), search_position_t::end() ,
                       {2u, 0u, false, InsertType::LAST}).get();
      pool.split_merge(make_ghobj(5, 5, 5, "ns5", "oid5", 3, 3), search_position_t::end(),
                       {2u, 1u, false, InsertType::LAST}).get();
      pool.split_merge(make_ghobj(6, 6, 6, "ns3", "oid3", 3, 3), search_position_t::end(),
                       {2u, 2u, false, InsertType::LAST}).get();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 0; insert to left front at stage 2, 1, 0\n");
      pool.split_merge(make_ghobj(1, 1, 1, "ns3", "oid3", 3, 3), {0, {0, {0}}},
                       {0u, 2u, true, InsertType::BEGIN}).get();
      pool.split_merge(make_ghobj(2, 2, 2, "ns1", "oid1", 3, 3), {0, {0, {0}}},
                       {0u, 1u, true, InsertType::BEGIN}).get();
      pool.split_merge(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 1, 1), {0, {0, {0}}},
                       {0u, 0u, true, InsertType::BEGIN}).get();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 0; insert to left middle at stage 0, 1, 2, 1, 0\n");
      pool.split_merge(make_ghobj(2, 2, 2, "ns4", "oid4" + padding, 5, 5), {1, {0, {0}}},
                       {0u, 0u, true, InsertType::MID}).get();
      pool.split_merge(make_ghobj(2, 2, 2, "ns5", "oid5", 3, 3), {1, {0, {0}}},
                       {0u, 1u, true, InsertType::MID}).get();
      pool.split_merge(make_ghobj(2, 2, 3, "ns3", "oid3" + std::string(80, '_'), 3, 3), {1, {0, {0}}} ,
                       {0u, 2u, true, InsertType::MID}).get();
      pool.split_merge(make_ghobj(3, 3, 3, "ns1", "oid1", 3, 3), {1, {0, {0}}},
                       {0u, 1u, true, InsertType::MID}).get();
      pool.split_merge(make_ghobj(3, 3, 3, "ns2", "oid2" + padding, 1, 1), {1, {0, {0}}},
                       {0u, 0u, true, InsertType::MID}).get();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 0; insert to left back at stage 0\n");
      pool.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4" + padding, 3, 4), {1, {2, {2}}},
                       {0u, 0u, true, InsertType::LAST}).get();
    }
    {
      logger().info("\n---------------------------------------------"
                    "\nbefore internal node insert (1):\n");
      auto padding = std::string(244, '_');
      auto keys = build_key_set({2, 6}, {2, 5}, {2, 5}, padding, true);
      keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 5, 5));
      keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 6, 6));
      keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 7, 7));
      pool.build_tree(keys).unsafe_get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to left back at stage 0, 1, 2, 1\n");
      pool.split_merge(make_ghobj(3, 3, 3, "ns4", "oid4" + padding, 5, 5), {2, {0, {0}}},
                       {2u, 0u, true, InsertType::LAST}).get();
      pool.split_merge(make_ghobj(3, 3, 3, "ns5", "oid5", 3, 3), {2, {0, {0}}},
                       {2u, 1u, true, InsertType::LAST}).get();
      pool.split_merge(make_ghobj(3, 4, 4, "n", "o", 3, 3), {2, {0, {0}}},
                       {2u, 2u, true, InsertType::LAST}).get();
      pool.split_merge(make_ghobj(4, 4, 4, "n", "o", 3, 3), {2, {0, {0}}},
                       {2u, 1u, true, InsertType::LAST}).get();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to left middle at stage 2\n");
      pool.split_merge(make_ghobj(2, 3, 3, "n", "o", 3, 3), {1, {0, {0}}},
                       {2u, 2u, true, InsertType::MID}).get();
    }
    {
      logger().info("\n---------------------------------------------"
                    "\nbefore internal node insert (2):\n");
      auto padding = std::string(243, '_');
      auto keys = build_key_set({2, 6}, {2, 5}, {2, 5}, padding, true);
      keys.insert(make_ghobj(4, 4, 4, "n", "o", 3, 3));
      keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 5, 5));
      keys.insert(make_ghobj(5, 5, 5, "ns4", "oid4" + padding, 6, 6));
      pool.build_tree(keys).unsafe_get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 2; insert to left back at stage (0, 1, 2, 1,) 0\n");
      pool.split_merge(make_ghobj(4, 4, 4, "n", "o", 2, 2), {2, {0, {0}}},
                       {2u, 0u, true, InsertType::LAST}).get();
    }
    {
      logger().info("\n---------------------------------------------"
                    "\nbefore internal node insert (3):\n");
      auto padding = std::string(419, '_');
      auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding, true);
      keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 2, 2));
      keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 3, 3));
      keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 4, 4));
      pool.build_tree(keys).unsafe_get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 1; insert to right front at stage 0, 1, 0\n");
      pool.split_merge(make_ghobj(3, 3, 3, "ns2", "oid2" + padding, 5, 5), {1, {1, {0}}},
                       {1u, 0u, false, InsertType::BEGIN}).get();
      pool.split_merge(make_ghobj(3, 3, 3, "ns2", "oid3", 3, 3), {1, {1, {0}}},
                       {1u, 1u, false, InsertType::BEGIN}).get();
      pool.split_merge(make_ghobj(3, 3, 3, "ns3", "oid3" + padding, 1, 1), {1, {1, {0}}},
                       {1u, 0u, false, InsertType::BEGIN}).get();
    }
    {
      logger().info("\n---------------------------------------------"
                    "\nbefore internal node insert (4):\n");
      auto padding = std::string(361, '_');
      auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding, true);
      keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 2, 2));
      keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 3, 3));
      keys.erase(make_ghobj(2, 2, 2, "ns2", "oid2" + padding, 4, 4));
      auto padding_s = std::string(386, '_');
      keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 2, 2));
      keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 3, 3));
      keys.insert(make_ghobj(2, 2, 2, "ns2", "oid2" + padding_s, 4, 4));
      pool.build_tree(keys).unsafe_get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 1; insert to left back at stage 0, 1\n");
      pool.split_merge(make_ghobj(3, 3, 3, "ns2", "oid2" + padding, 5, 5), {1, {1, {0}}},
                       {1u, 0u, true, InsertType::LAST}).get();
      pool.split_merge(make_ghobj(3, 3, 3, "ns2", "oid3", 3, 3), {1, {1, {0}}},
                       {1u, 1u, true, InsertType::LAST}).get();
      logger().info("\n---------------------------------------------"
                    "\nfix end index from stage 0 to 0, 1, 2\n");
      auto padding1 = std::string(400, '_');
      pool.fix_index(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 5, 5),
                     {2, {2, {2}}}, false).get();
      pool.fix_index(make_ghobj(4, 4, 4, "ns5", "oid5" + padding1, 3, 3),
                     {2, {2, {2}}}, true).get();
      pool.fix_index(make_ghobj(5, 5, 5, "ns3", "oid3" + padding1, 3, 3),
                     {2, {2, {2}}}, true).get();
    }
    {
      logger().info("\n---------------------------------------------"
                    "\nbefore internal node insert (5):\n");
      auto padding = std::string(412, '_');
      auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding);
      keys.insert(make_ghobj(3, 3, 3, "ns2", "oid3", 3, 3));
      keys.insert(make_ghobj(4, 4, 4, "ns3", "oid3" + padding, 5, 5));
      keys.insert(make_ghobj(9, 9, 9, "ns~last", "oid~last", 9, 9));
      keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 2, 2));
      keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 3, 3));
      keys.erase(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 4, 4));
      pool.build_tree(keys).unsafe_get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 1; insert to left back at stage (0, 1,) 0\n");
      pool.split_merge(make_ghobj(3, 3, 3, "ns2", "oid3", 2, 2), {1, {1, {0}}},
                       {1u, 0u, true, InsertType::LAST}).get();
    }
    {
      logger().info("\n---------------------------------------------"
                    "\nbefore internal node insert (6):\n");
      auto padding = std::string(328, '_');
      auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding);
      keys.insert(make_ghobj(5, 5, 5, "ns3", "oid3" + std::string(270, '_'), 3, 3));
      keys.insert(make_ghobj(9, 9, 9, "ns~last", "oid~last", 9, 9));
      pool.build_tree(keys).unsafe_get0();
      logger().info("\n---------------------------------------------"
                    "\nsplit at stage 0; insert to right front at stage 0\n");
      pool.split_merge(make_ghobj(3, 3, 3, "ns3", "oid3" + padding, 2, 3), {1, {1, {1}}},
                       {0u, 0u, false, InsertType::BEGIN}).get();
      logger().info("\n---------------------------------------------"
                    "\nfix end index from stage 2 to 0, 1, 2\n");
      auto padding1 = std::string(400, '_');
      pool.fix_index(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 5, 5),
                     {3, {0, {0}}}, false).get();
      pool.fix_index(make_ghobj(4, 4, 4, "ns5", "oid5" + padding1, 3, 3),
                     {3, {0, {0}}}, true).get();
      pool.fix_index(make_ghobj(5, 5, 5, "ns4", "oid4" + padding1, 3, 3),
                     {3, {0, {0}}}, true).get();
    }
    {
      logger().info("\n---------------------------------------------"
                    "\nbefore internal node insert (7):\n");
      auto padding = std::string(323, '_');
      auto keys = build_key_set({2, 5}, {2, 5}, {2, 5}, padding);
      keys.insert(make_ghobj(4, 4, 4, "ns5", "oid5" + padding, 3, 3));
      keys.insert(make_ghobj(9, 9, 9, "ns~last", "oid~last", 9, 9));
      pool.build_tree(keys).unsafe_get0();
      logger().info("\n---------------------------------------------"
                    "\nfix end index from stage 1 to 0, 1, 2\n");
      auto padding1 = std::string(400, '_');
      pool.fix_index(make_ghobj(4, 4, 4, "ns4", "oid4" + padding, 5, 5),
                     {2, {3, {0}}}, false).get();
      pool.fix_index(make_ghobj(4, 4, 4, "ns6", "oid6" + padding1, 3, 3),
                     {2, {3, {0}}}, true).get();
      pool.fix_index(make_ghobj(5, 5, 5, "ns3", "oid3" + padding1, 3, 3),
                     {2, {3, {0}}}, true).get();
    }
    // Impossible to split at {0, 0, 0}
    // Impossible to split at [END, END, END]
  });
}
// Fixture that brings up / tears down a TransactionManager (via
// TMTestState) around each seastore-backed tree test.
struct d_seastore_tm_test_t :
    public seastar_test_suite_t, TMTestState {
  seastar::future<> set_up_fut() override final {
    return tm_setup();
  }
  seastar::future<> tear_down_fut() override final {
    return tm_teardown();
  }
};
// Randomized end-to-end tree workload on seastore: bootstrap, bulk
// insert, then erase in two passes (3/4 then the remainder), restarting
// the store and re-validating the whole tree after each phase.
TEST_F(d_seastore_tm_test_t, 6_random_tree_insert_erase)
{
  run_async([this] {
    constexpr bool TEST_SEASTORE = true;
    constexpr bool TRACK_CURSORS = true;
    auto kvs = KVPool<test_item_t>::create_raw_range(
        {8, 11, 64, 256, 301, 320},
        {8, 11, 64, 256, 301, 320},
        {8, 16, 128, 512, 576, 640},
        {0, 16}, {0, 10}, {0, 4});
    auto moved_nm = (TEST_SEASTORE ? NodeExtentManager::create_seastore(*tm)
                                   : NodeExtentManager::create_dummy(IS_DUMMY_SYNC));
    auto p_nm = moved_nm.get();
    auto tree = std::make_unique<TreeBuilder<TRACK_CURSORS, BoundedValue>>(
        kvs, std::move(moved_nm));
    {
      auto t = create_mutate_transaction();
      INTR(tree->bootstrap, *t).unsafe_get();
      submit_transaction(std::move(t));
    }
    // test insert
    {
      auto t = create_mutate_transaction();
      INTR(tree->insert, *t).unsafe_get();
      submit_transaction(std::move(t));
    }
    {
      auto t = create_read_transaction();
      INTR(tree->get_stats, *t).unsafe_get();
    }
    // restart to make sure the tree survives a reboot of the store
    if constexpr (TEST_SEASTORE) {
      restart();
      tree->reload(NodeExtentManager::create_seastore(*tm));
    }
    {
      // Note: create_weak_transaction() can also work, but too slow.
      auto t = create_read_transaction();
      INTR(tree->validate, *t).unsafe_get();
    }
    // test erase 3/4
    {
      auto t = create_mutate_transaction();
      auto size = kvs.size() / 4 * 3;
      INTR_R(tree->erase, *t, size).unsafe_get();
      submit_transaction(std::move(t));
    }
    {
      auto t = create_read_transaction();
      INTR(tree->get_stats, *t).unsafe_get();
    }
    if constexpr (TEST_SEASTORE) {
      restart();
      tree->reload(NodeExtentManager::create_seastore(*tm));
    }
    {
      auto t = create_read_transaction();
      INTR(tree->validate, *t).unsafe_get();
    }
    // test erase remaining
    {
      auto t = create_mutate_transaction();
      auto size = kvs.size();
      INTR_R(tree->erase, *t, size).unsafe_get();
      submit_transaction(std::move(t));
    }
    {
      auto t = create_read_transaction();
      INTR(tree->get_stats, *t).unsafe_get();
    }
    if constexpr (TEST_SEASTORE) {
      restart();
      tree->reload(NodeExtentManager::create_seastore(*tm));
    }
    {
      // everything erased: the tree must have collapsed back to height 1
      auto t = create_read_transaction();
      INTR(tree->validate, *t).unsafe_get();
      EXPECT_EQ(INTR(tree->height, *t).unsafe_get0(), 1);
    }
    if constexpr (!TEST_SEASTORE) {
      auto p_dummy = static_cast<DummyManager*>(p_nm);
      EXPECT_EQ(p_dummy->size(), 1);
    }
    tree.reset();
  });
}
// Same kind of workload, but with the seastore NodeExtentManager injecting
// eagain (transaction restart) failures with probability
// EAGAIN_PROBABILITY; every operation is wrapped in repeat_eagain() and
// retried until it commits. num_ops/num_ops_eagain track the retry rate.
TEST_F(d_seastore_tm_test_t, 7_tree_insert_erase_eagain)
{
  run_async([this] {
    constexpr double EAGAIN_PROBABILITY = 0.1;
    constexpr bool TRACK_CURSORS = false;
    auto kvs = KVPool<test_item_t>::create_raw_range(
        {8, 11, 64, 128, 255, 256},
        {8, 13, 64, 512, 2035, 2048},
        {8, 16, 128, 576, 992, 1200},
        {0, 8}, {0, 10}, {0, 4});
    auto moved_nm = NodeExtentManager::create_seastore(
        *tm, L_ADDR_MIN, EAGAIN_PROBABILITY);
    auto p_nm = static_cast<SeastoreNodeExtentManager<true>*>(moved_nm.get());
    auto tree = std::make_unique<TreeBuilder<TRACK_CURSORS, ExtendedValue>>(
        kvs, std::move(moved_nm));
    unsigned num_ops = 0;
    unsigned num_ops_eagain = 0;
    // bootstrap
    ++num_ops;
    repeat_eagain([this, &tree, &num_ops_eagain] {
      ++num_ops_eagain;
      return seastar::do_with(
        create_mutate_transaction(),
        [this, &tree](auto &t) {
        return INTR(tree->bootstrap, *t
        ).safe_then([this, &t] {
          return submit_transaction_fut(*t);
        });
      });
    }).unsafe_get0();
    epm->run_background_work_until_halt().get0();
    // insert
    logger().warn("start inserting {} kvs ...", kvs.size());
    {
      auto iter = kvs.random_begin();
      while (iter != kvs.random_end()) {
        ++num_ops;
        repeat_eagain([this, &tree, &num_ops_eagain, &iter] {
          ++num_ops_eagain;
          return seastar::do_with(
            create_mutate_transaction(),
            [this, &tree, &iter](auto &t) {
            return INTR_R(tree->insert_one, *t, iter
            ).safe_then([this, &t](auto cursor) {
              cursor.invalidate();
              return submit_transaction_fut(*t);
            });
          });
        }).unsafe_get0();
        epm->run_background_work_until_halt().get0();
        ++iter;
      }
    }
    {
      // stats pass must not be interrupted by injected eagain
      p_nm->set_generate_eagain(false);
      auto t = create_read_transaction();
      INTR(tree->get_stats, *t).unsafe_get0();
      p_nm->set_generate_eagain(true);
    }
    // lookup
    logger().warn("start lookup {} kvs ...", kvs.size());
    {
      auto iter = kvs.begin();
      while (iter != kvs.end()) {
        ++num_ops;
        repeat_eagain([this, &tree, &num_ops_eagain, &iter] {
          ++num_ops_eagain;
          auto t = create_read_transaction();
          return INTR_R(tree->validate_one, *t, iter
          ).safe_then([t=std::move(t)]{});
        }).unsafe_get0();
        ++iter;
      }
    }
    // erase
    logger().warn("start erase {} kvs ...", kvs.size());
    {
      kvs.shuffle();
      auto iter = kvs.random_begin();
      while (iter != kvs.random_end()) {
        ++num_ops;
        repeat_eagain([this, &tree, &num_ops_eagain, &iter] {
          ++num_ops_eagain;
          return seastar::do_with(
            create_mutate_transaction(),
            [this, &tree, &iter](auto &t) {
            return INTR_R(tree->erase_one, *t, iter
            ).safe_then([this, &t] () mutable {
              return submit_transaction_fut(*t);
            });
          });
        }).unsafe_get0();
        epm->run_background_work_until_halt().get0();
        ++iter;
      }
      kvs.erase_from_random(kvs.random_begin(), kvs.random_end());
    }
    {
      p_nm->set_generate_eagain(false);
      auto t = create_read_transaction();
      INTR(tree->get_stats, *t).unsafe_get0();
      INTR(tree->validate, *t).unsafe_get0();
      EXPECT_EQ(INTR(tree->height,*t).unsafe_get0(), 1);
    }
    // we can adjust EAGAIN_PROBABILITY to get a proper eagain_rate
    double eagain_rate = num_ops_eagain;
    eagain_rate /= num_ops;
    logger().info("eagain rate: {}", eagain_rate);
    tree.reset();
  });
}
| 74,510 | 40.766256 | 103 | cc |
null | ceph-main/src/test/crimson/seastore/onode_tree/test_value.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <fmt/format.h>
#include "crimson/common/log.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/value.h"
namespace crimson::os::seastore::onode {
struct test_item_t {
using id_t = uint16_t;
using magic_t = uint32_t;
value_size_t size;
id_t id;
magic_t magic;
value_size_t get_payload_size() const {
assert(size > sizeof(value_header_t));
return static_cast<value_size_t>(size - sizeof(value_header_t));
}
static test_item_t create(std::size_t _size, std::size_t _id) {
ceph_assert(_size <= std::numeric_limits<value_size_t>::max());
ceph_assert(_size > sizeof(value_header_t));
value_size_t size = _size;
ceph_assert(_id <= std::numeric_limits<id_t>::max());
id_t id = _id;
return {size, id, (magic_t)id * 137};
}
};
// Render as "TestItem(#<id>, <size>B)".
inline std::ostream& operator<<(std::ostream& os, const test_item_t& item) {
  os << "TestItem(#" << item.id << ", " << item.size << "B)";
  return os;
}
// Replayable delta operations recorded against a TestValue payload.
enum class delta_op_t : uint8_t {
  UPDATE_ID,          // overwrite payload_t::id
  UPDATE_TAIL_MAGIC,  // overwrite the trailing magic_t of the payload
};
// Human-readable name of a delta op (for logging).
inline std::ostream& operator<<(std::ostream& os, const delta_op_t op) {
  const char* name = "unknown";
  switch (op) {
  case delta_op_t::UPDATE_ID:
    name = "update_id";
    break;
  case delta_op_t::UPDATE_TAIL_MAGIC:
    name = "update_tail_magic";
    break;
  default:
    break;
  }
  return os << name;
}
} // namespace crimson::os::seastore::onode
#if FMT_VERSION >= 90000
template<> struct fmt::formatter<crimson::os::seastore::onode::delta_op_t> : fmt::ostream_formatter {};
#endif
namespace crimson::os::seastore::onode {
// A configurable test Value type: its payload is an id followed by opaque
// filler and a trailing magic. Both fields are mutated through replayable
// deltas so the tree's journal replay path can be exercised. The template
// parameters become the tree_conf_t limits for trees built over this value.
template <value_magic_t MAGIC,
          string_size_t MAX_NS_SIZE,
          string_size_t MAX_OID_SIZE,
          value_size_t MAX_VALUE_PAYLOAD_SIZE,
          extent_len_t INTERNAL_NODE_SIZE,
          extent_len_t LEAF_NODE_SIZE,
          bool DO_SPLIT_CHECK>
class TestValue final : public Value {
 public:
  static constexpr tree_conf_t TREE_CONF = {
    MAGIC,
    MAX_NS_SIZE,
    MAX_OID_SIZE,
    MAX_VALUE_PAYLOAD_SIZE,
    INTERNAL_NODE_SIZE,
    LEAF_NODE_SIZE,
    DO_SPLIT_CHECK
  };
  using id_t = test_item_t::id_t;
  using magic_t = test_item_t::magic_t;
  // packed wrapper so the tail magic can be read at an unaligned offset
  struct magic_packed_t {
    magic_t value;
  } __attribute__((packed));
 private:
  // layout of the head of the payload; the magic sits at the very end
  struct payload_t {
    id_t id;
  } __attribute__((packed));
  // Applies mutations directly to a mutable payload extent; shared between
  // the normal write path and delta replay.
  struct Replayable {
    static void set_id(NodeExtentMutable& payload_mut, id_t id) {
      auto p_payload = get_write(payload_mut);
      p_payload->id = id;
    }
    static void set_tail_magic(NodeExtentMutable& payload_mut, magic_t magic) {
      auto length = payload_mut.get_length();
      auto offset_magic = length - sizeof(magic_t);
      payload_mut.copy_in_relative(offset_magic, magic);
    }
   private:
    static payload_t* get_write(NodeExtentMutable& payload_mut) {
      return reinterpret_cast<payload_t*>(payload_mut.get_write());
    }
  };
 public:
  // Encodes value deltas at mutation time and decodes/applies them during
  // journal replay.
  class Recorder final : public ValueDeltaRecorder {
   public:
    Recorder(ceph::bufferlist& encoded)
      : ValueDeltaRecorder(encoded) {}
    ~Recorder() override = default;
    void encode_set_id(NodeExtentMutable& payload_mut, id_t id) {
      auto& encoded = get_encoded(payload_mut);
      ceph::encode(delta_op_t::UPDATE_ID, encoded);
      ceph::encode(id, encoded);
    }
    void encode_set_tail_magic(NodeExtentMutable& payload_mut, magic_t magic) {
      auto& encoded = get_encoded(payload_mut);
      ceph::encode(delta_op_t::UPDATE_TAIL_MAGIC, encoded);
      ceph::encode(magic, encoded);
    }
   protected:
    value_magic_t get_header_magic() const override {
      return TREE_CONF.value_magic;
    }
    // Decode one op from the delta and apply it via Replayable; any unknown
    // op or decode failure aborts, as replay corruption is unrecoverable.
    void apply_value_delta(ceph::bufferlist::const_iterator& delta,
                           NodeExtentMutable& payload_mut,
                           laddr_t value_addr) override {
      delta_op_t op;
      try {
        ceph::decode(op, delta);
        switch (op) {
        case delta_op_t::UPDATE_ID: {
          logger().debug("OTree::TestValue::Replay: decoding UPDATE_ID ...");
          id_t id;
          ceph::decode(id, delta);
          logger().debug("OTree::TestValue::Replay: apply id={} ...", id);
          Replayable::set_id(payload_mut, id);
          break;
        }
        case delta_op_t::UPDATE_TAIL_MAGIC: {
          logger().debug("OTree::TestValue::Replay: decoding UPDATE_TAIL_MAGIC ...");
          magic_t magic;
          ceph::decode(magic, delta);
          logger().debug("OTree::TestValue::Replay: apply magic={} ...", magic);
          Replayable::set_tail_magic(payload_mut, magic);
          break;
        }
        default:
          logger().error("OTree::TestValue::Replay: got unknown op {} when replay {:#x}+{:#x}",
                         op, value_addr, payload_mut.get_length());
          ceph_abort();
        }
      } catch (buffer::error& e) {
        logger().error("OTree::TestValue::Replay: got decode error {} when replay {:#x}+{:#x}",
                       e.what(), value_addr, payload_mut.get_length());
        ceph_abort();
      }
    }
   private:
    seastar::logger& logger() {
      return crimson::get_logger(ceph_subsys_test);
    }
  };
  TestValue(NodeExtentManager& nm, const ValueBuilder& vb, Ref<tree_cursor_t>& p_cursor)
    : Value(nm, vb, p_cursor) {}
  ~TestValue() override = default;
  id_t get_id() const {
    return read_payload<payload_t>()->id;
  }
  // Mutate the id; if a Recorder is attached, the delta is encoded first so
  // the same change can be replayed from the journal.
  void set_id_replayable(Transaction& t, id_t id) {
    auto value_mutable = prepare_mutate_payload<payload_t, Recorder>(t);
    if (value_mutable.second) {
      value_mutable.second->encode_set_id(value_mutable.first, id);
    }
    Replayable::set_id(value_mutable.first, id);
  }
  // The magic occupies the last sizeof(magic_t) bytes of the payload.
  magic_t get_tail_magic() const {
    auto p_payload = read_payload<payload_t>();
    auto offset_magic = get_payload_size() - sizeof(magic_t);
    auto p_magic = reinterpret_cast<const char*>(p_payload) + offset_magic;
    return reinterpret_cast<const magic_packed_t*>(p_magic)->value;
  }
  // Mutate the tail magic; delta-recorded like set_id_replayable().
  void set_tail_magic_replayable(Transaction& t, magic_t magic) {
    auto value_mutable = prepare_mutate_payload<payload_t, Recorder>(t);
    if (value_mutable.second) {
      value_mutable.second->encode_set_tail_magic(value_mutable.first, magic);
    }
    Replayable::set_tail_magic(value_mutable.first, magic);
  }
  /*
   * tree_util.h related interfaces
   */
  using item_t = test_item_t;
  // Fill a freshly inserted value from item (size must already match).
  void initialize(Transaction& t, const item_t& item) {
    ceph_assert(get_payload_size() + sizeof(value_header_t) == item.size);
    set_id_replayable(t, item.id);
    set_tail_magic_replayable(t, item.magic);
  }
  // Check the stored value round-trips item exactly.
  void validate(const item_t& item) const {
    ceph_assert(get_payload_size() + sizeof(value_header_t) == item.size);
    ceph_assert(get_id() == item.id);
    ceph_assert(get_tail_magic() == item.magic);
  }
};
// Effectively unbounded key/value limits with split check disabled.
using UnboundedValue = TestValue<
  value_magic_t::TEST_UNBOUND, 4096, 4096, 4096, 4096, 4096, false>;
// Tight key/value limits with split check enabled.
using BoundedValue = TestValue<
  value_magic_t::TEST_BOUNDED, 320, 320, 640, 4096, 4096, true>;
// should be the same configuration with FLTreeOnode
using ExtendedValue = TestValue<
  value_magic_t::TEST_EXTENDED, 256, 2048, 1200, 8192, 16384, true>;
}
#if FMT_VERSION >= 90000
template<>
struct fmt::formatter<crimson::os::seastore::onode::test_item_t> : fmt::ostream_formatter {};
#endif
| 7,324 | 29.394191 | 103 | h |
null | ceph-main/src/test/crush/CrushWrapper.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library Public License for more details.
*
*/
#include <iostream>
#include <gtest/gtest.h>
#include "common/ceph_argparse.h"
#include "common/common_init.h"
#include "include/stringify.h"
#include "include/Context.h"
#include "osd/osd_types.h"
#include "crush/CrushWrapper.h"
using namespace std;
// Test fixture providing a minimally-initialized CephContext for the
// CrushWrapper tests (several CrushWrapper APIs take a cct for logging
// and configuration).
class CrushWrapperTest : public ::testing::Test
{
public:
  void SetUp() final
  {
    CephInitParameters params(CEPH_ENTITY_TYPE_CLIENT);
    cct = common_preinit(params, CODE_ENVIRONMENT_UTILITY,
			 CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
    // keep crush debug output quiet so test logs stay readable
    cct->_conf.set_val("debug_crush", "0");
  }
  void TearDown() final
  {
    // the context is refcounted; drop our reference rather than delete
    cct->put();
    cct = nullptr;
  }
protected:
  CephContext *cct = nullptr;
};
// get_immediate_parent(): returns -ENOENT for an item not linked
// anywhere, and the (type-name, bucket-name) pair of the direct parent
// once the item has been inserted.
TEST_F(CrushWrapperTest, get_immediate_parent) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 1;
  c->set_type_name(ROOT_TYPE, "root");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		ROOT_TYPE, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  int item = 0;
  pair <string,string> loc;
  int ret;
  // the item has not been inserted yet
  loc = c->get_immediate_parent(item, &ret);
  EXPECT_EQ(-ENOENT, ret);
  {
    map<string,string> loc;
    loc["root"] = "default";
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd.0", loc));
  }
  // now the immediate parent is the "default" root bucket
  loc = c->get_immediate_parent(item, &ret);
  EXPECT_EQ(0, ret);
  EXPECT_EQ("root", loc.first);
  EXPECT_EQ("default", loc.second);
}
// move_bucket(): rejects non-negative (hence non-bucket) and unknown
// ids, and re-parents an existing bucket — here host0 moves from root0
// to root1, verified via get_immediate_parent() before and after.
TEST_F(CrushWrapperTest, move_bucket) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int root0;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			     ROOT_TYPE, 0, NULL, NULL, &root0));
  EXPECT_EQ(0, c->set_item_name(root0, "root0"));
  {
    // host0 is created implicitly by inserting osd.0 under it
    map<string,string> loc;
    loc["root"] = "root0";
    loc["host"] = "host0";
    int item = 0;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd.0", loc));
  }
  int host0 = c->get_item_id("host0");
  int root1;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			     ROOT_TYPE, 0, NULL, NULL, &root1));
  EXPECT_EQ(0, c->set_item_name(root1, "root1"));
  map<string,string> loc;
  loc["root"] = "root1";
  // 0 is not a valid bucket number, must be negative
  EXPECT_EQ(-EINVAL, c->move_bucket(cct, 0, loc));
  // -100 is not an existing bucket
  EXPECT_EQ(-ENOENT, c->move_bucket(cct, -100, loc));
  // move host0 from root0 to root1
  {
    pair <string,string> loc;
    int ret;
    loc = c->get_immediate_parent(host0, &ret);
    EXPECT_EQ(0, ret);
    EXPECT_EQ("root", loc.first);
    EXPECT_EQ("root0", loc.second);
  }
  EXPECT_EQ(0, c->move_bucket(cct, host0, loc));
  {
    pair <string,string> loc;
    int ret;
    loc = c->get_immediate_parent(host0, &ret);
    EXPECT_EQ(0, ret);
    EXPECT_EQ("root", loc.first);
    EXPECT_EQ("root1", loc.second);
  }
}
// swap_bucket(): exchanges the contents (name, weight, children) of two
// bucket ids while the parent keeps referencing the same ids, and
// refuses to swap a bucket with its own ancestor (-EINVAL).
TEST_F(CrushWrapperTest, swap_bucket) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int root;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW2, CRUSH_HASH_RJENKINS1,
			     ROOT_TYPE, 0, NULL, NULL, &root));
  EXPECT_EQ(0, c->set_item_name(root, "root"));
  int a, b;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW2, CRUSH_HASH_RJENKINS1,
			     HOST_TYPE, 0, NULL, NULL, &a));
  EXPECT_EQ(0, c->set_item_name(a, "a"));
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW2, CRUSH_HASH_RJENKINS1,
			     HOST_TYPE, 0, NULL, NULL, &b));
  EXPECT_EQ(0, c->set_item_name(b, "b"));
  {
    map<string,string> loc;
    loc["root"] = "root";
    EXPECT_EQ(0, c->move_bucket(cct, a, loc));
  }
  {
    // a gets three osds (weight 3.0), b gets one (weight 1.0)
    map<string,string> loc;
    loc["root"] = "root";
    loc["host"] = "a";
    EXPECT_EQ(0, c->insert_item(cct, 0, 1.0, "osd.0", loc));
    EXPECT_EQ(0, c->insert_item(cct, 1, 1.0, "osd.1", loc));
    EXPECT_EQ(0, c->insert_item(cct, 2, 1.0, "osd.2", loc));
  }
  {
    map<string,string> loc;
    loc["host"] = "b";
    EXPECT_EQ(0, c->insert_item(cct, 3, 1.0, "osd.3", loc));
  }
  // weights are 16.16 fixed point: 0x10000 == 1.0
  ASSERT_EQ(0x30000, c->get_item_weight(a));
  ASSERT_EQ(string("a"), c->get_item_name(a));
  ASSERT_EQ(0x10000, c->get_item_weight(b));
  ASSERT_EQ(string("b"), c->get_item_name(b));
  ASSERT_EQ(a, c->get_bucket_item(root, 0));
  ASSERT_EQ(0, c->get_bucket_item(a, 0));
  ASSERT_EQ(1, c->get_bucket_item(a, 1));
  ASSERT_EQ(2, c->get_bucket_item(a, 2));
  ASSERT_EQ(3, c->get_bucket_item(b, 0));
  // check if it can swap parent with child
  ASSERT_EQ(-EINVAL, c->swap_bucket(cct, root, a));
  c->swap_bucket(cct, a, b);
  // names, weights and children have traded places between ids a and b;
  // the root still holds the same child id at position 0
  ASSERT_EQ(0x30000, c->get_item_weight(b));
  ASSERT_EQ(string("a"), c->get_item_name(b));
  ASSERT_EQ(0x10000, c->get_item_weight(a));
  ASSERT_EQ(string("b"), c->get_item_name(a));
  ASSERT_EQ(a, c->get_bucket_item(root, 0));
  ASSERT_EQ(0, c->get_bucket_item(b, 0));
  ASSERT_EQ(1, c->get_bucket_item(b, 1));
  ASSERT_EQ(2, c->get_bucket_item(b, 2));
  ASSERT_EQ(3, c->get_bucket_item(a, 0));
}
// Exercises the can_rename_*/rename_* error paths — invalid target name
// (-EINVAL), target already taken (-EEXIST), source gone but target
// present (-EALREADY), neither name known (-ENOENT), renaming a
// non-bucket through the bucket API (-ENOTDIR) — then the success paths
// for a bucket and for an item, checking the id is preserved.
TEST_F(CrushWrapperTest, rename_bucket_or_item) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int root0;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			     ROOT_TYPE, 0, NULL, NULL, &root0));
  EXPECT_EQ(0, c->set_item_name(root0, "root0"));
  int item = 0;
  {
    map<string,string> loc;
    loc["root"] = "root0";
    loc["host"] = "host0";
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd.0", loc));
  }
  item++;
  {
    map<string,string> loc;
    loc["root"] = "root0";
    loc["host"] = "host1";
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd.1", loc));
  }
  stringstream ss;
  // "????" is not a valid crush name
  EXPECT_EQ(-EINVAL, c->can_rename_item("host0", "????", &ss));
  EXPECT_EQ(-EINVAL, c->rename_item("host0", "????", &ss));
  EXPECT_EQ(-EINVAL, c->can_rename_bucket("host0", "????", &ss));
  EXPECT_EQ(-EINVAL, c->rename_bucket("host0", "????", &ss));
  // the target name is already in use
  EXPECT_EQ(-EEXIST, c->can_rename_item("host0", "host1", &ss));
  EXPECT_EQ(-EEXIST, c->rename_item("host0", "host1", &ss));
  EXPECT_EQ(-EEXIST, c->can_rename_bucket("host0", "host1", &ss));
  EXPECT_EQ(-EEXIST, c->rename_bucket("host0", "host1", &ss));
  // source missing but target present: treated as already renamed
  EXPECT_EQ(-EALREADY, c->can_rename_item("gone", "host1", &ss));
  EXPECT_EQ(-EALREADY, c->rename_item("gone", "host1", &ss));
  EXPECT_EQ(-EALREADY, c->can_rename_bucket("gone", "host1", &ss));
  EXPECT_EQ(-EALREADY, c->rename_bucket("gone", "host1", &ss));
  // neither source nor target exists
  EXPECT_EQ(-ENOENT, c->can_rename_item("doesnotexist", "somethingelse", &ss));
  EXPECT_EQ(-ENOENT, c->rename_item("doesnotexist", "somethingelse", &ss));
  EXPECT_EQ(-ENOENT, c->can_rename_bucket("doesnotexist", "somethingelse", &ss));
  EXPECT_EQ(-ENOENT, c->rename_bucket("doesnotexist", "somethingelse", &ss));
  // osd.1 is an item, not a bucket
  EXPECT_EQ(-ENOTDIR, c->can_rename_bucket("osd.1", "somethingelse", &ss));
  EXPECT_EQ(-ENOTDIR, c->rename_bucket("osd.1", "somethingelse", &ss));
  int host0id = c->get_item_id("host0");
  EXPECT_EQ(0, c->rename_bucket("host0", "host0renamed", &ss));
  EXPECT_EQ(host0id, c->get_item_id("host0renamed"));
  // capture the id under the item's real name "osd.0" (previously this
  // looked up the non-existent name "osd0" and only passed because the
  // not-found fallback id happens to equal osd.0's id of 0)
  int osd0id = c->get_item_id("osd.0");
  EXPECT_EQ(0, c->rename_item("osd.0", "osd0renamed", &ss));
  EXPECT_EQ(osd0id, c->get_item_id("osd0renamed"));
}
// check_item_loc(): reports whether an item sits at the given location
// and, on success, retrieves its weight; a series of failure modes is
// probed before the positive case.
TEST_F(CrushWrapperTest, check_item_loc) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  int item = 0;
  float expected_weight = 1.0;

  // fail if loc is empty
  {
    float weight;
    map<string,string> loc;
    EXPECT_FALSE(c->check_item_loc(cct, item, loc, &weight));
  }

  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		ROOT_TYPE, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");

  // fail because the item is not found at the specified location
  {
    float weight;
    map<string,string> loc;
    loc["root"] = "default";
    EXPECT_FALSE(c->check_item_loc(cct, item, loc, &weight));
  }
  // fail because the bucket name does not match an existing bucket
  {
    float weight;
    map<string,string> loc;
    loc["root"] = "default";
    const string HOST("host0");
    loc["host"] = HOST;
    EXPECT_FALSE(c->check_item_loc(cct, item, loc, &weight));
  }
  const string OSD("osd.0");
  {
    map<string,string> loc;
    loc["root"] = "default";
    EXPECT_EQ(0, c->insert_item(cct, item, expected_weight,
				OSD, loc));
  }
  // fail because osd.0 is not a bucket and must not be in loc, in
  // addition to being of the wrong type
  {
    float weight;
    map<string,string> loc;
    loc["root"] = "osd.0";
    EXPECT_FALSE(c->check_item_loc(cct, item, loc, &weight));
  }
  // succeed and retrieves the expected weight
  {
    float weight;
    map<string,string> loc;
    loc["root"] = "default";
    EXPECT_TRUE(c->check_item_loc(cct, item, loc, &weight));
    EXPECT_EQ(expected_weight, weight);
  }
}
// update_item(): validates input names, then updates an item's name,
// weight and/or location.  Return value is 0 when nothing changed and 1
// when something was updated.
TEST_F(CrushWrapperTest, update_item) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		ROOT_TYPE, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  const string HOST0("host0");
  int host0;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		HOST_TYPE, 0, NULL, NULL, &host0);
  c->set_item_name(host0, HOST0);
  const string HOST1("host1");
  int host1;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		HOST_TYPE, 0, NULL, NULL, &host1);
  c->set_item_name(host1, HOST1);
  int item = 0;

  // fail if invalid names anywhere in loc
  {
    map<string,string> loc;
    loc["rack"] = "\001";
    EXPECT_EQ(-EINVAL, c->update_item(cct, item, 1.0,
				      "osd." + stringify(item), loc));
  }
  // fail if invalid item name
  {
    map<string,string> loc;
    EXPECT_EQ(-EINVAL, c->update_item(cct, item, 1.0,
				      "\005", loc));
  }
  const string OSD0("osd.0");
  const string OSD1("osd.1");
  float original_weight = 1.0;
  float modified_weight = 2.0;
  float weight;

  map<string,string> loc;
  loc["root"] = "default";
  loc["host"] = HOST0;
  EXPECT_GE(0.0, c->get_item_weightf(host0));
  EXPECT_EQ(0, c->insert_item(cct, item, original_weight,
			      OSD0, loc));

  // updating nothing changes nothing
  EXPECT_EQ(OSD0, c->get_item_name(item));
  EXPECT_EQ(original_weight, c->get_item_weightf(item));
  EXPECT_TRUE(c->check_item_loc(cct, item, loc, &weight));
  EXPECT_EQ(0, c->update_item(cct, item, original_weight,
			      OSD0, loc));
  EXPECT_EQ(OSD0, c->get_item_name(item));
  EXPECT_EQ(original_weight, c->get_item_weightf(item));
  EXPECT_TRUE(c->check_item_loc(cct, item, loc, &weight));

  // update the name and weight of the item but not the location
  EXPECT_EQ(OSD0, c->get_item_name(item));
  EXPECT_EQ(original_weight, c->get_item_weightf(item));
  EXPECT_TRUE(c->check_item_loc(cct, item, loc, &weight));
  EXPECT_EQ(1, c->update_item(cct, item, modified_weight,
			      OSD1, loc));
  EXPECT_EQ(OSD1, c->get_item_name(item));
  EXPECT_EQ(modified_weight, c->get_item_weightf(item));
  EXPECT_TRUE(c->check_item_loc(cct, item, loc, &weight));
  // reset name and weight for the next scenario
  c->set_item_name(item, OSD0);
  c->adjust_item_weightf(cct, item, original_weight);

  // update the name and weight of the item and change its location
  map<string,string> other_loc;
  other_loc["root"] = "default";
  other_loc["host"] = HOST1;
  EXPECT_EQ(OSD0, c->get_item_name(item));
  EXPECT_EQ(original_weight, c->get_item_weightf(item));
  EXPECT_TRUE(c->check_item_loc(cct, item, loc, &weight));
  EXPECT_FALSE(c->check_item_loc(cct, item, other_loc, &weight));
  EXPECT_EQ(1, c->update_item(cct, item, modified_weight,
			      OSD1, other_loc));
  EXPECT_EQ(OSD1, c->get_item_name(item));
  EXPECT_EQ(modified_weight, c->get_item_weightf(item));
  EXPECT_FALSE(c->check_item_loc(cct, item, loc, &weight));
  EXPECT_TRUE(c->check_item_loc(cct, item, other_loc, &weight));
}
// adjust_item_weightf() changes an item's weight in every bucket that
// references it; adjust_item_weightf_in_loc() restricts the change to
// one location.  Both return the number of buckets modified.
TEST_F(CrushWrapperTest, adjust_item_weight) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		ROOT_TYPE, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  const string HOST0("host0");
  int host0;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		HOST_TYPE, 0, NULL, NULL, &host0);
  c->set_item_name(host0, HOST0);
  const string FAKE("fake");
  int hostfake;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		HOST_TYPE, 0, NULL, NULL, &hostfake);
  c->set_item_name(hostfake, FAKE);
  int item = 0;

  // construct crush map
  {
    // osd.0 and osd.1 are placed under host0, which is linked to default
    map<string,string> loc;
    loc["host"] = "host0";
    float host_weight = 2.0;
    int bucket_id = 0;
    item = 0;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    item = 1;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    bucket_id = c->get_item_id("host0");
    EXPECT_EQ(true, c->bucket_exists(bucket_id));
    EXPECT_EQ(host_weight, c->get_bucket_weightf(bucket_id));
    map<string,string> bloc;
    bloc["root"] = "default";
    EXPECT_EQ(0, c->insert_item(cct, host0, host_weight,
				HOST0, bloc));
  }
  {
    // the same osd ids also appear under the second host, "fake"
    map<string,string> loc;
    loc["host"] = "fake";
    float host_weight = 2.0;
    int bucket_id = 0;
    item = 0;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    item = 1;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    bucket_id = c->get_item_id("fake");
    EXPECT_EQ(true, c->bucket_exists(bucket_id));
    EXPECT_EQ(host_weight, c->get_bucket_weightf(bucket_id));
    map<string,string> bloc;
    bloc["root"] = "default";
    EXPECT_EQ(0, c->insert_item(cct, hostfake, host_weight,
				FAKE, bloc));
  }

  //
  // When there is:
  //
  // default --> host0 --> osd.0 1.0
  //     |          |
  //     |          +-> osd.1 1.0
  //     |
  //     +-> fake --> osd.0 1.0
  //            |
  //            +-> osd.1 1.0
  //
  // Trying to adjust osd.0 weight to 2.0 in all buckets
  // Trying to adjust osd.1 weight to 2.0 in host=fake
  //
  // So the crush map will be:
  //
  // default --> host0 --> osd.0 2.0
  //     |          |
  //     |          +-> osd.1 1.0
  //     |
  //     +-> fake --> osd.0 2.0
  //            |
  //            +-> osd.1 2.0
  //

  float original_weight = 1.0;
  float modified_weight = 2.0;
  map<string,string> loc_one, loc_two;
  loc_one["host"] = "host0";
  loc_two["host"] = "fake";

  // osd.0 lives in two buckets, so two buckets are changed
  item = 0;
  EXPECT_EQ(2, c->adjust_item_weightf(cct, item, modified_weight));
  EXPECT_EQ(modified_weight, c->get_item_weightf_in_loc(item, loc_one));
  EXPECT_EQ(modified_weight, c->get_item_weightf_in_loc(item, loc_two));

  // osd.1 is only changed under "fake"; the host0 copy keeps its weight
  item = 1;
  EXPECT_EQ(1, c->adjust_item_weightf_in_loc(cct, item, modified_weight, loc_two));
  EXPECT_EQ(original_weight, c->get_item_weightf_in_loc(item, loc_one));
  EXPECT_EQ(modified_weight, c->get_item_weightf_in_loc(item, loc_two));
}
// adjust_subtree_weightf() rescales every leaf under the given subtree
// (returning how many items changed) and the new subtree weight is
// reflected in the ancestors.
TEST_F(CrushWrapperTest, adjust_subtree_weight) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		ROOT_TYPE, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  const string HOST0("host0");
  int host0;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		HOST_TYPE, 0, NULL, NULL, &host0);
  c->set_item_name(host0, HOST0);
  const string FAKE("fake");
  int hostfake;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		HOST_TYPE, 0, NULL, NULL, &hostfake);
  c->set_item_name(hostfake, FAKE);
  int item = 0;

  // construct crush map
  {
    // two 1.0-weight osds under host0, host0 linked under default
    map<string,string> loc;
    loc["host"] = "host0";
    float host_weight = 2.0;
    int bucket_id = 0;
    item = 0;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    item = 1;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    bucket_id = c->get_item_id("host0");
    EXPECT_EQ(true, c->bucket_exists(bucket_id));
    EXPECT_EQ(host_weight, c->get_bucket_weightf(bucket_id));
    map<string,string> bloc;
    bloc["root"] = "default";
    EXPECT_EQ(0, c->insert_item(cct, host0, host_weight,
				HOST0, bloc));
  }
  {
    // same two osd ids under a second host "fake"
    map<string,string> loc;
    loc["host"] = "fake";
    float host_weight = 2.0;
    int bucket_id = 0;
    item = 0;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    item = 1;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    bucket_id = c->get_item_id("fake");
    EXPECT_EQ(true, c->bucket_exists(bucket_id));
    EXPECT_EQ(host_weight, c->get_bucket_weightf(bucket_id));
    map<string,string> bloc;
    bloc["root"] = "default";
    EXPECT_EQ(0, c->insert_item(cct, hostfake, host_weight,
				FAKE, bloc));
  }

  //cout << "--------before---------" << std::endl;
  //c->dump_tree(&cout, NULL);
  // weights are 16.16 fixed point: 131072 == 2.0, 262144 == 4.0
  ASSERT_EQ(c->get_bucket_weight(host0), 131072);
  ASSERT_EQ(c->get_bucket_weight(rootno), 262144);

  int r = c->adjust_subtree_weightf(cct, host0, 2.0);
  ASSERT_EQ(r, 2); // 2 items changed

  //cout << "--------after---------" << std::endl;
  //c->dump_tree(&cout, NULL);

  // host0 doubled (both osds now 2.0); fake is untouched, so the root
  // total is new host0 (262144) + fake (131072)
  ASSERT_EQ(c->get_bucket_weight(host0), 262144);
  ASSERT_EQ(c->get_item_weight(host0), 262144);
  ASSERT_EQ(c->get_bucket_weight(rootno), 262144 + 131072);
}
// insert_item(): covers the validation and error paths — invalid names,
// duplicate item names, implicit bucket creation, name collision with a
// non-bucket, loop detection, bucket-type mismatch and a missing
// location.
TEST_F(CrushWrapperTest, insert_item) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		ROOT_TYPE, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");

  int item = 0;

  // invalid names anywhere in loc trigger an error
  {
    map<string,string> loc;
    loc["host"] = "\001";
    EXPECT_EQ(-EINVAL, c->insert_item(cct, item, 1.0,
				      "osd." + stringify(item), loc));
  }
  // insert an item in an existing bucket
  {
    map<string,string> loc;
    loc["root"] = "default";
    item++;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
    // a different item id may not reuse an existing item name
    int another_item = item + 1;
    EXPECT_EQ(-EEXIST, c->insert_item(cct, another_item, 1.0,
				      "osd." + stringify(item), loc));
  }
  // implicit creation of a bucket
  {
    string name = "NAME";
    map<string,string> loc;
    loc["root"] = "default";
    loc["host"] = name;
    item++;
    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd." + stringify(item), loc));
  }
  // adding to an existing item name that is not associated with a bucket
  {
    string name = "ITEM_WITHOUT_BUCKET";
    map<string,string> loc;
    loc["root"] = "default";
    loc["host"] = name;
    item++;
    c->set_item_name(item, name);
    item++;
    EXPECT_EQ(-EINVAL, c->insert_item(cct, item, 1.0,
				      "osd." + stringify(item), loc));
  }
  //
  // When there is:
  //
  // default --> host0 --> item
  //
  // Trying to insert the same item higher in the hirarchy will fail
  // because it would create a loop.
  //
  // default --> host0 --> item
  //     |
  //     +-> item
  //
  {
    item++;
    {
      map<string,string> loc;
      loc["root"] = "default";
      loc["host"] = "host0";
      EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				  "osd." + stringify(item), loc));
    }
    {
      map<string,string> loc;
      loc["root"] = "default";
      EXPECT_EQ(-EINVAL, c->insert_item(cct, item, 1.0,
					"osd." + stringify(item), loc));
    }
  }
  //
  // When there is:
  //
  // default --> host0
  //
  // Trying to insert default under host0 must fail
  // because it would create a loop.
  //
  // default --> host0 --> default
  //
  {
    map<string,string> loc;
    loc["host"] = "host0";
    EXPECT_EQ(-ELOOP, c->insert_item(cct, rootno, 1.0,
				     "default", loc));
  }
  // fail when mapping a bucket to the wrong type
  {
    // create an OSD bucket
    int osdno;
    int r = c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			  10, 0, NULL, NULL, &osdno);
    ASSERT_EQ(0, r);
    c->set_item_name(osdno, "myosd");
    map<string,string> loc;
    loc["root"] = "default";
    // wrongfully pretend the osd is of type host
    loc["host"] = "myosd";
    item++;
    EXPECT_EQ(-EINVAL, c->insert_item(cct, item, 1.0,
				      "osd." + stringify(item), loc));
  }
  // fail when no location
  {
    map<string,string> loc;
    item++;
    EXPECT_EQ(-EINVAL, c->insert_item(cct, item, 1.0,
				      "osd." + stringify(item), loc));
  }
}
// remove_item(): after removal the item is no longer found at its former
// location.
TEST_F(CrushWrapperTest, remove_item) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);

  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");

  {
    int root;
    ASSERT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			       ROOT_TYPE, 0, NULL, NULL, &root));
    c->set_item_name(root, "root0");
  }

  {
    int host;
    c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		  HOST_TYPE, 0, NULL, NULL, &host);
    c->set_item_name(host, "host0");
  }

  const int num_osd = 12;
  {
    map<string, string> loc = {{"root", "root0"},
			       {"host", "host0"}};
    string name{"osd."};
    for (int item = 0; item < num_osd; item++) {
      ASSERT_EQ(0, c->insert_item(cct, item, 1.0,
				  name + to_string(item), loc));
    }
  }
  // remove one osd from the middle and verify it is gone from its
  // former parent
  const int item_to_remove = num_osd / 2;
  map<string, string> loc;
  loc.insert(c->get_immediate_parent(item_to_remove));
  ASSERT_EQ(0, c->remove_item(cct, item_to_remove, true));
  float weight;
  EXPECT_FALSE(c->check_item_loc(cct, item_to_remove, loc, &weight));
}
// set_item_name(): rejects invalid names, then registers the id<->name
// mapping which is visible through both lookup directions.
TEST_F(CrushWrapperTest, item_bucket_names) {
  auto c = std::make_unique<CrushWrapper>();
  const int item_id = 123;
  const string item_name = "NAME";
  // a control character is not a valid crush name
  EXPECT_EQ(-EINVAL, c->set_item_name(item_id, "\001"));
  EXPECT_EQ(0, c->set_item_name(item_id, item_name));
  // both directions of the mapping are now queryable
  EXPECT_TRUE(c->item_exists(item_id));
  EXPECT_TRUE(c->name_exists(item_name));
  EXPECT_EQ(item_name, c->get_item_name(item_id));
  EXPECT_EQ(item_id, c->get_item_id(item_name));
}
// set_type_name(): registers a bucket type, mapped both by id and by
// name, and counted by get_num_type_names().
TEST_F(CrushWrapperTest, bucket_types) {
  auto c = std::make_unique<CrushWrapper>();
  const int type_id = 123;
  const string type_name = "NAME";
  c->set_type_name(type_id, type_name);
  // exactly one type registered, with a two-way mapping
  EXPECT_EQ(1, c->get_num_type_names());
  EXPECT_EQ(type_name, c->get_type_name(type_id));
  EXPECT_EQ(type_id, c->get_type_id(type_name));
}
// is_valid_crush_name(): alphanumerics plus '-' and '_' are accepted;
// empty strings and control characters are rejected.
TEST_F(CrushWrapperTest, is_valid_crush_name) {
  const string acceptable =
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012456789-_";
  EXPECT_TRUE(CrushWrapper::is_valid_crush_name(acceptable));
  EXPECT_FALSE(CrushWrapper::is_valid_crush_name(""));
  EXPECT_FALSE(CrushWrapper::is_valid_crush_name("\001"));
}
// is_valid_crush_loc(): an empty location and well-formed names are
// valid; a control character in either the type key or the bucket name
// makes the whole location invalid.
TEST_F(CrushWrapperTest, is_valid_crush_loc) {
  map<string,string> valid_loc;
  EXPECT_TRUE(CrushWrapper::is_valid_crush_loc(cct, valid_loc));
  valid_loc["good"] = "better";
  EXPECT_TRUE(CrushWrapper::is_valid_crush_loc(cct, valid_loc));
  {
    // bad type key
    map<string,string> bad_type;
    bad_type["\005"] = "default";
    EXPECT_FALSE(CrushWrapper::is_valid_crush_loc(cct, bad_type));
  }
  {
    // bad bucket name
    map<string,string> bad_name;
    bad_name["host"] = "\003";
    EXPECT_FALSE(CrushWrapper::is_valid_crush_loc(cct, bad_name));
  }
}
// dump_rules()/dump_rule(): a fresh map dumps an empty rule list; after
// add_simple_rule() the dump contains the rule id/name and references
// the root item.  Also sanity-checks get_rule_weight_osd_map().
TEST_F(CrushWrapperTest, dump_rules) {
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);

  const int ROOT_TYPE = 1;
  c->set_type_name(ROOT_TYPE, "root");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");

  string failure_domain_type("osd");
  string root_name("default");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		ROOT_TYPE, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, root_name);

  int item = 0;

  pair <string,string> loc;
  int ret;
  loc = c->get_immediate_parent(item, &ret);
  EXPECT_EQ(-ENOENT, ret);

  {
    map<string,string> loc;
    loc["root"] = root_name;

    EXPECT_EQ(0, c->insert_item(cct, item, 1.0,
				"osd.0", loc));
  }

  // no rule by default
  {
    auto f = Formatter::create_unique("json-pretty");
    f->open_array_section("rules");
    c->dump_rules(f.get());
    f->close_section();
    stringstream ss;
    f->flush(ss);
    EXPECT_EQ("[]\n", ss.str());
  }

  string name("NAME");
  int rule = c->add_simple_rule(name, root_name, failure_domain_type, "",
				"firstn", pg_pool_t::TYPE_ERASURE);
  EXPECT_EQ(0, rule);

  {
    auto f = Formatter::create_unique("xml");
    c->dump_rules(f.get());
    stringstream ss;
    f->flush(ss);
    // the dump starts with our rule's id and name
    EXPECT_EQ((unsigned)0, ss.str().find("<rule><rule_id>0</rule_id><rule_name>NAME</rule_name>"));
  }

  {
    auto f = Formatter::create_unique("xml");
    c->dump_rule(rule, f.get());
    stringstream ss;
    f->flush(ss);
    EXPECT_EQ((unsigned)0, ss.str().find("<rule><rule_id>0</rule_id><rule_name>NAME</rule_name>"));
    // the rule's "take" step references the root bucket by name
    EXPECT_NE(string::npos,
	      ss.str().find("<item_name>default</item_name></step>"));
  }

  map<int,float> wm;
  c->get_rule_weight_osd_map(0, &wm);
  // the single osd.0 of weight 1.0 is the whole weight map
  ASSERT_TRUE(wm.size() == 1);
  ASSERT_TRUE(wm[0] == 1.0);
}
// get_common_ancestor_distance(): distance from an osd to the closest
// common ancestor with the given location multimap; multiple entries
// per type express a "multipath" location and the minimum wins.
TEST_F(CrushWrapperTest, distance) {
  CrushWrapper c;
  c.create();
  c.set_type_name(1, "host");
  c.set_type_name(2, "rack");
  c.set_type_name(3, "root");

  int bno;
  int r = c.add_bucket(0, CRUSH_BUCKET_STRAW,
		       CRUSH_HASH_DEFAULT, 3, 0, NULL,
		       NULL, &bno);
  ASSERT_EQ(0, r);
  ASSERT_EQ(-1, bno);
  c.set_item_name(bno, "default");

  c.set_max_devices(10);

  //JSONFormatter jf(true);

  // two racks (a, b), two hosts each, one osd per host
  map<string,string> loc;
  loc["host"] = "a1";
  loc["rack"] = "a";
  loc["root"] = "default";
  c.insert_item(cct, 0, 1, "osd.0", loc);

  loc.clear();
  loc["host"] = "a2";
  loc["rack"] = "a";
  loc["root"] = "default";
  c.insert_item(cct, 1, 1, "osd.1", loc);

  loc.clear();
  loc["host"] = "b1";
  loc["rack"] = "b";
  loc["root"] = "default";
  c.insert_item(cct, 2, 1, "osd.2", loc);

  loc.clear();
  loc["host"] = "b2";
  loc["rack"] = "b";
  loc["root"] = "default";
  c.insert_item(cct, 3, 1, "osd.3", loc);

  // full location is ordered from the most specific type upwards
  vector<pair<string,string> > ol;
  c.get_full_location_ordered(3, ol);
  ASSERT_EQ(3u, ol.size());
  ASSERT_EQ(make_pair(string("host"),string("b2")), ol[0]);
  ASSERT_EQ(make_pair(string("rack"),string("b")), ol[1]);
  ASSERT_EQ(make_pair(string("root"),string("default")), ol[2]);

  //c.dump(&jf);
  //jf.flush(cout);

  multimap<string,string> p;
  p.insert(make_pair("host","b2"));
  p.insert(make_pair("rack","b"));
  p.insert(make_pair("root","default"));
  // osd.3 is at p itself (1); osd.2 shares the rack (2); osds 0/1 only
  // share the root (3); unknown ids yield -ENOENT
  ASSERT_EQ(3, c.get_common_ancestor_distance(cct, 0, p));
  ASSERT_EQ(3, c.get_common_ancestor_distance(cct, 1, p));
  ASSERT_EQ(2, c.get_common_ancestor_distance(cct, 2, p));
  ASSERT_EQ(1, c.get_common_ancestor_distance(cct, 3, p));
  ASSERT_EQ(-ENOENT, c.get_common_ancestor_distance(cct, 123, p));

  // make sure a "multipath" location will reflect a minimal
  // distance for both paths
  p.insert(make_pair("host","b1"));
  ASSERT_EQ(1, c.get_common_ancestor_distance(cct, 2, p));
  ASSERT_EQ(1, c.get_common_ancestor_distance(cct, 3, p));
}
// choose_args encode/decode compatibility: a client advertising
// CEPH_FEATURE_CRUSH_CHOOSE_ARGS round-trips the choose_args map; a
// legacy client instead gets the choose_arg weights folded into the
// bucket item weights.
TEST_F(CrushWrapperTest, choose_args_compat) {
  CrushWrapper c;
  c.create();
  c.set_type_name(1, "host");
  c.set_type_name(2, "rack");
  c.set_type_name(3, "root");

  int weight = 12;

  map<string,string> loc;
  loc["host"] = "b1";
  loc["rack"] = "r11";
  loc["root"] = "default";
  int item = 1;
  c.insert_item(cct, item, weight, "osd.1", loc);

  loc["host"] = "b2";
  loc["rack"] = "r12";
  loc["root"] = "default";
  item = 2;
  c.insert_item(cct, item, weight, "osd.2", loc);

  ceph_assert(c.add_simple_rule("rule1", "r11", "host", "",
				"firstn", pg_pool_t::TYPE_ERASURE) >= 0);

  // build a one-position weight set overriding bucket b1's weights
  int id = c.get_item_id("b1");

  __u32 weights = 666 * 0x10000;  // 666.0 in 16.16 fixed point
  crush_weight_set weight_set;
  weight_set.size = 1;
  weight_set.weights = &weights;
  int maxbuckets = c.get_max_buckets();
  ceph_assert(maxbuckets > 0);
  // NOTE(review): variable-length array is a GCC/Clang extension, not
  // standard C++
  crush_choose_arg choose_args[maxbuckets];
  memset(choose_args, '\0', sizeof(crush_choose_arg) * maxbuckets);
  // bucket ids are negative; -1-id maps them to array positions
  choose_args[-1-id].ids_size = 0;
  choose_args[-1-id].weight_set_positions = 1;
  choose_args[-1-id].weight_set = &weight_set;
  crush_choose_arg_map arg_map;
  arg_map.size = c.get_max_buckets();
  arg_map.args = choose_args;

  uint64_t features = CEPH_FEATURE_CRUSH_TUNABLES5|CEPH_FEATURE_INCARNATION_2;
  int64_t caid = CrushWrapper::DEFAULT_CHOOSE_ARGS;

  // if the client is capable, encode choose_args
  {
    c.choose_args[caid] = arg_map;
    bufferlist bl;
    c.encode(bl, features|CEPH_FEATURE_CRUSH_CHOOSE_ARGS);
    auto i = bl.cbegin();
    CrushWrapper c_new;
    c_new.decode(i);
    ASSERT_EQ(1u, c_new.choose_args.size());
    ASSERT_EQ(1u, c_new.choose_args[caid].args[-1-id].weight_set_positions);
    ASSERT_EQ(weights, c_new.choose_args[caid].args[-1-id].weight_set[0].weights[0]);
    // the plain bucket weight is untouched
    ASSERT_EQ(weight, c_new.get_bucket_item_weightf(id, 0));
  }

  // if the client is not compatible, copy choose_arg in the weights
  {
    c.choose_args[caid] = arg_map;
    bufferlist bl;
    c.encode(bl, features);
    c.choose_args.clear();
    auto i = bl.cbegin();
    CrushWrapper c_new;
    c_new.decode(i);
    ASSERT_EQ(0u, c_new.choose_args.size());
    ASSERT_EQ((int)weights, c_new.get_bucket_item_weight(id, 0));
  }
}
// remove_root(): removing a root bucket also removes every bucket
// beneath it (here the racks r11/r12 disappear with "default").
TEST_F(CrushWrapperTest, remove_root) {
  CrushWrapper c;
  c.create();
  c.set_type_name(1, "host");
  c.set_type_name(2, "rack");
  c.set_type_name(3, "root");

  int weight = 1;

  map<string,string> loc;
  loc["host"] = "b1";
  loc["rack"] = "r11";
  loc["root"] = "default";
  int item = 1;
  c.insert_item(cct, item, weight, "osd.1", loc);
  item = 2;
  loc["host"] = "b2";
  loc["rack"] = "r12";
  loc["root"] = "default";
  c.insert_item(cct, item, weight, "osd.2", loc);

  ceph_assert(c.add_simple_rule("rule1", "r11", "host", "",
				"firstn", pg_pool_t::TYPE_ERASURE) >= 0);
  ASSERT_TRUE(c.name_exists("default"));
  ASSERT_TRUE(c.name_exists("r11"));
  ASSERT_TRUE(c.name_exists("r12"));
  ASSERT_EQ(c.remove_root(cct, c.get_item_id("default")), 0);
  ASSERT_FALSE(c.name_exists("default"));
  ASSERT_FALSE(c.name_exists("r11"));
  ASSERT_FALSE(c.name_exists("r12"));
}
// trim_roots_with_class(): removes the per-device-class shadow roots
// (e.g. "default~ssd") while leaving the real roots in place.
TEST_F(CrushWrapperTest, trim_roots_with_class) {
  CrushWrapper c;
  c.create();
  c.set_type_name(1, "root");

  int weight = 1;
  map<string,string> loc;
  loc["root"] = "default";

  int item = 1;
  c.insert_item(cct, item, weight, "osd.1", loc);
  int cl = c.get_or_create_class_id("ssd");
  c.class_map[item] = cl;

  int root_id = c.get_item_id("default");
  int clone_id;
  map<int32_t, map<int32_t, int32_t>> old_class_bucket;
  map<int,map<int,vector<int>>> cmap_item_weight; // cargs -> bno -> weights
  set<int32_t> used_ids;

  // create the "default~ssd" shadow hierarchy for class ssd
  ASSERT_EQ(c.device_class_clone(root_id, cl, old_class_bucket, used_ids,
				 &clone_id, &cmap_item_weight), 0);

  ASSERT_TRUE(c.name_exists("default"));
  ASSERT_TRUE(c.name_exists("default~ssd"));
  c.trim_roots_with_class(cct); // do nothing because still in use
  ASSERT_TRUE(c.name_exists("default"));
  ASSERT_FALSE(c.name_exists("default~ssd"));
}
// device_class_clone(): builds a shadow subtree ("default~ssd")
// containing only the items of the given class; cloning twice returns
// the existing clone, and bad root/class ids are rejected.
TEST_F(CrushWrapperTest, device_class_clone) {
  CrushWrapper c;
  c.create();
  c.set_type_name(1, "host");
  c.set_type_name(2, "root");

  map<string,string> loc;
  loc["host"] = "b1";
  loc["root"] = "default";

  int weight = 1;

  int item = 1;
  c.insert_item(cct, item, weight, "osd.1", loc);
  int cl = c.get_or_create_class_id("ssd");
  c.class_map[item] = cl;

  // osd.2 has no device class and must stay out of the clone
  int item_no_class = 2;
  c.insert_item(cct, item_no_class, weight, "osd.2", loc);

  c.reweight(cct);

  map<int32_t, map<int32_t, int32_t>> old_class_bucket;
  map<int,map<int,vector<int>>> cmap_item_weight; // cargs -> bno -> weights
  set<int32_t> used_ids;
  int root_id = c.get_item_id("default");
  int clone_id;
  ASSERT_EQ(c.device_class_clone(root_id, cl, old_class_bucket, used_ids,
				 &clone_id, &cmap_item_weight), 0);
  ASSERT_TRUE(c.name_exists("default~ssd"));
  ASSERT_EQ(clone_id, c.get_item_id("default~ssd"));
  ASSERT_TRUE(c.subtree_contains(clone_id, item));
  ASSERT_FALSE(c.subtree_contains(clone_id, item_no_class));
  ASSERT_TRUE(c.subtree_contains(root_id, item_no_class));
  // the original root keeps both osds; the clone only the ssd one
  ASSERT_EQ(c.get_item_weightf(root_id), 2);
  ASSERT_EQ(c.get_item_weightf(clone_id), 1);

  // cloning again does nothing and returns the existing one
  int other_clone_id;
  ASSERT_EQ(c.device_class_clone(root_id, cl, old_class_bucket, used_ids,
				 &other_clone_id, &cmap_item_weight), 0);
  ASSERT_EQ(clone_id, other_clone_id);

  // invalid arguments
  ASSERT_EQ(c.device_class_clone(12345, cl, old_class_bucket, used_ids,
				 &other_clone_id, &cmap_item_weight), -ECHILD);
  ASSERT_EQ(c.device_class_clone(root_id, 12345, old_class_bucket, used_ids,
				 &other_clone_id, &cmap_item_weight), -EBADF);
}
// split_id_class(): a shadow-bucket id decomposes into the original
// bucket id plus the device class; a plain bucket id yields class -1.
TEST_F(CrushWrapperTest, split_id_class) {
  CrushWrapper c;
  c.create();
  c.set_type_name(1, "root");

  int weight = 1;
  map<string,string> loc;
  loc["root"] = "default";

  int item = 1;
  c.insert_item(cct, item, weight, "osd.1", loc);
  int class_id = c.get_or_create_class_id("ssd");
  c.class_map[item] = class_id;

  map<int32_t, map<int32_t, int32_t>> old_class_bucket;
  map<int,map<int,vector<int>>> cmap_item_weight; // cargs -> bno -> weights
  set<int32_t> used_ids;
  int item_id = c.get_item_id("default");
  int clone_id;
  ASSERT_EQ(c.device_class_clone(item_id, class_id, old_class_bucket, used_ids,
				 &clone_id, &cmap_item_weight), 0);
  int retrieved_item_id;
  int retrieved_class_id;
  // the clone id maps back to (original id, class id)
  ASSERT_EQ(c.split_id_class(clone_id, &retrieved_item_id, &retrieved_class_id), 0);
  ASSERT_EQ(item_id, retrieved_item_id);
  ASSERT_EQ(class_id, retrieved_class_id);

  // a non-shadow id maps to itself with no class (-1)
  ASSERT_EQ(c.split_id_class(item_id, &retrieved_item_id, &retrieved_class_id), 0);
  ASSERT_EQ(item_id, retrieved_item_id);
  ASSERT_EQ(-1, retrieved_class_id);
}
// populate_classes() builds the per-class shadow hierarchy ("default~ssd")
// and must be idempotent: a second call with the resulting class_bucket map
// leaves class_bucket unchanged.
TEST_F(CrushWrapperTest, populate_classes) {
  CrushWrapper c;
  c.create();
  c.set_type_name(1, "root");
  int weight = 1;
  map<string,string> loc;
  loc["root"] = "default";
  int item = 1;
  c.insert_item(cct, item, weight, "osd.1", loc);
  int class_id = c.get_or_create_class_id("ssd");
  c.class_map[item] = class_id;
  map<int32_t, map<int32_t, int32_t>> old_class_bucket;
  ASSERT_EQ(c.populate_classes(old_class_bucket), 0);
  ASSERT_TRUE(c.name_exists("default~ssd"));
  // re-running with the freshly built class_bucket must be a no-op
  old_class_bucket = c.class_bucket;
  ASSERT_EQ(c.populate_classes(old_class_bucket), 0);
  ASSERT_EQ(old_class_bucket, c.class_bucket);
}
// remove_class_name() fails with -ENOENT for unknown classes and succeeds
// exactly once for a class that exists.
TEST_F(CrushWrapperTest, remove_class_name) {
  CrushWrapper c;
  c.create();
  ASSERT_EQ(-ENOENT, c.remove_class_name("ssd"));
  // NOTE(review): ASSERT_GE(0, x) asserts 0 >= x; this only passes because
  // the first allocated class id happens to be 0.  ASSERT_LE(0, ...)
  // ("id is non-negative") looks like the intended check -- confirm.
  ASSERT_GE(0, c.get_or_create_class_id("ssd"));
  ASSERT_EQ(0, c.remove_class_name("ssd"));
  ASSERT_EQ(-ENOENT, c.remove_class_name("ssd"));
}
// Exercise CrushWrapper::try_remap_rule(): given a mapping produced by a
// rule, a set of overfull osds and ordered lists of underfull candidates,
// it should swap overfull osds for underfull ones while preserving the
// rule's placement constraints (host/rack separation, slot positions).
TEST_F(CrushWrapperTest, try_remap_rule) {
  // build a simple 2 level map
  CrushWrapper c;
  c.create();
  c.set_type_name(0, "osd");
  c.set_type_name(1, "host");
  c.set_type_name(2, "rack");
  c.set_type_name(3, "root");
  int bno;
  int r = c.add_bucket(0, CRUSH_BUCKET_STRAW2,
		       CRUSH_HASH_DEFAULT, 3, 0, NULL,
		       NULL, &bno);
  ASSERT_EQ(0, r);
  ASSERT_EQ(-1, bno);
  c.set_item_name(bno, "default");
  c.set_max_devices(20);
  //JSONFormatter jf(true);
  // 6 hosts spread over 3 racks, 3 osds each (osd.0..17)
  map<string,string> loc;
  loc["host"] = "foo";
  loc["rack"] = "a";
  loc["root"] = "default";
  c.insert_item(cct, 0, 1, "osd.0", loc);
  c.insert_item(cct, 1, 1, "osd.1", loc);
  c.insert_item(cct, 2, 1, "osd.2", loc);
  loc.clear();
  loc["host"] = "bar";
  loc["rack"] = "a";
  loc["root"] = "default";
  c.insert_item(cct, 3, 1, "osd.3", loc);
  c.insert_item(cct, 4, 1, "osd.4", loc);
  c.insert_item(cct, 5, 1, "osd.5", loc);
  loc.clear();
  loc["host"] = "baz";
  loc["rack"] = "b";
  loc["root"] = "default";
  c.insert_item(cct, 6, 1, "osd.6", loc);
  c.insert_item(cct, 7, 1, "osd.7", loc);
  c.insert_item(cct, 8, 1, "osd.8", loc);
  loc.clear();
  loc["host"] = "qux";
  loc["rack"] = "b";
  loc["root"] = "default";
  c.insert_item(cct, 9, 1, "osd.9", loc);
  c.insert_item(cct, 10, 1, "osd.10", loc);
  c.insert_item(cct, 11, 1, "osd.11", loc);
  c.finalize();
  loc.clear();
  loc["host"] = "bif";
  loc["rack"] = "c";
  loc["root"] = "default";
  c.insert_item(cct, 12, 1, "osd.12", loc);
  c.insert_item(cct, 13, 1, "osd.13", loc);
  c.insert_item(cct, 14, 1, "osd.14", loc);
  c.finalize();
  loc.clear();
  loc["host"] = "pop";
  loc["rack"] = "c";
  loc["root"] = "default";
  c.insert_item(cct, 15, 1, "osd.15", loc);
  c.insert_item(cct, 16, 1, "osd.16", loc);
  c.insert_item(cct, 17, 1, "osd.17", loc);
  c.finalize();
  //c.dump(&jf);
  //jf.flush(cout);
  // take + emit
  {
  }
  // take + choose device + emit
  {
    cout << "take + choose + emit" << std::endl;
    ostringstream err;
    int rule = c.add_simple_rule("one", "default", "osd", "",
				 "firstn", 0, &err);
    ASSERT_EQ(rule, 0);
    vector<int> orig = { 0, 3, 9 };
    set<int> overfull = { 3 };
    vector<int> underfull = { 0, 2, 5, 8, 11 };
    vector<int> more_underfull = {};
    vector<int> out;
    int r = c.try_remap_rule(cct, rule, 3,
			     overfull, underfull, more_underfull,
			     orig, &out);
    cout << orig << " -> r = " << (int)r << " out " << out << std::endl;
    ASSERT_EQ(r, 0);
    // overfull osd.3 is replaced by the first unused underfull candidate
    ASSERT_EQ(3u, out.size());
    ASSERT_EQ(0, out[0]);
    ASSERT_EQ(2, out[1]);
    ASSERT_EQ(9, out[2]);
    // make sure we cope with dups between underfull and future values in orig
    underfull = {9, 0, 2, 5};
    orig = {1, 3, 9};
    r = c.try_remap_rule(cct, rule, 3,
			 overfull, underfull, more_underfull,
			 orig, &out);
    cout << orig << " -> r = " << (int)r << " out " << out << std::endl;
    ASSERT_EQ(r, 0);
    ASSERT_EQ(3u, out.size());
    ASSERT_EQ(1, out[0]);
    ASSERT_EQ(0, out[1]);
    ASSERT_EQ(9, out[2]);
    //
    // Check that more_underfull is used when underfull runs out
    orig = { 0, 3, 9 };
    overfull = { 3, 9 };
    underfull = { 2 };
    more_underfull = { 5, 8, 11 };
    r = c.try_remap_rule(cct, rule, 3,
			 overfull, underfull, more_underfull,
			 orig, &out);
    cout << orig << " -> r = " << (int)r << " out " << out << std::endl;
    ASSERT_EQ(r, 0);
    ASSERT_EQ(3u, out.size());
    ASSERT_EQ(0, out[0]);
    ASSERT_EQ(2, out[1]);
    ASSERT_EQ(5, out[2]);
  }
  // chooseleaf
  {
    cout << "take + chooseleaf + emit" << std::endl;
    ostringstream err;
    int rule = c.add_simple_rule("two", "default", "host", "",
				 "firstn", 0, &err);
    ASSERT_EQ(rule, 1);
    vector<int> orig = { 0, 3, 9 };
    set<int> overfull = { 3 };
    vector<int> underfull = { 0, 2, 5, 8, 11 };
    vector<int> more_underfull = { };
    vector<int> out;
    int r = c.try_remap_rule(cct, rule, 3,
			     overfull, underfull, more_underfull,
			     orig, &out);
    cout << orig << " -> r = " << (int)r << " out " << out << std::endl;
    ASSERT_EQ(r, 0);
    // replacement must land on a different host than the other mappings,
    // so osd.3 -> osd.5 (same host as 3), skipping candidates on used hosts
    ASSERT_EQ(3u, out.size());
    ASSERT_EQ(0, out[0]);
    ASSERT_EQ(5, out[1]);
    ASSERT_EQ(9, out[2]);
  }
  // choose + choose
  {
    cout << "take + choose + choose + choose + emit" << std::endl;
    int rule = c.add_rule(2, 5, 0);
    ASSERT_EQ(2, rule);
    c.set_rule_step_take(rule, 0, bno);
    c.set_rule_step_choose_indep(rule, 1, 2, 2);
    c.set_rule_step_choose_indep(rule, 2, 2, 1);
    c.set_rule_step_choose_indep(rule, 3, 1, 0);
    c.set_rule_step_emit(rule, 4);
    vector<int> orig = { 0, 3, 16, 12 };
    set<int> overfull = { 3, 12 };
    vector<int> underfull = { 6, 7, 9, 3, 0, 1, 15, 16, 13, 2, 5, 8, 11 };
    vector<int> more_underfull = { };
    vector<int> out;
    int r = c.try_remap_rule(cct, rule, 3,
			     overfull, underfull, more_underfull,
			     orig, &out);
    cout << orig << " -> r = " << (int)r << " out " << out << std::endl;
    ASSERT_EQ(r, 0);
    ASSERT_EQ(4u, out.size());
    ASSERT_EQ(0, out[0]);
    ASSERT_EQ(5, out[1]);
    ASSERT_EQ(16, out[2]);
    ASSERT_EQ(13, out[3]);
    // same rule with one fewer mapping still remaps consistently
    orig.pop_back();
    out.clear();
    r = c.try_remap_rule(cct, rule, 3,
			 overfull, underfull, more_underfull,
			 orig, &out);
    cout << orig << " -> r = " << (int)r << " out " << out << std::endl;
    ASSERT_EQ(r, 0);
    ASSERT_EQ(3u, out.size());
    ASSERT_EQ(0, out[0]);
    ASSERT_EQ(5, out[1]);
    ASSERT_EQ(16, out[2]);
  }
}
// Local Variables:
// compile-command: "cd ../../../build ; make -j4 unittest_crush_wrapper && valgrind --tool=memcheck bin/unittest_crush_wrapper"
// End:
| 42,266 | 27.969842 | 128 | cc |
null | ceph-main/src/test/crush/crush.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank <info@inktank.com>
*
* LGPL-2.1 (see COPYING-LGPL2.1) or later
*/
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <set>
#include "common/ceph_argparse.h"
#include "common/common_init.h"
#include "include/stringify.h"
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
using namespace std;
// Build a CrushWrapper with num_rack racks, each holding num_host hosts of
// num_osd weight-1.0 osds, all under a single straw root "default".
// Rule 0 ("data") is a CHOOSELEAF_INDEP rule over the whole tree with
// chooseleaf_tries raised to 10.  Returns the finalized map.
std::unique_ptr<CrushWrapper> build_indep_map(CephContext *cct, int num_rack,
					      int num_host, int num_osd)
{
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  c->create();
  c->set_type_name(5, "root");
  c->set_type_name(4, "row");
  c->set_type_name(3, "rack");
  c->set_type_name(2, "chasis");
  c->set_type_name(1, "host");
  c->set_type_name(0, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		5, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  map<string,string> loc;
  loc["root"] = "default";
  // osd ids are assigned sequentially across racks/hosts
  int osd = 0;
  for (int r=0; r<num_rack; ++r) {
    loc["rack"] = string("rack-") + stringify(r);
    for (int h=0; h<num_host; ++h) {
      loc["host"] = string("host-") + stringify(r) + string("-") + stringify(h);
      for (int o=0; o<num_osd; ++o, ++osd) {
	c->insert_item(cct, osd, 1.0, string("osd.") + stringify(osd), loc);
      }
    }
  }
  int ret;
  int ruleno = 0;
  ret = c->add_rule(ruleno, 4, 123);
  ceph_assert(ret == ruleno);
  ret = c->set_rule_step(ruleno, 0, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 10, 0);
  ceph_assert(ret == 0);
  ret = c->set_rule_step(ruleno, 1, CRUSH_RULE_TAKE, rootno, 0);
  ceph_assert(ret == 0);
  ret = c->set_rule_step(ruleno, 2, CRUSH_RULE_CHOOSELEAF_INDEP, CRUSH_CHOOSE_N, 1);
  ceph_assert(ret == 0);
  ret = c->set_rule_step(ruleno, 3, CRUSH_RULE_EMIT, 0, 0);
  ceph_assert(ret == 0);
  c->set_rule_name(ruleno, "data");
  c->finalize();
  // flip to true to dump the generated map for debugging
  if (false) {
    Formatter *f = Formatter::create("json-pretty");
    f->open_object_section("crush_map");
    c->dump(f);
    f->close_section();
    f->flush(cout);
    delete f;
  }
  return c;
}
// Count entries of v that duplicate an earlier entry.  CRUSH_ITEM_NONE
// placeholders are ignored: they are never remembered as "seen", so they
// can never be reported as duplicates either.
int get_num_dups(const vector<int>& v)
{
  std::set<int> seen;
  int dups = 0;
  for (const int item : v) {
    if (seen.count(item)) {
      ++dups;
    } else if (item != CRUSH_ITEM_NONE) {
      seen.insert(item);
    }
  }
  return dups;
}
// Test fixture supplying a minimal CephContext (client entity, no default
// config file) for the CRUSH tests below; the context is released after
// each test.
class CRUSHTest : public ::testing::Test
{
public:
  void SetUp() final
  {
    CephInitParameters params(CEPH_ENTITY_TYPE_CLIENT);
    cct = common_preinit(params, CODE_ENVIRONMENT_UTILITY,
			 CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  }
  void TearDown() final
  {
    cct->put();  // drop our reference; cct is refcounted
    cct = nullptr;
  }
protected:
  CephContext *cct = nullptr;
};
// Ask an indep rule for 5 placements from a map with only 3 hosts: the
// result must pad with exactly 2 CRUSH_ITEM_NONE holes rather than
// duplicate any osd.
TEST_F(CRUSHTest, indep_toosmall) {
  std::unique_ptr<CrushWrapper> c(build_indep_map(cct, 1, 3, 1));
  vector<__u32> weight(c->get_max_devices(), 0x10000);
  c->dump_tree(&cout, NULL);
  for (int x = 0; x < 100; ++x) {
    vector<int> out;
    c->do_rule(0, x, out, 5, weight, 0);
    cout << x << " -> " << out << std::endl;
    int num_none = 0;
    for (unsigned i=0; i<out.size(); ++i) {
      if (out[i] == CRUSH_ITEM_NONE)
	num_none++;
    }
    ASSERT_EQ(2, num_none);
    ASSERT_EQ(0, get_num_dups(out));
  }
}
// A 3x3x3 map has plenty of hosts, so a 5-wide indep mapping must be
// complete (no CRUSH_ITEM_NONE holes) and free of duplicates for every
// input value.
TEST_F(CRUSHTest, indep_basic) {
  std::unique_ptr<CrushWrapper> c(build_indep_map(cct, 3, 3, 3));
  vector<__u32> weight(c->get_max_devices(), 0x10000);
  c->dump_tree(&cout, NULL);
  for (int x = 0; x < 100; ++x) {
    vector<int> out;
    c->do_rule(0, x, out, 5, weight, 0);
    cout << x << " -> " << out << std::endl;
    int holes = 0;
    for (auto osd : out) {
      if (osd == CRUSH_ITEM_NONE)
	++holes;
    }
    ASSERT_EQ(0, holes);
    ASSERT_EQ(0, get_num_dups(out));
  }
}
// Mark every other osd out (alternating pattern); a 9-wide indep mapping
// must still fill all slots from the remaining osds with no duplicates.
TEST_F(CRUSHTest, indep_out_alt) {
  std::unique_ptr<CrushWrapper> c(build_indep_map(cct, 3, 3, 3));
  vector<__u32> weight(c->get_max_devices(), 0x10000);
  // mark a bunch of osds out
  int num = 3*3*3;
  for (int i=0; i<num / 2; ++i)
    weight[i*2] = 0;
  c->dump_tree(&cout, NULL);
  // need more retries to get 9/9 hosts for x in 0..99
  c->set_choose_total_tries(100);
  for (int x = 0; x < 100; ++x) {
    vector<int> out;
    c->do_rule(0, x, out, 9, weight, 0);
    cout << x << " -> " << out << std::endl;
    int num_none = 0;
    for (unsigned i=0; i<out.size(); ++i) {
      if (out[i] == CRUSH_ITEM_NONE)
	num_none++;
    }
    ASSERT_EQ(0, num_none);
    ASSERT_EQ(0, get_num_dups(out));
  }
}
// Mark the first contiguous third of the osds out (a whole rack's worth);
// asking for 7 placements then leaves exactly one unfillable slot
// (CRUSH_ITEM_NONE) and no duplicates.
TEST_F(CRUSHTest, indep_out_contig) {
  std::unique_ptr<CrushWrapper> c(build_indep_map(cct, 3, 3, 3));
  vector<__u32> weight(c->get_max_devices(), 0x10000);
  // mark a bunch of osds out
  int num = 3*3*3;
  for (int i=0; i<num / 3; ++i)
    weight[i] = 0;
  c->dump_tree(&cout, NULL);
  c->set_choose_total_tries(100);
  for (int x = 0; x < 100; ++x) {
    vector<int> out;
    c->do_rule(0, x, out, 7, weight, 0);
    cout << x << " -> " << out << std::endl;
    int num_none = 0;
    for (unsigned i=0; i<out.size(); ++i) {
      if (out[i] == CRUSH_ITEM_NONE)
	num_none++;
    }
    ASSERT_EQ(1, num_none);
    ASSERT_EQ(0, get_num_dups(out));
  }
}
// Progressively mark osds out one at a time and verify indep stability:
// each step may move at most 1 existing mapping to a different slot and
// change at most 3 entries, so data movement stays bounded.
TEST_F(CRUSHTest, indep_out_progressive) {
  std::unique_ptr<CrushWrapper> c(build_indep_map(cct, 3, 3, 3));
  c->set_choose_total_tries(100);
  vector<__u32> tweight(c->get_max_devices(), 0x10000);
  c->dump_tree(&cout, NULL);
  int tchanged = 0;
  for (int x = 1; x < 5; ++x) {
    vector<__u32> weight(c->get_max_devices(), 0x10000);
    std::map<int,unsigned> pos;  // osd -> slot in previous mapping
    vector<int> prev;
    for (unsigned i=0; i<weight.size(); ++i) {
      vector<int> out;
      c->do_rule(0, x, out, 7, weight, 0);
      cout << "(" << i << "/" << weight.size() << " out) "
	   << x << " -> " << out << std::endl;
      int num_none = 0;
      for (unsigned k=0; k<out.size(); ++k) {
	if (out[k] == CRUSH_ITEM_NONE)
	  num_none++;
      }
      ASSERT_EQ(0, get_num_dups(out));
      // make sure nothing moved
      int moved = 0;
      int changed = 0;
      for (unsigned j=0; j<out.size(); ++j) {
	if (i && out[j] != prev[j]) {
	  ++changed;
	  ++tchanged;
	}
	if (out[j] == CRUSH_ITEM_NONE) {
	  continue;
	}
	if (i && pos.count(out[j])) {
	  // result shouldn't have moved position
	  if (j != pos[out[j]]) {
	    cout << " " << out[j] << " moved from " << pos[out[j]] << " to " << j << std::endl;
	    ++moved;
	  }
	  //ASSERT_EQ(j, pos[out[j]]);
	}
      }
      if (moved || changed)
	cout << " " << moved << " moved, " << changed << " changed" << std::endl;
      ASSERT_LE(moved, 1);
      ASSERT_LE(changed, 3);
      // mark another osd out
      weight[i] = 0;
      prev = out;
      pos.clear();
      for (unsigned j=0; j<out.size(); ++j) {
	if (out[j] != CRUSH_ITEM_NONE)
	  pos[out[j]] = j;
      }
    }
  }
  cout << tchanged << " total changed" << std::endl;
}
TEST_F(CRUSHTest, straw_zero) {
  // zero weight items should have no effect on placement.
  // root0 holds n items where the last has weight 0; root1 holds only the
  // first n-1 items.  Both roots must map every input identically.
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 1;
  c->set_type_name(ROOT_TYPE, "root");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int n = 5;
  int items[n], weights[n];
  for (int i=0; i <n; ++i) {
    items[i] = i;
    weights[i] = 0x10000 * (n-i-1);  // last item gets weight 0
  }
  c->set_max_devices(n);
  string root_name0("root0");
  int root0;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			     ROOT_TYPE, n, items, weights, &root0));
  EXPECT_EQ(0, c->set_item_name(root0, root_name0));
  string name0("rule0");
  int rule0 = c->add_simple_rule(name0, root_name0, "osd", "",
				 "firstn", pg_pool_t::TYPE_REPLICATED);
  EXPECT_EQ(0, rule0);
  string root_name1("root1");
  int root1;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			     ROOT_TYPE, n-1, items, weights, &root1));
  EXPECT_EQ(0, c->set_item_name(root1, root_name1));
  string name1("rule1");
  int rule1 = c->add_simple_rule(name1, root_name1, "osd", "",
				 "firstn", pg_pool_t::TYPE_REPLICATED);
  EXPECT_EQ(1, rule1);
  c->finalize();
  vector<unsigned> reweight(n, 0x10000);
  for (int i=0; i<10000; ++i) {
    vector<int> out0, out1;
    c->do_rule(rule0, i, out0, 1, reweight, 0);
    ASSERT_EQ(1u, out0.size());
    c->do_rule(rule1, i, out1, 1, reweight, 0);
    ASSERT_EQ(1u, out1.size());
    ASSERT_EQ(out0[0], out1[0]);
    //cout << i << "\t" << out0 << "\t" << out1 << std::endl;
  }
}
TEST_F(CRUSHTest, straw_same) {
  // items with the same weight should map about the same as items
  // with very similar weights.
  //
  // give the 0 vector a paired stair pattern, with dup weights.  note
  // that the original straw flaw does not appear when there are 2 of
  // the initial weight, but it does when there is just 1.
  //
  // give the 1 vector a similar stair pattern, but make the same
  // steps weights slightly different (no dups).  this works.
  //
  // compare the result and verify that the resulting mapping is
  // almost identical.
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 1;
  c->set_type_name(ROOT_TYPE, "root");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int n = 10;
  int items[n], weights[n];
  for (int i=0; i <n; ++i) {
    items[i] = i;
    weights[i] = 0x10000 * ((i+1)/2 + 1);  // paired stair: 1,1,2,2,3,3,...
  }
  c->set_max_devices(n);
  string root_name0("root0");
  int root0;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			     ROOT_TYPE, n, items, weights, &root0));
  EXPECT_EQ(0, c->set_item_name(root0, root_name0));
  string name0("rule0");
  int rule0 = c->add_simple_rule(name0, root_name0, "osd", "",
				 "firstn", pg_pool_t::TYPE_REPLICATED);
  EXPECT_EQ(0, rule0);
  // same stairs, but perturb odd steps by 100 so no two weights are equal
  for (int i=0; i <n; ++i) {
    items[i] = i;
    weights[i] = 0x10000 * ((i+1)/2 + 1) + (i%2)*100;
  }
  string root_name1("root1");
  int root1;
  EXPECT_EQ(0, c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
			     ROOT_TYPE, n, items, weights, &root1));
  EXPECT_EQ(0, c->set_item_name(root1, root_name1));
  string name1("rule1");
  int rule1 = c->add_simple_rule(name1, root_name1, "osd", "",
				 "firstn", pg_pool_t::TYPE_REPLICATED);
  EXPECT_EQ(1, rule1);
  // flip to 1 to dump the computed straw lengths for both buckets
  if (0) {
    crush_bucket_straw *sb0 = reinterpret_cast<crush_bucket_straw*>(c->get_crush_map()->buckets[-1-root0]);
    crush_bucket_straw *sb1 = reinterpret_cast<crush_bucket_straw*>(c->get_crush_map()->buckets[-1-root1]);
    for (int i=0; i<n; ++i) {
      cout << i
	   << "\t" << sb0->item_weights[i]
	   << "\t" << sb1->item_weights[i]
	   << "\t"
	   << "\t" << sb0->straws[i]
	   << "\t" << sb1->straws[i]
	   << std::endl;
    }
  }
  if (0) {
    JSONFormatter jf(true);
    jf.open_object_section("crush");
    c->dump(&jf);
    jf.close_section();
    jf.flush(cout);
  }
  c->finalize();
  vector<int> sum0(n, 0), sum1(n, 0);
  vector<unsigned> reweight(n, 0x10000);
  int different = 0;
  int max = 100000;
  for (int i=0; i<max; ++i) {
    vector<int> out0, out1;
    c->do_rule(rule0, i, out0, 1, reweight, 0);
    ASSERT_EQ(1u, out0.size());
    c->do_rule(rule1, i, out1, 1, reweight, 0);
    ASSERT_EQ(1u, out1.size());
    sum0[out0[0]]++;
    sum1[out1[0]]++;
    if (out0[0] != out1[0])
      different++;
  }
  for (int i=0; i<n; ++i) {
    cout << i
	 << "\t" << ((double)weights[i] / (double)weights[0])
	 << "\t" << sum0[i] << "\t" << ((double)sum0[i]/(double)sum0[0])
	 << "\t" << sum1[i] << "\t" << ((double)sum1[i]/(double)sum1[0])
	 << std::endl;
  }
  // the two buckets must agree on (almost) every input
  double ratio = ((double)different / (double)max);
  cout << different << " of " << max << " = "
       << ratio
       << " different" << std::endl;
  ASSERT_LT(ratio, .001);
}
// Build a single straw2 bucket with n items of the given fixed-point
// weights (1.0 == 0x10000), map 1M inputs through a firstn rule, and
// return the standard deviation of the weight-adjusted per-item counts.
// When verbose, also print per-item counts and the binomial expectation.
double calc_straw2_stddev(int *weights, int n, bool verbose)
{
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int items[n];
  for (int i=0; i <n; ++i) {
    items[i] = i;
  }
  c->set_max_devices(n);
  string root_name0("root0");
  int root0;
  crush_bucket *b0 = crush_make_bucket(c->get_crush_map(),
				       CRUSH_BUCKET_STRAW2, CRUSH_HASH_RJENKINS1,
				       ROOT_TYPE, n, items, weights);
  crush_add_bucket(c->get_crush_map(), 0, b0, &root0);
  c->set_item_name(root0, root_name0);
  string name0("rule0");
  int rule0 = c->add_simple_rule(name0, root_name0, "osd", "",
				 "firstn", pg_pool_t::TYPE_REPLICATED);
  int sum[n];
  double totalweight = 0;
  vector<unsigned> reweight(n);
  for (int i=0; i<n; ++i) {
    sum[i] = 0;
    reweight[i] = 0x10000;
    totalweight += weights[i];
  }
  totalweight /= (double)0x10000;
  double avgweight = totalweight / n;
  c->finalize();
  int total = 1000000;
  for (int i=0; i<total; ++i) {
    vector<int> out;
    c->do_rule(rule0, i, out, 1, reweight, 0);
    sum[out[0]]++;
  }
  double expected = (double)total / (double)n;
  if (verbose)
    cout << "expect\t\t\t" << expected << std::endl;
  double stddev = 0;
  double exptotal = 0;
  if (verbose)
    cout << "osd\tweight\tcount\tadjusted\n";
  // remember the stream precision so we can restore it below.
  // (was `p`, which was shadowed by the probability variable further down)
  std::streamsize saved_precision = cout.precision();
  cout << std::setprecision(4);
  for (int i=0; i<n; ++i) {
    double w = (double)weights[i] / (double)0x10000;
    // normalize each count by its weight so a perfect bucket would give
    // every item the same adjusted value
    double adj = (double)sum[i] * avgweight / w;
    stddev += (adj - expected) * (adj - expected);
    exptotal += adj;
    if (verbose)
      cout << i
	   << "\t" << w
	   << "\t" << sum[i]
	   << "\t" << (int)adj
	   << std::endl;
  }
  cout << std::setprecision(saved_precision);
  {
    stddev = sqrt(stddev / (double)n);
    if (verbose)
      cout << "std dev " << stddev << std::endl;
    // binomial expectation: each of n items is hit with probability 1/n
    double prob = 1.0 / (double)n;
    double estddev = sqrt(exptotal * prob * (1.0 - prob));
    if (verbose)
      cout << " vs " << estddev << "\t(expected)" << std::endl;
  }
  return stddev;
}
// Sweep increasingly skewed geometric weight vectors (step 1.0 .. 1.75)
// through calc_straw2_stddev() and print max-skew vs stddev.  This test is
// informational: it asserts nothing beyond not crashing.
TEST_F(CRUSHTest, straw2_stddev)
{
  int n = 15;
  int weights[n];
  cout << "maxskew\tstddev\n";
  for (double step = 1.0; step < 2; step += .25) {
    int w = 0x10000;
    for (int i = 0; i < n; ++i) {
      weights[i] = w;
      w *= step;  // geometric progression (int truncation intended)
    }
    double stddev = calc_straw2_stddev(weights, n, true);
    cout << ((double)weights[n-1]/(double)weights[0])
	 << "\t" << stddev << std::endl;
  }
}
TEST_F(CRUSHTest, straw2_reweight) {
  // when we adjust the weight of an item in a straw2 bucket,
  // we should *only* see movement from or to that item, never
  // between other items.
  //
  // NOTE(review): the array below has 17 initializers but n = 15, so the
  // last two weights are never used -- presumably leftover from tuning;
  // confirm intent.
  int weights[] = {
    0x10000,
    0x10000,
    0x20000,
    0x20000,
    0x30000,
    0x50000,
    0x8000,
    0x20000,
    0x10000,
    0x10000,
    0x20000,
    0x10000,
    0x10000,
    0x20000,
    0x300000,
    0x10000,
    0x20000
  };
  int n = 15;
  std::unique_ptr<CrushWrapper> c(new CrushWrapper);
  const int ROOT_TYPE = 2;
  c->set_type_name(ROOT_TYPE, "root");
  const int HOST_TYPE = 1;
  c->set_type_name(HOST_TYPE, "host");
  const int OSD_TYPE = 0;
  c->set_type_name(OSD_TYPE, "osd");
  int items[n];
  for (int i=0; i <n; ++i) {
    items[i] = i;
    //weights[i] = 0x10000;
  }
  c->set_max_devices(n);
  // root0: the original weights
  string root_name0("root0");
  int root0;
  crush_bucket *b0 = crush_make_bucket(c->get_crush_map(),
				       CRUSH_BUCKET_STRAW2, CRUSH_HASH_RJENKINS1,
				       ROOT_TYPE, n, items, weights);
  EXPECT_EQ(0, crush_add_bucket(c->get_crush_map(), 0, b0, &root0));
  EXPECT_EQ(0, c->set_item_name(root0, root_name0));
  string name0("rule0");
  int rule0 = c->add_simple_rule(name0, root_name0, "osd", "",
				 "firstn", pg_pool_t::TYPE_REPLICATED);
  EXPECT_EQ(0, rule0);
  // root1: identical except item 1 gets a randomly reduced weight
  int changed = 1;
  weights[changed] = weights[changed] / 10 * (rand() % 10);
  string root_name1("root1");
  int root1;
  crush_bucket *b1 = crush_make_bucket(c->get_crush_map(),
				       CRUSH_BUCKET_STRAW2, CRUSH_HASH_RJENKINS1,
				       ROOT_TYPE, n, items, weights);
  EXPECT_EQ(0, crush_add_bucket(c->get_crush_map(), 0, b1, &root1));
  EXPECT_EQ(0, c->set_item_name(root1, root_name1));
  string name1("rule1");
  int rule1 = c->add_simple_rule(name1, root_name1, "osd", "",
				 "firstn", pg_pool_t::TYPE_REPLICATED);
  EXPECT_EQ(1, rule1);
  int sum[n];
  double totalweight = 0;
  vector<unsigned> reweight(n);
  for (int i=0; i<n; ++i) {
    sum[i] = 0;
    reweight[i] = 0x10000;
    totalweight += weights[i];
  }
  totalweight /= (double)0x10000;
  double avgweight = totalweight / n;
  c->finalize();
  int total = 1000000;
  for (int i=0; i<total; ++i) {
    vector<int> out0, out1;
    c->do_rule(rule0, i, out0, 1, reweight, 0);
    ASSERT_EQ(1u, out0.size());
    c->do_rule(rule1, i, out1, 1, reweight, 0);
    ASSERT_EQ(1u, out1.size());
    sum[out1[0]]++;
    //sum[rand()%n]++;
    // any input that maps to the changed item after reweighting must have
    // mapped there before too; all other inputs must be unaffected unless
    // they previously mapped to the changed item
    if (out1[0] == changed) {
      ASSERT_EQ(changed, out0[0]);
    } else if (out0[0] != changed) {
      ASSERT_EQ(out0[0], out1[0]);
    }
  }
  double expected = (double)total / (double)n;
  cout << "expect\t\t\t" << expected << std::endl;
  double stddev = 0;
  cout << "osd\tweight\tcount\tadjusted\n";
  std::streamsize p = cout.precision();
  cout << std::setprecision(4);
  for (int i=0; i<n; ++i) {
    double w = (double)weights[i] / (double)0x10000;
    double adj = (double)sum[i] * avgweight / w;
    stddev += (adj - expected) * (adj - expected);
    cout << i
	 << "\t" << w
	 << "\t" << sum[i]
	 << "\t" << (int)adj
	 << std::endl;
  }
  cout << std::setprecision(p);
  {
    stddev = sqrt(stddev / (double)n);
    cout << "std dev " << stddev << std::endl;
    double p = 1.0 / (double)n;
    double estddev = sqrt((double)total * p * (1.0 - p));
    cout << " vs " << estddev << std::endl;
  }
}
| 17,605 | 25.635401 | 107 | cc |
null | ceph-main/src/test/crush/crush_weights.sh | #!/usr/bin/env bash
# Sanity-check CRUSH weight distribution with crushtool: build a straw2
# root of four weight-10 devices plus one weight-1 device, then verify the
# device0/device4 "stored" ratio against the 10:1 weight ratio.
source $(dirname $0)/../detect-build-env-vars.sh
# FreeBSD ships BSD sed; the regexes below need GNU sed (gsed)
if [ `uname` = FreeBSD ]; then
    SED=gsed
else
    SED=sed
fi
# inline CRUSH map; rule 0 picks osds firstn from the root
read -r -d '' cm <<'EOF'
# devices
device 0 device0
device 1 device1
device 2 device2
device 3 device3
device 4 device4
# types
type 0 osd
type 1 domain
type 2 pool
# buckets
domain root {
	id -1		# do not change unnecessarily
	# weight 5.00000
	alg straw2
	hash 0	# rjenkins1
	item device0 weight 10.00000
	item device1 weight 10.00000
	item device2 weight 10.00000
	item device3 weight 10.00000
	item device4 weight 1.00000
}
# rules
rule data {
	id 0
	type replicated
	step take root
	step choose firstn 0 type osd
	step emit
}
EOF
# 3 replicas on only 5 devices: the heavy devices cannot absorb their full
# 10:1 share, so the stored ratio must be depressed at least .75 below 10
three=($(echo "$cm" | crushtool -c /dev/fd/0 --test --show-utilization \
             --min-x 1 --max-x 1000000 --num-rep 3 | \
         grep "device \(0\|4\)" | $SED -e 's/^.*stored : \([0-9]\+\).*$/\1/'))
if test $(echo "scale=5; (10 - ${three[0]}/${three[1]}) < .75" | bc) = 1; then
    echo 3 replicas weights better distributed than they should be. 1>&2
    exit 1
fi
# 1 replica: the stored ratio should track the 10:1 weight ratio to +/- .1
one=($(echo "$cm" | crushtool -c /dev/fd/0 --test --show-utilization \
           --min-x 1 --max-x 1000000 --num-rep 1 | \
       grep "device \(0\|4\)" | $SED -e 's/^.*stored : \([0-9]\+\).*$/\1/'))
if test $(echo "scale=5; (10 - ${one[0]}/${one[1]}) > .1 || (10 - ${one[0]}/${one[1]}) < -.1" | bc) = 1; then
    echo 1 replica not distributed as they should be. 1>&2
    exit 1
fi
| 1,507 | 23.721311 | 109 | sh |
null | ceph-main/src/test/debian-strech/install-deps.sh | ../../../install-deps.sh | 24 | 24 | 24 | sh |
null | ceph-main/src/test/direct_messenger/DirectMessenger.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "DirectMessenger.h"
#include "DispatchStrategy.h"
/// Connection that delivers messages synchronously to a DispatchStrategy
/// instead of a wire.  A pair of these (one per DirectMessenger) point at
/// each other via reply_connection so replies reach the original sender.
class DirectConnection : public Connection {
  /// sent messages are dispatched here
  DispatchStrategy *const dispatchers;
  /// the connection that will be attached to outgoing messages, so that replies
  /// can be dispatched back to the sender. the pointer is atomic for
  /// thread-safety between mark_down() and send_message(). no reference is held
  /// on this Connection to avoid cyclical refs. we don't need a reference
  /// because its owning DirectMessenger will mark both connections down (and
  /// clear this pointer) before dropping its own reference
  std::atomic<Connection*> reply_connection{nullptr};
 private:
  FRIEND_MAKE_REF(DirectConnection);
  DirectConnection(CephContext *cct, DirectMessenger *m,
		   DispatchStrategy *dispatchers)
    : Connection(cct, m),
      dispatchers(dispatchers)
  {}
 public:
  /// sets the Connection that will receive replies to outgoing messages
  void set_direct_reply_connection(ConnectionRef conn);
  /// return true if a peer connection exists
  bool is_connected() override;
  /// pass the given message directly to our dispatchers
  int send_message(Message *m) override;
  /// release our pointer to the peer connection. later calls to is_connected()
  /// will return false, and send_message() will fail with -ENOTCONN
  void mark_down() override;
  /// noop - keepalive messages are not needed within a process
  void send_keepalive() override {}
  /// noop - reconnect/recovery semantics are not needed within a process
  void mark_disposable() override {}
};
// Record the connection that replies to our outgoing messages should go
// to.  Only the raw pointer is stored (no ref taken; see reply_connection).
void DirectConnection::set_direct_reply_connection(ConnectionRef conn)
{
  reply_connection.store(conn.get());
}
// A DirectConnection counts as connected while it still holds a reply
// connection pointer, i.e. between set_direct_reply_connection() and
// mark_down().
bool DirectConnection::is_connected()
{
  return reply_connection.load() != nullptr;
}
// Deliver m synchronously to our dispatchers.  On success the reply
// connection is attached to the message so the receiver can answer via
// m->get_connection()->send_message(); on failure the message ref is
// dropped and -ENOTCONN is returned.
int DirectConnection::send_message(Message *m)
{
  // snapshot the atomic pointer and take a reference before using it
  ConnectionRef reply = reply_connection.load();
  if (!reply) {
    m->put(); // we own the message; release it on failure
    return -ENOTCONN;
  }
  // attach the reply connection so replies dispatch back to the sender
  m->set_connection(reply);
  dispatchers->ds_dispatch(m);
  return 0;
}
// Drop our pointer to the peer connection and propagate mark_down() to it
// exactly once.  After this, is_connected() returns false and
// send_message() fails with -ENOTCONN.
void DirectConnection::mark_down()
{
  Connection *conn = reply_connection.load();
  if (!conn) {
    return; // already marked down
  }
  // Clear the pointer exactly once.  This must be the *strong* variant:
  // compare_exchange_weak() may fail spuriously even when the value
  // matches, which would make us return early without marking the peer
  // connection down.
  if (!reply_connection.compare_exchange_strong(conn, nullptr)) {
    return; // lost the race to mark down
  }
  // called only once to avoid loops
  conn->mark_down();
}
// Build the loopback connection for messenger m: it dispatches into m's
// own dispatchers, and since replies to a loopback message also go to
// ourselves, it acts as its own reply connection.
static ConnectionRef create_loopback(DirectMessenger *m,
				     entity_name_t name,
				     DispatchStrategy *dispatchers)
{
  auto conn = ceph::make_ref<DirectConnection>(m->cct, m, dispatchers);
  conn->set_peer_type(name.type());
  conn->set_features(CEPH_FEATURES_ALL);
  // loopback replies go to itself
  conn->set_direct_reply_connection(conn);
  return conn;
}
// Construct a DirectMessenger.  Takes ownership of `dispatchers` (held by
// a unique_ptr member) and registers itself as their messenger.  The
// loopback connection is usable immediately; the peer connection is only
// set up later by set_direct_peer().
DirectMessenger::DirectMessenger(CephContext *cct, entity_name_t name,
				 string mname, uint64_t nonce,
				 DispatchStrategy *dispatchers)
  : SimplePolicyMessenger(cct, name, mname, nonce),
    dispatchers(dispatchers),
    loopback_connection(create_loopback(this, name, dispatchers))
{
  dispatchers->set_messenger(this);
}
// Out-of-line (empty) destructor: keeps the unique_ptr<DispatchStrategy>
// member's deleter in this translation unit, where DispatchStrategy is a
// complete type (the header only forward-declares it).
DirectMessenger::~DirectMessenger()
{
}
// Connect this messenger to `peer`.  Builds a DirectConnection whose
// send_message() dispatches into the peer's dispatchers.  When both sides
// have called set_direct_peer(), the two connections are cross-linked as
// each other's reply connections.  Fails with -EADDRINUSE if the peer has
// the same entity instance as us.
int DirectMessenger::set_direct_peer(DirectMessenger *peer)
{
  if (get_myinst() == peer->get_myinst()) {
    return -EADDRINUSE; // must have a different entity instance
  }
  peer_inst = peer->get_myinst();
  // allocate a Connection that dispatches to the peer messenger
  auto direct_connection = ceph::make_ref<DirectConnection>(cct, peer, peer->dispatchers.get());
  direct_connection->set_peer_addr(peer_inst.addr);
  direct_connection->set_peer_type(peer_inst.name.type());
  direct_connection->set_features(CEPH_FEATURES_ALL);
  // if set_direct_peer() was already called on the peer messenger, we can
  // finish by attaching their connections. if not, the later call to
  // peer->set_direct_peer() will attach their connection to ours
  auto connection = peer->get_connection(get_myinst());
  if (connection) {
    auto p = static_cast<DirectConnection*>(connection.get());
    p->set_direct_reply_connection(direct_connection);
    direct_connection->set_direct_reply_connection(p);
  }
  peer_connection = std::move(direct_connection);
  return 0;
}
// Set our address.  Must happen before set_direct_peer(): the peer caches
// our address in its connection, so it cannot change afterwards.
int DirectMessenger::bind(const entity_addr_t &bind_addr)
{
  if (peer_connection) {
    return -EINVAL; // can't change address after sharing it with the peer
  }
  set_myaddr(bind_addr);
  loopback_connection->set_peer_addr(bind_addr);
  return 0;
}
// Client-side bind is identical to server-side bind for this messenger.
int DirectMessenger::client_bind(const entity_addr_t &bind_addr)
{
  // same as bind
  return bind(bind_addr);
}
int DirectMessenger::start()
{
if (!peer_connection) {
return -EINVAL; // did not connect to a peer
}
if (started) {
return -EINVAL; // already started
}
dispatchers->start();
return SimplePolicyMessenger::start();
}
// Tear down the messenger.  Order matters: connections are marked down
// (clearing the cross-linked reply pointers) before their references are
// dropped, then dispatchers stop, and finally wait() is unblocked.
int DirectMessenger::shutdown()
{
  if (!started) {
    return -EINVAL; // not started
  }
  mark_down_all();
  peer_connection.reset();
  loopback_connection.reset();
  dispatchers->shutdown();
  SimplePolicyMessenger::shutdown();
  sem.Put(); // signal wait()
  return 0;
}
// Block the caller until shutdown() runs, then wait for the dispatch
// threads to drain.
void DirectMessenger::wait()
{
  sem.Get(); // wait on signal from shutdown()
  dispatchers->wait();
}
// Map a destination instance to one of our two connections: the peer
// connection, our loopback connection, or null for anything else.
// Note: peer_inst is checked first; before set_direct_peer() both
// comparisons can match a default instance, in which case the (null)
// peer_connection is returned.
ConnectionRef DirectMessenger::get_connection(const entity_inst_t& dst)
{
  if (dst == peer_inst) {
    return peer_connection;
  }
  if (dst == get_myinst()) {
    return loopback_connection;
  }
  return nullptr;
}
// Return the connection that dispatches back into this messenger itself.
ConnectionRef DirectMessenger::get_loopback_connection()
{
  return loopback_connection;
}
// Send m to dst via whichever of our connections matches.  If dst is
// neither the peer nor ourselves, the message reference is released and
// -ENOTCONN returned.
int DirectMessenger::send_message(Message *m, const entity_inst_t& dst)
{
  ConnectionRef conn = get_connection(dst);
  if (conn) {
    return conn->send_message(m);
  }
  m->put(); // undeliverable; drop our reference
  return -ENOTCONN;
}
void DirectMessenger::mark_down(const entity_addr_t& addr)
{
ConnectionRef conn;
if (addr == peer_inst.addr) {
conn = peer_connection;
} else if (addr == get_myaddr_legacy()) {
conn = loopback_connection;
}
if (conn) {
conn->mark_down();
}
}
// Mark down both of our connections.  peer_connection may be null if
// set_direct_peer() was never called; loopback_connection is created in
// the constructor and only reset by shutdown(), which calls us first.
void DirectMessenger::mark_down_all()
{
  if (peer_connection) {
    peer_connection->mark_down();
  }
  loopback_connection->mark_down();
}
| 6,907 | 26.304348 | 96 | cc |
null | ceph-main/src/test/direct_messenger/DirectMessenger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_DIRECTMESSENGER_H
#define CEPH_MSG_DIRECTMESSENGER_H
#include "msg/SimplePolicyMessenger.h"
#include "common/Semaphore.h"
class DispatchStrategy;
/**
* DirectMessenger provides a direct path between two messengers
* within a process. A pair of DirectMessengers share their
* DispatchStrategy with each other, and calls to send_message()
* forward the message directly to the other.
*
* This is for testing and i/o injection only, and cannot be used
* for normal messengers with ms_type.
*/
class DirectMessenger : public SimplePolicyMessenger {
 private:
  /// strategy for local dispatch (owned; passed in via the constructor)
  std::unique_ptr<DispatchStrategy> dispatchers;
  /// peer instance for comparison in get_connection()
  entity_inst_t peer_inst;
  /// connection that sends to the peer's dispatchers
  ConnectionRef peer_connection;
  /// connection that sends to my own dispatchers
  ConnectionRef loopback_connection;
  /// semaphore for signalling wait() from shutdown()
  Semaphore sem;
 public:
  /// @param dispatchers dispatch strategy; ownership is transferred
  DirectMessenger(CephContext *cct, entity_name_t name,
		  string mname, uint64_t nonce,
		  DispatchStrategy *dispatchers);
  ~DirectMessenger();
  /// attach to a peer messenger. must be called before start()
  int set_direct_peer(DirectMessenger *peer);
  // Messenger interface
  /// sets the addr. must not be called after set_direct_peer() or start()
  int bind(const entity_addr_t& bind_addr) override;
  /// sets the addr. must not be called after set_direct_peer() or start()
  int client_bind(const entity_addr_t& bind_addr) override;
  /// starts dispatchers
  int start() override;
  /// breaks connections, stops dispatchers, and unblocks callers of wait()
  int shutdown() override;
  /// blocks until shutdown() completes
  void wait() override;
  /// returns a connection to the peer instance, a loopback connection to our
  /// own instance, or null if not connected
  ConnectionRef get_connection(const entity_inst_t& dst) override;
  /// returns a loopback connection that dispatches to this messenger
  ConnectionRef get_loopback_connection() override;
  /// dispatches a message to the peer instance if connected
  int send_message(Message *m, const entity_inst_t& dst) override;
  /// mark down the connection for the given address
  void mark_down(const entity_addr_t& a) override;
  /// mark down all connections
  void mark_down_all() override;
  // unimplemented Messenger interface (no-ops for in-process transport)
  void set_addr_unknowns(const entity_addr_t &addr) override {}
  void set_addr(const entity_addr_t &addr) override {}
  int get_dispatch_queue_len() override { return 0; }
  double get_dispatch_queue_max_age(utime_t now) override { return 0; }
  void set_cluster_protocol(int p) override {}
};
#endif
| 3,186 | 31.191919 | 77 | h |
null | ceph-main/src/test/direct_messenger/DispatchStrategy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CohortFS, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef DISPATCH_STRATEGY_H
#define DISPATCH_STRATEGY_H
#include "msg/Message.h"
class Messenger;
/// Abstract policy for delivering messages to a Messenger's dispatchers.
/// Concrete strategies decide how delivery happens — e.g. synchronously on
/// the caller's thread (FastStrategy) or via worker threads (QueueStrategy).
class DispatchStrategy
{
protected:
  Messenger *msgr = nullptr;  // non-owning; set via set_messenger()
public:
  DispatchStrategy() {}
  Messenger *get_messenger() { return msgr; }
  void set_messenger(Messenger *_msgr) { msgr = _msgr; }
  /// deliver one message to the messenger's dispatchers
  virtual void ds_dispatch(Message *m) = 0;
  /// stop delivering messages and unblock wait()
  virtual void shutdown() = 0;
  /// start any delivery machinery (e.g. worker threads)
  virtual void start() = 0;
  /// block until shutdown() completes
  virtual void wait() = 0;
  virtual ~DispatchStrategy() {}
};
#endif /* DISPATCH_STRATEGY_H */
| 906 | 22.868421 | 70 | h |
null | ceph-main/src/test/direct_messenger/FastStrategy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CohortFS, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef FAST_STRATEGY_H
#define FAST_STRATEGY_H
#include "DispatchStrategy.h"
/// DispatchStrategy that delivers every message synchronously on the
/// caller's thread; no worker threads are created, so start()/shutdown()/
/// wait() are no-ops.
class FastStrategy : public DispatchStrategy {
public:
  FastStrategy() {}
  void ds_dispatch(Message *m) override {
    msgr->ms_fast_preprocess(m);
    // prefer the fast-dispatch path; fall through to regular delivery
    if (msgr->ms_can_fast_dispatch(m)) {
      msgr->ms_fast_dispatch(m);
      return;
    }
    msgr->ms_deliver_dispatch(m);
  }
  void shutdown() override {}
  void start() override {}
  void wait() override {}
  virtual ~FastStrategy() {}
};
#endif /* FAST_STRATEGY_H */
| 900 | 24.027778 | 70 | h |
null | ceph-main/src/test/direct_messenger/QueueStrategy.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CohortFS, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <string>
#include "QueueStrategy.h"
#define dout_subsys ceph_subsys_ms
#include "common/debug.h"
// Construct with a fixed worker-thread count; the threads themselves are
// not created until start() is called.
QueueStrategy::QueueStrategy(int _n_threads)
  : n_threads(_n_threads),
    stop(false),
    mqueue(),
    disp_threads()
{
}
// Deliver a message: fast-dispatch synchronously when the messenger allows
// it, otherwise queue the message and wake one idle worker thread.
void QueueStrategy::ds_dispatch(Message *m) {
  msgr->ms_fast_preprocess(m);
  if (msgr->ms_can_fast_dispatch(m)) {
    msgr->ms_fast_dispatch(m);
    return;
  }
  std::lock_guard l{lock};
  mqueue.push_back(*m);
  // wake one parked dispatch thread, if any are idle
  if (!disp_threads.empty()) {
    QSThread *thrd = &disp_threads.front();
    disp_threads.pop_front();
    thrd->cond.notify_all();
  }
}
// Worker-thread main loop: pop queued messages and deliver them, blocking
// on the per-thread condition variable while the queue is empty. Exits once
// stop is set and no message was popped this round.
void QueueStrategy::entry(QSThread *thrd)
{
  for (;;) {
    ceph::ref_t<Message> m;
    std::unique_lock l{lock};
    for (;;) {
      if (! mqueue.empty()) {
	// adopt the queued reference without incrementing the refcount
	m = ceph::ref_t<Message>(&mqueue.front(), false);
	mqueue.pop_front();
	break;
      }
      if (stop)
	break;
      // park on the idle list; ds_dispatch()/shutdown() notify this cond
      disp_threads.push_front(*thrd);
      thrd->cond.wait(l);
    }
    l.unlock();
    if (stop) {
      // shutting down: exit once the queue is drained; messages popped
      // after stop is set are released without being delivered
      if (!m) break;
      continue;
    }
    get_messenger()->ms_deliver_dispatch(m);
  }
}
void QueueStrategy::shutdown()
{
QSThread *thrd;
std::lock_guard l{lock};
stop = true;
while (disp_threads.size()) {
thrd = &(disp_threads.front());
disp_threads.pop_front();
thrd->cond.notify_all();
}
}
// Block until every worker thread has exited. shutdown() must already have
// been called (asserted via the stop flag).
void QueueStrategy::wait()
{
  std::unique_lock l{lock};
  ceph_assert(stop);
  // `threads` is not modified after start(), so iterating while briefly
  // dropping the lock for each join is safe
  for (auto& thread : threads) {
    l.unlock();
    // join outside of lock
    thread->join();
    l.lock();
  }
}
// Spawn the configured number of worker threads, each named "ms_qs_<idx>"
// and running QueueStrategy::entry().
void QueueStrategy::start()
{
  ceph_assert(!stop);
  std::lock_guard l{lock};
  threads.reserve(n_threads);
  for (int ix = 0; ix < n_threads; ++ix) {
    const std::string thread_name = "ms_qs_" + std::to_string(ix);
    auto thrd = std::make_unique<QSThread>(this);
    thrd->create(thread_name.c_str());
    threads.push_back(std::move(thrd));
  }
}
| 2,295 | 20.259259 | 70 | cc |
null | ceph-main/src/test/direct_messenger/QueueStrategy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CohortFS, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef QUEUE_STRATEGY_H
#define QUEUE_STRATEGY_H
#include <vector>
#include <memory>
#include <boost/intrusive/list.hpp>
#include "DispatchStrategy.h"
#include "msg/Messenger.h"
namespace bi = boost::intrusive;
/// DispatchStrategy that delivers fast-dispatchable messages synchronously
/// and hands the rest to a pool of worker threads via an intrusive queue.
class QueueStrategy : public DispatchStrategy {
  ceph::mutex lock = ceph::make_mutex("QueueStrategy::lock");
  const int n_threads;    ///< worker-thread count, fixed at construction
  bool stop;              ///< set by shutdown(); guarded by lock
  Message::Queue mqueue;  ///< pending messages; guarded by lock
  /// worker thread; parks on its own condition variable while idle
  class QSThread : public Thread {
  public:
    bi::list_member_hook<> thread_q;
    QueueStrategy *dq;
    ceph::condition_variable cond;
    explicit QSThread(QueueStrategy *dq) : thread_q(), dq(dq) {}
    void* entry() {
      dq->entry(this);
      return NULL;
    }
    typedef bi::list< QSThread,
		      bi::member_hook< QSThread,
				       bi::list_member_hook<>,
				       &QSThread::thread_q > > Queue;
  };
  std::vector<std::unique_ptr<QSThread>> threads; //< all threads
  QSThread::Queue disp_threads; //< waiting threads
public:
  explicit QueueStrategy(int n_threads);
  void ds_dispatch(Message *m) override;
  void shutdown() override;
  void start() override;
  void wait() override;
  /// worker-thread body (public so QSThread::entry can call it)
  void entry(QSThread *thrd);
  virtual ~QueueStrategy() {}
};
#endif /* QUEUE_STRATEGY_H */
| 1,597 | 23.96875 | 70 | h |
null | ceph-main/src/test/direct_messenger/test_direct_messenger.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <condition_variable>
#include <mutex>
#include <thread>
#include <gtest/gtest.h>
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "DirectMessenger.h"
#include "FastStrategy.h"
#include "QueueStrategy.h"
#include "messages/MPing.h"
/// mock dispatcher that calls the given callback
class MockDispatcher : public Dispatcher {
  std::function<void(Message*)> callback;  // invoked for every dispatched message
 public:
  MockDispatcher(CephContext *cct, std::function<void(Message*)> callback)
    : Dispatcher(cct), callback(std::move(callback)) {}
  bool ms_handle_reset(Connection *con) override { return false; }
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override { return false; }
  bool ms_dispatch(Message *m) override {
    callback(m);
    // drop the message reference after the callback has run
    m->put();
    return true;
  }
};
/// test synchronous dispatch of messenger and connection interfaces
// With FastStrategy on both ends, send_message() dispatches synchronously,
// so the got_request/got_reply flags can be asserted immediately after each
// send with no waiting.
TEST(DirectMessenger, SyncDispatch)
{
  auto cct = g_ceph_context;
  // use FastStrategy for synchronous dispatch
  DirectMessenger client(cct, entity_name_t::CLIENT(1),
                         "client", 0, new FastStrategy());
  DirectMessenger server(cct, entity_name_t::CLIENT(2),
                         "server", 0, new FastStrategy());
  ASSERT_EQ(0, client.set_direct_peer(&server));
  ASSERT_EQ(0, server.set_direct_peer(&client));
  bool got_request = false;
  bool got_reply = false;
  MockDispatcher client_dispatcher(cct, [&] (Message *m) {
    got_reply = true;
  });
  client.add_dispatcher_head(&client_dispatcher);
  // server replies to each request with a ping of its own
  MockDispatcher server_dispatcher(cct, [&] (Message *m) {
    got_request = true;
    ASSERT_EQ(0, m->get_connection()->send_message(new MPing()));
  });
  server.add_dispatcher_head(&server_dispatcher);
  ASSERT_EQ(0, client.start());
  ASSERT_EQ(0, server.start());
  // test DirectMessenger::send_message()
  ASSERT_EQ(0, client.send_message(new MPing(), server.get_myinst()));
  ASSERT_TRUE(got_request);
  ASSERT_TRUE(got_reply);
  // test DirectConnection::send_message()
  {
    got_request = false;
    got_reply = false;
    auto conn = client.get_connection(server.get_myinst());
    ASSERT_EQ(0, conn->send_message(new MPing()));
    ASSERT_TRUE(got_request);
    ASSERT_TRUE(got_reply);
  }
  // test DirectMessenger::send_message() with loopback address
  got_request = false;
  got_reply = false;
  ASSERT_EQ(0, client.send_message(new MPing(), client.get_myinst()));
  ASSERT_FALSE(got_request); // server should never see this
  ASSERT_TRUE(got_reply);
  // test DirectConnection::send_message() with loopback address
  {
    got_request = false;
    got_reply = false;
    auto conn = client.get_connection(client.get_myinst());
    ASSERT_EQ(0, conn->send_message(new MPing()));
    ASSERT_FALSE(got_request); // server should never see this
    ASSERT_TRUE(got_reply);
  }
  // test DirectConnection::send_message() with loopback connection
  {
    got_request = false;
    got_reply = false;
    auto conn = client.get_loopback_connection();
    ASSERT_EQ(0, conn->send_message(new MPing()));
    ASSERT_FALSE(got_request); // server should never see this
    ASSERT_TRUE(got_reply);
  }
  ASSERT_EQ(0, client.shutdown());
  client.wait();
  ASSERT_EQ(0, server.shutdown());
  server.wait();
}
/// test asynchronous dispatch of messenger and connection interfaces
// With QueueStrategy(1) on the client, replies arrive on a worker thread.
// Each send is followed by wait_for_reply(); several sends are issued while
// holding the mutex, which would deadlock if dispatch were synchronous.
TEST(DirectMessenger, AsyncDispatch)
{
  auto cct = g_ceph_context;
  // use QueueStrategy for async replies
  DirectMessenger client(cct, entity_name_t::CLIENT(1),
                         "client", 0, new QueueStrategy(1));
  DirectMessenger server(cct, entity_name_t::CLIENT(2),
                         "server", 0, new FastStrategy());
  ASSERT_EQ(0, client.set_direct_peer(&server));
  ASSERT_EQ(0, server.set_direct_peer(&client));
  // condition variable to wait on ping reply
  std::mutex mutex;
  std::condition_variable cond;
  bool done = false;
  auto wait_for_reply = [&] {
    std::unique_lock<std::mutex> lock(mutex);
    while (!done) {
      cond.wait(lock);
    }
    done = false; // clear for reuse
  };
  // client dispatcher signals the condition variable on reply
  MockDispatcher client_dispatcher(cct, [&] (Message *m) {
    std::lock_guard<std::mutex> lock(mutex);
    done = true;
    cond.notify_one();
  });
  client.add_dispatcher_head(&client_dispatcher);
  MockDispatcher server_dispatcher(cct, [&] (Message *m) {
    // hold the lock over the call to send_message() to prove that the client's
    // dispatch is asynchronous. if it isn't, it will deadlock
    std::lock_guard<std::mutex> lock(mutex);
    ASSERT_EQ(0, m->get_connection()->send_message(new MPing()));
  });
  server.add_dispatcher_head(&server_dispatcher);
  ASSERT_EQ(0, client.start());
  ASSERT_EQ(0, server.start());
  // test DirectMessenger::send_message()
  ASSERT_EQ(0, client.send_message(new MPing(), server.get_myinst()));
  wait_for_reply();
  // test DirectConnection::send_message()
  {
    auto conn = client.get_connection(server.get_myinst());
    ASSERT_EQ(0, conn->send_message(new MPing()));
  }
  wait_for_reply();
  // test DirectMessenger::send_message() with loopback address
  {
    // hold the lock to test that loopback dispatch is asynchronous
    std::lock_guard<std::mutex> lock(mutex);
    ASSERT_EQ(0, client.send_message(new MPing(), client.get_myinst()));
  }
  wait_for_reply();
  // test DirectConnection::send_message() with loopback address
  {
    auto conn = client.get_connection(client.get_myinst());
    // hold the lock to test that loopback dispatch is asynchronous
    std::lock_guard<std::mutex> lock(mutex);
    ASSERT_EQ(0, conn->send_message(new MPing()));
  }
  wait_for_reply();
  // test DirectConnection::send_message() with loopback connection
  {
    auto conn = client.get_loopback_connection();
    // hold the lock to test that loopback dispatch is asynchronous
    std::lock_guard<std::mutex> lock(mutex);
    ASSERT_EQ(0, conn->send_message(new MPing()));
  }
  wait_for_reply();
  ASSERT_EQ(0, client.shutdown());
  client.wait();
  ASSERT_EQ(0, server.shutdown());
  server.wait();
}
/// test that wait() blocks until shutdown()
// Verify that Messenger::wait() blocks until shutdown() is called, for both
// dispatch strategies.
TEST(DirectMessenger, WaitShutdown)
{
  auto cct = g_ceph_context;
  // test wait() with both Queue- and FastStrategy
  DirectMessenger client(cct, entity_name_t::CLIENT(1),
                         "client", 0, new QueueStrategy(1));
  DirectMessenger server(cct, entity_name_t::CLIENT(2),
                         "server", 0, new FastStrategy());
  ASSERT_EQ(0, client.set_direct_peer(&server));
  ASSERT_EQ(0, server.set_direct_peer(&client));
  ASSERT_EQ(0, client.start());
  ASSERT_EQ(0, server.start());
  std::atomic<bool> client_waiting{false};
  std::atomic<bool> server_waiting{false};
  // spawn threads to wait() on each of the messengers
  std::thread client_thread([&] {
    client_waiting = true;
    client.wait();
    client_waiting = false;
  });
  std::thread server_thread([&] {
    server_waiting = true;
    server.wait();
    server_waiting = false;
  });
  // give them time to start
  // NOTE(review): a fixed 50ms sleep is a heuristic — on a heavily loaded
  // machine the threads may not have reached wait() yet, making this flaky
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  ASSERT_TRUE(client_waiting);
  ASSERT_TRUE(server_waiting);
  // call shutdown to unblock the waiting threads
  ASSERT_EQ(0, client.shutdown());
  ASSERT_EQ(0, server.shutdown());
  client_thread.join();
  server_thread.join();
  ASSERT_FALSE(client_waiting);
  ASSERT_FALSE(server_waiting);
}
/// test connection and messenger interfaces after mark_down()
// Verify that mark_down() on one side's connection disconnects both sides
// and that subsequent sends fail with -ENOTCONN.
TEST(DirectMessenger, MarkDown)
{
  auto cct = g_ceph_context;
  DirectMessenger client(cct, entity_name_t::CLIENT(1),
                         "client", 0, new FastStrategy());
  DirectMessenger server(cct, entity_name_t::CLIENT(2),
                         "server", 0, new FastStrategy());
  ASSERT_EQ(0, client.set_direct_peer(&server));
  ASSERT_EQ(0, server.set_direct_peer(&client));
  ASSERT_EQ(0, client.start());
  ASSERT_EQ(0, server.start());
  auto client_to_server = client.get_connection(server.get_myinst());
  auto server_to_client = server.get_connection(client.get_myinst());
  ASSERT_TRUE(client_to_server->is_connected());
  ASSERT_TRUE(server_to_client->is_connected());
  // mark_down() breaks the connection on both sides
  client_to_server->mark_down();
  ASSERT_FALSE(client_to_server->is_connected());
  ASSERT_EQ(-ENOTCONN, client_to_server->send_message(new MPing()));
  ASSERT_EQ(-ENOTCONN, client.send_message(new MPing(), server.get_myinst()));
  ASSERT_FALSE(server_to_client->is_connected());
  ASSERT_EQ(-ENOTCONN, server_to_client->send_message(new MPing()));
  ASSERT_EQ(-ENOTCONN, server.send_message(new MPing(), client.get_myinst()));
  ASSERT_EQ(0, client.shutdown());
  client.wait();
  ASSERT_EQ(0, server.shutdown());
  server.wait();
}
/// test connection and messenger interfaces after shutdown()
// Verify that shutting down (and then destroying) one messenger leaves the
// peer's connection safely disconnected rather than dangling.
TEST(DirectMessenger, SendShutdown)
{
  auto cct = g_ceph_context;
  // put client on the heap so we can free it early
  std::unique_ptr<DirectMessenger> client{
    new DirectMessenger(cct, entity_name_t::CLIENT(1),
                        "client", 0, new FastStrategy())};
  DirectMessenger server(cct, entity_name_t::CLIENT(2),
                         "server", 0, new FastStrategy());
  ASSERT_EQ(0, client->set_direct_peer(&server));
  ASSERT_EQ(0, server.set_direct_peer(client.get()));
  ASSERT_EQ(0, client->start());
  ASSERT_EQ(0, server.start());
  const auto client_inst = client->get_myinst();
  const auto server_inst = server.get_myinst();
  auto client_to_server = client->get_connection(server_inst);
  auto server_to_client = server.get_connection(client_inst);
  ASSERT_TRUE(client_to_server->is_connected());
  ASSERT_TRUE(server_to_client->is_connected());
  // shut down the client to break connections
  ASSERT_EQ(0, client->shutdown());
  client->wait();
  ASSERT_FALSE(client_to_server->is_connected());
  ASSERT_EQ(-ENOTCONN, client_to_server->send_message(new MPing()));
  ASSERT_EQ(-ENOTCONN, client->send_message(new MPing(), server_inst));
  // free the client connection/messenger to test that calls to the server no
  // longer try to dereference them
  client_to_server.reset();
  client.reset();
  ASSERT_FALSE(server_to_client->is_connected());
  ASSERT_EQ(-ENOTCONN, server_to_client->send_message(new MPing()));
  ASSERT_EQ(-ENOTCONN, server.send_message(new MPing(), client_inst));
  ASSERT_EQ(0, server.shutdown());
  server.wait();
}
/// test connection and messenger interfaces after bind()
// Verify bind() ordering rules (only valid before set_direct_peer()), that
// the bound address is visible on the peer's connection, and that
// mark_down(addr) works with that address.
TEST(DirectMessenger, Bind)
{
  auto cct = g_ceph_context;
  DirectMessenger client(cct, entity_name_t::CLIENT(1),
                         "client", 0, new FastStrategy());
  DirectMessenger server(cct, entity_name_t::CLIENT(2),
                         "server", 0, new FastStrategy());
  entity_addr_t client_addr;
  client_addr.set_family(AF_INET);
  client_addr.set_port(1);
  // client bind succeeds before set_direct_peer()
  ASSERT_EQ(0, client.bind(client_addr));
  ASSERT_EQ(0, client.set_direct_peer(&server));
  ASSERT_EQ(0, server.set_direct_peer(&client));
  // server bind fails after set_direct_peer()
  entity_addr_t empty_addr;
  ASSERT_EQ(-EINVAL, server.bind(empty_addr));
  ASSERT_EQ(0, client.start());
  ASSERT_EQ(0, server.start());
  auto client_to_server = client.get_connection(server.get_myinst());
  auto server_to_client = server.get_connection(client.get_myinst());
  ASSERT_TRUE(client_to_server->is_connected());
  ASSERT_TRUE(server_to_client->is_connected());
  // no address in connection to server
  ASSERT_EQ(empty_addr, client_to_server->get_peer_addr());
  // bind address is reflected in connection to client
  ASSERT_EQ(client_addr, server_to_client->get_peer_addr());
  // mark_down() with bind address breaks the connection
  server.mark_down(client_addr);
  ASSERT_FALSE(client_to_server->is_connected());
  ASSERT_FALSE(server_to_client->is_connected());
  ASSERT_EQ(0, client.shutdown());
  client.wait();
  ASSERT_EQ(0, server.shutdown());
  server.wait();
}
/// test connection and messenger interfaces before calls to set_direct_peer()
// Verify behavior before set_direct_peer(): start() fails without a peer,
// and a one-sided peering yields a connection that cannot send.
TEST(DirectMessenger, StartWithoutPeer)
{
  auto cct = g_ceph_context;
  DirectMessenger client(cct, entity_name_t::CLIENT(1),
                         "client", 0, new FastStrategy());
  DirectMessenger server(cct, entity_name_t::CLIENT(2),
                         "server", 0, new FastStrategy());
  // can't start until set_direct_peer()
  ASSERT_EQ(-EINVAL, client.start());
  ASSERT_EQ(-EINVAL, server.start());
  ASSERT_EQ(0, client.set_direct_peer(&server));
  // only client can start
  ASSERT_EQ(0, client.start());
  ASSERT_EQ(-EINVAL, server.start());
  // client has a connection but can't send
  auto conn = client.get_connection(server.get_myinst());
  ASSERT_NE(nullptr, conn);
  ASSERT_FALSE(conn->is_connected());
  ASSERT_EQ(-ENOTCONN, conn->send_message(new MPing()));
  ASSERT_EQ(-ENOTCONN, client.send_message(new MPing(), server.get_myinst()));
  ASSERT_EQ(0, client.shutdown());
  client.wait();
}
// Test entry point: initialize a CephContext (required by the messengers)
// and run all registered gtest cases.
int main(int argc, char **argv)
{
  // command-line arguments
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_ANY,
			 CODE_ENVIRONMENT_DAEMON,
			 CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(cct.get());
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| 13,475 | 29.837529 | 79 | cc |
null | ceph-main/src/test/dokan/dokan.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Cloudbase Solutions
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <windows.h>
#include <iostream>
#include <fstream>
#include <filesystem>
#include <sys/socket.h>
#include <direct.h>
#include "gtest/gtest.h"
#include "common/SubProcess.h"
#include "common/run_cmd.h"
#include "include/uuid.h"
#define DEFAULT_MOUNTPOINT "X:\\"
#define MOUNT_POLL_ATTEMPT 10
#define MOUNT_POLL_INTERVAL_MS 1000
#define TEST_VOL_SERIAL "1234567890"
#define MByte 1048576
namespace fs = std::filesystem;
using namespace std::chrono_literals;
std::string get_uuid() {
uuid_d suffix;
suffix.generate_random();
return suffix.to_string();
}
// Set the file's end-of-file marker to `offset` bytes from the start
// (growing or truncating the file); returns false and logs on failure.
bool move_eof(HANDLE handle, LARGE_INTEGER offset) {
  // Move file pointer to FILE_BEGIN + offset
  if (!SetFilePointerEx(handle, offset, NULL, FILE_BEGIN)) {
    std::cerr << "Setting file pointer failed. err: "
              << GetLastError() << std::endl;
    return false;
  }
  // SetEndOfFile() sets EOF at the current file pointer position
  if (!SetEndOfFile(handle)) {
    std::cerr << "Setting EOF failed. err: " << GetLastError() << std::endl;
    return false;
  }
  return true;
}
// Write `data` to `file_path` (creating/truncating it); fails the current
// test if the file cannot be opened.
void write_file(std::string file_path, std::string data) {
  std::ofstream out(file_path);
  ASSERT_TRUE(out.is_open())
    << "Failed to open file: " << file_path;
  out << data;
  out.flush();
  out.close();
}
// Assert that `file_path` cannot be opened for writing (e.g. because the
// volume is mapped read-only).
void expect_write_failure(std::string file_path) {
  std::ofstream file;
  file.open(file_path);
  ASSERT_FALSE(file.is_open());
}
// Read and return the entire contents of `file_path`; yields an empty
// string when the file cannot be opened.
std::string read_file(std::string file_path) {
  std::ifstream in(file_path);
  return std::string(std::istreambuf_iterator<char>(in),
                     std::istreambuf_iterator<char>());
}
// Round-trip helper: write `data` to `file_path` and assert that reading
// the file back yields exactly the same contents.
void check_write_file(std::string file_path, std::string data) {
  write_file(file_path, data);
  ASSERT_EQ(read_file(file_path), data);
}
// Poll until `mount_path` appears, sleeping MOUNT_POLL_INTERVAL_MS between
// checks for up to MOUNT_POLL_ATTEMPT attempts. Returns 0 on success or
// -ETIMEDOUT if the mount never shows up.
int wait_for_mount(std::string mount_path) {
  std::cerr << "Waiting for mount: " << mount_path << std::endl;
  for (int attempt = 1; attempt < MOUNT_POLL_ATTEMPT; ++attempt) {
    Sleep(MOUNT_POLL_INTERVAL_MS);
    if (fs::exists(mount_path))
      break;
  }
  if (!fs::exists(mount_path)) {
    std::cerr << "Timed out waiting for ceph-dokan mount: "
              << mount_path << std::endl;
    return -ETIMEDOUT;
  }
  std::cerr << "Successfully mounted: " << mount_path << std::endl;
  return 0;
}
// Spawn a ceph-dokan subprocess that maps the test volume at `mountpoint`
// and block until the mount appears. The SubProcess is returned through
// `mount` so the caller can later unmap_dokan() it.
void map_dokan(SubProcess** mount, const char* mountpoint) {
  SubProcess* new_mount = new SubProcess("ceph-dokan");
  new_mount->add_cmd_args("map", "--win-vol-name", "TestCeph",
                          "--win-vol-serial", TEST_VOL_SERIAL,
                          "-l", mountpoint, NULL);
  *mount = new_mount;
  ASSERT_EQ(new_mount->spawn(), 0);
  ASSERT_EQ(wait_for_mount(mountpoint), 0);
}
// Like map_dokan(), but maps the volume with the --read-only flag so write
// attempts through the mount are expected to fail.
void map_dokan_read_only(
  SubProcess** mount,
  const char* mountpoint
) {
  SubProcess* new_mount = new SubProcess("ceph-dokan");
  new_mount->add_cmd_args("map", "--win-vol-name", "TestCeph",
                          "--win-vol-serial", TEST_VOL_SERIAL,
                          "--read-only", "-l", mountpoint, NULL);
  *mount = new_mount;
  ASSERT_EQ(new_mount->spawn(), 0);
  ASSERT_EQ(wait_for_mount(mountpoint), 0);
  std::cerr << mountpoint << " mounted in read-only mode"
            << std::endl;
}
// Map the test volume with an explicit --max-path-len. Values in the
// accepted range [256, 4096] are expected to mount successfully; anything
// outside that range is expected to fail (and the wait to time out).
void map_dokan_with_maxpath(
  SubProcess** mount,
  const char* mountpoint,
  uint64_t max_path_len)
{
  SubProcess* new_mount = new SubProcess("ceph-dokan");
  new_mount->add_cmd_args("map", "--debug", "--dokan-stderr",
                          "--win-vol-name", "TestCeph",
                          "--win-vol-serial", TEST_VOL_SERIAL,
                          "--max-path-len",
                          (std::to_string(max_path_len)).c_str(),
                          "-l", mountpoint, NULL);
  *mount = new_mount;
  ASSERT_EQ(new_mount->spawn(), 0);
  if (256 <= max_path_len && max_path_len <= 4096) {
    ASSERT_EQ(wait_for_mount(mountpoint), 0);
  } else {
    ASSERT_NE(wait_for_mount(mountpoint), 0);
  }
}
// Unmap `mountpoint` via "ceph-dokan unmap" and join the mapping
// subprocess, asserting both succeed.
void unmap_dokan(SubProcess* mount, const char* mountpoint) {
  std::string ret = run_cmd("ceph-dokan", "unmap", "-l",
                            mountpoint, (char*)NULL);
  ASSERT_EQ(ret, "") << "Failed unmapping: " << mountpoint;
  std::cerr<< "Unmounted: " << mountpoint << std::endl;
  ASSERT_EQ(mount->join(), 0);
}
// Return the maximum path component length reported for the volume at
// `mountpoint` via GetVolumeInformation(). On failure the error is logged
// and 0 (the unmodified default) is returned.
int get_volume_max_path(std::string mountpoint){
  char volume_name[MAX_PATH + 1] = { 0 };
  char file_system_name[MAX_PATH + 1] = { 0 };
  DWORD serial_number = 0;
  DWORD max_component_len = 0;
  DWORD file_system_flags = 0;
  if (GetVolumeInformation(
        mountpoint.c_str(),
        volume_name,
        sizeof(volume_name),
        &serial_number,
        &max_component_len,
        &file_system_flags,
        file_system_name,
        sizeof(file_system_name)) != TRUE) {
    std::cerr << "GetVolumeInformation() failed, error: "
              << GetLastError() << std::endl;
  }
  return max_component_len;
}
static SubProcess* shared_mount = nullptr;
// Fixture sharing one ceph-dokan mount at DEFAULT_MOUNTPOINT across all
// tests in the suite; individual tests may create extra mounts of their own.
class DokanTests : public testing::Test
{
protected:
  static void SetUpTestSuite() {
    map_dokan(&shared_mount, DEFAULT_MOUNTPOINT);
  }
  static void TearDownTestSuite() {
    if (shared_mount) {
      unmap_dokan(shared_mount, DEFAULT_MOUNTPOINT);
    }
    shared_mount = nullptr;
  }
};
// Smoke test: map a second dokan volume and immediately unmap it.
TEST_F(DokanTests, test_mount) {
  const std::string drive = "Y:\\";
  SubProcess* proc = nullptr;
  map_dokan(&proc, drive.c_str());
  unmap_dokan(proc, drive.c_str());
}
// Write a file through a normal mount, remap read-only, then verify: new
// writes fail, existing data stays readable, and removal raises a
// filesystem_error until the volume is remapped read-write.
TEST_F(DokanTests, test_mount_read_only) {
  std::string mountpoint = "Z:\\";
  std::string data = "abc123";
  std::string success_file_path = "ro_success_" + get_uuid();
  std::string failed_file_path = "ro_fail_" + get_uuid();
  SubProcess* mount = nullptr;
  map_dokan(&mount, mountpoint.c_str());
  check_write_file(mountpoint + success_file_path, data);
  ASSERT_TRUE(fs::exists(mountpoint + success_file_path));
  unmap_dokan(mount, mountpoint.c_str());
  mount = nullptr;
  map_dokan_read_only(&mount, mountpoint.c_str());
  expect_write_failure(mountpoint + failed_file_path);
  ASSERT_FALSE(fs::exists(mountpoint + failed_file_path));
  ASSERT_TRUE(fs::exists(mountpoint + success_file_path));
  ASSERT_EQ(read_file(mountpoint + success_file_path), data);
  std::string exception_msg(
    "filesystem error: cannot remove: No such device ["
    + mountpoint + success_file_path + "]");
  // removal must throw on the read-only mount, with the expected message
  EXPECT_THROW({
    try {
      fs::remove(mountpoint + success_file_path);
    } catch(const fs::filesystem_error &e) {
      EXPECT_STREQ(e.what(), exception_msg.c_str());
      throw;
    }
  }, fs::filesystem_error);
  unmap_dokan(mount, mountpoint.c_str());
  // remap read-write; the file is still there and can now be removed
  map_dokan(&mount, mountpoint.c_str());
  ASSERT_TRUE(fs::exists(mountpoint + success_file_path));
  ASSERT_TRUE(fs::remove(mountpoint + success_file_path));
  unmap_dokan(mount, mountpoint.c_str());
}
// Verify that a file created with FILE_FLAG_DELETE_ON_CLOSE disappears as
// soon as its last handle is closed.
TEST_F(DokanTests, test_delete_on_close) {
  std::string file_path = DEFAULT_MOUNTPOINT"file_" + get_uuid();
  HANDLE hFile = CreateFile(
    file_path.c_str(),
    GENERIC_WRITE, // open for writing
    0, // sharing mode, none in this case
    0, // use default security descriptor
    CREATE_NEW,
    FILE_ATTRIBUTE_NORMAL | FILE_FLAG_DELETE_ON_CLOSE,
    0);
  // report the file we actually tried to create, not an unrelated path
  ASSERT_NE(hFile, INVALID_HANDLE_VALUE)
    << "Could not open file: " << file_path
    << " err: " << GetLastError() << std::endl;
  ASSERT_NE(CloseHandle(hFile), 0);
  // FILE_FLAG_DELETE_ON_CLOSE is used
  ASSERT_FALSE(fs::exists(file_path));
}
// Write a file through one mount, remount at a different drive letter, and
// verify the data persisted and can be read and removed there.
TEST_F(DokanTests, test_io) {
  std::string data = "abcdef";
  std::string file_path = "test_io_" + get_uuid();
  std::string mountpoint = "I:\\";
  SubProcess* mount = nullptr;
  map_dokan(&mount, mountpoint.c_str());
  check_write_file(mountpoint + file_path, data);
  ASSERT_TRUE(fs::exists(mountpoint + file_path));
  unmap_dokan(mount, mountpoint.c_str());
  // remount under a new drive letter; the file must still be visible
  mountpoint = "O:\\";
  mount = nullptr;
  map_dokan(&mount, mountpoint.c_str());
  ASSERT_TRUE(fs::exists(mountpoint + file_path));
  EXPECT_EQ(data, read_file(mountpoint + file_path));
  ASSERT_TRUE(fs::remove((mountpoint + file_path).c_str()));
  ASSERT_FALSE(fs::exists(mountpoint + file_path));
  unmap_dokan(mount, mountpoint.c_str());
}
// Exercise nested directory creation, file I/O at both levels, and removal
// of files, an empty directory, and a non-empty tree (remove_all).
TEST_F(DokanTests, test_subfolders) {
  std::string base_dir_path = DEFAULT_MOUNTPOINT"base_dir_"
                              + get_uuid() + "\\";
  std::string sub_dir_path = base_dir_path
                             + "test_sub_dir" + get_uuid();
  std::string base_dir_file = base_dir_path
                              + "file_" + get_uuid();
  std::string sub_dir_file = sub_dir_path
                             + "file_" + get_uuid();
  std::string data = "abc";
  ASSERT_EQ(fs::create_directory(base_dir_path), true);
  ASSERT_TRUE(fs::exists(base_dir_path));
  ASSERT_EQ(fs::create_directory(sub_dir_path), true);
  ASSERT_TRUE(fs::exists(sub_dir_path));
  check_write_file(base_dir_file, data);
  ASSERT_TRUE(fs::exists(base_dir_file));
  check_write_file(sub_dir_file, data);
  ASSERT_TRUE(fs::exists(sub_dir_file));
  ASSERT_TRUE(fs::remove((sub_dir_file).c_str()))
    << "Failed to remove file: " << sub_dir_file;
  ASSERT_FALSE(fs::exists(sub_dir_file));
  // Remove empty dir
  ASSERT_TRUE(fs::remove((sub_dir_path).c_str()))
    << "Failed to remove directory: " << sub_dir_path;
  ASSERT_FALSE(fs::exists(sub_dir_file));
  // remove_all returns the number of entries deleted (non-zero here)
  ASSERT_NE(fs::remove_all((base_dir_path).c_str()), 0)
    << "Failed to remove directory: " << base_dir_path;
  ASSERT_FALSE(fs::exists(sub_dir_file));
}
// Verify that recursive directory iteration over the mount enumerates the
// created subdirectory and both files.
TEST_F(DokanTests, test_find_files) {
  std::string basedir_path = "X:/find_" + get_uuid();
  std::string subdir_path = basedir_path + "/dir_" + get_uuid();
  std::string file1_path = basedir_path + "/file1_" + get_uuid();
  std::string file2_path = subdir_path + "/file2_" + get_uuid();
  ASSERT_TRUE(
    fs::create_directories(subdir_path)
  );
  // create two empty files
  std::ofstream{file1_path};
  std::ofstream{file2_path};
  std::vector<std::string> paths;
  for (const auto & entry :
    fs::recursive_directory_iterator(basedir_path)
  ) {
    paths.push_back(entry.path().generic_string());
  }
  ASSERT_NE(std::find(begin(paths), end(paths), subdir_path), end(paths));
  ASSERT_NE(std::find(begin(paths), end(paths), file1_path), end(paths));
  ASSERT_NE(std::find(begin(paths), end(paths), file2_path), end(paths));
  // clean-up
  ASSERT_NE(fs::remove_all(basedir_path), 0);
}
// Verify that fs::rename() moves a file between directories, leaving the
// source absent and the destination readable with identical contents.
TEST_F(DokanTests, test_move_file) {
  std::string dir1_path = DEFAULT_MOUNTPOINT
                          "test_mv_1_" + get_uuid() + "\\";
  std::string dir2_path = DEFAULT_MOUNTPOINT
                          "test_mv_2_" + get_uuid() + "\\";
  std::string file_name = "mv_file_" + get_uuid();
  std::string data = "abcd";
  ASSERT_TRUE(fs::create_directory(dir1_path));
  ASSERT_TRUE(fs::create_directory(dir2_path));
  check_write_file(dir1_path + file_name, data);
  fs::rename(dir1_path + file_name, dir2_path + file_name);
  ASSERT_TRUE(fs::exists(dir2_path + file_name));
  ASSERT_FALSE(fs::exists(dir1_path + file_name));
  ASSERT_EQ(data, read_file(dir2_path + file_name));
  // clean-up
  ASSERT_NE(fs::remove_all(dir1_path),0);
  ASSERT_NE(fs::remove_all(dir2_path),0);
}
// Exercise the --max-path-len mapping option: a 4096-byte limit allows a
// deeply nested ~3000-char path (via the \\?\ extended-length prefix),
// out-of-range values refuse to mount, and the default limit is 256.
TEST_F(DokanTests, test_max_path) {
  std::string mountpoint = "P:\\";
  // extended-length prefix bypasses the Win32 MAX_PATH limit
  std::string extended_mountpoint = "\\\\?\\" + mountpoint;
  SubProcess* mount = nullptr;
  char dir[200] = { 0 };
  char file[200] = { 0 };
  std::string data = "abcd1234";
  memset(dir, 'd', sizeof(dir) - 1);
  memset(file, 'f', sizeof(file) - 1);
  uint64_t max_path_len = 4096;
  map_dokan_with_maxpath(&mount,
                         mountpoint.c_str(),
                         max_path_len);
  EXPECT_EQ(get_volume_max_path(extended_mountpoint),
            max_path_len);
  // build a 15-level-deep chain of ~200-char directory names
  std::string long_dir_path = extended_mountpoint;
  std::string dir_names[15];
  for (int i = 0; i < 15; i++) {
    std::string crt_dir = std::string(dir) + "_"
                          + get_uuid() + "\\";
    long_dir_path.append(crt_dir);
    int stat = _mkdir(long_dir_path.c_str());
    ASSERT_EQ(stat, 0) << "Error creating directory " << i
                       << ": " << GetLastError() << std::endl;
    dir_names[i] = crt_dir;
  }
  std::string file_path = long_dir_path + "\\" + std::string(file)
                          + "_" + get_uuid();
  check_write_file(file_path, data);
  // clean-up
  // fs::remove is unable to handle long Windows paths
  EXPECT_NE(DeleteFileA(file_path.c_str()), 0);
  for (int i = 14; i >= 0; i--) {
    std::string remove_dir = extended_mountpoint;
    for (int j = 0; j <= i; j++) {
      remove_dir.append(dir_names[j]);
    }
    EXPECT_NE(RemoveDirectoryA(remove_dir.c_str()), 0);
  }
  unmap_dokan(mount, mountpoint.c_str());
  // value exceeds 32767, so a failure is expected
  max_path_len = 32770;
  map_dokan_with_maxpath(&mount,
                         mountpoint.c_str(),
                         max_path_len);
  ASSERT_FALSE(fs::exists(mountpoint));
  // value is below 256, so a failure is expected
  max_path_len = 150;
  map_dokan_with_maxpath(&mount,
                         mountpoint.c_str(),
                         max_path_len);
  ASSERT_FALSE(fs::exists(mountpoint));
  // default value
  map_dokan(&mount, mountpoint.c_str());
  EXPECT_EQ(get_volume_max_path(mountpoint.c_str()), 256);
  unmap_dokan(mount, mountpoint.c_str());
}
// Verify that SetEndOfFile() (via move_eof) can both grow and shrink a
// file and that GetFileSizeEx() reports the updated size.
TEST_F(DokanTests, test_set_eof) {
  std::string file_path = DEFAULT_MOUNTPOINT"test_eof_"
                          + get_uuid();
  HANDLE hFile = CreateFile(
    file_path.c_str(),
    GENERIC_WRITE, // open for writing
    0, // sharing mode, none in this case
    0, // use default security descriptor
    CREATE_NEW,
    FILE_ATTRIBUTE_NORMAL | FILE_FLAG_DELETE_ON_CLOSE,
    0);
  // report the file we actually tried to create, not an unrelated path
  ASSERT_NE(hFile, INVALID_HANDLE_VALUE)
    << "Could not open file: " << file_path
    << " err: " << GetLastError() << std::endl;
  LARGE_INTEGER offset;
  offset.QuadPart = 2 * MByte; // 2MB
  LARGE_INTEGER file_size;
  // grow the file to 2MB
  ASSERT_TRUE(move_eof(hFile, offset));
  ASSERT_NE(GetFileSizeEx(hFile, &file_size), 0);
  EXPECT_EQ(file_size.QuadPart, offset.QuadPart);
  // shrink it back to 1MB
  offset.QuadPart = MByte; // 1MB
  ASSERT_TRUE(move_eof(hFile, offset));
  ASSERT_NE(GetFileSizeEx(hFile, &file_size), 0);
  EXPECT_EQ(file_size.QuadPart, offset.QuadPart);
  ASSERT_NE(CloseHandle(hFile), 0);
  // FILE_FLAG_DELETE_ON_CLOSE is used
  ASSERT_FALSE(fs::exists(file_path));
}
// Exercises FILE_ALLOCATION_INFO (SetFileInformationByHandle) followed by
// EOF moves, verifying the reported size follows the EOF rather than the
// preallocated size. FILE_FLAG_DELETE_ON_CLOSE cleans up on close.
TEST_F(DokanTests, test_set_alloc_size) {
  std::string file_path = DEFAULT_MOUNTPOINT"test_alloc_size_"
                          + get_uuid();
  HANDLE hFile = CreateFile(
    file_path.c_str(),
    GENERIC_WRITE, // open for writing
    0,             // sharing mode, none in this case
    0,             // use default security descriptor
    CREATE_NEW,
    FILE_ATTRIBUTE_NORMAL | FILE_FLAG_DELETE_ON_CLOSE,
    0);
  // Bug fix: the failure message used to name an unrelated file
  // (test_create.txt); report the file we actually tried to create.
  ASSERT_NE(hFile, INVALID_HANDLE_VALUE)
    << "Could not open file: " << file_path
    << " err: " << GetLastError() << std::endl;
  LARGE_INTEGER li;
  li.QuadPart = MByte;
  FILE_ALLOCATION_INFO fai;
  fai.AllocationSize = li; // preallocate 1MB
  ASSERT_NE(SetFileInformationByHandle(
      hFile,
      FileAllocationInfo,
      &fai,
      sizeof(FILE_ALLOCATION_INFO)
    ), 0) << "Error: " << GetLastError();
  LARGE_INTEGER offset;
  offset.QuadPart = 2 * MByte;
  LARGE_INTEGER file_size;
  ASSERT_TRUE(move_eof(hFile, offset));
  ASSERT_NE(GetFileSizeEx(hFile, &file_size), 0);
  EXPECT_EQ(file_size.QuadPart, offset.QuadPart);
  offset.QuadPart = MByte;
  ASSERT_TRUE(move_eof(hFile, offset));
  ASSERT_NE(GetFileSizeEx(hFile, &file_size), 0);
  EXPECT_EQ(file_size.QuadPart, offset.QuadPart);
  ASSERT_NE(CloseHandle(hFile), 0);
  // FILE_FLAG_DELETE_ON_CLOSE is used
  ASSERT_FALSE(fs::exists(file_path));
}
// Creates a regular file and a directory under a fresh test directory on
// the mounted volume and checks that fs::status() classifies each one
// correctly.
TEST_F(DokanTests, test_file_type) {
  std::string base_dir = DEFAULT_MOUNTPOINT"test_info_"
                         + get_uuid() + "\\";
  std::string regular_file = base_dir + "file_"
                             + get_uuid();
  std::string sub_dir = base_dir + "dir_"
                        + get_uuid() + "\\";
  ASSERT_TRUE(fs::create_directory(base_dir));
  std::ofstream{regular_file}; // touch an empty file
  ASSERT_TRUE(fs::create_directory(sub_dir));
  ASSERT_TRUE(fs::is_regular_file(fs::status(regular_file)));
  ASSERT_TRUE(fs::is_directory(fs::status(sub_dir)));
  // clean-up
  fs::remove_all(base_dir);
}
// Queries volume metadata through GetVolumeInformation() and checks the
// values advertised by the test mapping (label, fs name, max component
// length, serial number).
TEST_F(DokanTests, test_volume_info) {
  char volume_name[MAX_PATH + 1] = { 0 };
  char file_system_name[MAX_PATH + 1] = { 0 };
  DWORD serial_number = 0;
  DWORD max_component_len = 0;
  DWORD file_system_flags = 0;
  ASSERT_EQ(
    GetVolumeInformation(
      DEFAULT_MOUNTPOINT,
      volume_name,
      sizeof(volume_name),
      &serial_number,
      &max_component_len,
      &file_system_flags,
      file_system_name,
      sizeof(file_system_name)),TRUE)
    << "GetVolumeInformation() failed, error: "
    << GetLastError() << std::endl;
  // values configured when the test volume was mapped
  ASSERT_STREQ(volume_name, "TestCeph")
    << "Received: " << volume_name << std::endl;
  ASSERT_STREQ(file_system_name, "Ceph")
    << "Received: " << file_system_name << std::endl;
  ASSERT_EQ(max_component_len, 256);
  // assumes TEST_VOL_SERIAL is a decimal string (it is parsed with stoi)
  ASSERT_EQ(serial_number, std::stoi(TEST_VOL_SERIAL))
    << "Received: " << serial_number << std::endl;
  // Consider adding specific flags
  // and check for them
  // ASSERT_EQ(file_system_flags, 271);
}
// The mounted volume must report non-zero capacity, free and available
// space through std::filesystem::space().
TEST_F(DokanTests, test_get_free_space) {
  std::error_code err;
  const std::filesystem::space_info info =
    std::filesystem::space(DEFAULT_MOUNTPOINT, err);
  ASSERT_EQ(err.value(), 0);
  ASSERT_NE(static_cast<std::intmax_t>(info.capacity), 0);
  ASSERT_NE(static_cast<std::intmax_t>(info.free), 0);
  ASSERT_NE(static_cast<std::intmax_t>(info.available), 0);
}
// Creates three files one second apart, checks that last_write_time
// ordering matches creation order, then bumps file1's mtime by one hour
// and verifies the new timestamp is stored and now sorts after the others.
TEST_F(DokanTests, test_file_timestamp) {
  std::string file1 = DEFAULT_MOUNTPOINT"test_time1_"
                      + get_uuid();
  std::string file2 = DEFAULT_MOUNTPOINT"test_time2_"
                      + get_uuid();
  std::string file3 = DEFAULT_MOUNTPOINT"test_time3_"
                      + get_uuid();
  std::ofstream{file1};
  Sleep(1000); // 1s gap so the three mtimes are strictly ordered
  std::ofstream{file2};
  Sleep(1000);
  std::ofstream{file3};
  int64_t file1_creation = fs::last_write_time(file1)
                           .time_since_epoch().count();
  int64_t file2_creation = fs::last_write_time(file2)
                           .time_since_epoch().count();
  int64_t file3_creation = fs::last_write_time(file3)
                           .time_since_epoch().count();
  EXPECT_LT(file1_creation, file2_creation);
  EXPECT_LT(file2_creation, file3_creation);
  // add 1h to file 1 creation time
  // NOTE(review): the 1h literal relies on std::chrono_literals being
  // imported somewhere earlier in this file -- confirm.
  fs::file_time_type file1_time = fs::last_write_time(file1);
  fs::last_write_time(file1, file1_time + 1h);
  int64_t file1_new_time = fs::last_write_time(file1)
                           .time_since_epoch().count();
  // the new timestamp must be stored exactly and now be the latest
  EXPECT_EQ((file1_time + 1h).time_since_epoch().count(),
            file1_new_time);
  EXPECT_GT(file1_new_time, file2_creation);
  EXPECT_GT(file1_new_time, file3_creation);
  ASSERT_TRUE(fs::remove(file1));
  ASSERT_TRUE(fs::remove(file2));
  ASSERT_TRUE(fs::remove(file3));
}
// Marks an open file for deletion via FILE_DISPOSITION_INFO and verifies
// that it disappears once the handle is closed.
TEST_F(DokanTests, test_delete_disposition) {
  std::string file_path = DEFAULT_MOUNTPOINT"test_disp_"
                          + get_uuid();
  HANDLE hFile = CreateFile(file_path.c_str(),
                            GENERIC_ALL, // required for delete
                            0, // exclusive access
                            NULL,
                            CREATE_ALWAYS,
                            0,
                            NULL);
  ASSERT_NE(hFile, INVALID_HANDLE_VALUE)
    << "Could not open file: " << file_path
    << "err: " << GetLastError() << std::endl;
  FILE_DISPOSITION_INFO fdi;
  fdi.DeleteFile = TRUE; // marking for deletion
  ASSERT_NE(
    SetFileInformationByHandle(
      hFile,
      FileDispositionInfo,
      &fdi,
      sizeof(FILE_DISPOSITION_INFO)), 0);
  ASSERT_NE(CloseHandle(hFile), 0);
  // the delete disposition takes effect when the last handle is closed
  ASSERT_FALSE(fs::exists(file_path));
}
bool check_create_disposition(std::string path, DWORD disposition) {
HANDLE hFile = CreateFile(path.c_str(),
GENERIC_WRITE,
0, // exclusive access
NULL,
disposition,
0,
NULL);
if(hFile == INVALID_HANDLE_VALUE) {
return false;
}
if(CloseHandle(hFile) == 0) {
return false;
}
return true;
}
// Walks every CreateFile creation disposition against both an existing and
// a non-existing file, checking the success/failure outcome and the
// GetLastError() value documented for each combination.
// NOTE(review): GetLastError() is read after check_create_disposition
// returns, i.e. after CloseHandle -- assumes CloseHandle preserves the
// relevant last-error value; confirm.
TEST_F(DokanTests, test_create_dispositions) {
  std::string file_path = DEFAULT_MOUNTPOINT"test_create_"
                          + get_uuid();
  std::string non_existant_file = DEFAULT_MOUNTPOINT
                                  "test_create_" + get_uuid();
  // CREATE_NEW with a fresh path succeeds
  EXPECT_TRUE(
    check_create_disposition(file_path, CREATE_NEW));
  // CREATE_ALWAYS with existing file
  EXPECT_TRUE(
    check_create_disposition(file_path, CREATE_ALWAYS));
  EXPECT_EQ(GetLastError(), ERROR_ALREADY_EXISTS);
  // CREATE_NEW with existing file
  EXPECT_FALSE(
    check_create_disposition(file_path, CREATE_NEW));
  EXPECT_EQ(GetLastError(), ERROR_FILE_EXISTS);
  // OPEN_EXISTING with existing file
  EXPECT_TRUE(
    check_create_disposition(file_path, OPEN_EXISTING));
  ASSERT_FALSE(fs::exists(non_existant_file));
  // OPEN_EXISTING with non-existant file
  EXPECT_FALSE(
    check_create_disposition(non_existant_file, OPEN_EXISTING));
  EXPECT_EQ(GetLastError(), ERROR_FILE_NOT_FOUND);
  // OPEN_ALWAYS with existing file
  EXPECT_TRUE(
    check_create_disposition(file_path, OPEN_ALWAYS));
  EXPECT_EQ(GetLastError(), ERROR_ALREADY_EXISTS);
  ASSERT_FALSE(fs::exists(non_existant_file));
  // OPEN_ALWAYS with non-existant file
  EXPECT_TRUE(
    check_create_disposition(non_existant_file, OPEN_ALWAYS));
  EXPECT_EQ(GetLastError(), 0);
  ASSERT_TRUE(fs::remove(non_existant_file));
  // TRUNCATE_EXISTING with existing file
  EXPECT_TRUE(
    check_create_disposition(file_path, TRUNCATE_EXISTING));
  // TRUNCATE_EXISTING with non-existant file
  EXPECT_FALSE(
    check_create_disposition(non_existant_file,
                             TRUNCATE_EXISTING));
  EXPECT_EQ(GetLastError(), ERROR_FILE_NOT_FOUND);
  // clean-up
  ASSERT_TRUE(fs::remove(file_path));
}
| 24,051 | 30.15544 | 80 | cc |
null | ceph-main/src/test/encoding/check-generated.sh | #!/usr/bin/env bash
set -e

source $(dirname $0)/../detect-build-env-vars.sh
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

# Round-trips every generated test instance of every type known to
# ceph-dencoder through encode/decode, copy and copy-constructor paths and
# compares the json dumps (plus the binary re-encoding for deterministic
# types). Exits non-zero if any check fails.
dir=$1

tmp1=`mktemp /tmp/typ-XXXXXXXXX`
tmp2=`mktemp /tmp/typ-XXXXXXXXX`
tmp3=`mktemp /tmp/typ-XXXXXXXXX`
tmp4=`mktemp /tmp/typ-XXXXXXXXX`

failed=0
numtests=0
echo "checking ceph-dencoder generated test instances..."
echo "numgen type"
while read type; do
    num=`ceph-dencoder type $type count_tests`
    echo "$num $type"
    for n in `seq 1 1 $num 2>/dev/null`; do
        # run the four dump variants in parallel
        pids=""
        run_in_background pids save_stdout "$tmp1" ceph-dencoder type "$type" select_test "$n" dump_json
        run_in_background pids save_stdout "$tmp2" ceph-dencoder type "$type" select_test "$n" encode decode dump_json
        run_in_background pids save_stdout "$tmp3" ceph-dencoder type "$type" select_test "$n" copy dump_json
        run_in_background pids save_stdout "$tmp4" ceph-dencoder type "$type" select_test "$n" copy_ctor dump_json
        wait_background pids
        if [ $? -ne 0 ]; then
            echo "**** $type test $n encode+decode check failed ****"
            echo "   ceph-dencoder type $type select_test $n encode decode"
            failed=$(($failed + 3))
            continue
        fi
        # nondeterministic classes may dump nondeterministically. compare
        # the sorted json output. this is a weaker test, but is better
        # than nothing.
        deterministic=0
        if ceph-dencoder type "$type" is_deterministic; then
            deterministic=1
        fi
        if [ $deterministic -eq 0 ]; then
            echo "  sorting json output for nondeterministic object"
            for f in $tmp1 $tmp2 $tmp3 $tmp4; do
                sort $f | sed 's/,$//' > $f.new
                mv $f.new $f
            done
        fi
        if ! cmp $tmp1 $tmp2; then
            echo "**** $type test $n dump_json check failed ****"
            echo "   ceph-dencoder type $type select_test $n dump_json > $tmp1"
            echo "   ceph-dencoder type $type select_test $n encode decode dump_json > $tmp2"
            diff $tmp1 $tmp2
            failed=$(($failed + 1))
        fi
        if ! cmp $tmp1 $tmp3; then
            echo "**** $type test $n copy dump_json check failed ****"
            echo "   ceph-dencoder type $type select_test $n dump_json > $tmp1"
            # bug fix: show/diff the copy output ($tmp3); this used to
            # mistakenly diff $tmp2 (the encode/decode output)
            echo "   ceph-dencoder type $type select_test $n copy dump_json > $tmp3"
            diff $tmp1 $tmp3
            failed=$(($failed + 1))
        fi
        if ! cmp $tmp1 $tmp4; then
            echo "**** $type test $n copy_ctor dump_json check failed ****"
            echo "   ceph-dencoder type $type select_test $n dump_json > $tmp1"
            # bug fix: show/diff the copy_ctor output ($tmp4), not $tmp2
            echo "   ceph-dencoder type $type select_test $n copy_ctor dump_json > $tmp4"
            diff $tmp1 $tmp4
            failed=$(($failed + 1))
        fi
        if [ $deterministic -ne 0 ]; then
            # deterministic types must also re-encode to identical bytes
            run_in_background pids ceph-dencoder type "$type" select_test $n encode export "$tmp1"
            run_in_background pids ceph-dencoder type "$type" select_test $n encode decode encode export "$tmp2"
            wait_background pids
            if ! cmp $tmp1 $tmp2; then
                echo "**** $type test $n binary reencode check failed ****"
                echo "   ceph-dencoder type $type select_test $n encode export $tmp1"
                echo "   ceph-dencoder type $type select_test $n encode decode encode export $tmp2"
                diff <(hexdump -C $tmp1) <(hexdump -C $tmp2)
                failed=$(($failed + 1))
            fi
        fi
        numtests=$(($numtests + 3))
    done
done < <(ceph-dencoder list_types)

rm -f $tmp1 $tmp2 $tmp3 $tmp4

if [ $failed -gt 0 ]; then
    echo "FAILED $failed / $numtests tests."
    exit 1
fi
echo "passed $numtests tests."
| 3,335 | 31.705882 | 111 | sh |
null | ceph-main/src/test/encoding/generate-corpus-objects.sh | #!/usr/bin/env bash
# Spins up a vstart cluster, generates a broad mix of cluster activity
# (rados/rbd/cephfs/rgw workloads, OSD recovery, MDS failover) and then
# imports the encoded objects the daemons dump into the object corpus.
# Usage: generate-corpus-objects.sh <dump-path>
set -ex

BDIR=`pwd`

p=$1
echo path $p
test ! -d $p
mkdir $p

# sanity check: the OSD binary must have been built with the object dump
# hook pointing at our target directory
strings bin/ceph-osd | grep "^$p/%s__%d.%x"

v=`git describe | cut -c 2-`
echo version $v

echo 'binaries look ok, vstarting'
echo

# NOTE(review): MDS=3 is specified twice -- presumably one is redundant.
MON=3 MDS=3 OSD=5 MDS=3 MGR=2 RGW=1 ../src/vstart.sh -x -n -l --bluestore -e

export PATH=bin:$PATH

# do some work to generate a hopefully broad set of object instances
echo 'starting some background work'
../qa/workunits/rados/test.sh &
../qa/workunits/rbd/test_librbd.sh &
../qa/workunits/libcephfs/test.sh &
../qa/workunits/rgw/run-s3tests.sh &
ceph-syn --syn makedirs 3 3 3 &

echo 'waiting a bit'
sleep 10

# kill/out/in an OSD to exercise the recovery encode/decode paths
echo 'triggering some recovery'
kill -9 `cat out/osd.0.pid`
sleep 10
ceph osd out 0
sleep 10
init-ceph start osd.0
ceph osd in 0
sleep 5

echo 'triggering mds work'
bin/ceph mds fail 0

echo 'waiting for worker to join (and ignoring errors)'
wait || true

echo 'importing'
../src/test/encoding/import.sh $p $v ../ceph-object-corpus/archive

# keep at most 25 instances per type to bound the corpus size
for d in ../ceph-object-corpus/archive/$v/objects/*
do
    echo prune $d
    ../ceph-object-corpus/bin/prune.sh $d 25
done

echo 'done'
| 1,105 | 17.433333 | 76 | sh |
null | ceph-main/src/test/encoding/identity.sh | #!/bin/sh -e
# Round-trips every object under <dir>/<type>/ through the locally built
# ceph-dencoder, verifying that the json dump is stable across a
# decode/encode cycle and that re-encoding reproduces the archived bytes
# exactly. Usage: identity.sh <corpus-objects-dir>
dir=$1

set -e

tmp1=`mktemp /tmp/typ-XXXXXXXXX`
tmp2=`mktemp /tmp/typ-XXXXXXXXX`

for type in `ls $dir`
do
    # skip types this dencoder build does not know about
    if ./ceph-dencoder type $type 2>/dev/null; then
        echo "type $type"
        for o in `ls $dir/$type`; do
            f="$dir/$type/$o"
            # NOTE(review): plain echo does not expand \t on every /bin/sh
            echo "\t$f"
            # json dump must survive a decode/encode/decode cycle
            ./ceph-dencoder type $type import $f decode dump_json > $tmp1
            ./ceph-dencoder type $type import $f decode encode decode dump_json > $tmp2
            cmp $tmp1 $tmp2 || exit 1
            # re-encoding must reproduce the archived file byte-for-byte
            ./ceph-dencoder type $type import $f decode encode export $tmp1
            cmp $tmp1 $f || exit 1
        done
    else
        echo "skip $type"
    fi
done

rm -f $tmp1 $tmp2
echo OK
| 664 | 19.151515 | 87 | sh |
null | ceph-main/src/test/encoding/import-generated.sh | #!/bin/sh -e
# Exports every generated test instance of every ceph-dencoder type into
# <archive>/<version>/objects/<type>/<md5>, content-addressed by md5 so
# re-runs only add new objects. Usage: import-generated.sh <archive>
archive=$1

# bug fix: the usage message used to be printed without exiting, letting
# the script run on against a bogus archive path.
if [ ! -d "$archive" ]; then
    echo "usage: $0 <archive>"
    exit 1
fi

ver=`bin/ceph-dencoder version`
echo "version $ver"

[ -d "$archive/$ver" ] || mkdir "$archive/$ver"

tmp1=`mktemp /tmp/typ-XXXXXXXXX`

echo "numgen\ttype"
for type in `bin/ceph-dencoder list_types`; do
    [ -d "$archive/$ver/objects/$type" ] || mkdir -p "$archive/$ver/objects/$type"
    num=`bin/ceph-dencoder type $type count_tests`
    echo "$num\t$type"
    max=$(($num - 1))
    for n in `seq 0 $max`; do
        bin/ceph-dencoder type $type select_test $n encode export $tmp1
        md=`md5sum $tmp1 | awk '{print $1}'`
        echo "\t$md"
        [ -e "$archive/$ver/objects/$type/$md" ] || cp $tmp1 $archive/$ver/objects/$type/$md
    done
done

rm $tmp1
| 716 | 22.129032 | 85 | sh |
null | ceph-main/src/test/encoding/import.sh | #!/bin/sh -e
# Imports encoded objects dumped under <srcdir> into the corpus archive,
# grouping by the type prefix of each file name (<type>__<seq>.<hash>) and
# content-addressing by md5. Usage: import.sh <srcdir> <version> <archive>
src=$1
ver=$2
archive=$3

# bug fix: the usage message used to be printed without exiting, letting
# the script run on with missing/invalid directories.
if [ ! -d "$archive" ] || [ ! -d "$src" ]; then
    echo "usage: $0 <srcdir> <version> <archive>"
    exit 1
fi

[ -d "$archive/$ver" ] || mkdir "$archive/$ver"
dest_dir="$archive/$ver/objects"
[ -d "$dest_dir" ] || mkdir "$dest_dir"

for f in `find $src -type f`
do
    n=`basename $f`
    # strip the "__<seq>.<hash>" suffix to recover the type name
    type=`echo $n | sed 's/__.*//'`
    md=`md5sum $f | awk '{print $1}'`
    [ -d "$dest_dir/$type" ] || mkdir $dest_dir/$type
    [ -e "$dest_dir/$type/$md" ] || cp $f $dest_dir/$type/$md
done
| 497 | 19.75 | 83 | sh |
null | ceph-main/src/test/encoding/readable.sh | #!/usr/bin/env bash
# Verifies that every object in the ceph-object-corpus archive can still be
# decoded, and re-encoded to an identical dump, by the current dencoder.
set -e

source $(dirname $0)/../detect-build-env-vars.sh
[ -z "$CEPH_ROOT" ] && CEPH_ROOT=..

dir=$CEPH_ROOT/ceph-object-corpus

failed=0
numtests=0
pids=""

# prefer a freshly built dencoder in the cwd, fall back to $PATH
if [ -x ./ceph-dencoder ]; then
  CEPH_DENCODER=./ceph-dencoder
else
  CEPH_DENCODER=ceph-dencoder
fi

myversion=`$CEPH_DENCODER version`
DEBUG=0
WAITALL_DELAY=.1

# print to stderr only when DEBUG is enabled
debug() { if [ "$DEBUG" -gt 0 ]; then echo "DEBUG: $*" >&2; fi }
# test_object <type> <output_file>
# Round-trip every archived object of <type> under $vdir (set by the main
# loop, along with $dir, $arversion and $myversion) and write
# "failed=N" / "numtests=N" lines to <output_file> for do_join to collect.
# Objects listed as forward-incompatible for a later archive version are
# skipped (whole type, or per-object when the incompat entry is a dir).
# NOTE(review): $version is referenced below but does not appear to be set
# anywhere -- confirm whether that early-break branch can ever trigger.
test_object() {
    local type=$1
    local output_file=$2
    local failed=0
    local numtests=0

    tmp1=`mktemp /tmp/test_object_1-XXXXXXXXX`
    tmp2=`mktemp /tmp/test_object_2-XXXXXXXXX`

    rm -f $output_file
    if $CEPH_DENCODER type $type 2>/dev/null; then
        #echo "type $type";
        echo "        $vdir/objects/$type"
        # is there a fwd incompat change between $arversion and $version?
        incompat=""
        incompat_paths=""
        sawarversion=0
        for iv in `ls $dir/archive | sort -n`; do
            if [ "$iv" = "$arversion" ]; then
                sawarversion=1
            fi

            if [ $sawarversion -eq 1 ] && [ -e "$dir/archive/$iv/forward_incompat/$type" ]; then
                incompat="$iv"

                # Check if we'll be ignoring only specified objects, not whole type. If so, remember
                # all paths for this type into variable. Assuming that this path won't contain any
                # whitechars (implication of above for loop).
                if [ -d "$dir/archive/$iv/forward_incompat/$type" ]; then
                    if [ -n "`ls $dir/archive/$iv/forward_incompat/$type/ | sort -n`" ]; then
                        incompat_paths="$incompat_paths $dir/archive/$iv/forward_incompat/$type"
                    else
                        echo "type $type directory empty, ignoring whole type instead of single objects"
                    fi;
                fi
            fi

            if [ "$iv" = "$version" ]; then
                rm -rf $tmp1 $tmp2
                break
            fi
        done

        if [ -n "$incompat" ]; then
            if [ -z "$incompat_paths" ]; then
                echo "skipping incompat $type version $arversion, changed at $incompat < code $myversion"
                rm -rf $tmp1 $tmp2
                return
            else
                # If we are ignoring not whole type, but objects that are in $incompat_path,
                # we don't skip here, just give info.
                echo "postponed skip one of incompact $type version $arversion, changed at $incompat < code $myversion"
            fi;
        fi

        for f in `ls $vdir/objects/$type`; do
            skip=0;
            # Check if processed object $f of $type should be skipped (postponed skip)
            if [ -n "$incompat_paths" ]; then
                for i_path in $incompat_paths; do
                    # Check if $f is a symbolic link and if it's pointing to existing target
                    if [ -L "$i_path/$f" ]; then
                        echo "skipping object $f of type $type"
                        skip=1
                        break
                    fi;
                done;
            fi;
            if [ $skip -ne 0 ]; then
                continue
            fi;

            $CEPH_DENCODER type $type import $vdir/objects/$type/$f decode dump_json > $tmp1 &
            pid1="$!"
            $CEPH_DENCODER type $type import $vdir/objects/$type/$f decode encode decode dump_json > $tmp2 &
            pid2="$!"
            #echo "\t$vdir/$type/$f"
            if ! wait $pid1; then
                echo "**** failed to decode $vdir/objects/$type/$f ****"
                failed=$(($failed + 1))
                rm -f $tmp1 $tmp2
                continue
            fi
            if ! wait $pid2; then
                echo "**** failed to decode+encode+decode $vdir/objects/$type/$f ****"
                failed=$(($failed + 1))
                rm -f $tmp1 $tmp2
                continue
            fi

            # nondeterministic classes may dump
            # nondeterministically. compare the sorted json
            # output. this is a weaker test, but is better than
            # nothing.
            if ! $CEPH_DENCODER type $type is_deterministic; then
                echo "  sorting json output for nondeterministic object"
                # bug fix: this loop used to reuse "$f" as its variable,
                # clobbering the object name and corrupting the mismatch
                # message below
                for tf in $tmp1 $tmp2; do
                    sort $tf | sed 's/,$//' > $tf.new
                    mv $tf.new $tf
                done
            fi

            if ! cmp $tmp1 $tmp2; then
                echo "**** reencode of $vdir/objects/$type/$f resulted in a different dump ****"
                diff $tmp1 $tmp2
                failed=$(($failed + 1))
            fi
            numtests=$(($numtests + 1))
            rm -f $tmp1 $tmp2
        done
    else
        echo "skipping unrecognized type $type"
        rm -f $tmp1 $tmp2
    fi

    echo "failed=$failed" > $output_file
    echo "numtests=$numtests" >> $output_file
}
# waitall PID...
# Poll the given pids until all of them exit; the function's status is
# success only if every child exited with status 0.
waitall() { # PID...
    ## Wait for children to exit and indicate whether all exited with 0 status.
    local errors=0
    while :; do
        debug "Processes remaining: $*"
        for pid in "$@"; do
            shift
            if kill -0 "$pid" 2>/dev/null; then
                # still running: push the pid back onto the positional list
                debug "$pid is still alive."
                set -- "$@" "$pid"
            elif wait "$pid"; then
                debug "$pid exited with zero exit status."
            else
                debug "$pid exited with non-zero exit status."
                errors=$(($errors + 1))
            fi
        done
        [ $# -eq 0 ] && break
        # throttle the polling loop
        sleep ${WAITALL_DELAY:-1}
    done
    [ $errors -eq 0 ]
}
######
# MAIN
######
# Wait for all outstanding test_object jobs, then fold every per-job
# "failed=" / "numtests=" output file into the global counters, and reset
# $pids / $running_jobs for the next batch.
do_join() {
    waitall $pids
    pids=""
    # Reading the output of jobs to compute failed & numtests
    # Tests are run in parallel but sum should be done sequentially to avoid
    # races between threads
    while [ "$running_jobs" -ge 0 ]; do
        if [ -f $output_file.$running_jobs ]; then
            read_failed=$(grep "^failed=" $output_file.$running_jobs | cut -d "=" -f 2)
            read_numtests=$(grep "^numtests=" $output_file.$running_jobs | cut -d "=" -f 2)
            rm -f $output_file.$running_jobs
            failed=$(($failed + $read_failed))
            numtests=$(($numtests + $read_numtests))
        fi
        running_jobs=$(($running_jobs - 1))
    done
    running_jobs=0
}
# Using $MAX_PARALLEL_JOBS jobs if defined, otherwise the number of logical
# processors
if [ `uname` == FreeBSD -o `uname` == Darwin ]; then
    NPROC=`sysctl -n hw.ncpu`
    max_parallel_jobs=${MAX_PARALLEL_JOBS:-${NPROC}}
else
    max_parallel_jobs=${MAX_PARALLEL_JOBS:-$(nproc)}
fi

output_file=`mktemp /tmp/output_file-XXXXXXXXX`
running_jobs=0

# schedule one test_object job per (archive version, type), joining every
# $max_parallel_jobs jobs
for arversion in `ls $dir/archive | sort -n`; do
    vdir="$dir/archive/$arversion"
    #echo $vdir

    if [ ! -d "$vdir/objects" ]; then
        continue;
    fi

    for type in `ls $vdir/objects`; do
        test_object $type $output_file.$running_jobs &
        pids="$pids $!"
        running_jobs=$(($running_jobs + 1))

        # Once we spawned enough jobs, let's wait them to complete
        # Every spawned job have almost the same execution time so
        # it's not a big deal having them not ending at the same time
        if [ "$running_jobs" -eq "$max_parallel_jobs" ]; then
            do_join
        fi
        # NOTE(review): this runs on every iteration, so it can delete the
        # output file of a job that finished early but has not been joined
        # yet -- confirm whether it should only run right after do_join.
        rm -f ${output_file}*
    done
done
do_join

if [ $failed -gt 0 ]; then
    echo "FAILED $failed / $numtests tests."
    exit 1
fi
if [ $numtests -eq 0 ]; then
    echo "FAILED: no tests found to run!"
    exit 1
fi
echo "passed $numtests tests."
| 7,056 | 28.161157 | 113 | sh |
null | ceph-main/src/test/erasure-code/ErasureCodeExample.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_EXAMPLE_H
#define CEPH_ERASURE_CODE_EXAMPLE_H
#include <unistd.h>
#include <errno.h>
#include <algorithm>
#include <sstream>
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "erasure-code/ErasureCode.h"
#define FIRST_DATA_CHUNK 0
#define SECOND_DATA_CHUNK 1
#define DATA_CHUNKS 2u
#define CODING_CHUNK 2
#define CODING_CHUNKS 1u
#define MINIMUM_TO_RECOVER 2u
// Toy erasure code used by the example plugin and the unit tests: two data
// chunks plus one coding chunk holding their byte-wise XOR, so any single
// missing chunk can be rebuilt from the other two.
class ErasureCodeExample final : public ErasureCode {
public:
  ~ErasureCodeExample() override {}

  // Build a simple "indep" erasure rule with a host failure domain.
  int create_rule(const std::string &name,
                  CrushWrapper &crush,
                  std::ostream *ss) const override {
    return crush.add_simple_rule(name, "default", "host", "",
                                 "indep", pg_pool_t::TYPE_ERASURE, ss);
  }

  // Pick the cheapest set of chunks to decode from: when all three chunks
  // are available, drop the single most expensive one (it can be rebuilt
  // from the other two) before delegating to _minimum_to_decode.
  int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
                                  const std::map<int, int> &available,
                                  std::set<int> *minimum) override {
    //
    // If one chunk is more expensive to fetch than the others,
    // recover it instead. For instance, if the cost reflects the
    // time it takes for a chunk to be retrieved from a remote
    // OSD and if CPU is cheap, it could make sense to recover
    // instead of fetching the chunk.
    //
    std::map<int, int> c2c(available);
    if (c2c.size() > DATA_CHUNKS) {
      if (c2c[FIRST_DATA_CHUNK] > c2c[SECOND_DATA_CHUNK] &&
          c2c[FIRST_DATA_CHUNK] > c2c[CODING_CHUNK])
        c2c.erase(FIRST_DATA_CHUNK);
      else if(c2c[SECOND_DATA_CHUNK] > c2c[FIRST_DATA_CHUNK] &&
              c2c[SECOND_DATA_CHUNK] > c2c[CODING_CHUNK])
        c2c.erase(SECOND_DATA_CHUNK);
      else if(c2c[CODING_CHUNK] > c2c[FIRST_DATA_CHUNK] &&
              c2c[CODING_CHUNK] > c2c[SECOND_DATA_CHUNK])
        c2c.erase(CODING_CHUNK);
    }
    std::set <int> available_chunks;
    for (std::map<int, int>::const_iterator i = c2c.begin();
         i != c2c.end();
         ++i)
      available_chunks.insert(i->first);
    return _minimum_to_decode(want_to_read, available_chunks, minimum);
  }

  unsigned int get_chunk_count() const override {
    return DATA_CHUNKS + CODING_CHUNKS;
  }

  unsigned int get_data_chunk_count() const override {
    return DATA_CHUNKS;
  }

  // +1 guarantees the two data chunks always cover object_size bytes.
  unsigned int get_chunk_size(unsigned int object_size) const override {
    return ( object_size / DATA_CHUNKS ) + 1;
  }

  // Pad the input to full chunk width, compute the XOR parity chunk, and
  // hand back one bufferlist per requested chunk, each pointing into the
  // contiguous working buffer at its chunk boundary.
  int encode(const std::set<int> &want_to_encode,
             const bufferlist &in,
             std::map<int, bufferlist> *encoded) override {
    //
    // make sure all data chunks have the same length, allocating
    // padding if necessary.
    //
    unsigned int chunk_length = get_chunk_size(in.length());
    bufferlist out(in);
    unsigned int width = get_chunk_count() * get_chunk_size(in.length());
    bufferptr pad(width - in.length());
    // NOTE(review): only the first DATA_CHUNKS bytes of the padding are
    // zeroed here -- confirm whether the whole pad should be cleared.
    pad.zero(0, get_data_chunk_count());
    out.push_back(pad);
    //
    // compute the coding chunk with first chunk ^ second chunk
    //
    char *p = out.c_str();
    for (unsigned i = 0; i < chunk_length; i++)
      p[i + CODING_CHUNK * chunk_length] =
        p[i + FIRST_DATA_CHUNK * chunk_length] ^
        p[i + SECOND_DATA_CHUNK * chunk_length];
    //
    // populate the bufferlist with bufferptr pointing
    // to chunk boundaries
    //
    const bufferptr &ptr = out.front();
    for (auto j = want_to_encode.begin();
         j != want_to_encode.end();
         ++j) {
      bufferlist tmp;
      bufferptr chunk(ptr, (*j) * chunk_length, chunk_length);
      tmp.push_back(chunk);
      tmp.claim_append((*encoded)[*j]);
      (*encoded)[*j].swap(tmp);
    }
    return 0;
  }

  // chunk-level encode is not implemented by the example code
  int encode_chunks(const std::set<int> &want_to_encode,
                    std::map<int, bufferlist> *encoded) override {
    ceph_abort();
    return 0;
  }

  // Return the wanted chunks, rebuilding a single missing one by XORing
  // the two available chunks; fails with -ERANGE if more than one chunk
  // is missing.
  int _decode(const std::set<int> &want_to_read,
              const std::map<int, bufferlist> &chunks,
              std::map<int, bufferlist> *decoded) override {
    //
    // All chunks have the same size
    //
    unsigned chunk_length = (*chunks.begin()).second.length();
    for (std::set<int>::iterator i = want_to_read.begin();
         i != want_to_read.end();
         ++i) {
      if (chunks.find(*i) != chunks.end()) {
        //
        // If the chunk is available, just copy the bufferptr pointer
        // to the decoded argument.
        //
        (*decoded)[*i] = chunks.find(*i)->second;
      } else if(chunks.size() != 2) {
        //
        // If a chunk is missing and there are not enough chunks
        // to recover, abort.
        //
        return -ERANGE;
      } else {
        //
        // No matter what the missing chunk is, XOR of the other
        // two recovers it.
        //
        std::map<int, bufferlist>::const_iterator k = chunks.begin();
        const char *a = k->second.front().c_str();
        ++k;
        const char *b = k->second.front().c_str();
        bufferptr chunk(chunk_length);
        char *c = chunk.c_str();
        for (unsigned j = 0; j < chunk_length; j++) {
          c[j] = a[j] ^ b[j];
        }
        bufferlist tmp;
        tmp.append(chunk);
        tmp.claim_append((*decoded)[*i]);
        (*decoded)[*i].swap(tmp);
      }
    }
    return 0;
  }

  // chunk-level decode is not implemented by the example code
  int decode_chunks(const std::set<int> &want_to_read,
                    const std::map<int, bufferlist> &chunks,
                    std::map<int, bufferlist> *decoded) override {
    ceph_abort();
    return 0;
  }

  // empty vector means the identity chunk mapping
  const std::vector<int> &get_chunk_mapping() const override {
    static std::vector<int> mapping;
    return mapping;
  }
};
#endif
| 5,876 | 28.984694 | 78 | h |
null | ceph-main/src/test/erasure-code/ErasureCodePluginExample.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <unistd.h>
#include "ceph_ver.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "ErasureCodeExample.h"
using namespace std;
// Plugin wrapper exposing ErasureCodeExample through the erasure code
// plugin registry.
class ErasureCodePluginExample : public ErasureCodePlugin {
public:
  // Instantiate and initialize an ErasureCodeExample; always returns 0.
  int factory(const std::string &directory,
              ErasureCodeProfile &profile,
              ErasureCodeInterfaceRef *erasure_code,
              ostream *ss) override
  {
    *erasure_code = ErasureCodeInterfaceRef(new ErasureCodeExample());
    (*erasure_code)->init(profile, ss);
    return 0;
  }
};

// version handshake checked by the plugin loader
const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }

// plugin entry point: register this plugin under plugin_name
int __erasure_code_init(char *plugin_name, char *directory)
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  return instance.add(plugin_name, new ErasureCodePluginExample());
}
| 1,376 | 28.934783 | 78 | cc |
null | ceph-main/src/test/erasure-code/ErasureCodePluginFailToInitialize.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include "ceph_ver.h"
// Test plugin whose init entry point always fails with -ESRCH; used to
// exercise plugin-load error handling.
extern "C" const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }

extern "C" int __erasure_code_init(char *plugin_name, char *directory)
{
  return -ESRCH;
}
| 791 | 28.333333 | 77 | cc |
null | ceph-main/src/test/erasure-code/ErasureCodePluginFailToRegister.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "ceph_ver.h"
// Test plugin whose init returns success without registering anything with
// the plugin registry, simulating a plugin that "forgot" to add itself.
extern "C" const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }

extern "C" int __erasure_code_init(char *plugin_name, char *directory)
{
  return 0;
}
| 767 | 28.538462 | 77 | cc |
null | ceph-main/src/test/erasure-code/ErasureCodePluginHangs.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <unistd.h>
#include "ceph_ver.h"
// Test plugin whose init blocks for 10 seconds before succeeding; used to
// exercise plugin-load timeout/hang handling.
extern "C" const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }

extern "C" int __erasure_code_init(char *plugin_name, char *directory)
{
  sleep(10);
  return 0;
}
| 800 | 27.607143 | 77 | cc |
null | ceph-main/src/test/erasure-code/ErasureCodePluginMissingEntryPoint.cc | #include "ceph_ver.h"
// missing int __erasure_code_init(char *plugin_name, char *directory) {}
// (deliberate: exercises loading a plugin without the init entry point)
extern "C" const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }
| 177 | 24.428571 | 77 | cc |
null | ceph-main/src/test/erasure-code/ErasureCodePluginMissingVersion.cc | // missing __erasure_code_version
// Deliberately no __erasure_code_version(); this dummy global only keeps
// the translation unit non-empty.
int __this_is_an_used_variable_to_avoid_warnings;
| 85 | 20.5 | 49 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCode.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "erasure-code/ErasureCode.h"
#include "global/global_context.h"
#include "common/config.h"
#include "gtest/gtest.h"
using namespace std;
// Stub erasure code used to drive the ErasureCode::encode() buffer
// alignment tests below: it records the chunk map handed to
// encode_chunks() and reports a fixed chunk size; no real coding math is
// performed.
class ErasureCodeTest : public ErasureCode {
public:
  map<int, bufferlist> encode_chunks_encoded; // captured by encode_chunks()
  unsigned int k;          // data chunk count
  unsigned int m;          // coding chunk count
  unsigned int chunk_size; // fixed chunk size returned for any object size

  ErasureCodeTest(unsigned int _k, unsigned int _m, unsigned int _chunk_size) :
    k(_k), m(_m), chunk_size(_chunk_size) {}
  ~ErasureCodeTest() override {}

  int init(ErasureCodeProfile &profile, ostream *ss) override {
    return 0;
  }

  unsigned int get_chunk_count() const override { return k + m; }
  unsigned int get_data_chunk_count() const override { return k; }
  unsigned int get_chunk_size(unsigned int object_size) const override {
    return chunk_size;
  }
  // record the encoded chunk map so tests can inspect alignment/content
  int encode_chunks(const set<int> &want_to_encode,
                    map<int, bufferlist> *encoded) override {
    encode_chunks_encoded = *encoded;
    return 0;
  }
  // decoding is never exercised by these tests
  int decode_chunks(const set<int> &want_to_read,
                    const map<int, bufferlist> &chunks,
                    map<int, bufferlist> *decoded) override {
    ceph_abort_msg("ErasureCode::decode_chunks not implemented");
  }
  int create_rule(const string &name,
                  CrushWrapper &crush,
                  ostream *ss) const override { return 0; }
};
/*
* If we have a buffer of 5 bytes (X below) and a chunk size of 3
* bytes, for k=3, m=1 an additional 7 bytes (P and C below) will
* need to be allocated for padding (P) and the 3 coding bytes (C).
*
* X -+ +----------+ +-X
* X | | data 0 | | X
* X | +----------+ | X
* X | +----------+ | X -> +-X
* X -+ | data 1 | +-X -> | X
* P -+ +----------+ | P
* P | +----------+ | P
* P | | data 2 | | P
* P | +----------+ | P
* C | +----------+ | C
* C | | coding 3 | | C
* C -+ +----------+ +-C
*
* The data chunks 1 and 2 (data 1 and data 2 above) overflow the
* original buffer because it needs padding. A new buffer will
* be allocated to contain the chunk that overflows and all other
* chunks after it, including the coding chunk(s).
*
* The following test creates a situation where the buffer provided
* for encoding is not memory aligned. After encoding it asserts that:
*
* a) each chunk is SIMD aligned
* b) the data 1 chunk content is as expected which implies that its
* content has been copied over.
*
* It is possible for a flawed implementation to pass the test because the
* underlying allocation function enforces it.
*/
// Feed encode() a deliberately misaligned, not-chunk-sized buffer and
// verify that every resulting chunk is SIMD aligned and that the data
// chunk that overflowed the original buffer was copied intact (see the
// diagram above).
TEST(ErasureCodeTest, encode_memory_align)
{
  int k = 3;
  int m = 1;
  unsigned chunk_size = ErasureCode::SIMD_ALIGN * 7;
  ErasureCodeTest erasure_code(k, m, chunk_size);

  set<int> want_to_encode;
  for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++)
    want_to_encode.insert(i);

  string data(chunk_size + chunk_size / 2, 'X'); // uses 1.5 chunks out of 3
  // make sure nothing is memory aligned
  bufferptr ptr(buffer::create_aligned(data.length() + 1, ErasureCode::SIMD_ALIGN));
  ptr.copy_in(1, data.length(), data.c_str());
  ptr.set_offset(1); // shift by one byte to break SIMD alignment
  ptr.set_length(data.length());
  bufferlist in;
  in.append(ptr);

  map<int, bufferlist> encoded;

  ASSERT_FALSE(in.is_aligned(ErasureCode::SIMD_ALIGN));
  ASSERT_EQ(0, erasure_code.encode(want_to_encode, in, &encoded));
  for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++)
    ASSERT_TRUE(encoded[i].is_aligned(ErasureCode::SIMD_ALIGN));
  // chunk 1 straddled the end of the input: its first half must still be
  // the caller's data, the remainder is padding
  for (unsigned i = 0; i < chunk_size / 2; i++)
    ASSERT_EQ(encoded[1][i], 'X');
  ASSERT_NE(encoded[1][chunk_size / 2], 'X');
}
// Encode a non-contiguous bufferlist whose bufferptrs are SIMD aligned
// but not chunk-size aligned: the encoder must rebuffer so that every
// returned chunk is both SIMD aligned and a multiple of the chunk size.
TEST(ErasureCodeTest, encode_misaligned_non_contiguous)
{
  int k = 3;
  int m = 1;
  unsigned chunk_size = ErasureCode::SIMD_ALIGN * 7;
  ErasureCodeTest erasure_code(k, m, chunk_size);
  set<int> want_to_encode;
  for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++)
    want_to_encode.insert(i);
  string data(chunk_size, 'X');
  // create a non contiguous bufferlist where the first and the second
  // bufferptr are not size aligned although they are memory aligned
  bufferlist in;
  {
    bufferptr ptr(buffer::create_aligned(data.length() - 1, ErasureCode::SIMD_ALIGN));
    in.append(ptr);
  }
  {
    bufferptr ptr(buffer::create_aligned(data.length() + 1, ErasureCode::SIMD_ALIGN));
    in.append(ptr);
  }
  map<int, bufferlist> encoded;
  ASSERT_FALSE(in.is_contiguous());
  ASSERT_TRUE(in.front().is_aligned(ErasureCode::SIMD_ALIGN));
  ASSERT_FALSE(in.front().is_n_align_sized(chunk_size));
  ASSERT_TRUE(in.back().is_aligned(ErasureCode::SIMD_ALIGN));
  ASSERT_FALSE(in.back().is_n_align_sized(chunk_size));
  ASSERT_EQ(0, erasure_code.encode(want_to_encode, in, &encoded));
  for (unsigned int i = 0; i < erasure_code.get_chunk_count(); i++) {
    ASSERT_TRUE(encoded[i].is_aligned(ErasureCode::SIMD_ALIGN));
    ASSERT_TRUE(encoded[i].is_n_align_sized(chunk_size));
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ;
* make -j4 unittest_erasure_code &&
* valgrind --tool=memcheck --leak-check=full \
* ./unittest_erasure_code \
* --gtest_filter=*.* --log-to-stderr=true"
* End:
*/
| 5,710 | 32.594118 | 86 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeClay.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2018 Indian Institute of Science <office.ece@iisc.ac.in>
*
* Author: Myna Vajha <mynaramana@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "crush/CrushWrapper.h"
#include "include/stringify.h"
#include "erasure-code/clay/ErasureCodeClay.h"
#include "global/global_context.h"
#include "common/config_proxy.h"
#include "gtest/gtest.h"
using namespace std;
TEST(ErasureCodeClay, sanity_check_k)
{
  // clay requires at least two data chunks: init() must reject k=1 with
  // -EINVAL and the error text must mention the constraint.
  ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["k"] = "1";
  profile["m"] = "1";
  ostringstream ss;
  EXPECT_EQ(-EINVAL, clay.init(profile, &ss));
  EXPECT_NE(std::string::npos, ss.str().find("must be >= 2"));
}
// Round-trip a payload through a k=2, m=2 clay code and verify:
//  - the systematic property (data chunks mirror the input),
//  - decode with all chunks present,
//  - recovery of every possible pair of lost chunks,
//  - single-chunk repair using only the sub-chunks reported by
//    minimum_to_decode() (the reduced repair bandwidth of clay codes).
TEST(ErasureCodeClay, encode_decode)
{
  ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["k"] = "2";
  profile["m"] = "2";
  int r = clay.init(profile, &cerr);
  EXPECT_EQ(0, r);
#define LARGE_ENOUGH 2048
  bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
  in_ptr.zero();
  in_ptr.set_length(0);
  const char *payload =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  in_ptr.append(payload, strlen(payload));
  bufferlist in;
  in.push_back(in_ptr);
  int want_to_encode[] = { 0, 1, 2, 3 };
  map<int, bufferlist> encoded;
  EXPECT_EQ(0, clay.encode(set<int>(want_to_encode, want_to_encode+4),
			   in,
			   &encoded));
  EXPECT_EQ(4u, encoded.size());
  unsigned length = encoded[0].length();
  // clay is systematic: the data chunks are a copy of the input
  EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), length));
  EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str() + length,
		      in.length() - length));
  // all chunks are available
  {
    int want_to_decode[] = { 0, 1 };
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, clay._decode(set<int>(want_to_decode, want_to_decode+2),
			      encoded,
			      &decoded));
    EXPECT_EQ(2u, decoded.size());
    EXPECT_EQ(length, decoded[0].length());
    EXPECT_EQ(0, memcmp(decoded[0].c_str(), in.c_str(), length));
    EXPECT_EQ(0, memcmp(decoded[1].c_str(), in.c_str() + length,
			in.length() - length));
  }
  // check all two chunks missing possibilities and recover them
  for (int i=1; i<4; i++) {
    for (int j=0; j<i; j++) {
      map<int, bufferlist> degraded = encoded;
      degraded.erase(j);
      degraded.erase(i);
      EXPECT_EQ(2u, degraded.size());
      int want_to_decode[] = {j,i};
      map<int, bufferlist> decoded;
      EXPECT_EQ(0, clay._decode(set<int>(want_to_decode, want_to_decode+2),
				degraded,
				&decoded));
      EXPECT_EQ(4u, decoded.size());
      EXPECT_EQ(length, decoded[j].length());
      EXPECT_EQ(0, memcmp(decoded[j].c_str(), encoded[j].c_str(), length));
      EXPECT_EQ(0, memcmp(decoded[i].c_str(), encoded[i].c_str(), length));
    }
  }
  // check for all one chunk missing possibilities: repair must read only
  // length/q bytes from each of the helper chunks
  int sc_size = length/clay.sub_chunk_no;
  int avail[] = {0,1,2,3};
  for (int i=0; i < 4; i++) {
    set<int> want_to_read;
    want_to_read.insert(i);
    set<int> available(avail, avail+4);
    available.erase(i);
    map<int, vector<pair<int,int>>> minimum;
    EXPECT_EQ(0, clay.minimum_to_decode(want_to_read, available, &minimum));
    // gather only the sub-chunk ranges requested for each helper chunk
    map<int, bufferlist> helper;
    for (map<int, vector<pair<int,int>>>::iterator h=minimum.begin(); h!= minimum.end(); ++h) {
      for(vector<pair<int,int>>::iterator ind=h->second.begin(); ind != h->second.end(); ++ind) {
	bufferlist temp;
	temp.substr_of(encoded[h->first], ind->first*sc_size, ind->second*sc_size);
	helper[h->first].append(temp);
      }
    }
    for (map<int, vector<pair<int,int>>>::iterator h=minimum.begin(); h!= minimum.end(); ++h) {
      EXPECT_EQ(length/clay.q, helper[h->first].length());
    }
    EXPECT_EQ(3u, helper.size());
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, clay.decode(want_to_read, helper, &decoded, length));
    EXPECT_EQ(1u, decoded.size());
    EXPECT_EQ(0, memcmp(decoded[i].c_str(), encoded[i].c_str(), length));
  }
}
// k=3, m=3 with d=4 < k+m-1: single-chunk repair contacts only d
// helpers, leaving the remaining nodes "aloof". Verify encode, full
// decode, recovery of every triple erasure, and single-chunk repair
// using only the sub-chunks listed by minimum_to_decode().
TEST(ErasureCodeClay, encode_decode_aloof_nodes)
{
  ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["k"] = "3";
  profile["m"] = "3";
  profile["d"] = "4";
  int r = clay.init(profile, &cerr);
  EXPECT_EQ(0, r);
#define LARGE_ENOUGH 2048
  bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
  in_ptr.zero();
  in_ptr.set_length(0);
  const char *payload =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  in_ptr.append(payload, strlen(payload));
  bufferlist in;
  in.push_back(in_ptr);
  int want_to_encode[] = { 0, 1, 2, 3, 4, 5 };
  map<int, bufferlist> encoded;
  EXPECT_EQ(0, clay.encode(set<int>(want_to_encode, want_to_encode+6),
			   in,
			   &encoded));
  EXPECT_EQ(6u, encoded.size());
  unsigned length = encoded[0].length();
  // systematic property: the payload spans at most three data chunks
  if (in.length() < length) {
    EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), in.length()));
  } else if (in.length() <= 2*length ) {
    EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), in.length()));
    EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str()+length, in.length()-length));
  } else {
    EXPECT_EQ(1, in.length() <= 3*length);
    EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), in.length()));
    EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str()+length, length));
    EXPECT_EQ(0, memcmp(encoded[2].c_str(), in.c_str()+2*length, in.length()-2*length));
  }
  // all chunks are available
  {
    int want_to_decode[] = { 0, 1, 2 };
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, clay._decode(set<int>(want_to_decode, want_to_decode+3),
			      encoded,
			      &decoded));
    EXPECT_EQ(3u, decoded.size());
    EXPECT_EQ(length, decoded[0].length());
    EXPECT_EQ(0, memcmp(decoded[0].c_str(), encoded[0].c_str(), length));
    EXPECT_EQ(0, memcmp(decoded[1].c_str(), encoded[1].c_str(), length));
    EXPECT_EQ(0, memcmp(decoded[2].c_str(), encoded[2].c_str(), length));
  }
  // check all three chunks missing possibilities and recover them
  for (int i=2; i<6; i++) {
    for (int j=1; j<i; j++) {
      for(int k=0; k<j; k++) {
	map<int, bufferlist> degraded = encoded;
	degraded.erase(k);
	degraded.erase(j);
	degraded.erase(i);
	EXPECT_EQ(3u, degraded.size());
	int want_to_decode[] = {k,j,i};
	map<int, bufferlist> decoded;
	EXPECT_EQ(0, clay._decode(set<int>(want_to_decode, want_to_decode+3),
				  degraded,
				  &decoded));
	EXPECT_EQ(6u, decoded.size());
	EXPECT_EQ(length, decoded[j].length());
	EXPECT_EQ(0, memcmp(decoded[k].c_str(), encoded[k].c_str(), length));
	EXPECT_EQ(0, memcmp(decoded[j].c_str(), encoded[j].c_str(), length));
	EXPECT_EQ(0, memcmp(decoded[i].c_str(), encoded[i].c_str(), length));
      }
    }
  }
  // check for all one chunk missing possibilities: only d helpers are
  // contacted, each contributing length/q bytes
  int sc_size = length/clay.sub_chunk_no;
  int avail[] = {0,1,2,3,4,5};
  for (int i=0; i < 6; i++) {
    map<int, vector<pair<int,int>>> minimum;
    set<int> want_to_read;
    want_to_read.insert(i);
    set<int> available(avail, avail+6);
    available.erase(i);
    EXPECT_EQ(0, clay.minimum_to_decode(want_to_read, available, &minimum));
    map<int, bufferlist> helper;
    for (map<int, vector<pair<int,int>>>::iterator h=minimum.begin(); h!= minimum.end(); ++h) {
      for(vector<pair<int,int>>::iterator ind=h->second.begin(); ind != h->second.end(); ++ind) {
	bufferlist temp;
	temp.substr_of(encoded[h->first], ind->first*sc_size, ind->second*sc_size);
	helper[h->first].append(temp);
      }
    }
    for (map<int, vector<pair<int,int>>>::iterator h=minimum.begin(); h!= minimum.end(); ++h) {
      EXPECT_EQ(length/clay.q, helper[h->first].length());
    }
    EXPECT_EQ((unsigned)clay.d, helper.size());
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, clay.decode(want_to_read, helper, &decoded, length));
    EXPECT_EQ(1u, decoded.size());
    EXPECT_EQ(0, memcmp(decoded[i].c_str(), encoded[i].c_str(), length));
  }
}
// k=4, m=3, d=5 triggers the shortening construction (nu > 0): internal
// virtual chunks pad the code. Verify the derived parameters, encode,
// full decode, recovery of every triple erasure, and bandwidth-efficient
// single-chunk repair.
TEST(ErasureCodeClay, encode_decode_shortening_case)
{
  ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["k"] = "4";
  profile["m"] = "3";
  profile["d"] = "5";
  int r = clay.init(profile, &cerr);
  EXPECT_EQ(0, r);
  // derived clay parameters for this profile
  EXPECT_EQ(2, clay.q);
  EXPECT_EQ(4, clay.t);
  EXPECT_EQ(1, clay.nu);
  bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
  in_ptr.zero();
  in_ptr.set_length(0);
  const char *payload =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  in_ptr.append(payload, strlen(payload));
  bufferlist in;
  in.push_back(in_ptr);
  int want_to_encode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> encoded;
  EXPECT_EQ(0, clay.encode(set<int>(want_to_encode, want_to_encode+7),
			   in,
			   &encoded));
  EXPECT_EQ(7u, encoded.size());
  unsigned length = encoded[0].length();
  // systematic property: the payload spans at most four data chunks
  if (in.length() < length) {
    EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), in.length()));
  } else if (in.length() <= 2*length) {
    EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), in.length()));
    EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str()+length, in.length()-length));
  } else if (in.length() <= 3*length) {
    EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), in.length()));
    EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str()+length, length));
    EXPECT_EQ(0, memcmp(encoded[2].c_str(), in.c_str()+2*length, in.length()-2*length));
  } else {
    EXPECT_EQ(1, in.length() <= 4*length);
    EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), in.length()));
    EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str()+length, length));
    EXPECT_EQ(0, memcmp(encoded[2].c_str(), in.c_str()+2*length, length));
    EXPECT_EQ(0, memcmp(encoded[3].c_str(), in.c_str()+3*length, in.length()-3*length));
  }
  // all chunks are available
  {
    int want_to_decode[] = { 0, 1, 2, 3 };
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, clay._decode(set<int>(want_to_decode, want_to_decode+4),
			      encoded,
			      &decoded));
    EXPECT_EQ(4u, decoded.size());
    EXPECT_EQ(length, decoded[0].length());
    EXPECT_EQ(0, memcmp(decoded[0].c_str(), encoded[0].c_str(), length));
    EXPECT_EQ(0, memcmp(decoded[1].c_str(), encoded[1].c_str(), length));
    EXPECT_EQ(0, memcmp(decoded[2].c_str(), encoded[2].c_str(), length));
    EXPECT_EQ(0, memcmp(decoded[3].c_str(), encoded[3].c_str(), length));
  }
  // check all three chunks missing possibilities and recover them
  for (int i=2; i<7; i++) {
    for (int j=1; j<i; j++) {
      for(int k=0; k<j; k++) {
	map<int, bufferlist> degraded = encoded;
	degraded.erase(k);
	degraded.erase(j);
	degraded.erase(i);
	EXPECT_EQ(4u, degraded.size());
	int want_to_decode[] = {k,j,i};
	map<int, bufferlist> decoded;
	EXPECT_EQ(0, clay._decode(set<int>(want_to_decode, want_to_decode+3),
				  degraded,
				  &decoded));
	EXPECT_EQ(7u, decoded.size());
	EXPECT_EQ(length, decoded[j].length());
	EXPECT_EQ(0, memcmp(decoded[k].c_str(), encoded[k].c_str(), length));
	EXPECT_EQ(0, memcmp(decoded[j].c_str(), encoded[j].c_str(), length));
	EXPECT_EQ(0, memcmp(decoded[i].c_str(), encoded[i].c_str(), length));
      }
    }
  }
  // check for all one chunk missing possibilities: only d helpers are
  // contacted, each contributing length/q bytes
  int sc_size = length/clay.sub_chunk_no;
  int avail[] = {0,1,2,3,4,5,6};
  for (int i=0; i < 7; i++) {
    map<int, vector<pair<int,int>>> minimum;
    set<int> want_to_read;
    want_to_read.insert(i);
    set<int> available(avail, avail+7);
    available.erase(i);
    EXPECT_EQ(0, clay.minimum_to_decode(want_to_read, available, &minimum));
    map<int, bufferlist> helper;
    for (map<int, vector<pair<int,int>>>::iterator h=minimum.begin(); h!= minimum.end(); ++h) {
      for(vector<pair<int,int>>::iterator ind=h->second.begin(); ind != h->second.end(); ++ind) {
	bufferlist temp;
	temp.substr_of(encoded[h->first], ind->first*sc_size, ind->second*sc_size);
	helper[h->first].append(temp);
      }
    }
    for (map<int, vector<pair<int,int>>>::iterator h=minimum.begin(); h!= minimum.end(); ++h) {
      EXPECT_EQ(length/clay.q, helper[h->first].length());
    }
    EXPECT_EQ(static_cast<size_t>(clay.d), helper.size());
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, clay.decode(want_to_read, helper, &decoded, length));
    EXPECT_EQ(1u, decoded.size());
    EXPECT_EQ(length, decoded[i].length());
    EXPECT_EQ(0, memcmp(decoded[i].c_str(), encoded[i].c_str(), length));
  }
}
TEST(ErasureCodeClay, minimum_to_decode)
{
  // Exercise _minimum_to_decode() on a k=2, m=2 clay code.
  ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["k"] = "2";
  profile["m"] = "2";
  EXPECT_EQ(0, clay.init(profile, &cerr));
  //
  // If trying to read nothing, the minimum is empty.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> min_chunks;
    EXPECT_EQ(0, clay._minimum_to_decode(want_to_read,
					 available_chunks,
					 &min_chunks));
    EXPECT_TRUE(min_chunks.empty());
  }
  //
  // There is no way to read a chunk if none are available.
  //
  {
    set<int> want_to_read{0};
    set<int> available_chunks;
    set<int> min_chunks;
    EXPECT_EQ(-EIO, clay._minimum_to_decode(want_to_read,
					    available_chunks,
					    &min_chunks));
  }
  //
  // Reading a subset of the available chunks is always possible.
  //
  {
    set<int> want_to_read{0};
    set<int> available_chunks{0};
    set<int> min_chunks;
    EXPECT_EQ(0, clay._minimum_to_decode(want_to_read,
					 available_chunks,
					 &min_chunks));
    EXPECT_EQ(want_to_read, min_chunks);
  }
  //
  // There is no way to read a missing chunk if there is less than k
  // chunks available.
  //
  {
    set<int> want_to_read{0, 1};
    set<int> available_chunks{0};
    set<int> min_chunks;
    EXPECT_EQ(-EIO, clay._minimum_to_decode(want_to_read,
					    available_chunks,
					    &min_chunks));
  }
  //
  // When chunks are not available, the minimum can be made of any
  // chunks. For instance, to read 1 and 3 below the minimum could be
  // 2 and 3 which may seem better because it contains one of the
  // chunks to be read. But it won't be more efficient than retrieving
  // 0 and 2 instead because, in both cases, the decode function will
  // need to run the same recovery operation and use the same amount
  // of CPU and memory.
  //
  {
    set<int> want_to_read{1, 3};
    set<int> available_chunks{0, 2, 3};
    set<int> min_chunks;
    EXPECT_EQ(0, clay._minimum_to_decode(want_to_read,
					 available_chunks,
					 &min_chunks));
    EXPECT_EQ(2u, min_chunks.size());
    EXPECT_EQ(0u, min_chunks.count(3));
  }
}
TEST(ErasureCodeClay, encode)
{
  // Padding behaviour of clay encode on a k=2, m=2 code.
  ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["k"] = "2";
  profile["m"] = "2";
  EXPECT_EQ(0, clay.init(profile, &cerr));
  const unsigned aligned_object_size = clay.get_chunk_size(1) * 2 * 2;
  {
    //
    // When the input bufferlist needs to be padded because
    // it is not properly aligned, it is padded with zeros.
    //
    const int trail_length = 1;
    bufferlist in;
    in.append(string(aligned_object_size + trail_length, 'X'));
    set<int> want_to_encode{0, 1, 2, 3};
    map<int,bufferlist> encoded;
    EXPECT_EQ(0, clay.encode(want_to_encode, in, &encoded));
    EXPECT_EQ(4u, encoded.size());
    const char *second_chunk = encoded[1].c_str();
    const int chunk_length = encoded[1].length();
    EXPECT_EQ('X', second_chunk[0]);
    EXPECT_EQ('\0', second_chunk[chunk_length - trail_length]);
  }
  {
    //
    // When only the first chunk is required, the encoded map only
    // contains the first chunk. Although the clay encode
    // internally allocated a buffer because of padding requirements
    // and also computes the coding chunks, they are released before
    // the return of the method, as shown when running the tests thru
    // valgrind (there is no leak).
    //
    const int trail_length = 1;
    bufferlist in;
    in.append(string(aligned_object_size + trail_length, 'X'));
    set<int> want_to_encode{0};
    map<int,bufferlist> encoded;
    EXPECT_EQ(0, clay.encode(want_to_encode, in, &encoded));
    EXPECT_EQ(1u, encoded.size());
  }
}
// Build a small crush map (1 root, 4 hosts, 5 osds per host) and verify
// rule creation: nominal success, duplicate name rejection, and error
// reporting for a bad root or a bad failure domain.
TEST(ErasureCodeClay, create_rule)
{
  std::unique_ptr<CrushWrapper> c = std::make_unique<CrushWrapper>();
  c->create();
  int root_type = 2;
  c->set_type_name(root_type, "root");
  int host_type = 1;
  c->set_type_name(host_type, "host");
  int osd_type = 0;
  c->set_type_name(osd_type, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		root_type, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  map<string,string> loc;
  loc["root"] = "default";
  int num_host = 4;
  int num_osd = 5;
  int osd = 0;
  for (int h=0; h<num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o=0; o<num_osd; ++o, ++osd) {
      c->insert_item(g_ceph_context, osd, 1.0, string("osd.") + stringify(osd), loc);
    }
  }
  c->finalize();
  // nominal case: the rule is created once, a second creation with the
  // same name fails with -EEXIST, and the rule maps chunk_count osds
  {
    stringstream ss;
    ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    EXPECT_EQ(0, clay.init(profile, &cerr));
    int ruleid = clay.create_rule("myrule", *c, &ss);
    EXPECT_EQ(0, ruleid);
    EXPECT_EQ(-EEXIST, clay.create_rule("myrule", *c, &ss));
    //
    // the minimum that is expected from the created rule is to
    // successfully map get_chunk_count() devices from the crushmap,
    // at least once.
    //
    vector<__u32> weight(c->get_max_devices(), 0x10000);
    vector<int> out;
    int x = 0;
    c->do_rule(ruleid, x, out, clay.get_chunk_count(), weight, 0);
    ASSERT_EQ(out.size(), clay.get_chunk_count());
    for (unsigned i=0; i<out.size(); ++i)
      ASSERT_NE(CRUSH_ITEM_NONE, out[i]);
  }
  // a crush-root that does not exist must be reported
  {
    stringstream ss;
    ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["crush-root"] = "BAD";
    EXPECT_EQ(0, clay.init(profile, &cerr));
    EXPECT_EQ(-ENOENT, clay.create_rule("otherrule", *c, &ss));
    EXPECT_EQ("root item BAD does not exist", ss.str());
  }
  // an unknown failure domain type must be reported
  {
    stringstream ss;
    ErasureCodeClay clay(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["crush-failure-domain"] = "WORSE";
    EXPECT_EQ(0, clay.init(profile, &cerr));
    EXPECT_EQ(-EINVAL, clay.create_rule("otherrule", *c, &ss));
    EXPECT_EQ("unknown type WORSE", ss.str());
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ;
* make -j4 unittest_erasure_code_clay &&
* valgrind --tool=memcheck \
* ./unittest_erasure_code_clay \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 20,469 | 33.288107 | 97 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeExample.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <stdlib.h>
#include "include/stringify.h"
#include "ErasureCodeExample.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
using namespace std;
TEST(ErasureCodeExample, chunk_size)
{
  // The example code exposes 3 chunks and splits a 20 byte object into
  // 11 byte chunks (presumably k=2 data + 1 coding chunk, with the odd
  // byte rounded up -- the constants live in ErasureCodeExample.h).
  ErasureCodeExample example;
  EXPECT_EQ(3u, example.get_chunk_count());
  EXPECT_EQ(11u, example.get_chunk_size(20));
}
TEST(ErasureCodeExample, minimum_to_decode)
{
  // _minimum_to_decode() behaviour while chunks become available one by
  // one; chunk 1 is the chunk being read throughout.
  ErasureCodeExample example;
  set<int> available_chunks;
  set<int> want_to_read{1};
  // nothing is available: the read cannot be satisfied
  {
    set<int> min_chunks;
    EXPECT_EQ(-EIO, example._minimum_to_decode(want_to_read,
					       available_chunks,
					       &min_chunks));
  }
  // chunk 1 is missing but the two other chunks suffice to rebuild it
  available_chunks.insert(0);
  available_chunks.insert(2);
  {
    set<int> min_chunks;
    EXPECT_EQ(0, example._minimum_to_decode(want_to_read,
					    available_chunks,
					    &min_chunks));
    EXPECT_EQ(available_chunks, min_chunks);
    EXPECT_EQ(2u, min_chunks.size());
    EXPECT_EQ(1u, min_chunks.count(0));
    EXPECT_EQ(1u, min_chunks.count(2));
  }
  // chunk 1 itself is available: reading it alone is enough
  {
    set<int> min_chunks;
    available_chunks.insert(1);
    EXPECT_EQ(0, example._minimum_to_decode(want_to_read,
					    available_chunks,
					    &min_chunks));
    EXPECT_EQ(1u, min_chunks.size());
    EXPECT_EQ(1u, min_chunks.count(1));
  }
}
TEST(ErasureCodeExample, minimum_to_decode_with_cost)
{
  // Same scenarios as minimum_to_decode, but with per-chunk costs: a
  // chunk that became expensive is avoided when recovery from cheaper
  // chunks costs the same or less.
  ErasureCodeExample example;
  map<int,int> available;
  set<int> want_to_read{1};
  // nothing is available: the read cannot be satisfied
  {
    set<int> min_chunks;
    EXPECT_EQ(-EIO, example.minimum_to_decode_with_cost(want_to_read,
							available,
							&min_chunks));
  }
  // chunk 1 is missing: both remaining chunks are required
  available[0] = 1;
  available[2] = 1;
  {
    set<int> min_chunks;
    EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read,
						     available,
						     &min_chunks));
    EXPECT_EQ(2u, min_chunks.size());
    EXPECT_EQ(1u, min_chunks.count(0));
    EXPECT_EQ(1u, min_chunks.count(2));
  }
  // chunk 1 is available at the same cost: read it directly
  {
    set<int> min_chunks;
    available[1] = 1;
    EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read,
						     available,
						     &min_chunks));
    EXPECT_EQ(1u, min_chunks.size());
    EXPECT_EQ(1u, min_chunks.count(1));
  }
  // chunk 1 doubled in cost: rebuilding from 0 and 2 is preferred
  {
    set<int> min_chunks;
    available[1] = 2;
    EXPECT_EQ(0, example.minimum_to_decode_with_cost(want_to_read,
						     available,
						     &min_chunks));
    EXPECT_EQ(2u, min_chunks.size());
    EXPECT_EQ(1u, min_chunks.count(0));
    EXPECT_EQ(1u, min_chunks.count(2));
  }
}
// Encode "ABCDE" with the example k=2+xor code and verify the exact
// layout of the three chunks, then decode with all chunks present and
// with the first data chunk missing.
TEST(ErasureCodeExample, encode_decode)
{
  ErasureCodeExample example;

  bufferlist in;
  in.append("ABCDE");
  set<int> want_to_encode;
  for(unsigned int i = 0; i < example.get_chunk_count(); i++)
    want_to_encode.insert(i);
  map<int, bufferlist> encoded;
  EXPECT_EQ(0, example.encode(want_to_encode, in, &encoded));
  EXPECT_EQ(example.get_chunk_count(), encoded.size());
  EXPECT_EQ(example.get_chunk_size(in.length()), encoded[0].length());
  // chunk 0 = "ABC", chunk 1 = "DE" + zero pad, chunk 2 = xor of the two
  EXPECT_EQ('A', encoded[0][0]);
  EXPECT_EQ('B', encoded[0][1]);
  EXPECT_EQ('C', encoded[0][2]);
  EXPECT_EQ('D', encoded[1][0]);
  EXPECT_EQ('E', encoded[1][1]);
  EXPECT_EQ('A'^'D', encoded[2][0]);
  EXPECT_EQ('B'^'E', encoded[2][1]);
  EXPECT_EQ('C'^0, encoded[2][2]);

  // all chunks are available
  {
    int want_to_decode[] = { 0, 1 };
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, example._decode(set<int>(want_to_decode, want_to_decode+2),
				 encoded,
				 &decoded));
    EXPECT_EQ(2u, decoded.size());
    EXPECT_EQ(3u, decoded[0].length());
    EXPECT_EQ('A', decoded[0][0]);
    EXPECT_EQ('B', decoded[0][1]);
    EXPECT_EQ('C', decoded[0][2]);
    EXPECT_EQ('D', decoded[1][0]);
    EXPECT_EQ('E', decoded[1][1]);
  }

  // one chunk is missing 
  {
    map<int, bufferlist> degraded = encoded;
    degraded.erase(0);
    EXPECT_EQ(2u, degraded.size());
    int want_to_decode[] = { 0, 1 };
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, example._decode(set<int>(want_to_decode, want_to_decode+2),
				 degraded,
				 &decoded));
    EXPECT_EQ(2u, decoded.size());
    EXPECT_EQ(3u, decoded[0].length());
    EXPECT_EQ('A', decoded[0][0]);
    EXPECT_EQ('B', decoded[0][1]);
    EXPECT_EQ('C', decoded[0][2]);
    EXPECT_EQ('D', decoded[1][0]);
    EXPECT_EQ('E', decoded[1][1]);
  }
}
// decode_concat() round trip: a full set of chunks reproduces the
// input, and a single chunk (less than k) cannot be recovered.
TEST(ErasureCodeExample, decode)
{
  ErasureCodeExample example;

#define LARGE_ENOUGH 2048
  bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
  in_ptr.zero();
  in_ptr.set_length(0);
  const char *payload =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  in_ptr.append(payload, strlen(payload));
  bufferlist in;
  in.push_back(in_ptr);
  int want_to_encode[] = { 0, 1, 2 };
  map<int, bufferlist> encoded;
  EXPECT_EQ(0, example.encode(set<int>(want_to_encode, want_to_encode+3),
			      in,
			      &encoded));
  EXPECT_EQ(3u, encoded.size());

  // successful decode
  bufferlist out;
  EXPECT_EQ(0, example.decode_concat(encoded, &out));
  bufferlist usable;
  usable.substr_of(out, 0, in.length());
  EXPECT_TRUE(usable == in);

  // cannot recover
  map<int, bufferlist> degraded;  
  degraded[0] = encoded[0];
  EXPECT_EQ(-ERANGE, example.decode_concat(degraded, &out));
}
// Build a toy crush map (1 root, 2 hosts, 5 osds per host) and verify
// that the example plugin's create_rule() succeeds against it.
TEST(ErasureCodeExample, create_rule)
{
  std::unique_ptr<CrushWrapper> c = std::make_unique<CrushWrapper>();
  c->create();
  c->set_type_name(2, "root");
  c->set_type_name(1, "host");
  c->set_type_name(0, "osd");

  int rootno;
  // NOTE(review): the bucket is created with type 5, which was never
  // named, although "root" was declared as type 2 above -- presumably
  // harmless because the rule looks the bucket up by its item name
  // "default"; confirm before reusing this setup elsewhere.
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		5, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");

  map<string,string> loc;
  loc["root"] = "default";

  int num_host = 2;
  int num_osd = 5;
  int osd = 0;
  for (int h=0; h<num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o=0; o<num_osd; ++o, ++osd) {
      c->insert_item(g_ceph_context, osd, 1.0, string("osd.") + stringify(osd), loc);
    }
  }

  stringstream ss;
  ErasureCodeExample example;
  EXPECT_EQ(0, example.create_rule("myrule", *c, &ss));
}
/*
* Local Variables:
* compile-command: "cd ../.. ;
* make -j4 &&
* make unittest_erasure_code_example &&
* valgrind --leak-check=full --tool=memcheck \
* ./unittest_erasure_code_example --gtest_filter=*.* \
* --log-to-stderr=true --debug-osd=20
* "
* End:
*/
| 7,018 | 27.188755 | 85 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeIsa.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Andreas-Joachim Peters <Andreas.Joachim.Peters@cern.ch>
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "crush/CrushWrapper.h"
#include "include/stringify.h"
#include "erasure-code/isa/ErasureCodeIsa.h"
#include "erasure-code/isa/xor_op.h"
#include "global/global_context.h"
#include "common/config.h"
#include "gtest/gtest.h"
using namespace std;
// Table cache shared by every ErasureCodeIsaDefault instance built below.
ErasureCodeIsaTableCache tcache;
// Fixture providing the helpers shared by the ISA erasure code tests.
class IsaErasureCodeTest : public ::testing::Test {
public:
  // Assert that the data chunks in encoded mirror the input buffer.
  void compare_chunks(bufferlist &in, map<int, bufferlist> &encoded);
  // Encode an object of the given size with k=2/m=2 and exercise decode
  // for every relevant erasure combination.
  void encode_decode(unsigned object_size);
};
void IsaErasureCodeTest::compare_chunks(bufferlist &in, map<int, bufferlist> &encoded)
{
  // The code is systematic: chunk i must reproduce bytes starting at
  // i*chunk_size of the input, up to the end of the object (the last
  // data chunk covering the object may be partial).
  const unsigned object_size = in.length();
  const unsigned chunk_size = encoded[0].length();
  for (unsigned i = 0; i < encoded.size(); ++i) {
    const unsigned offset = i * chunk_size;
    if (offset >= object_size)
      break;
    const int chunk_length =
      object_size - offset < chunk_size ? object_size - offset : chunk_size;
    EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + offset, chunk_length));
  }
}
// Encode an object of the given size with a k=2/m=2 ISA code and verify
// decode for: all chunks present, each kind of single erasure (data
// chunk, xor coding chunk 2, non-xor coding chunk 3), one data plus one
// coding chunk lost, and both data chunks lost.
void IsaErasureCodeTest::encode_decode(unsigned object_size)
{
  ErasureCodeIsaDefault Isa(tcache);
  ErasureCodeProfile profile;
  profile["k"] = "2";
  profile["m"] = "2";
  Isa.init(profile, &cerr);

  string payload(object_size, 'X');
  bufferlist in;
  // may be multiple bufferptr if object_size is larger than CEPH_PAGE_SIZE
  in.append(payload.c_str(), payload.length());

  int want_to_encode[] = {0, 1, 2, 3};
  map<int, bufferlist> encoded;
  EXPECT_EQ(0, Isa.encode(set<int>(want_to_encode, want_to_encode + 4),
                          in,
                          &encoded));
  EXPECT_EQ(4u, encoded.size());
  unsigned chunk_size = encoded[0].length();
  EXPECT_EQ(chunk_size, Isa.get_chunk_size(object_size));
  compare_chunks(in, encoded);

  // all chunks are available
  {
    int want_to_decode[] = {0, 1};
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 2),
                             encoded,
                             &decoded));
    EXPECT_EQ(2u, decoded.size());
    EXPECT_EQ(chunk_size, decoded[0].length());
    compare_chunks(in, decoded);
  }

  // one data chunk is missing
  {
    map<int, bufferlist> degraded = encoded;

    string enc1(encoded[1].c_str(), chunk_size);

    degraded.erase(1);
    EXPECT_EQ(3u, degraded.size());
    int want_to_decode[] = {1};
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 1),
                             degraded,
                             &decoded));
    // always decode all, regardless of want_to_decode
    EXPECT_EQ(4u, decoded.size());
    EXPECT_EQ(chunk_size, decoded[1].length());
    EXPECT_EQ(0, memcmp(decoded[1].c_str(), enc1.c_str(), chunk_size));
  }

  // non-xor coding chunk is missing
  {
    map<int, bufferlist> degraded = encoded;

    string enc3(encoded[3].c_str(), chunk_size);

    degraded.erase(3);
    EXPECT_EQ(3u, degraded.size());
    int want_to_decode[] = {3};
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 1),
                             degraded,
                             &decoded));
    // always decode all, regardless of want_to_decode
    EXPECT_EQ(4u, decoded.size());
    EXPECT_EQ(chunk_size, decoded[3].length());
    EXPECT_EQ(0, memcmp(decoded[3].c_str(), enc3.c_str(), chunk_size));
  }

  // xor coding chunk is missing
  {
    map<int, bufferlist> degraded = encoded;

    string enc2(encoded[2].c_str(), chunk_size);

    degraded.erase(2);
    EXPECT_EQ(3u, degraded.size());
    int want_to_decode[] = {2};
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 1),
                             degraded,
                             &decoded));
    // always decode all, regardless of want_to_decode
    EXPECT_EQ(4u, decoded.size());
    EXPECT_EQ(chunk_size, decoded[2].length());
    EXPECT_EQ(0, memcmp(decoded[2].c_str(), enc2.c_str(), chunk_size));
  }

  // one data and one coding chunk is missing
  {
    map<int, bufferlist> degraded = encoded;

    string enc3(encoded[3].c_str(), chunk_size);

    degraded.erase(1);
    degraded.erase(3);
    EXPECT_EQ(2u, degraded.size());
    int want_to_decode[] = {1, 3};
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 2),
                             degraded,
                             &decoded));
    // always decode all, regardless of want_to_decode
    EXPECT_EQ(4u, decoded.size());
    EXPECT_EQ(chunk_size, decoded[1].length());
    EXPECT_EQ(0, memcmp(decoded[3].c_str(), enc3.c_str(), chunk_size));
  }

  // two data chunks are missing
  {
    map<int, bufferlist> degraded = encoded;
    degraded.erase(0);
    degraded.erase(1);
    EXPECT_EQ(2u, degraded.size());
    int want_to_decode[] = {0, 1};
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, Isa._decode(set<int>(want_to_decode, want_to_decode + 2),
                             degraded,
                             &decoded));
    // always decode all, regardless of want_to_decode
    EXPECT_EQ(4u, decoded.size());
    EXPECT_EQ(chunk_size, decoded[0].length());
    compare_chunks(in, decoded);
  }
}
TEST_F(IsaErasureCodeTest, encode_decode)
{
  // Round-trip objects of several sizes: below, at and just above the
  // ISA address alignment boundary, plus a few larger power-of-two
  // sizes and one straddling 4096.
  const unsigned int object_sizes[] = {
    1,
    EC_ISA_ADDRESS_ALIGNMENT,
    EC_ISA_ADDRESS_ALIGNMENT + 1,
    2048,
    4096,
    4096 + 1,
  };
  for (const unsigned int object_size : object_sizes)
    encode_decode(object_size);
}
// Verify _minimum_to_decode() for a (2,2) code: it must return the
// smallest set of chunks to fetch in order to satisfy a read, and
// fail with -EIO when fewer than k suitable chunks are available.
TEST_F(IsaErasureCodeTest, minimum_to_decode)
{
  ErasureCodeIsaDefault Isa(tcache);
  ErasureCodeProfile profile;
  profile["k"] = "2";
  profile["m"] = "2";
  Isa.init(profile, &cerr);
  //
  // If trying to read nothing, the minimum is empty.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read,
                                        available_chunks,
                                        &minimum));
    EXPECT_TRUE(minimum.empty());
  }
  //
  // There is no way to read a chunk if none are available.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    want_to_read.insert(0);
    EXPECT_EQ(-EIO, Isa._minimum_to_decode(want_to_read,
                                           available_chunks,
                                           &minimum));
  }
  //
  // Reading a subset of the available chunks is always possible.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    want_to_read.insert(0);
    available_chunks.insert(0);
    EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read,
                                        available_chunks,
                                        &minimum));
    EXPECT_EQ(want_to_read, minimum);
  }
  //
  // There is no way to read a missing chunk if there is less than k
  // chunks available.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    want_to_read.insert(0);
    want_to_read.insert(1);
    available_chunks.insert(0);
    EXPECT_EQ(-EIO, Isa._minimum_to_decode(want_to_read,
                                           available_chunks,
                                           &minimum));
  }
  //
  // When chunks are not available, the minimum can be made of any
  // chunks. For instance, to read 1 and 3 below the minimum could be
  // 2 and 3 which may seem better because it contains one of the
  // chunks to be read. But it won't be more efficient than retrieving
  // 0 and 2 instead because, in both cases, the decode function will
  // need to run the same recovery operation and use the same amount
  // of CPU and memory.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    want_to_read.insert(1);
    want_to_read.insert(3);
    available_chunks.insert(0);
    available_chunks.insert(2);
    available_chunks.insert(3);
    EXPECT_EQ(0, Isa._minimum_to_decode(want_to_read,
                                        available_chunks,
                                        &minimum));
    // exactly k = 2 chunks are needed, and chunk 3 is not among them
    EXPECT_EQ(2u, minimum.size());
    EXPECT_EQ(0u, minimum.count(3));
  }
}
// get_chunk_size() must round the per-chunk size up to the ISA address
// alignment, for layouts where the object size divides evenly (k=2)
// and where it does not (k=3).
TEST_F(IsaErasureCodeTest, chunk_size)
{
  {
    // (2,1): anything up to k * alignment fits in one aligned chunk
    ErasureCodeIsaDefault Isa(tcache);
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "1";
    Isa.init(profile, &cerr);
    const int k = 2;
    ASSERT_EQ(EC_ISA_ADDRESS_ALIGNMENT, Isa.get_chunk_size(1));
    ASSERT_EQ(EC_ISA_ADDRESS_ALIGNMENT, Isa.get_chunk_size(EC_ISA_ADDRESS_ALIGNMENT * k - 1));
    ASSERT_EQ(EC_ISA_ADDRESS_ALIGNMENT * 2, Isa.get_chunk_size(EC_ISA_ADDRESS_ALIGNMENT * k + 1));
  }
  {
    // (3,1): same rounding behavior with a k that leaves a remainder
    ErasureCodeIsaDefault Isa(tcache);
    ErasureCodeProfile profile;
    profile["k"] = "3";
    profile["m"] = "1";
    Isa.init(profile, &cerr);
    const int k = 3;
    ASSERT_EQ(EC_ISA_ADDRESS_ALIGNMENT, Isa.get_chunk_size(1));
    ASSERT_EQ(EC_ISA_ADDRESS_ALIGNMENT, Isa.get_chunk_size(EC_ISA_ADDRESS_ALIGNMENT * k - 1));
    ASSERT_EQ(EC_ISA_ADDRESS_ALIGNMENT * 2, Isa.get_chunk_size(EC_ISA_ADDRESS_ALIGNMENT * k + 1));
    // pick an object size that is aligned to neither k nor the alignment
    unsigned object_size = EC_ISA_ADDRESS_ALIGNMENT * k * 1024 + 1;
    ASSERT_NE(0u, object_size % k);
    ASSERT_NE(0u, object_size % EC_ISA_ADDRESS_ALIGNMENT);
    unsigned chunk_size = Isa.get_chunk_size(object_size);
    ASSERT_EQ(0u, chunk_size % EC_ISA_ADDRESS_ALIGNMENT);
    // the padding (chunk_size * k - object_size) is less than one chunk
    ASSERT_GT(chunk_size, (chunk_size * k) - object_size);
  }
}
// encode() must zero-pad unaligned input and honor want_to_encode by
// only returning the requested chunks.
TEST_F(IsaErasureCodeTest, encode)
{
  ErasureCodeIsaDefault Isa(tcache);
  ErasureCodeProfile profile;
  profile["k"] = "2";
  profile["m"] = "2";
  Isa.init(profile, &cerr);
  unsigned aligned_object_size = Isa.get_alignment() * 2;
  {
    //
    // When the input bufferlist needs to be padded because
    // it is not properly aligned, it is padded with zeros.
    //
    bufferlist in;
    map<int,bufferlist> encoded;
    int want_to_encode[] = { 0, 1, 2, 3 };
    int trail_length = 1;
    in.append(string(aligned_object_size + trail_length, 'X'));
    EXPECT_EQ(0, Isa.encode(set<int>(want_to_encode, want_to_encode+4),
                            in,
                            &encoded));
    EXPECT_EQ(4u, encoded.size());
    // the second data chunk holds the trailing byte followed by the
    // zero padding added to reach the aligned chunk size
    char *last_chunk = encoded[1].c_str();
    int length =encoded[1].length();
    EXPECT_EQ('X', last_chunk[0]);
    EXPECT_EQ('\0', last_chunk[length - trail_length]);
  }
  {
    //
    // When only the first chunk is required, the encoded map only
    // contains the first chunk. Although the Isa encode
    // internally allocated a buffer because of padding requirements
    // and also computes the coding chunks, they are released before
    // the return of the method, as shown when running the tests thru
    // valgrind (there is no leak).
    //
    bufferlist in;
    map<int,bufferlist> encoded;
    set<int> want_to_encode;
    want_to_encode.insert(0);
    int trail_length = 1;
    in.append(string(aligned_object_size + trail_length, 'X'));
    EXPECT_EQ(0, Isa.encode(want_to_encode, in, &encoded));
    EXPECT_EQ(1u, encoded.size());
  }
}
// A profile with k < 2 must be rejected by init() with -EINVAL and an
// explanatory error message.
TEST_F(IsaErasureCodeTest, sanity_check_k)
{
  ErasureCodeIsaDefault Isa(tcache);
  ErasureCodeProfile profile;
  profile["k"] = "1";
  profile["m"] = "1";
  ostringstream errors;
  EXPECT_EQ(-EINVAL, Isa.init(profile, &errors));
  EXPECT_NE(std::string::npos, errors.str().find("must be >= 2"));
}
bool
DecodeAndVerify(ErasureCodeIsaDefault& Isa, map<int, bufferlist> °raded, set<int> want_to_decode, buffer::ptr* enc, int length)
{
map<int, bufferlist> decoded;
bool ok;
// decode as requested
ok = Isa._decode(want_to_decode,
degraded,
&decoded);
for (int i = 0; i < (int) decoded.size(); i++) {
// compare all the buffers with their original
ok |= memcmp(decoded[i].c_str(), enc[i].c_str(), length);
}
return ok;
}
// Exhaustively exercise every loss pattern of up to m = 4 chunks for a
// (12,4) Vandermonde code: erase the chunks, decode, verify the data
// against pristine copies, and check the decoding-table cache size.
TEST_F(IsaErasureCodeTest, isa_vandermonde_exhaustive)
{
  // Test all possible failure scenarios and reconstruction cases for
  // a (12,4) configuration using the vandermonde matrix
  ErasureCodeIsaDefault Isa(tcache);
  ErasureCodeProfile profile;
  profile["k"] = "12";
  profile["m"] = "4";
  Isa.init(profile, &cerr);
  const int k = 12;
  const int m = 4;
#define LARGE_ENOUGH 2048
  bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
  in_ptr.zero();
  in_ptr.set_length(0);
  const char *payload =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  in_ptr.append(payload, strlen(payload));
  bufferlist in;
  in.push_back(in_ptr);
  set<int>want_to_encode;
  map<int, bufferlist> encoded;
  for (int i = 0; i < (k + m); i++) {
    want_to_encode.insert(i);
  }
  EXPECT_EQ(0, Isa.encode(want_to_encode,
                          in,
                          &encoded));
  EXPECT_EQ((unsigned) (k + m), encoded.size());
  unsigned length = encoded[0].length();
  // the code is systematic: the k data chunks are a copy of the input
  for (int i = 0; i < k; i++) {
    EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length));
  }
  buffer::ptr enc[k + m];
  // create buffers with a copy of the original data to be able to compare it after decoding
  {
    for (int i = 0; i < (k + m); i++) {
      buffer::ptr newenc(buffer::create_page_aligned(LARGE_ENOUGH));
      enc[i] = newenc;
      enc[i].zero();
      enc[i].set_length(0);
      enc[i].append(encoded[i].c_str(), length);
    }
  }
  // loop through all possible loss scenarios
  // erase up to four distinct chunks (l1 < l2 < l3 < l4), decode and
  // verify, then restore each erased chunk before advancing the
  // enclosing loop so every combination starts from a clean state
  int cnt_cf = 0;
  for (int l1 = 0; l1 < (k + m); l1++) {
    map<int, bufferlist> degraded = encoded;
    set<int> want_to_decode;
    bool err;
    degraded.erase(l1);
    want_to_decode.insert(l1);
    err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
    EXPECT_EQ(0, err);
    cnt_cf++;
    for (int l2 = l1 + 1; l2 < (k + m); l2++) {
      degraded.erase(l2);
      want_to_decode.insert(l2);
      err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
      EXPECT_EQ(0, err);
      cnt_cf++;
      for (int l3 = l2 + 1; l3 < (k + m); l3++) {
        degraded.erase(l3);
        want_to_decode.insert(l3);
        err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
        EXPECT_EQ(0, err);
        cnt_cf++;
        for (int l4 = l3 + 1; l4 < (k + m); l4++) {
          degraded.erase(l4);
          want_to_decode.insert(l4);
          err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
          EXPECT_EQ(0, err);
          degraded[l4] = encoded[l4];
          want_to_decode.erase(l4);
          cnt_cf++;
        }
        degraded[l3] = encoded[l3];
        want_to_decode.erase(l3);
      }
      degraded[l2] = encoded[l2];
      want_to_decode.erase(l2);
    }
    degraded[l1] = encoded[l1];
    want_to_decode.erase(l1);
  }
  // C(16,1) + C(16,2) + C(16,3) + C(16,4) = 16 + 120 + 560 + 1820
  EXPECT_EQ(2516, cnt_cf);
  EXPECT_EQ(2506, tcache.getDecodingTableCacheSize()); // 3 entries from (2,2) test and 2503 from (12,4)
}
// Same exhaustive loss/recovery sweep as isa_vandermonde_exhaustive,
// but using the Cauchy coding matrix and its dedicated table cache.
TEST_F(IsaErasureCodeTest, isa_cauchy_exhaustive)
{
  // Test all possible failure scenarios and reconstruction cases for
  // a (12,4) configuration using the cauchy matrix
  ErasureCodeIsaDefault Isa(tcache,ErasureCodeIsaDefault::kCauchy);
  ErasureCodeProfile profile;
  profile["k"] = "12";
  profile["m"] = "4";
  profile["technique"] = "cauchy";
  Isa.init(profile, &cerr);
  const int k = 12;
  const int m = 4;
#define LARGE_ENOUGH 2048
  bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
  in_ptr.zero();
  in_ptr.set_length(0);
  const char *payload =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  in_ptr.append(payload, strlen(payload));
  bufferlist in;
  in.push_back(in_ptr);
  set<int>want_to_encode;
  map<int, bufferlist> encoded;
  for (int i = 0; i < (k + m); i++) {
    want_to_encode.insert(i);
  }
  EXPECT_EQ(0, Isa.encode(want_to_encode,
                          in,
                          &encoded));
  EXPECT_EQ((unsigned) (k + m), encoded.size());
  unsigned length = encoded[0].length();
  // the code is systematic: the k data chunks are a copy of the input
  for (int i = 0; i < k; i++) {
    EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length));
  }
  buffer::ptr enc[k + m];
  // create buffers with a copy of the original data to be able to compare it after decoding
  {
    for (int i = 0; i < (k + m); i++) {
      buffer::ptr newenc(buffer::create_page_aligned(LARGE_ENOUGH));
      enc[i] = newenc;
      enc[i].zero();
      enc[i].set_length(0);
      enc[i].append(encoded[i].c_str(), length);
    }
  }
  // loop through all possible loss scenarios
  // erase up to four distinct chunks (l1 < l2 < l3 < l4), decode and
  // verify, then restore each erased chunk before advancing the
  // enclosing loop so every combination starts from a clean state
  int cnt_cf = 0;
  for (int l1 = 0; l1 < (k + m); l1++) {
    map<int, bufferlist> degraded = encoded;
    set<int> want_to_decode;
    bool err;
    degraded.erase(l1);
    want_to_decode.insert(l1);
    err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
    EXPECT_EQ(0, err);
    cnt_cf++;
    for (int l2 = l1 + 1; l2 < (k + m); l2++) {
      degraded.erase(l2);
      want_to_decode.insert(l2);
      err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
      EXPECT_EQ(0, err);
      cnt_cf++;
      for (int l3 = l2 + 1; l3 < (k + m); l3++) {
        degraded.erase(l3);
        want_to_decode.insert(l3);
        err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
        EXPECT_EQ(0, err);
        cnt_cf++;
        for (int l4 = l3 + 1; l4 < (k + m); l4++) {
          degraded.erase(l4);
          want_to_decode.insert(l4);
          err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
          EXPECT_EQ(0, err);
          degraded[l4] = encoded[l4];
          want_to_decode.erase(l4);
          cnt_cf++;
        }
        degraded[l3] = encoded[l3];
        want_to_decode.erase(l3);
      }
      degraded[l2] = encoded[l2];
      want_to_decode.erase(l2);
    }
    degraded[l1] = encoded[l1];
    want_to_decode.erase(l1);
  }
  // C(16,1) + C(16,2) + C(16,3) + C(16,4) = 16 + 120 + 560 + 1820
  EXPECT_EQ(2516, cnt_cf);
  // every loss combination produced a distinct Cauchy decoding table
  EXPECT_EQ(2516, tcache.getDecodingTableCacheSize(ErasureCodeIsaDefault::kCauchy));
}
// Same exhaustive sweep with a larger (16,4) Cauchy layout: the number
// of distinct loss combinations exceeds the decoding-table cache
// capacity, exercising cache eviction ("trashing") behavior.
TEST_F(IsaErasureCodeTest, isa_cauchy_cache_trash)
{
  // Test all possible failure scenarios and reconstruction cases for
  // a (16,4) configuration using the cauchy matrix
  ErasureCodeIsaDefault Isa(tcache,ErasureCodeIsaDefault::kCauchy);
  ErasureCodeProfile profile;
  profile["k"] = "16";
  profile["m"] = "4";
  profile["technique"] = "cauchy";
  Isa.init(profile, &cerr);
  const int k = 16;
  const int m = 4;
#define LARGE_ENOUGH 2048
  bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
  in_ptr.zero();
  in_ptr.set_length(0);
  const char *payload =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  in_ptr.append(payload, strlen(payload));
  bufferlist in;
  in.push_back(in_ptr);
  set<int>want_to_encode;
  map<int, bufferlist> encoded;
  for (int i = 0; i < (k + m); i++) {
    want_to_encode.insert(i);
  }
  EXPECT_EQ(0, Isa.encode(want_to_encode,
                          in,
                          &encoded));
  EXPECT_EQ((unsigned) (k + m), encoded.size());
  unsigned length = encoded[0].length();
  // the code is systematic: the k data chunks are a copy of the input
  for (int i = 0; i < k; i++) {
    EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length));
  }
  buffer::ptr enc[k + m];
  // create buffers with a copy of the original data to be able to compare it after decoding
  {
    for (int i = 0; i < (k + m); i++) {
      buffer::ptr newenc(buffer::create_page_aligned(LARGE_ENOUGH));
      enc[i] = newenc;
      enc[i].zero();
      enc[i].set_length(0);
      enc[i].append(encoded[i].c_str(), length);
    }
  }
  // loop through all possible loss scenarios
  // erase up to four distinct chunks (l1 < l2 < l3 < l4), decode and
  // verify, then restore each erased chunk before advancing the
  // enclosing loop so every combination starts from a clean state
  int cnt_cf = 0;
  for (int l1 = 0; l1 < (k + m); l1++) {
    map<int, bufferlist> degraded = encoded;
    set<int> want_to_decode;
    bool err;
    degraded.erase(l1);
    want_to_decode.insert(l1);
    err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
    EXPECT_EQ(0, err);
    cnt_cf++;
    for (int l2 = l1 + 1; l2 < (k + m); l2++) {
      degraded.erase(l2);
      want_to_decode.insert(l2);
      err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
      EXPECT_EQ(0, err);
      cnt_cf++;
      for (int l3 = l2 + 1; l3 < (k + m); l3++) {
        degraded.erase(l3);
        want_to_decode.insert(l3);
        err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
        EXPECT_EQ(0, err);
        cnt_cf++;
        for (int l4 = l3 + 1; l4 < (k + m); l4++) {
          degraded.erase(l4);
          want_to_decode.insert(l4);
          err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
          EXPECT_EQ(0, err);
          degraded[l4] = encoded[l4];
          want_to_decode.erase(l4);
          cnt_cf++;
        }
        degraded[l3] = encoded[l3];
        want_to_decode.erase(l3);
      }
      degraded[l2] = encoded[l2];
      want_to_decode.erase(l2);
    }
    degraded[l1] = encoded[l1];
    want_to_decode.erase(l1);
  }
  // C(20,1) + C(20,2) + C(20,3) + C(20,4) = 20 + 190 + 1140 + 4845
  EXPECT_EQ(6195, cnt_cf);
  // despite 6195 decode runs the cache holds only 2516 tables,
  // i.e. it was capped and had to evict entries
  EXPECT_EQ(2516, tcache.getDecodingTableCacheSize(ErasureCodeIsaDefault::kCauchy));
}
// With m = 1 the ISA plugin degenerates to a RAID-5 style XOR parity
// code; verify recovery of every possible single-chunk loss.
TEST_F(IsaErasureCodeTest, isa_xor_codec)
{
  // Test all possible failure scenarios and reconstruction cases for
  // a (4,1) RAID-5 like configuration
  ErasureCodeIsaDefault Isa(tcache);
  ErasureCodeProfile profile;
  profile["k"] = "4";
  profile["m"] = "1";
  Isa.init(profile, &cerr);
  const int k = 4;
  const int m = 1;
#define LARGE_ENOUGH 2048
  bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
  in_ptr.zero();
  in_ptr.set_length(0);
  const char *payload =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  in_ptr.append(payload, strlen(payload));
  bufferlist in;
  in.push_back(in_ptr);
  set<int>want_to_encode;
  map<int, bufferlist> encoded;
  for (int i = 0; i < (k + m); i++) {
    want_to_encode.insert(i);
  }
  EXPECT_EQ(0, Isa.encode(want_to_encode,
                          in,
                          &encoded));
  EXPECT_EQ((unsigned) (k + m), encoded.size());
  unsigned length = encoded[0].length();
  // the code is systematic: the k data chunks are a copy of the input
  for (int i = 0; i < k; i++) {
    EXPECT_EQ(0, memcmp(encoded[i].c_str(), in.c_str() + (i * length), length));
  }
  buffer::ptr enc[k + m];
  // create buffers with a copy of the original data to be able to compare it after decoding
  {
    for (int i = 0; i < (k + m); i++) {
      buffer::ptr newenc(buffer::create_page_aligned(LARGE_ENOUGH));
      enc[i] = newenc;
      enc[i].zero();
      enc[i].set_length(0);
      enc[i].append(encoded[i].c_str(), length);
    }
  }
  // loop through all possible loss scenarios
  // with m = 1 only single-chunk losses are recoverable: erase each
  // chunk in turn, decode, verify, then restore it
  int cnt_cf = 0;
  for (int l1 = 0; l1 < (k + m); l1++) {
    map<int, bufferlist> degraded = encoded;
    set<int> want_to_decode;
    bool err;
    degraded.erase(l1);
    want_to_decode.insert(l1);
    err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
    EXPECT_EQ(0, err);
    cnt_cf++;
    degraded[l1] = encoded[l1];
    want_to_decode.erase(l1);
  }
  // one recovery per chunk: k + m = 5 cases
  EXPECT_EQ(5, cnt_cf);
}
// create_rule() must install a usable CRUSH rule for the EC pool, and
// report the appropriate errors for duplicate names, an unknown
// crush-root and an unknown crush-failure-domain.
TEST_F(IsaErasureCodeTest, create_rule)
{
  // build a small crushmap: 1 root, 4 hosts, 5 osds per host
  std::unique_ptr<CrushWrapper> c = std::make_unique<CrushWrapper>();
  c->create();
  int root_type = 2;
  c->set_type_name(root_type, "root");
  int host_type = 1;
  c->set_type_name(host_type, "host");
  int osd_type = 0;
  c->set_type_name(osd_type, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		root_type, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  map<string,string> loc;
  loc["root"] = "default";
  int num_host = 4;
  int num_osd = 5;
  int osd = 0;
  for (int h=0; h<num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o=0; o<num_osd; ++o, ++osd) {
      c->insert_item(g_ceph_context, osd, 1.0, string("osd.") + stringify(osd), loc);
    }
  }
  c->finalize();
  {
    // nominal case: rule is created once, -EEXIST on a duplicate name
    stringstream ss;
    ErasureCodeIsaDefault isa(tcache);
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["w"] = "8";
    isa.init(profile, &cerr);
    int rule = isa.create_rule("myrule", *c, &ss);
    EXPECT_EQ(0, rule);
    EXPECT_EQ(-EEXIST, isa.create_rule("myrule", *c, &ss));
    //
    // the minimum that is expected from the created rule is to
    // successfully map get_chunk_count() devices from the crushmap,
    // at least once.
    //
    vector<__u32> weight(c->get_max_devices(), 0x10000);
    vector<int> out;
    int x = 0;
    c->do_rule(rule, x, out, isa.get_chunk_count(), weight, 0);
    ASSERT_EQ(out.size(), isa.get_chunk_count());
    for (unsigned i=0; i<out.size(); ++i)
      ASSERT_NE(CRUSH_ITEM_NONE, out[i]);
  }
  {
    // a crush-root that does not exist in the map is rejected
    stringstream ss;
    ErasureCodeIsaDefault isa(tcache);
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["w"] = "8";
    profile["crush-root"] = "BAD";
    isa.init(profile, &cerr);
    EXPECT_EQ(-ENOENT, isa.create_rule("otherrule", *c, &ss));
    EXPECT_EQ("root item BAD does not exist", ss.str());
  }
  {
    // an unknown crush-failure-domain bucket type is rejected
    stringstream ss;
    ErasureCodeIsaDefault isa(tcache);
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["w"] = "8";
    profile["crush-failure-domain"] = "WORSE";
    isa.init(profile, &cerr);
    EXPECT_EQ(-EINVAL, isa.create_rule("otherrule", *c, &ss));
    EXPECT_EQ("unknown type WORSE", ss.str());
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 unittest_erasure_code_isa &&
* libtool --mode=execute valgrind --tool=memcheck \
* ./unittest_erasure_code_isa \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 31,936 | 31.992769 | 130 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeJerasure.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "crush/CrushWrapper.h"
#include "include/stringify.h"
#include "erasure-code/jerasure/ErasureCodeJerasure.h"
#include "global/global_context.h"
#include "common/config.h"
#include "gtest/gtest.h"
using namespace std;
// Typed-test fixture: gtest instantiates every TYPED_TEST below once
// per jerasure technique listed in JerasureTypes.
template <typename T>
class ErasureCodeTest : public ::testing::Test {
 public:
};
// All jerasure coding techniques the typed tests run against.
typedef ::testing::Types<
  ErasureCodeJerasureReedSolomonVandermonde,
  ErasureCodeJerasureReedSolomonRAID6,
  ErasureCodeJerasureCauchyOrig,
  ErasureCodeJerasureCauchyGood,
  ErasureCodeJerasureLiberation,
  ErasureCodeJerasureBlaumRoth,
  ErasureCodeJerasureLiber8tion
> JerasureTypes;
TYPED_TEST_SUITE(ErasureCodeTest, JerasureTypes);
TYPED_TEST(ErasureCodeTest, sanity_check_k)
{
  // For every jerasure technique, a profile with k < 2 must be
  // rejected by init() with -EINVAL and an explanatory message.
  TypeParam jerasure;
  ErasureCodeProfile profile;
  profile["packetsize"] = "8";
  profile["m"] = "1";
  profile["k"] = "1";
  ostringstream errors;
  const int ret = jerasure.init(profile, &errors);
  EXPECT_EQ(-EINVAL, ret);
  EXPECT_NE(std::string::npos, errors.str().find("must be >= 2"));
}
// Encode a payload with a (2,2) profile and decode it back, once with
// per-chunk alignment disabled and once enabled. Covers the fully
// available case and the loss of both data chunks.
TYPED_TEST(ErasureCodeTest, encode_decode)
{
  const char *per_chunk_alignments[] = { "false", "true" };
  for (int per_chunk_alignment = 0 ;
       per_chunk_alignment < 2;
       per_chunk_alignment++) {
    TypeParam jerasure;
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["packetsize"] = "8";
    profile["jerasure-per-chunk-alignment"] =
      per_chunk_alignments[per_chunk_alignment];
    jerasure.init(profile, &cerr);
#define LARGE_ENOUGH 2048
    bufferptr in_ptr(buffer::create_page_aligned(LARGE_ENOUGH));
    in_ptr.zero();
    in_ptr.set_length(0);
    const char *payload =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    in_ptr.append(payload, strlen(payload));
    bufferlist in;
    in.push_back(in_ptr);
    int want_to_encode[] = { 0, 1, 2, 3 };
    map<int, bufferlist> encoded;
    EXPECT_EQ(0, jerasure.encode(set<int>(want_to_encode, want_to_encode+4),
				 in,
				 &encoded));
    EXPECT_EQ(4u, encoded.size());
    unsigned length =  encoded[0].length();
    // the code is systematic: the data chunks are a copy of the input
    EXPECT_EQ(0, memcmp(encoded[0].c_str(), in.c_str(), length));
    EXPECT_EQ(0, memcmp(encoded[1].c_str(), in.c_str() + length,
			in.length() - length));
    // all chunks are available
    {
      int want_to_decode[] = { 0, 1 };
      map<int, bufferlist> decoded;
      EXPECT_EQ(0, jerasure._decode(set<int>(want_to_decode, want_to_decode+2),
				    encoded,
				    &decoded));
      EXPECT_EQ(2u, decoded.size());
      EXPECT_EQ(length, decoded[0].length());
      EXPECT_EQ(0, memcmp(decoded[0].c_str(), in.c_str(), length));
      EXPECT_EQ(0, memcmp(decoded[1].c_str(), in.c_str() + length,
			  in.length() - length));
    }
    // two chunks are missing
    {
      map<int, bufferlist> degraded = encoded;
      degraded.erase(0);
      degraded.erase(1);
      EXPECT_EQ(2u, degraded.size());
      int want_to_decode[] = { 0, 1 };
      map<int, bufferlist> decoded;
      EXPECT_EQ(0, jerasure._decode(set<int>(want_to_decode, want_to_decode+2),
				    degraded,
				    &decoded));
      // always decode all, regardless of want_to_decode
      EXPECT_EQ(4u, decoded.size());
      EXPECT_EQ(length, decoded[0].length());
      EXPECT_EQ(0, memcmp(decoded[0].c_str(), in.c_str(), length));
      EXPECT_EQ(0, memcmp(decoded[1].c_str(), in.c_str() + length,
			  in.length() - length));
    }
  }
}
// Verify _minimum_to_decode() for a (2,2) jerasure code: it must
// return the smallest set of chunks to fetch in order to satisfy a
// read, and fail with -EIO when fewer than k chunks are available.
TYPED_TEST(ErasureCodeTest, minimum_to_decode)
{
  TypeParam jerasure;
  ErasureCodeProfile profile;
  profile["k"] = "2";
  profile["m"] = "2";
  profile["w"] = "7";
  profile["packetsize"] = "8";
  jerasure.init(profile, &cerr);
  //
  // If trying to read nothing, the minimum is empty.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read,
					     available_chunks,
					     &minimum));
    EXPECT_TRUE(minimum.empty());
  }
  //
  // There is no way to read a chunk if none are available.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    want_to_read.insert(0);
    EXPECT_EQ(-EIO, jerasure._minimum_to_decode(want_to_read,
						available_chunks,
						&minimum));
  }
  //
  // Reading a subset of the available chunks is always possible.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    want_to_read.insert(0);
    available_chunks.insert(0);
    EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read,
					     available_chunks,
					     &minimum));
    EXPECT_EQ(want_to_read, minimum);
  }
  //
  // There is no way to read a missing chunk if there is less than k
  // chunks available.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    want_to_read.insert(0);
    want_to_read.insert(1);
    available_chunks.insert(0);
    EXPECT_EQ(-EIO, jerasure._minimum_to_decode(want_to_read,
						available_chunks,
						&minimum));
  }
  //
  // When chunks are not available, the minimum can be made of any
  // chunks. For instance, to read 1 and 3 below the minimum could be
  // 2 and 3 which may seem better because it contains one of the
  // chunks to be read. But it won't be more efficient than retrieving
  // 0 and 2 instead because, in both cases, the decode function will
  // need to run the same recovery operation and use the same amount
  // of CPU and memory.
  //
  {
    set<int> want_to_read;
    set<int> available_chunks;
    set<int> minimum;
    want_to_read.insert(1);
    want_to_read.insert(3);
    available_chunks.insert(0);
    available_chunks.insert(2);
    available_chunks.insert(3);
    EXPECT_EQ(0, jerasure._minimum_to_decode(want_to_read,
					     available_chunks,
					     &minimum));
    // exactly k = 2 chunks are needed, and chunk 3 is not among them
    EXPECT_EQ(2u, minimum.size());
    EXPECT_EQ(0u, minimum.count(3));
  }
}
// encode() must zero-pad unaligned input and honor want_to_encode by
// only returning the requested chunks (Reed-Solomon Vandermonde).
TEST(ErasureCodeTest, encode)
{
  ErasureCodeJerasureReedSolomonVandermonde jerasure;
  ErasureCodeProfile profile;
  profile["k"] = "2";
  profile["m"] = "2";
  profile["w"] = "8";
  jerasure.init(profile, &cerr);
  unsigned aligned_object_size = jerasure.get_alignment() * 2;
  {
    //
    // When the input bufferlist needs to be padded because
    // it is not properly aligned, it is padded with zeros.
    //
    bufferlist in;
    map<int,bufferlist> encoded;
    int want_to_encode[] = { 0, 1, 2, 3 };
    int trail_length = 1;
    in.append(string(aligned_object_size + trail_length, 'X'));
    EXPECT_EQ(0, jerasure.encode(set<int>(want_to_encode, want_to_encode+4),
				 in,
				 &encoded));
    EXPECT_EQ(4u, encoded.size());
    // the second data chunk holds the trailing byte followed by the
    // zero padding added to reach the aligned chunk size
    char *last_chunk = encoded[1].c_str();
    int length =encoded[1].length();
    EXPECT_EQ('X', last_chunk[0]);
    EXPECT_EQ('\0', last_chunk[length - trail_length]);
  }
  {
    //
    // When only the first chunk is required, the encoded map only
    // contains the first chunk. Although the jerasure encode
    // internally allocated a buffer because of padding requirements
    // and also computes the coding chunks, they are released before
    // the return of the method, as shown when running the tests thru
    // valgrind (there is no leak).
    //
    bufferlist in;
    map<int,bufferlist> encoded;
    set<int> want_to_encode;
    want_to_encode.insert(0);
    int trail_length = 1;
    in.append(string(aligned_object_size + trail_length, 'X'));
    EXPECT_EQ(0, jerasure.encode(want_to_encode, in, &encoded));
    EXPECT_EQ(1u, encoded.size());
  }
}
// create_rule() must install a usable CRUSH rule for the EC pool, and
// report the appropriate errors for duplicate names, an unknown
// crush-root and an unknown crush-failure-domain.
TEST(ErasureCodeTest, create_rule)
{
  // build a small crushmap: 1 root, 4 hosts, 5 osds per host
  std::unique_ptr<CrushWrapper> c = std::make_unique<CrushWrapper>();
  c->create();
  int root_type = 2;
  c->set_type_name(root_type, "root");
  int host_type = 1;
  c->set_type_name(host_type, "host");
  int osd_type = 0;
  c->set_type_name(osd_type, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		root_type, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  map<string,string> loc;
  loc["root"] = "default";
  int num_host = 4;
  int num_osd = 5;
  int osd = 0;
  for (int h=0; h<num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o=0; o<num_osd; ++o, ++osd) {
      c->insert_item(g_ceph_context, osd, 1.0, string("osd.") + stringify(osd), loc);
    }
  }
  c->finalize();
  {
    // nominal case: rule is created once, -EEXIST on a duplicate name
    stringstream ss;
    ErasureCodeJerasureReedSolomonVandermonde jerasure;
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["w"] = "8";
    jerasure.init(profile, &cerr);
    int rule = jerasure.create_rule("myrule", *c, &ss);
    EXPECT_EQ(0, rule);
    EXPECT_EQ(-EEXIST, jerasure.create_rule("myrule", *c, &ss));
    //
    // the minimum that is expected from the created rule is to
    // successfully map get_chunk_count() devices from the crushmap,
    // at least once.
    //
    vector<__u32> weight(c->get_max_devices(), 0x10000);
    vector<int> out;
    int x = 0;
    c->do_rule(rule, x, out, jerasure.get_chunk_count(), weight, 0);
    ASSERT_EQ(out.size(), jerasure.get_chunk_count());
    for (unsigned i=0; i<out.size(); ++i)
      ASSERT_NE(CRUSH_ITEM_NONE, out[i]);
  }
  {
    // a crush-root that does not exist in the map is rejected
    stringstream ss;
    ErasureCodeJerasureReedSolomonVandermonde jerasure;
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["w"] = "8";
    profile["crush-root"] = "BAD";
    jerasure.init(profile, &cerr);
    EXPECT_EQ(-ENOENT, jerasure.create_rule("otherrule", *c, &ss));
    EXPECT_EQ("root item BAD does not exist", ss.str());
  }
  {
    // an unknown crush-failure-domain bucket type is rejected
    stringstream ss;
    ErasureCodeJerasureReedSolomonVandermonde jerasure;
    ErasureCodeProfile profile;
    profile["k"] = "2";
    profile["m"] = "2";
    profile["w"] = "8";
    profile["crush-failure-domain"] = "WORSE";
    jerasure.init(profile, &cerr);
    EXPECT_EQ(-EINVAL, jerasure.create_rule("otherrule", *c, &ss));
    EXPECT_EQ("unknown type WORSE", ss.str());
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ;
* make -j4 unittest_erasure_code_jerasure &&
* valgrind --tool=memcheck \
* ./unittest_erasure_code_jerasure \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 11,003 | 28.660377 | 85 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeLrc.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>

#include <memory>

#include "crush/CrushWrapper.h"
#include "include/stringify.h"
#include "erasure-code/lrc/ErasureCodeLrc.h"
#include "global/global_context.h"
#include "common/config_proxy.h"
#include "gtest/gtest.h"
using namespace std;
TEST(ErasureCodeLrc, parse_rule)
{
  // Exercises ErasureCodeLrc::parse_rule(): the built-in defaults, the
  // "crush-root" override, and every parse-error path of "crush-steps".
  ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
  // defaults before any profile is parsed
  EXPECT_EQ("default", lrc.rule_root);
  EXPECT_EQ("host", lrc.rule_steps.front().type);
  ErasureCodeProfile profile;
  // crush-root can be overridden
  profile["crush-root"] = "other";
  EXPECT_EQ(0, lrc.parse_rule(profile, &cerr));
  EXPECT_EQ("other", lrc.rule_root);
  // an empty JSON array clears the steps
  profile["crush-steps"] = "[]";
  EXPECT_EQ(0, lrc.parse_rule(profile, &cerr));
  EXPECT_TRUE(lrc.rule_steps.empty());
  // error paths: not an array, invalid JSON, element not an array,
  // bad op type, bad "type" field, bad "n" field
  profile["crush-steps"] = "0";
  EXPECT_EQ(ERROR_LRC_ARRAY, lrc.parse_rule(profile, &cerr));
  profile["crush-steps"] = "{";
  EXPECT_EQ(ERROR_LRC_PARSE_JSON, lrc.parse_rule(profile, &cerr));
  profile["crush-steps"] = "[0]";
  EXPECT_EQ(ERROR_LRC_ARRAY, lrc.parse_rule(profile, &cerr));
  profile["crush-steps"] = "[[0]]";
  EXPECT_EQ(ERROR_LRC_RULE_OP, lrc.parse_rule(profile, &cerr));
  profile["crush-steps"] = "[[\"choose\", 0]]";
  EXPECT_EQ(ERROR_LRC_RULE_TYPE, lrc.parse_rule(profile, &cerr));
  profile["crush-steps"] = "[[\"choose\", \"host\", []]]";
  EXPECT_EQ(ERROR_LRC_RULE_N, lrc.parse_rule(profile, &cerr));
  // a single well-formed step
  profile["crush-steps"] = "[[\"choose\", \"host\", 2]]";
  EXPECT_EQ(0, lrc.parse_rule(profile, &cerr));
  const ErasureCodeLrc::Step &step = lrc.rule_steps.front();
  EXPECT_EQ("choose", step.op);
  EXPECT_EQ("host", step.type);
  EXPECT_EQ(2, step.n);
  // multiple steps are parsed in order (trailing comma is tolerated)
  profile["crush-steps"] =
    "["
    " [\"choose\", \"rack\", 2], "
    " [\"chooseleaf\", \"host\", 5], "
    "]";
  EXPECT_EQ(0, lrc.parse_rule(profile, &cerr));
  EXPECT_EQ(2U, lrc.rule_steps.size());
  {
    const ErasureCodeLrc::Step &step = lrc.rule_steps[0];
    EXPECT_EQ("choose", step.op);
    EXPECT_EQ("rack", step.type);
    EXPECT_EQ(2, step.n);
  }
  {
    const ErasureCodeLrc::Step &step = lrc.rule_steps[1];
    EXPECT_EQ("chooseleaf", step.op);
    EXPECT_EQ("host", step.type);
    EXPECT_EQ(5, step.n);
  }
}
TEST(ErasureCodeTest, create_rule)
{
  // Build a synthetic crushmap (1 root, 10 racks x 10 hosts x 10 osds)
  // and verify that an LRC rule built from "crush-steps" maps the
  // expected number of devices with the expected rack locality.
  //
  // Own the CrushWrapper with a unique_ptr: the original used a raw
  // `new CrushWrapper` that was never deleted (leak under valgrind).
  auto c = std::make_unique<CrushWrapper>();
  c->create();
  int root_type = 3;
  c->set_type_name(root_type, "root");
  int rack_type = 2;
  c->set_type_name(rack_type, "rack");
  int host_type = 1;
  c->set_type_name(host_type, "host");
  int osd_type = 0;
  c->set_type_name(osd_type, "osd");
  int rootno;
  c->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1,
		root_type, 0, NULL, NULL, &rootno);
  c->set_item_name(rootno, "default");
  map<string,string> loc;
  loc["root"] = "default";
  //
  // Set all to 10 so that the item number is trivial to decompose
  // into rack/host/osd.
  //
  int num_rack;
  int num_host;
  int num_osd;
  num_rack = num_host = num_osd = 10;
  int osd = 0;
  for (int r=0; r<num_rack; ++r) {
    loc["rack"] = string("rack-") + stringify(r);
    for (int h=0; h<num_host; ++h) {
      loc["host"] = string("host-") + stringify(r) + string("-") + stringify(h);
      for (int o=0; o<num_osd; ++o, ++osd) {
	c->insert_item(g_ceph_context, osd, 1.0, string("osd.") + stringify(osd), loc);
      }
    }
  }
  c->finalize();
  // default rule creation succeeds
  ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
  EXPECT_EQ(0, lrc.create_rule("rule1", *c, &cerr));
  // custom rule: 2 racks, 5 hosts per rack
  ErasureCodeProfile profile;
  unsigned int racks = 2;
  unsigned int hosts = 5;
  profile["crush-steps"] =
    "["
    " [\"choose\", \"rack\", " + stringify(racks) + "], "
    " [\"chooseleaf\", \"host\", " + stringify(hosts) + "], "
    "]";
  const char *rule_name = "rule2";
  EXPECT_EQ(0, lrc.parse_rule(profile, &cerr));
  EXPECT_EQ(1, lrc.create_rule(rule_name, *c, &cerr));
  // give every device equal weight (fill-construct instead of the
  // original signed/unsigned push_back loop)
  vector<__u32> weight(c->get_max_devices(), 0x10000);
  int rule = c->get_rule_id(rule_name);
  vector<int> out;
  unsigned int n = racks * hosts;
  c->do_rule(rule, 1, out, n, weight, 0);
  EXPECT_EQ(n, out.size());
  //
  // check that the first five are in the same rack and the next five
  // in another rack (osd ids decompose as rack*100 + host*10 + osd)
  //
  int first_rack = out[0] / num_host / num_osd;
  EXPECT_EQ(first_rack, out[1] / num_host / num_osd);
  EXPECT_EQ(first_rack, out[2] / num_host / num_osd);
  EXPECT_EQ(first_rack, out[3] / num_host / num_osd);
  EXPECT_EQ(first_rack, out[4] / num_host / num_osd);
  int second_rack = out[5] / num_host / num_osd;
  EXPECT_EQ(second_rack, out[6] / num_host / num_osd);
  EXPECT_EQ(second_rack, out[7] / num_host / num_osd);
  EXPECT_EQ(second_rack, out[8] / num_host / num_osd);
  EXPECT_EQ(second_rack, out[9] / num_host / num_osd);
}
TEST(ErasureCodeLrc, parse_kml)
{
  // Exercises ErasureCodeLrc::parse_kml(): the k/m/l shorthand that
  // generates the "mapping", "layers" and "crush-steps" entries.
  ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  // no k/m/l at all is valid (nothing to generate)
  EXPECT_EQ(0, lrc.parse_kml(profile, &cerr));
  // k without m and l is rejected: all three or none
  profile["k"] = "4";
  EXPECT_EQ(ERROR_LRC_ALL_OR_NOTHING, lrc.parse_kml(profile, &cerr));
  // entries that parse_kml generates must not be set by the caller
  const char *generated[] = { "mapping",
			      "layers",
			      "crush-steps" };
  profile["m"] = "2";
  profile["l"] = "3";
  for (int i = 0; i < 3; i++) {
    profile[generated[i]] = "SET";
    EXPECT_EQ(ERROR_LRC_GENERATED, lrc.parse_kml(profile, &cerr));
    profile.erase(profile.find(generated[i]));
  }
  // k+m must be a multiple of l
  profile["k"] = "4";
  profile["m"] = "2";
  profile["l"] = "7";
  EXPECT_EQ(ERROR_LRC_K_M_MODULO, lrc.parse_kml(profile, &cerr));
  // k must be a multiple of (k+m)/l
  profile["k"] = "3";
  profile["m"] = "3";
  profile["l"] = "3";
  EXPECT_EQ(ERROR_LRC_K_MODULO, lrc.parse_kml(profile, &cerr));
  // valid k/m/l: verify the generated layers, mapping and rule steps
  profile["k"] = "4";
  profile["m"] = "2";
  profile["l"] = "3";
  EXPECT_EQ(0, lrc.parse_kml(profile, &cerr));
  EXPECT_EQ("[ "
	    " [ \"DDc_DDc_\", \"\" ],"
	    " [ \"DDDc____\", \"\" ],"
	    " [ \"____DDDc\", \"\" ],"
	    "]", profile["layers"]);
  EXPECT_EQ("DD__DD__", profile["mapping"]);
  EXPECT_EQ("chooseleaf", lrc.rule_steps[0].op);
  EXPECT_EQ("host", lrc.rule_steps[0].type);
  EXPECT_EQ(0, lrc.rule_steps[0].n);
  EXPECT_EQ(1U, lrc.rule_steps.size());
  profile.erase(profile.find("mapping"));
  profile.erase(profile.find("layers"));
  // crush-failure-domain overrides the step type
  profile["k"] = "4";
  profile["m"] = "2";
  profile["l"] = "3";
  profile["crush-failure-domain"] = "osd";
  EXPECT_EQ(0, lrc.parse_kml(profile, &cerr));
  EXPECT_EQ("chooseleaf", lrc.rule_steps[0].op);
  EXPECT_EQ("osd", lrc.rule_steps[0].type);
  EXPECT_EQ(0, lrc.rule_steps[0].n);
  EXPECT_EQ(1U, lrc.rule_steps.size());
  profile.erase(profile.find("mapping"));
  profile.erase(profile.find("layers"));
  // crush-locality adds a first "choose" step over the locality type
  profile["k"] = "4";
  profile["m"] = "2";
  profile["l"] = "3";
  profile["crush-failure-domain"] = "osd";
  profile["crush-locality"] = "rack";
  EXPECT_EQ(0, lrc.parse_kml(profile, &cerr));
  EXPECT_EQ("choose", lrc.rule_steps[0].op);
  EXPECT_EQ("rack", lrc.rule_steps[0].type);
  EXPECT_EQ(2, lrc.rule_steps[0].n);
  EXPECT_EQ("chooseleaf", lrc.rule_steps[1].op);
  EXPECT_EQ("osd", lrc.rule_steps[1].type);
  EXPECT_EQ(4, lrc.rule_steps[1].n);
  EXPECT_EQ(2U, lrc.rule_steps.size());
  profile.erase(profile.find("mapping"));
  profile.erase(profile.find("layers"));
}
TEST(ErasureCodeLrc, layers_description)
{
  // Validation of the "layers" profile entry: it must be present and
  // parse into a JSON array.
  ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  json_spirit::mArray description;
  // missing "layers" entry
  EXPECT_EQ(ERROR_LRC_DESCRIPTION,
	    lrc.layers_description(profile, &description, &cerr));
  // valid JSON but not an array
  profile["layers"] = "\"not an array\"";
  EXPECT_EQ(ERROR_LRC_ARRAY,
	    lrc.layers_description(profile, &description, &cerr));
  // not parseable as JSON at all
  profile["layers"] = "invalid json";
  EXPECT_EQ(ERROR_LRC_PARSE_JSON,
	    lrc.layers_description(profile, &description, &cerr));
  // an empty array is accepted
  profile["layers"] = "[]";
  EXPECT_EQ(0, lrc.layers_description(profile, &description, &cerr));
}
TEST(ErasureCodeLrc, layers_parse)
{
  // Exercises ErasureCodeLrc::layers_parse(): each layer must be an
  // array whose first element is a mapping string and whose optional
  // second element holds the per-layer plugin profile.
  {
    // a layer element that is not an array
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    const char *description_string ="[ 0 ]";
    profile["layers"] = description_string;
    json_spirit::mArray description;
    EXPECT_EQ(0, lrc.layers_description(profile, &description, &cerr));
    EXPECT_EQ(ERROR_LRC_ARRAY,
	      lrc.layers_parse(description_string, description, &cerr));
  }
  {
    // the first element of a layer must be a string
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    const char *description_string ="[ [ 0 ] ]";
    profile["layers"] = description_string;
    json_spirit::mArray description;
    EXPECT_EQ(0, lrc.layers_description(profile, &description, &cerr));
    EXPECT_EQ(ERROR_LRC_STR,
	      lrc.layers_parse(description_string, description, &cerr));
  }
  {
    // the second element must be a string or an object, not a number
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    const char *description_string ="[ [ \"\", 0 ] ]";
    profile["layers"] = description_string;
    json_spirit::mArray description;
    EXPECT_EQ(0, lrc.layers_description(profile, &description, &cerr));
    EXPECT_EQ(ERROR_LRC_CONFIG_OPTIONS,
	      lrc.layers_parse(description_string, description, &cerr));
  }
  //
  // The second element can be an object describing the plugin
  // profile.
  //
  {
    // extra elements beyond the second are ignored
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    const char *description_string ="[ [ \"\", { \"a\": \"b\" }, \"ignored\" ] ]";
    profile["layers"] = description_string;
    json_spirit::mArray description;
    EXPECT_EQ(0, lrc.layers_description(profile, &description, &cerr));
    EXPECT_EQ(0, lrc.layers_parse(description_string, description, &cerr));
    EXPECT_EQ("b", lrc.layers.front().profile["a"]);
  }
  //
  // The second element can be a str_map parseable string describing the plugin
  // profile.
  //
  {
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    const char *description_string ="[ [ \"\", \"a=b c=d\" ] ]";
    profile["layers"] = description_string;
    json_spirit::mArray description;
    EXPECT_EQ(0, lrc.layers_description(profile, &description, &cerr));
    EXPECT_EQ(0, lrc.layers_parse(description_string, description, &cerr));
    EXPECT_EQ("b", lrc.layers.front().profile["a"]);
    EXPECT_EQ("d", lrc.layers.front().profile["c"]);
  }
}
TEST(ErasureCodeLrc, layers_sanity_checks)
{
  // Cross-checks between the "mapping" string and the "layers" array
  // performed by ErasureCodeLrc::init().
  {
    // consistent mapping and layers: init succeeds
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["mapping"] =
	    "__DDD__DD";
    const char *description_string =
      "[ "
      "  [ \"_cDDD_cDD\", \"\" ],"
      "  [ \"c_DDD____\", \"\" ],"
      "  [ \"_____cDDD\", \"\" ],"
      "]";
    profile["layers"] = description_string;
    EXPECT_EQ(0, lrc.init(profile, &cerr));
  }
  {
    // "mapping" is mandatory
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    const char *description_string =
      "[ "
      "]";
    profile["layers"] = description_string;
    EXPECT_EQ(ERROR_LRC_MAPPING, lrc.init(profile, &cerr));
  }
  {
    // an empty layers array is rejected even if mapping is present
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["mapping"] = "";
    const char *description_string =
      "[ "
      "]";
    profile["layers"] = description_string;
    EXPECT_EQ(ERROR_LRC_LAYERS_COUNT, lrc.init(profile, &cerr));
  }
  {
    // a layer whose length disagrees with the mapping is rejected
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["mapping"] =
	    "DD";
    const char *description_string =
      "[ "
      "  [ \"DD??\", \"\" ], "
      "  [ \"DD\", \"\" ], "
      "  [ \"DD\", \"\" ], "
      "]";
    profile["layers"] = description_string;
    EXPECT_EQ(-EINVAL, lrc.init(profile, &cerr));
  }
}
TEST(ErasureCodeLrc, layers_init)
{
  // Verifies that layers_init() derives the per-layer plugin profile
  // (k, m, plugin, technique) from the layer's mapping string.
  {
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    // plugin directory comes from the environment when running from a
    // build tree, otherwise from the default "lib"
    const char* env = getenv("CEPH_LIB");
    string directory(env ? env : "lib");
    string description_string =
      "[ "
      "  [ \"_cDDD_cDD_\", \"directory=" + directory + "\" ],"
      "]";
    profile["layers"] = description_string;
    json_spirit::mArray description;
    EXPECT_EQ(0, lrc.layers_description(profile, &description, &cerr));
    EXPECT_EQ(0, lrc.layers_parse(description_string, description, &cerr));
    EXPECT_EQ(0, lrc.layers_init(&cerr));
    // 5 'D' data chunks, 2 'c' coding chunks, default jerasure plugin
    EXPECT_EQ("5", lrc.layers.front().profile["k"]);
    EXPECT_EQ("2", lrc.layers.front().profile["m"]);
    EXPECT_EQ("jerasure", lrc.layers.front().profile["plugin"]);
    EXPECT_EQ("reed_sol_van", lrc.layers.front().profile["technique"]);
  }
}
TEST(ErasureCodeLrc, init)
{
  // A consistent mapping/layers pair initializes successfully.
  ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["mapping"] = "__DDD__DD";
  profile["layers"] =
    "[ "
    "  [ \"_cDDD_cDD\", \"\" ],"
    "  [ \"c_DDD____\", \"\" ],"
    "  [ \"_____cDDD\", \"\" ],"
    "]";
  EXPECT_EQ(0, lrc.init(profile, &cerr));
}
TEST(ErasureCodeLrc, init_kml)
{
  // k/m/l shorthand: the chunk count is k + m plus one local parity
  // chunk per group of l chunks, i.e. (k + m) / l extra chunks.
  ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["k"] = "4";
  profile["m"] = "2";
  profile["l"] = "3";
  EXPECT_EQ(0, lrc.init(profile, &cerr));
  const unsigned int expected_chunks = 4 + 2 + (4 + 2) / 3;
  EXPECT_EQ(expected_chunks, lrc.get_chunk_count());
}
TEST(ErasureCodeLrc, minimum_to_decode)
{
  // Exercises _minimum_to_decode() across three scenarios: no erasure,
  // recovery through a local layer, and recovery that needs the global
  // layer after a local repair.
  // trivial : no erasures, the minimum is want_to_read
  {
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["mapping"] =
	    "__DDD__DD";
    const char *description_string =
      "[ "
      "  [ \"_cDDD_cDD\", \"\" ],"
      "  [ \"c_DDD____\", \"\" ],"
      "  [ \"_____cDDD\", \"\" ],"
      "]";
    profile["layers"] = description_string;
    EXPECT_EQ(0, lrc.init(profile, &cerr));
    set<int> want_to_read;
    want_to_read.insert(1);
    set<int> available_chunks;
    available_chunks.insert(1);
    available_chunks.insert(2);
    set<int> minimum;
    EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    // chunk 1 is available: nothing beyond want_to_read is needed
    EXPECT_EQ(want_to_read, minimum);
  }
  // locally repairable erasure
  {
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["mapping"] =
	    "__DDD__DD_";
    const char *description_string =
      "[ "
      "  [ \"_cDDD_cDD_\", \"\" ],"
      "  [ \"c_DDD_____\", \"\" ],"
      "  [ \"_____cDDD_\", \"\" ],"
      "  [ \"_____DDDDc\", \"\" ],"
      "]";
    profile["layers"] = description_string;
    EXPECT_EQ(0, lrc.init(profile, &cerr));
    EXPECT_EQ(profile["mapping"].length(),
	      lrc.get_chunk_count());
    {
      // want to read the last chunk
      set<int> want_to_read;
      want_to_read.insert(lrc.get_chunk_count() - 1);
      // all chunks are available except the last chunk
      set<int> available_chunks;
      for (int i = 0; i < (int)lrc.get_chunk_count() - 1; i++)
	available_chunks.insert(i);
      // _____DDDDc can recover c
      set<int> minimum;
      EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
      set<int> expected_minimum;
      expected_minimum.insert(5);
      expected_minimum.insert(6);
      expected_minimum.insert(7);
      expected_minimum.insert(8);
      EXPECT_EQ(expected_minimum, minimum);
    }
    {
      // want to read chunk 0: the c_DDD_____ local layer rebuilds it
      // from chunks 2, 3 and 4
      set<int> want_to_read;
      want_to_read.insert(0);
      set<int> available_chunks;
      for (int i = 1; i < (int)lrc.get_chunk_count(); i++)
	available_chunks.insert(i);
      set<int> minimum;
      EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
      set<int> expected_minimum;
      expected_minimum.insert(2);
      expected_minimum.insert(3);
      expected_minimum.insert(4);
      EXPECT_EQ(expected_minimum, minimum);
    }
  }
  // implicit parity required
  {
    ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
    ErasureCodeProfile profile;
    profile["mapping"] =
	    "__DDD__DD";
    const char *description_string =
      "[ "
      "  [ \"_cDDD_cDD\", \"\" ],"
      "  [ \"c_DDD____\", \"\" ],"
      "  [ \"_____cDDD\", \"\" ],"
      "]";
    profile["layers"] = description_string;
    EXPECT_EQ(0, lrc.init(profile, &cerr));
    EXPECT_EQ(profile["mapping"].length(),
	      lrc.get_chunk_count());
    set<int> want_to_read;
    want_to_read.insert(8);
    //
    // unable to recover, too many chunks missing
    //
    {
      set<int> available_chunks;
      available_chunks.insert(0);
      available_chunks.insert(1);
      // missing (2)
      // missing (3)
      available_chunks.insert(4);
      available_chunks.insert(5);
      available_chunks.insert(6);
      // missing (7)
      // missing (8)
      set<int> minimum;
      EXPECT_EQ(-EIO, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    }
    //
    // We want to read chunk 8 and encoding was done with
    //
    //     _cDDD_cDD
    //     c_DDD____
    //     _____cDDD
    //
    // First strategy fails:
    //
    //     012345678
    //     xxXXXxxXX  initial chunks
    //     xx.XXxx..  missing (2, 7, 8)
    //     _____cDDD  fail : can recover 1 but 2 are missing
    //     c_DDD____  ignored because 8 is not used (i.e. _)
    //     _cDDD_cDD  fail : can recover 2 but 3 are missing
    //
    // Second strategy succeeds:
    //
    //     012345678
    //     xxXXXxxXX  initial chunks
    //     xx.XXxx..  missing (2, 7, 8)
    //     _____cDDD  fail : can recover 1 but 2 are missing
    //     c_DDD____  success: recovers chunk 2
    //     _cDDD_cDD  success: recovers chunk 7, 8
    //
    {
      set<int> available_chunks;
      available_chunks.insert(0);
      available_chunks.insert(1);
      // missing (2)
      available_chunks.insert(3);
      available_chunks.insert(4);
      available_chunks.insert(5);
      available_chunks.insert(6);
      // missing (7)
      // missing (8)
      set<int> minimum;
      EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
      // every available chunk is needed for the two-step recovery
      EXPECT_EQ(available_chunks, minimum);
    }
  }
}
TEST(ErasureCodeLrc, encode_decode)
{
  // End-to-end encode/decode with a global layer and two local layers;
  // verifies both the minimum chunk sets and the decoded payloads.
  ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["mapping"] =
	    "__DD__DD";
  const char *description_string =
    "[ "
    "  [ \"_cDD_cDD\", \"\" ]," // global layer
    "  [ \"c_DD____\", \"\" ]," // first local layer
    "  [ \"____cDDD\", \"\" ]," // second local layer
    "]";
  profile["layers"] = description_string;
  EXPECT_EQ(0, lrc.init(profile, &cerr));
  EXPECT_EQ(4U, lrc.get_data_chunk_count());
  unsigned int chunk_size = g_conf().get_val<Option::size_t>("osd_pool_erasure_code_stripe_unit");
  unsigned int stripe_width = lrc.get_data_chunk_count() * chunk_size;
  EXPECT_EQ(chunk_size, lrc.get_chunk_size(stripe_width));
  // allocate one page-aligned buffer per chunk
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  for (unsigned int i = 0; i < lrc.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
    bufferptr ptr(buffer::create_page_aligned(chunk_size));
    bufferlist tmp;
    tmp.push_back(ptr);
    tmp.claim_append(encoded[i]);
    encoded[i].swap(tmp);
  }
  // fill the data chunks with 'A', 'B', 'C', 'D' in mapping order
  const vector<int> &mapping = lrc.get_chunk_mapping();
  char c = 'A';
  for (unsigned int i = 0; i < lrc.get_data_chunk_count(); i++) {
    int j = mapping[i];
    string s(chunk_size, c);
    encoded[j].clear();
    encoded[j].append(s);
    c++;
  }
  EXPECT_EQ(0, lrc.encode_chunks(want_to_encode, &encoded));
  {
    // chunk 7 can be rebuilt from the second local layer alone
    map<int, bufferlist> chunks;
    chunks[4] = encoded[4];
    chunks[5] = encoded[5];
    chunks[6] = encoded[6];
    set<int> want_to_read;
    want_to_read.insert(7);
    set<int> available_chunks;
    available_chunks.insert(4);
    available_chunks.insert(5);
    available_chunks.insert(6);
    set<int> minimum;
    EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    // only need three chunks from the second local layer
    EXPECT_EQ(3U, minimum.size());
    EXPECT_EQ(1U, minimum.count(4));
    EXPECT_EQ(1U, minimum.count(5));
    EXPECT_EQ(1U, minimum.count(6));
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
    string s(chunk_size, 'D');
    EXPECT_EQ(s, string(decoded[7].c_str(), chunk_size));
  }
  {
    // chunk 2 needs all five remaining chunks (global layer recovery)
    set<int> want_to_read;
    want_to_read.insert(2);
    map<int, bufferlist> chunks;
    chunks[1] = encoded[1];
    chunks[3] = encoded[3];
    chunks[5] = encoded[5];
    chunks[6] = encoded[6];
    chunks[7] = encoded[7];
    set<int> available_chunks;
    available_chunks.insert(1);
    available_chunks.insert(3);
    available_chunks.insert(5);
    available_chunks.insert(6);
    available_chunks.insert(7);
    set<int> minimum;
    EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    EXPECT_EQ(5U, minimum.size());
    EXPECT_EQ(available_chunks, minimum);
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, lrc._decode(want_to_read, encoded, &decoded));
    string s(chunk_size, 'A');
    EXPECT_EQ(s, string(decoded[2].c_str(), chunk_size));
  }
  {
    // chunks 3, 6 and 7 missing: local repair of 3 first, then the
    // global layer recovers 6 and 7
    set<int> want_to_read;
    want_to_read.insert(3);
    want_to_read.insert(6);
    want_to_read.insert(7);
    set<int> available_chunks;
    available_chunks.insert(0);
    available_chunks.insert(1);
    available_chunks.insert(2);
    // available_chunks.insert(3);
    available_chunks.insert(4);
    available_chunks.insert(5);
    // available_chunks.insert(6);
    // available_chunks.insert(7);
    encoded.erase(3);
    encoded.erase(6);
    set<int> minimum;
    EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    EXPECT_EQ(4U, minimum.size());
    // only need two chunks from the first local layer
    EXPECT_EQ(1U, minimum.count(0));
    EXPECT_EQ(1U, minimum.count(2));
    // the above chunks will rebuild chunk 3 and the global layer only needs
    // three more chunks to reach the required amount of chunks (4) to recover
    // the last two
    EXPECT_EQ(1U, minimum.count(1));
    // NOTE(review): count(2) is asserted twice (also above); since
    // minimum.size() == 4 the set is {0, 1, 2, 5}, so this repeated
    // check is redundant — confirm whether another chunk was intended.
    EXPECT_EQ(1U, minimum.count(2));
    EXPECT_EQ(1U, minimum.count(5));
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, lrc._decode(want_to_read, encoded, &decoded));
    {
      string s(chunk_size, 'B');
      EXPECT_EQ(s, string(decoded[3].c_str(), chunk_size));
    }
    {
      string s(chunk_size, 'C');
      EXPECT_EQ(s, string(decoded[6].c_str(), chunk_size));
    }
    {
      string s(chunk_size, 'D');
      EXPECT_EQ(s, string(decoded[7].c_str(), chunk_size));
    }
  }
}
TEST(ErasureCodeLrc, encode_decode_2)
{
  // Same end-to-end exercise as encode_decode but with the mirrored
  // mapping DD__DD__ (data first, parity second in each group).
  ErasureCodeLrc lrc(g_conf().get_val<std::string>("erasure_code_dir"));
  ErasureCodeProfile profile;
  profile["mapping"] =
	    "DD__DD__";
  const char *description_string =
    "[ "
    "  [ \"DDc_DDc_\", \"\" ],"
    "  [ \"DDDc____\", \"\" ],"
    "  [ \"____DDDc\", \"\" ],"
    "]";
  profile["layers"] = description_string;
  EXPECT_EQ(0, lrc.init(profile, &cerr));
  EXPECT_EQ(4U, lrc.get_data_chunk_count());
  unsigned int chunk_size = g_conf().get_val<Option::size_t>("osd_pool_erasure_code_stripe_unit");
  unsigned int stripe_width = lrc.get_data_chunk_count() * chunk_size;
  EXPECT_EQ(chunk_size, lrc.get_chunk_size(stripe_width));
  // allocate one page-aligned buffer per chunk
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  for (unsigned int i = 0; i < lrc.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
    bufferptr ptr(buffer::create_page_aligned(chunk_size));
    bufferlist tmp;
    tmp.push_back(ptr);
    tmp.claim_append(encoded[i]);
    encoded[i].swap(tmp);
  }
  // fill the data chunks with 'A', 'B', 'C', 'D' in mapping order
  const vector<int> &mapping = lrc.get_chunk_mapping();
  char c = 'A';
  for (unsigned int i = 0; i < lrc.get_data_chunk_count(); i++) {
    int j = mapping[i];
    string s(chunk_size, c);
    encoded[j].clear();
    encoded[j].append(s);
    c++;
  }
  EXPECT_EQ(0, lrc.encode_chunks(want_to_encode, &encoded));
  {
    // chunk 0 missing: the global layer needs 1, 4, 5 and 6
    set<int> want_to_read;
    want_to_read.insert(0);
    map<int, bufferlist> chunks;
    chunks[1] = encoded[1];
    chunks[3] = encoded[3];
    chunks[4] = encoded[4];
    chunks[5] = encoded[5];
    chunks[6] = encoded[6];
    chunks[7] = encoded[7];
    set<int> available_chunks;
    available_chunks.insert(1);
    available_chunks.insert(3);
    available_chunks.insert(4);
    available_chunks.insert(5);
    available_chunks.insert(6);
    available_chunks.insert(7);
    set<int> minimum;
    EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    EXPECT_EQ(4U, minimum.size());
    EXPECT_EQ(1U, minimum.count(1));
    EXPECT_EQ(1U, minimum.count(4));
    EXPECT_EQ(1U, minimum.count(5));
    EXPECT_EQ(1U, minimum.count(6));
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
    string s(chunk_size, 'A');
    EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size));
  }
  {
    // want every chunk back with 0, 2 and 4 missing
    set<int> want_to_read;
    for (unsigned int i = 0; i < lrc.get_chunk_count(); i++)
      want_to_read.insert(i);
    map<int, bufferlist> chunks;
    chunks[1] = encoded[1];
    chunks[3] = encoded[3];
    chunks[5] = encoded[5];
    chunks[6] = encoded[6];
    chunks[7] = encoded[7];
    set<int> available_chunks;
    available_chunks.insert(1);
    available_chunks.insert(3);
    available_chunks.insert(5);
    available_chunks.insert(6);
    available_chunks.insert(7);
    set<int> minimum;
    EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    EXPECT_EQ(5U, minimum.size());
    EXPECT_EQ(1U, minimum.count(1));
    EXPECT_EQ(1U, minimum.count(3));
    EXPECT_EQ(1U, minimum.count(5));
    EXPECT_EQ(1U, minimum.count(6));
    EXPECT_EQ(1U, minimum.count(7));
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
    {
      string s(chunk_size, 'A');
      EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size));
    }
    {
      string s(chunk_size, 'B');
      EXPECT_EQ(s, string(decoded[1].c_str(), chunk_size));
    }
    {
      string s(chunk_size, 'C');
      EXPECT_EQ(s, string(decoded[4].c_str(), chunk_size));
    }
    {
      string s(chunk_size, 'D');
      EXPECT_EQ(s, string(decoded[5].c_str(), chunk_size));
    }
  }
  {
    // NOTE(review): this scope is an exact duplicate of the previous
    // one (same want_to_read, chunks and expectations) — presumably a
    // copy-paste left in place; confirm whether a different erasure
    // pattern was intended.
    set<int> want_to_read;
    for (unsigned int i = 0; i < lrc.get_chunk_count(); i++)
      want_to_read.insert(i);
    map<int, bufferlist> chunks;
    chunks[1] = encoded[1];
    chunks[3] = encoded[3];
    chunks[5] = encoded[5];
    chunks[6] = encoded[6];
    chunks[7] = encoded[7];
    set<int> available_chunks;
    available_chunks.insert(1);
    available_chunks.insert(3);
    available_chunks.insert(5);
    available_chunks.insert(6);
    available_chunks.insert(7);
    set<int> minimum;
    EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    EXPECT_EQ(5U, minimum.size());
    EXPECT_EQ(1U, minimum.count(1));
    EXPECT_EQ(1U, minimum.count(3));
    EXPECT_EQ(1U, minimum.count(5));
    EXPECT_EQ(1U, minimum.count(6));
    EXPECT_EQ(1U, minimum.count(7));
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
    {
      string s(chunk_size, 'A');
      EXPECT_EQ(s, string(decoded[0].c_str(), chunk_size));
    }
    {
      string s(chunk_size, 'B');
      EXPECT_EQ(s, string(decoded[1].c_str(), chunk_size));
    }
    {
      string s(chunk_size, 'C');
      EXPECT_EQ(s, string(decoded[4].c_str(), chunk_size));
    }
    {
      string s(chunk_size, 'D');
      EXPECT_EQ(s, string(decoded[5].c_str(), chunk_size));
    }
  }
  {
    // chunk 6 missing: all five remaining chunks are required
    set<int> want_to_read;
    want_to_read.insert(6);
    map<int, bufferlist> chunks;
    chunks[0] = encoded[0];
    chunks[1] = encoded[1];
    chunks[3] = encoded[3];
    chunks[5] = encoded[5];
    chunks[7] = encoded[7];
    set<int> available_chunks;
    available_chunks.insert(0);
    available_chunks.insert(1);
    available_chunks.insert(3);
    available_chunks.insert(5);
    available_chunks.insert(7);
    set<int> minimum;
    EXPECT_EQ(0, lrc._minimum_to_decode(want_to_read, available_chunks, &minimum));
    EXPECT_EQ(available_chunks, minimum);
    map<int, bufferlist> decoded;
    EXPECT_EQ(0, lrc._decode(want_to_read, chunks, &decoded));
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ;
* make -j4 unittest_erasure_code_lrc && valgrind --tool=memcheck \
* ./unittest_erasure_code_lrc \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 29,144 | 30.474082 | 98 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodePlugin.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include "common/Thread.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "common/config_proxy.h"
#include "gtest/gtest.h"
using namespace std;
// Empty fixture: groups the ErasureCodePluginRegistry tests under one
// TEST_F name; no shared setup or teardown is needed.
class ErasureCodePluginRegistryTest : public ::testing::Test {};
TEST_F(ErasureCodePluginRegistryTest, factory_mutex) {
  // Verifies that ErasureCodePluginRegistry::factory() holds the
  // registry mutex while a plugin loads: a deliberately slow "hangs"
  // plugin is loaded in a background thread and the lock must be
  // unavailable for the duration.
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  {
    // sanity: the lock is free before anything is loading
    unique_lock l{instance.lock, std::try_to_lock};
    EXPECT_TRUE(l.owns_lock());
  }
  //
  // Test that the loading of a plugin is protected by a mutex.
  std::thread sleep_for_10_secs([] {
    ErasureCodeProfile profile;
    ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
    ErasureCodeInterfaceRef erasure_code;
    // the "hangs" test plugin sleeps inside factory(), keeping the
    // registry lock held while this thread runs
    instance.factory("hangs",
		     g_conf().get_val<std::string>("erasure_code_dir"),
		     profile, &erasure_code, &cerr);
  });
  // Poll instance.loading with exponential backoff until it reaches
  // the requested state or max_secs worth of delay has been consumed.
  auto wait_until = [&instance](bool loading, unsigned max_secs) {
    auto delay = 0ms;
    const auto DELAY_MAX = std::chrono::seconds(max_secs);
    for (; delay < DELAY_MAX; delay = (delay + 1ms) * 2) {
      // NOTE(review): delay is in milliseconds but the message prints
      // "us" — the unit suffix in this log line is misleading.
      cout << "Trying (1) with delay " << delay << "us\n";
      if (delay.count() > 0) {
	std::this_thread::sleep_for(delay);
      }
      if (instance.loading == loading) {
	return true;
      }
    }
    return false;
  };
  // should be loading in 5 seconds
  ASSERT_TRUE(wait_until(true, 5));
  {
    // while the plugin loads, the registry lock must be held
    unique_lock l{instance.lock, std::try_to_lock};
    EXPECT_TRUE(!l.owns_lock());
  }
  // should finish loading in 15 seconds
  ASSERT_TRUE(wait_until(false, 15));
  {
    // once loading completes the lock is free again
    unique_lock l{instance.lock, std::try_to_lock};
    EXPECT_TRUE(l.owns_lock());
  }
  sleep_for_10_secs.join();
}
TEST_F(ErasureCodePluginRegistryTest, all)
{
  // Exercises every factory() failure mode (each test plugin .so is
  // built to fail in a specific way) plus the load/remove API.
  ErasureCodeProfile profile;
  string directory = g_conf().get_val<std::string>("erasure_code_dir");
  ErasureCodeInterfaceRef erasure_code;
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  EXPECT_FALSE(erasure_code);
  // plugin .so does not exist
  EXPECT_EQ(-EIO, instance.factory("invalid",
				   g_conf().get_val<std::string>("erasure_code_dir"),
				   profile, &erasure_code, &cerr));
  EXPECT_FALSE(erasure_code);
  // plugin lacks the version symbol
  EXPECT_EQ(-EXDEV, instance.factory("missing_version",
				     g_conf().get_val<std::string>("erasure_code_dir"),
				     profile,
				     &erasure_code, &cerr));
  EXPECT_FALSE(erasure_code);
  // plugin lacks the registration entry point
  EXPECT_EQ(-ENOENT, instance.factory("missing_entry_point",
				      g_conf().get_val<std::string>("erasure_code_dir"),
				      profile,
				      &erasure_code, &cerr));
  EXPECT_FALSE(erasure_code);
  // plugin entry point returns an error
  EXPECT_EQ(-ESRCH, instance.factory("fail_to_initialize",
				     g_conf().get_val<std::string>("erasure_code_dir"),
				     profile,
				     &erasure_code, &cerr));
  EXPECT_FALSE(erasure_code);
  // plugin initializes but does not register itself
  EXPECT_EQ(-EBADF, instance.factory("fail_to_register",
				     g_conf().get_val<std::string>("erasure_code_dir"),
				     profile,
				     &erasure_code, &cerr));
  EXPECT_FALSE(erasure_code);
  // well-behaved example plugin succeeds
  EXPECT_EQ(0, instance.factory("example",
				g_conf().get_val<std::string>("erasure_code_dir"),
				profile, &erasure_code, &cerr));
  EXPECT_TRUE(erasure_code.get());
  // load/remove require the registry lock to be held by the caller
  ErasureCodePlugin *plugin = 0;
  {
    std::lock_guard l{instance.lock};
    EXPECT_EQ(-EEXIST, instance.load("example", directory, &plugin, &cerr));
    EXPECT_EQ(-ENOENT, instance.remove("does not exist"));
    EXPECT_EQ(0, instance.remove("example"));
    EXPECT_EQ(0, instance.load("example", directory, &plugin, &cerr));
  }
}
/*
* Local Variables:
* compile-command: "cd ../../../build ; make -j4 &&
* make unittest_erasure_code_plugin &&
* valgrind --tool=memcheck \
* ./bin/unittest_erasure_code_plugin \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 4,366 | 32.083333 | 80 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodePluginClay.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2018 Indian Institute of Science <office.ece@iisc.ac.in>
*
* Author: Myna Vajha <mynaramana@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "erasure-code/ErasureCodePlugin.h"
#include "log/Log.h"
#include "global/global_context.h"
#include "common/config_proxy.h"
#include "gtest/gtest.h"
using namespace std;
TEST(ErasureCodePlugin, factory)
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  // Look the plugin directory up once instead of querying the config
  // before every factory() call.
  const std::string directory = g_conf().get_val<std::string>("erasure_code_dir");
  {
    // clay must load with an empty profile (all defaults).
    ErasureCodeProfile profile;
    ErasureCodeInterfaceRef erasure_code;
    EXPECT_FALSE(erasure_code);
    EXPECT_EQ(0, instance.factory("clay", directory, profile,
				  &erasure_code, &cerr));
    EXPECT_TRUE(erasure_code);
  }
  //check clay plugin with scalar_mds=jerasure
  {
    const char *techniques[] = {
      "reed_sol_van",
      "reed_sol_r6_op",
      "cauchy_orig",
      "cauchy_good",
      "liber8tion",
      0
    };
    for (const char **technique = techniques; *technique; technique++) {
      ErasureCodeInterfaceRef erasure_code;
      ErasureCodeProfile profile;
      profile["scalar_mds"] = "jerasure";
      profile["technique"] = *technique;
      EXPECT_FALSE(erasure_code);
      EXPECT_EQ(0, instance.factory("clay", directory, profile,
				    &erasure_code, &cerr));
      EXPECT_TRUE(erasure_code.get());
    }
  }
#ifdef WITH_EC_ISA_PLUGIN
  //check clay plugin with scalar_mds=isa
  {
    const char *techniques[] = {
      "reed_sol_van",
      "cauchy",
      0
    };
    for (const char **technique = techniques; *technique; technique++) {
      ErasureCodeInterfaceRef erasure_code;
      ErasureCodeProfile profile;
      profile["scalar_mds"] = "isa";
      profile["technique"] = *technique;
      EXPECT_FALSE(erasure_code);
      EXPECT_EQ(0, instance.factory("clay", directory, profile,
				    &erasure_code, &cerr));
      EXPECT_TRUE(erasure_code.get());
    }
  }
#endif
  //check clay plugin with scalar_mds=shec
  {
    const char *techniques[] = {
      "single",
      "multiple",
      0
    };
    for (const char **technique = techniques; *technique; technique++) {
      ErasureCodeInterfaceRef erasure_code;
      ErasureCodeProfile profile;
      profile["scalar_mds"] = "shec";
      profile["technique"] = *technique;
      EXPECT_FALSE(erasure_code);
      EXPECT_EQ(0, instance.factory("clay", directory, profile,
				    &erasure_code, &cerr));
      EXPECT_TRUE(erasure_code.get());
    }
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 &&
* make unittest_erasure_code_plugin_clay &&
* valgrind --tool=memcheck ./unittest_erasure_code_plugin_clay \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 3,539 | 29.782609 | 86 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodePluginIsa.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Andreas-Joachim Peters <Andreas.Joachim.Peters@cern.ch>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "arch/probe.h"
#include "arch/intel.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "common/config_proxy.h"
#include "gtest/gtest.h"
using namespace std;
TEST(ErasureCodePlugin, factory)
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  ErasureCodeProfile profile;
  // Look the plugin directory up once instead of querying the config
  // before every factory() call.
  const std::string directory = g_conf().get_val<std::string>("erasure_code_dir");
  {
    ErasureCodeInterfaceRef erasure_code;
    EXPECT_FALSE(erasure_code);
    // A plugin that does not exist must fail with -EIO and leave the
    // interface reference unset.
    EXPECT_EQ(-EIO, instance.factory("no-isa", directory, profile,
				     &erasure_code, &cerr));
    EXPECT_FALSE(erasure_code);
  }
  const char *techniques[] = {
    "reed_sol_van",
    0
  };
  for (const char **technique = techniques; *technique; technique++) {
    ErasureCodeInterfaceRef erasure_code;
    profile["technique"] = *technique;
    EXPECT_FALSE(erasure_code);
    EXPECT_EQ(0, instance.factory("isa", directory, profile,
				  &erasure_code, &cerr));
    EXPECT_TRUE(erasure_code.get());
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 &&
* make unittest_erasure_code_plugin_isa &&
* valgrind --tool=memcheck ./unittest_erasure_code_plugin_isa \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 1,815 | 27.825397 | 78 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodePluginJerasure.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "erasure-code/ErasureCodePlugin.h"
#include "log/Log.h"
#include "global/global_context.h"
#include "common/config_proxy.h"
#include "gtest/gtest.h"
using namespace std;
TEST(ErasureCodePlugin, factory)
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  // Look the plugin directory up once instead of querying the config
  // before every factory() call.
  const std::string directory = g_conf().get_val<std::string>("erasure_code_dir");
  ErasureCodeProfile profile;
  {
    ErasureCodeInterfaceRef erasure_code;
    EXPECT_FALSE(erasure_code);
    // No technique in the profile: jerasure must refuse to load.
    EXPECT_EQ(-ENOENT, instance.factory("jerasure", directory, profile,
					&erasure_code, &cerr));
    EXPECT_FALSE(erasure_code);
  }
  const char *techniques[] = {
    "reed_sol_van",
    "reed_sol_r6_op",
    "cauchy_orig",
    "cauchy_good",
    "liberation",
    "blaum_roth",
    "liber8tion",
    0
  };
  for (const char **technique = techniques; *technique; technique++) {
    ErasureCodeInterfaceRef erasure_code;
    ErasureCodeProfile profile;
    profile["technique"] = *technique;
    EXPECT_FALSE(erasure_code);
    EXPECT_EQ(0, instance.factory("jerasure", directory, profile,
				  &erasure_code, &cerr));
    EXPECT_TRUE(erasure_code.get());
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 &&
* make unittest_erasure_code_plugin_jerasure &&
* valgrind --tool=memcheck ./unittest_erasure_code_plugin_jerasure \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 2,122 | 28.486111 | 78 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodePluginLrc.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "arch/probe.h"
#include "arch/intel.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "common/config_proxy.h"
#include "gtest/gtest.h"
using namespace std;
TEST(ErasureCodePlugin, factory)
{
  // A minimal valid lrc profile: two data chunks plus one coding chunk.
  ErasureCodeProfile profile;
  profile["mapping"] = "DD_";
  profile["layers"] = "[ [ \"DDc\", \"\" ] ]";
  ErasureCodeInterfaceRef erasure_code;
  EXPECT_FALSE(erasure_code);
  // Loading the lrc plugin with this profile must succeed and populate
  // the interface reference.
  ErasureCodePluginRegistry &registry = ErasureCodePluginRegistry::instance();
  EXPECT_EQ(0, registry.factory("lrc",
				g_conf().get_val<std::string>("erasure_code_dir"),
				profile, &erasure_code, &cerr));
  EXPECT_TRUE(erasure_code.get());
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 &&
* make unittest_erasure_code_plugin_lrc &&
* valgrind --tool=memcheck ./unittest_erasure_code_plugin_lrc \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 1,539 | 29.196078 | 78 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodePluginShec.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2015 FUJITSU LIMITED
*
* Author: Shotaro Kawaguchi <kawaguchi.s@jp.fujitsu.com>
* Author: Takanori Nakao <nakao.takanori@jp.fujitsu.com>
* Author: Takeshi Miyamae <miyamae.takeshi@jp.fujitsu.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
#include "common/config_proxy.h"
using namespace std;
TEST(ErasureCodePlugin, factory)
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  // Look the plugin directory up once instead of querying the config
  // before every factory() call.
  const std::string directory = g_conf().get_val<std::string>("erasure_code_dir");
  map<std::string,std::string> profile;
  {
    ErasureCodeInterfaceRef erasure_code;
    EXPECT_FALSE(erasure_code);
    // shec loads with an empty profile: every tunable has a default.
    EXPECT_EQ(0, instance.factory("shec", directory, profile,
				  &erasure_code, &cerr));
    EXPECT_TRUE(erasure_code.get());
  }
  const char *techniques[] = {
    "single",
    "multiple",
    0
  };
  for (const char **technique = techniques; *technique; technique++) {
    ErasureCodeInterfaceRef erasure_code;
    profile["technique"] = *technique;
    EXPECT_FALSE(erasure_code);
    EXPECT_EQ(0, instance.factory("shec", directory, profile,
				  &erasure_code, &cerr));
    EXPECT_TRUE(erasure_code.get());
  }
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 &&
* make unittest_erasure_code_plugin_shec &&
* valgrind --tool=memcheck ./unittest_erasure_code_plugin_shec \
* --gtest_filter=*.* --log-to-stderr=true --debug-osd=20"
* End:
*/
| 1,977 | 28.969697 | 78 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeShec.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014,2015 FUJITSU LIMITED
*
* Author: Shotaro Kawaguchi <kawaguchi.s@jp.fujitsu.com>
* Author: Takanori Nakao <nakao.takanori@jp.fujitsu.com>
* Author: Takeshi Miyamae <miyamae.takeshi@jp.fujitsu.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
//SUMMARY: TestErasureCodeShec
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "include/stringify.h"
#include "erasure-code/shec/ErasureCodeShec.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
using namespace std;
void* thread1(void* pParam);
void* thread2(void* pParam);
void* thread3(void* pParam);
void* thread4(void* pParam);
void* thread5(void* pParam);
static int g_flag = 0;
TEST(ErasureCodeShec, init_1)
{
  // All parameters are normal values: init() must succeed and every
  // profile value must be reflected in the object.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of naked new/delete: no leak if an
  // assertion aborts the test body (CppCoreGuidelines R.11).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  int r = shec.init(profile, &cerr);
  // check profile
  EXPECT_EQ(4, shec.k);
  EXPECT_EQ(3, shec.m);
  EXPECT_EQ(2, shec.c);
  EXPECT_EQ(8, shec.w);  // w was not specified: defaults to 8
  EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec.technique);
  EXPECT_STREQ("default", shec.rule_root.c_str());
  EXPECT_STREQ("osd", shec.rule_failure_domain.c_str());
  EXPECT_TRUE(shec.matrix != NULL);
  EXPECT_EQ(0, r);
}
TEST(ErasureCodeShec, init_2)
{
  // All parameters are normal values, including explicit crush-root,
  // crush-failure-domain and w: all must be reflected in the object.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of naked new/delete: no leak if an
  // assertion aborts the test body (CppCoreGuidelines R.11).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-root"] = "test";
  profile["crush-failure-domain"] = "host";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  profile["w"] = "8";
  int r = shec.init(profile, &cerr);
  // check profile
  EXPECT_EQ(4, shec.k);
  EXPECT_EQ(3, shec.m);
  EXPECT_EQ(2, shec.c);
  EXPECT_EQ(8, shec.w);
  EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec.technique);
  EXPECT_STREQ("test", shec.rule_root.c_str());
  EXPECT_STREQ("host", shec.rule_failure_domain.c_str());
  EXPECT_TRUE(shec.matrix != NULL);
  EXPECT_EQ(0, r);
}
TEST(ErasureCodeShec, init_3)
{
//all parameters are normal values
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
(*profile)["w"] = "16";
int r = shec->init(*profile, &cerr);
//check profile
EXPECT_EQ(4, shec->k);
EXPECT_EQ(3, shec->m);
EXPECT_EQ(2, shec->c);
EXPECT_EQ(16, shec->w);
EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
EXPECT_STREQ("default", shec->rule_root.c_str());
EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_4)
{
//all parameters are normal values
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
(*profile)["w"] = "32";
int r = shec->init(*profile, &cerr);
//check profile
EXPECT_EQ(4, shec->k);
EXPECT_EQ(3, shec->m);
EXPECT_EQ(2, shec->c);
EXPECT_EQ(32, shec->w);
EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
EXPECT_STREQ("default", shec->rule_root.c_str());
EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_5)
{
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of naked new/delete: no leak if an
  // assertion aborts the test body (CppCoreGuidelines R.11).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  // "plugin" is not specified: init() succeeds anyway.
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  int r = shec.init(profile, &cerr);
  EXPECT_TRUE(shec.matrix != NULL);
  EXPECT_EQ(0, r);
}
TEST(ErasureCodeShec, init_6)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "jerasure"; //unexpected value
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_7)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "abc"; //unexpected value
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_8)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_9)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-root"] = "abc"; //unexpected value
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_10)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "abc"; //unexpected value
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_11)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "abc"; //unexpected value
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_12)
{
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of naked new/delete: no leak if an
  // assertion aborts the test body (CppCoreGuidelines R.11).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "-1";  // unexpected value: negative k must be rejected
  profile["m"] = "3";
  profile["c"] = "2";
  int r = shec.init(profile, &cerr);
  EXPECT_EQ(-EINVAL, r);
}
TEST(ErasureCodeShec, init_13)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "abc";
(*profile)["k"] = "0.1"; //unexpected value
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_14)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "a"; //unexpected value
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_15)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
//k is not specified
(*profile)["m"] = "3";
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_16)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "-1"; //unexpected value
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_17)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "0.1"; //unexpected value
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_18)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "a"; //unexpected value
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_19)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
//m is not specified
(*profile)["c"] = "2";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_20)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "-1"; //unexpected value
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_21)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "0.1"; //unexpected value
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_22)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "a"; //unexpected value
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_23)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
//c is not specified
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_24)
{
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of naked new/delete: no leak if an
  // assertion aborts the test body (CppCoreGuidelines R.11).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  profile["w"] = "1";  // unexpected value
  int r = shec.init(profile, &cerr);
  EXPECT_TRUE(shec.matrix != NULL);
  // init() still succeeds: an invalid w is ignored rather than rejected.
  EXPECT_EQ(0, r);
  EXPECT_EQ(4, shec.k);
  EXPECT_EQ(3, shec.m);
  EXPECT_EQ(2, shec.c);
  EXPECT_EQ(8, shec.w);
  // w is the default value (8)
}
TEST(ErasureCodeShec, init_25)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
(*profile)["w"] = "-1"; //unexpected value
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
EXPECT_EQ(4, shec->k);
EXPECT_EQ(3, shec->m);
EXPECT_EQ(2, shec->c);
EXPECT_EQ(8, shec->w);
//w is default value
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_26)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
(*profile)["w"] = "0.1"; //unexpected value
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
EXPECT_EQ(4, shec->k);
EXPECT_EQ(3, shec->m);
EXPECT_EQ(2, shec->c);
EXPECT_EQ(8, shec->w);
//w is default value
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_27)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
(*profile)["w"] = "a"; //unexpected value
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
EXPECT_EQ(4, shec->k);
EXPECT_EQ(3, shec->m);
EXPECT_EQ(2, shec->c);
EXPECT_EQ(8, shec->w);
//w is default value
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_28)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "10"; //c > m
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_29)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
//k is not specified
//m is not specified
//c is not specified
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
//k,m,c are default values
EXPECT_EQ(4, shec->k);
EXPECT_EQ(3, shec->m);
EXPECT_EQ(2, shec->c);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_30)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "12";
(*profile)["m"] = "8";
(*profile)["c"] = "8";
int r = shec->init(*profile, &cerr);
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
EXPECT_EQ(12, shec->k);
EXPECT_EQ(8, shec->m);
EXPECT_EQ(8, shec->c);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_31)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "13";
(*profile)["m"] = "7";
(*profile)["c"] = "7";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_32)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "7";
(*profile)["m"] = "13";
(*profile)["c"] = "13";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_33)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "12";
(*profile)["m"] = "9";
(*profile)["c"] = "8";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init_34)
{
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "8";
(*profile)["m"] = "12";
(*profile)["c"] = "12";
int r = shec->init(*profile, &cerr);
EXPECT_EQ(-EINVAL, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init2_4)
{
//all parameters are normal values
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = "4";
(*profile)["m"] = "3";
(*profile)["c"] = "2";
shec->init(*profile, &cerr);
int r = shec->init(*profile, &cerr); //init executed twice
//check profile
EXPECT_EQ(4, shec->k);
EXPECT_EQ(3, shec->m);
EXPECT_EQ(2, shec->c);
EXPECT_EQ(8, shec->w);
EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
EXPECT_STREQ("default", shec->rule_root.c_str());
EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, r);
delete shec;
delete profile;
}
TEST(ErasureCodeShec, init2_5)
{
  // Re-initializing the same object with a second profile must fully
  // replace the parameters set by the first init().
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of naked new/delete: no leak if an
  // assertion aborts the test body (CppCoreGuidelines R.11).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "host";
  profile["k"] = "10";
  profile["m"] = "6";
  profile["c"] = "5";
  profile["w"] = "16";
  int r = shec.init(profile, &cerr);  // r checked below: first init succeeds
  // reexecute init with a different profile
  ErasureCodeProfile profile2;
  profile2["plugin"] = "shec";
  profile2["technique"] = "";
  profile2["crush-failure-domain"] = "osd";
  profile2["k"] = "4";
  profile2["m"] = "3";
  profile2["c"] = "2";
  shec.init(profile2, &cerr);
  // The second profile's values win; w reverts to the default (8).
  EXPECT_EQ(4, shec.k);
  EXPECT_EQ(3, shec.m);
  EXPECT_EQ(2, shec.c);
  EXPECT_EQ(8, shec.w);
  EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec.technique);
  EXPECT_STREQ("default", shec.rule_root.c_str());
  EXPECT_STREQ("osd", shec.rule_failure_domain.c_str());
  EXPECT_TRUE(shec.matrix != NULL);
  EXPECT_EQ(0, r);
}
TEST(ErasureCodeShec, minimum_to_decode_8)
{
  // _minimum_to_decode() must fail with -EINVAL when want_to_decode contains
  // chunk ids (0..7) beyond the k+m=7 chunks of this code.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode
  set<int> want_to_decode;
  set<int> available_chunks;
  set<int> minimum_chunks;
  for (int i = 0; i < 8; ++i) {
    want_to_decode.insert(i);
  }
  for (int i = 0; i < 5; ++i) {
    available_chunks.insert(i);
  }
  const int r = shec._minimum_to_decode(want_to_decode, available_chunks,
					&minimum_chunks);
  EXPECT_EQ(-EINVAL, r);
}
TEST(ErasureCodeShec, minimum_to_decode_9)
{
  // _minimum_to_decode() must fail with -EINVAL when available_chunks
  // contains chunk ids (0..7) beyond the k+m=7 chunks of this code.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode
  set<int> want_to_decode;
  set<int> available_chunks;
  set<int> minimum_chunks;
  for (int i = 0; i < 4; ++i) {
    want_to_decode.insert(i);
  }
  for (int i = 0; i < 8; ++i) {
    available_chunks.insert(i);
  }
  const int r = shec._minimum_to_decode(want_to_decode, available_chunks,
					&minimum_chunks);
  EXPECT_EQ(-EINVAL, r);
}
TEST(ErasureCodeShec, minimum_to_decode_10)
{
  // With only chunks 4..6 available, wanting chunks 0..6 is unrecoverable:
  // _minimum_to_decode() must return -EIO.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode
  set<int> want_to_decode;
  set<int> available_chunks;
  set<int> minimum_chunks;
  for (int i = 0; i < 7; ++i) {
    want_to_decode.insert(i);
  }
  for (int i = 4; i < 7; ++i) {
    available_chunks.insert(i);
  }
  const int r = shec._minimum_to_decode(want_to_decode, available_chunks,
					&minimum_chunks);
  EXPECT_EQ(-EIO, r);
}
TEST(ErasureCodeShec, minimum_to_decode_11)
{
  // With only chunks 4..6 available, wanting chunks 0..4 is unrecoverable:
  // _minimum_to_decode() must return -EIO.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode
  set<int> want_to_decode;
  set<int> available_chunks;
  set<int> minimum_chunks;
  for (int i = 0; i < 5; ++i) {
    want_to_decode.insert(i);
  }
  for (int i = 4; i < 7; ++i) {
    available_chunks.insert(i);
  }
  const int r = shec._minimum_to_decode(want_to_decode, available_chunks,
					&minimum_chunks);
  EXPECT_EQ(-EIO, r);
}
TEST(ErasureCodeShec, minimum_to_decode_12)
{
  // Passing a null minimum_chunks output pointer must be rejected (-EINVAL).
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode
  set<int> want_to_decode;
  set<int> available_chunks;
  //minimum_chunks is null
  for (int i = 0; i < 7; ++i) {
    want_to_decode.insert(i);
    available_chunks.insert(i);
  }
  const int r = shec._minimum_to_decode(want_to_decode, available_chunks,
					nullptr);
  EXPECT_EQ(-EINVAL, r);
}
TEST(ErasureCodeShec, minimum_to_decode_13)
{
  // A pre-populated minimum_chunks output set must be ignored/overwritten:
  // the result has to equal the set computed from an empty output set.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode
  set<int> want_to_decode;
  set<int> available_chunks;
  set<int> minimum_chunks, minimum;
  for (int i = 0; i < 7; ++i) {
    want_to_decode.insert(i);
    available_chunks.insert(i);
  }
  shec._minimum_to_decode(want_to_decode, available_chunks, &minimum_chunks);
  minimum = minimum_chunks; //normal value
  for (int i = 100; i < 120; ++i) {
    minimum_chunks.insert(i); //insert extra data
  }
  const int r = shec._minimum_to_decode(want_to_decode, available_chunks,
					&minimum_chunks);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(minimum, minimum_chunks);
}
TEST(ErasureCodeShec, minimum_to_decode2_1)
{
  // Recoverable case: wanting chunk 0 with chunks 0..2 available must
  // succeed and yield a non-empty minimum set.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode
  set<int> want_to_decode;
  set<int> available_chunks;
  set<int> minimum_chunks;
  want_to_decode.insert(0);
  available_chunks.insert(0);
  available_chunks.insert(1);
  available_chunks.insert(2);
  const int r = shec._minimum_to_decode(want_to_decode, available_chunks,
					&minimum_chunks);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_TRUE(minimum_chunks.size());
}
TEST(ErasureCodeShec, minimum_to_decode2_3)
{
  // Concurrency check: _minimum_to_decode() must produce the correct result
  // while a background thread (thread1) is exercising the same object.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode
  set<int> want_to_decode;
  set<int> available_chunks;
  set<int> minimum_chunks;
  want_to_decode.insert(0);
  want_to_decode.insert(2);
  available_chunks.insert(0);
  available_chunks.insert(1);
  available_chunks.insert(2);
  available_chunks.insert(3);
  pthread_t tid;
  g_flag = 0;
  // Upcast explicitly before type erasure so the thread's void* really is an
  // ErasureCodeShec*.
  pthread_create(&tid, nullptr, thread1, static_cast<ErasureCodeShec*>(&shec));
  while (g_flag == 0) {
    usleep(1);
  }
  sleep(1);
  printf("*** test start ***\n");
  const int r = shec._minimum_to_decode(want_to_decode, available_chunks,
					&minimum_chunks);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(want_to_decode, minimum_chunks);
  printf("*** test end ***\n");
  g_flag = 0;
  pthread_join(tid, nullptr);
}
TEST(ErasureCodeShec, minimum_to_decode_with_cost_1)
{
  // minimum_to_decode_with_cost() with all 7 chunks available (cost == id)
  // must succeed and produce a non-empty minimum set.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode_with_cost
  set<int> want_to_decode;
  map<int, int> available_chunks;
  set<int> minimum_chunks;
  for (int i = 0; i < 7; ++i) {
    want_to_decode.insert(i);
    available_chunks.insert(make_pair(i, i));
  }
  const int r = shec.minimum_to_decode_with_cost(want_to_decode,
						 available_chunks,
						 &minimum_chunks);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_TRUE(minimum_chunks.size());
}
TEST(ErasureCodeShec, minimum_to_decode_with_cost_2_3)
{
  // Concurrency check: minimum_to_decode_with_cost() must produce the correct
  // result while a background thread (thread2) exercises the same object.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //minimum_to_decode_with_cost
  set<int> want_to_decode;
  map<int, int> available_chunks;
  set<int> minimum_chunks;
  want_to_decode.insert(0);
  want_to_decode.insert(2);
  available_chunks[0] = 0;
  available_chunks[1] = 1;
  available_chunks[2] = 2;
  available_chunks[3] = 3;
  pthread_t tid;
  g_flag = 0;
  // Upcast explicitly before type erasure so the thread's void* really is an
  // ErasureCodeShec*.
  pthread_create(&tid, nullptr, thread2, static_cast<ErasureCodeShec*>(&shec));
  while (g_flag == 0) {
    usleep(1);
  }
  sleep(1);
  printf("*** test start ***\n");
  const int r = shec.minimum_to_decode_with_cost(want_to_decode,
						 available_chunks,
						 &minimum_chunks);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(want_to_decode, minimum_chunks);
  printf("*** test end ***\n");
  g_flag = 0;
  pthread_join(tid, nullptr);
}
TEST(ErasureCodeShec, encode_1)
{
  // Round trip: encode a 128-byte buffer into all 7 chunks, decode the first
  // two data chunks, then reassemble with decode_concat and compare to input.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "0123"//128
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  //decode only chunks 0 and 1
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded;
  decoded.clear();
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 2),
		   encoded,
		   &decoded);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(2u, decoded.size());
  EXPECT_EQ(32u, decoded[0].length());
  bufferlist out1, out2, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); ++i) {
    out1.append(encoded[i]);
  }
  //out2 is "decoded"
  r = shec.decode_concat(encoded, &out2);
  EXPECT_EQ(0, r); // was assigned but never checked
  usable.substr_of(out2, 0, in.length());
  EXPECT_FALSE(out1 == in);
  EXPECT_TRUE(usable == in);
}
TEST(ErasureCodeShec, encode_2)
{
  // Round trip with a 124-byte input (not a multiple of the chunk size):
  // encode, decode two chunks, and verify decode_concat restores the input.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  //decode only chunks 0 and 1
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded;
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 2), encoded,
		   &decoded);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(2u, decoded.size());
  EXPECT_EQ(32u, decoded[0].length());
  bufferlist out1, out2, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); ++i)
    out1.append(encoded[i]);
  //out2 is "decoded"
  shec.decode_concat(encoded, &out2);
  usable.substr_of(out2, 0, in.length());
  EXPECT_FALSE(out1 == in);
  EXPECT_TRUE(usable == in);
}
TEST(ErasureCodeShec, encode_3)
{
  // want_to_encode contains ids (10, 11) beyond the chunk count; encode()
  // must still succeed and produce exactly chunk_count chunks.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  bufferlist in;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
  );
  set<int> want_to_encode;
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  want_to_encode.insert(10); // out-of-range chunk ids should be ignored
  want_to_encode.insert(11);
  map<int, bufferlist> encoded;
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  //decode only chunks 0 and 1
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded;
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 2), encoded,
		   &decoded);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(2u, decoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), decoded[0].length());
  bufferlist out1, out2, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); ++i) {
    out1.append(encoded[i]);
  }
  //out2 is "decoded"
  shec.decode_concat(encoded, &out2);
  usable.substr_of(out2, 0, in.length());
  EXPECT_FALSE(out1 == in);
  EXPECT_TRUE(usable == in);
}
TEST(ErasureCodeShec, encode_4)
{
  // want_to_encode omits the last chunk and adds an out-of-range id (100);
  // encode() must succeed and produce chunk_count-1 chunks.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
  );
  for (unsigned int i = 0; i < shec.get_chunk_count() - 1; ++i) {
    want_to_encode.insert(i);
  }
  want_to_encode.insert(100); // out-of-range chunk id should be ignored
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count()-1, encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  //decode only chunks 0 and 1
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded;
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 2), encoded,
		   &decoded);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(2u, decoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), decoded[0].length());
  bufferlist out1, out2, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); ++i) {
    out1.append(encoded[i]);
  }
  //out2 is "decoded"
  shec.decode_concat(encoded, &out2);
  usable.substr_of(out2, 0, in.length());
  EXPECT_FALSE(out1 == in);
  EXPECT_TRUE(usable == in);
}
TEST(ErasureCodeShec, encode_8)
{
  // Passing a null encoded-output map must be rejected (-EINVAL).
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  const int r = shec.encode(want_to_encode, in, nullptr); //encoded = null
  EXPECT_EQ(-EINVAL, r);
}
TEST(ErasureCodeShec, encode_9)
{
  // A non-empty encoded-output map (pre-populated with junk) must be
  // rejected (-EINVAL).
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  for (int i = 0; i < 100; ++i) {
    encoded[i].append("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
  }
  const int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(-EINVAL, r);
}
TEST(ErasureCodeShec, encode2_1)
{
  // Round trip with a 128-byte input: encode, decode two chunks, and verify
  // decode_concat restores the input.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "0123"//128
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  //decode only chunks 0 and 1
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded;
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 2), encoded,
		   &decoded);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(2u, decoded.size());
  EXPECT_EQ(32u, decoded[0].length());
  bufferlist out1, out2, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); ++i) {
    out1.append(encoded[i]);
  }
  //out2 is "decoded"
  shec.decode_concat(encoded, &out2);
  usable.substr_of(out2, 0, in.length());
  EXPECT_FALSE(out1 == in);
  EXPECT_TRUE(usable == in);
}
TEST(ErasureCodeShec, encode2_3)
{
  // Concurrency check: encode() must succeed while a background thread
  // (thread4) exercises the same object; afterwards a normal decode round
  // trip must still work.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "0123"//128
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  pthread_t tid;
  g_flag = 0;
  // Upcast explicitly before type erasure so the thread's void* really is an
  // ErasureCodeShec*.
  pthread_create(&tid, nullptr, thread4, static_cast<ErasureCodeShec*>(&shec));
  while (g_flag == 0) {
    usleep(1);
  }
  sleep(1);
  printf("*** test start ***\n");
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  printf("*** test end ***\n");
  g_flag = 0;
  pthread_join(tid, nullptr);
  //decode only chunks 0 and 1
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded;
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 2), encoded,
		   &decoded);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(2u, decoded.size());
  EXPECT_EQ(32u, decoded[0].length());
  bufferlist out1, out2, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); ++i) {
    out1.append(encoded[i]);
  }
  //out2 is "decoded"
  shec.decode_concat(encoded, &out2);
  usable.substr_of(out2, 0, in.length());
  EXPECT_FALSE(out1 == in);
  EXPECT_TRUE(usable == in);
}
TEST(ErasureCodeShec, decode_1)
{
  // With all chunks available, decoding chunks 0..6 must reproduce every
  // data chunk of the original 248-byte input byte-for-byte.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  // all chunks are available
  //decode
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded;
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 7), encoded,
		   &decoded);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(7u, decoded.size());
  // compare every data chunk against the corresponding slice of the input;
  // the last slice may be shorter than a full chunk
  bufferlist usable;
  int cmp;
  unsigned int c_size = shec.get_chunk_size(in.length());
  for (unsigned int i = 0; i < shec.get_data_chunk_count(); ++i) {
    usable.clear();
    EXPECT_EQ(c_size, decoded[i].length());
    if ( c_size * (i+1) <= in.length() ) {
      usable.substr_of(in, c_size * i, c_size);
      cmp = memcmp(decoded[i].c_str(), usable.c_str(), c_size);
    } else {
      usable.substr_of(in, c_size * i, in.length() % c_size);
      cmp = memcmp(decoded[i].c_str(), usable.c_str(), in.length() % c_size);
    }
    EXPECT_EQ(0, cmp);
  }
}
TEST(ErasureCodeShec, decode_8)
{
  // want_to_decode lists 8 ids, more than the k+m=7 chunks; _decode() must
  // still succeed and return the 7 real chunks.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  // all chunks are available
  //decode
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6, 7 }; //more than k+m
  map<int, bufferlist> decoded;
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 8), encoded,
		   &decoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(7u, decoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  // compare every data chunk against the corresponding slice of the input
  bufferlist usable;
  int cmp;
  unsigned int c_size = shec.get_chunk_size(in.length());
  for (unsigned int i = 0; i < shec.get_data_chunk_count(); ++i) {
    usable.clear();
    EXPECT_EQ(c_size, decoded[i].length());
    if ( c_size * (i+1) <= in.length() ) {
      usable.substr_of(in, c_size * i, c_size);
      cmp = memcmp(decoded[i].c_str(), usable.c_str(), c_size);
    } else {
      usable.substr_of(in, c_size * i, in.length() % c_size);
      cmp = memcmp(decoded[i].c_str(), usable.c_str(), in.length() % c_size);
    }
    EXPECT_EQ(0, cmp);
  }
}
TEST(ErasureCodeShec, decode_9)
{
  // Extra out-of-range ids in want_to_decode and an extra bogus entry
  // (encoded[100]) must not prevent a correct decode of the real chunks.
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  // all chunks are available
  //decode
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  map<int, bufferlist> decoded;
  //extra data
  bufferlist buf;
  buf.append("abc");
  encoded[100] = buf;
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 10), encoded,
		   &decoded);
  EXPECT_NE(nullptr, shec.matrix);
  EXPECT_EQ(0, r);
  EXPECT_EQ(7u, decoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), decoded[0].length());
  bufferlist out1, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); ++i) {
    out1.append(encoded[i]);
  }
  EXPECT_FALSE(out1 == in);
  // compare every data chunk against the corresponding slice of the input
  int cmp;
  unsigned int c_size = shec.get_chunk_size(in.length());
  for (unsigned int i = 0; i < shec.get_data_chunk_count(); ++i) {
    usable.clear();
    EXPECT_EQ(c_size, decoded[i].length());
    if ( c_size * (i+1) <= in.length() ) {
      usable.substr_of(in, c_size * i, c_size);
      cmp = memcmp(decoded[i].c_str(), usable.c_str(), c_size);
    } else {
      usable.substr_of(in, c_size * i, in.length() % c_size);
      cmp = memcmp(decoded[i].c_str(), usable.c_str(), in.length() % c_size);
    }
    EXPECT_EQ(0, cmp);
  }
}
TEST(ErasureCodeShec, decode_10)
{
  // Only chunks 0..2 supplied but chunks 0..6 wanted: too few chunks to
  // recover, so _decode() must fail (returns -1).
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  //decode with an insufficient subset of chunks
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded, inchunks;
  for ( unsigned int i = 0; i < 3; ++i) {
    inchunks.insert(make_pair(i, encoded[i]));
  }
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 7), inchunks,
		   &decoded);
  EXPECT_EQ(-1, r);
}
TEST(ErasureCodeShec, decode_11)
{
  // Only chunks 4..6 supplied but chunks 0..4 wanted: unrecoverable, so
  // _decode() must fail (returns -1).
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "ABCD"//128
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  //decode with an insufficient subset of chunks
  int want_to_decode[] = { 0, 1, 2, 3, 4 };
  map<int, bufferlist> decoded, inchunks;
  for ( unsigned int i = 4; i < 7; ++i) {
    inchunks.insert(make_pair(i, encoded[i]));
  }
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 5), inchunks,
		   &decoded);
  EXPECT_EQ(-1, r);
}
TEST(ErasureCodeShec, decode_12)
{
  // Passing a null decoded-output map must be rejected (non-zero return).
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  // all chunks are available
  //decode with a null output map
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 7), encoded,
		   nullptr);
  EXPECT_NE(0, r);
}
TEST(ErasureCodeShec, decode_13)
{
  // A pre-populated decoded-output map (100 junk entries) must be rejected
  // (non-zero return).
  ErasureCodeShecTableCache tcache;
  // Automatic storage instead of raw new/delete (RAII; no leak on early exit).
  ErasureCodeShecReedSolomonVandermonde shec(tcache,
					     ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile profile;
  profile["plugin"] = "shec";
  profile["technique"] = "";
  profile["crush-failure-domain"] = "osd";
  profile["k"] = "4";
  profile["m"] = "3";
  profile["c"] = "2";
  shec.init(profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec.get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec.encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec.get_chunk_count(), encoded.size());
  EXPECT_EQ(shec.get_chunk_size(in.length()), encoded[0].length());
  // all chunks are available
  //decode into a non-empty output map
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6 };
  map<int, bufferlist> decoded;
  //extra data
  bufferlist buf;
  buf.append("a");
  for (int i = 0; i < 100; ++i) {
    decoded[i] = buf;
  }
  r = shec._decode(set<int>(want_to_decode, want_to_decode + 7), encoded,
		   &decoded);
  EXPECT_NE(0, r);
}
// decode2_1: happy path — decode just chunks {0,1} from a full chunk set,
// then verify a full decode_concat round-trip reproduces the original input.
TEST(ErasureCodeShec, decode2_1)
{
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec->encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec->get_chunk_count(), encoded.size());
  EXPECT_EQ(shec->get_chunk_size(in.length()), encoded[0].length());
  // all chunks are available
  //decode
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  map<int, bufferlist> decoded;
  // only the first two entries of want_to_decode are requested
  r = shec->_decode(set<int>(want_to_decode, want_to_decode + 2), encoded,
                    &decoded);
  EXPECT_TRUE(shec->matrix != NULL);
  EXPECT_EQ(0, r);
  EXPECT_EQ(2u, decoded.size());
  bufferlist out;
  shec->decode_concat(encoded, &out);
  bufferlist usable;
  // decode output is padded to chunk boundaries; compare only in.length() bytes
  usable.substr_of(out, 0, in.length());
  EXPECT_TRUE(usable == in);
  delete shec;
  delete profile;
}
// decode2_3: same round-trip as decode2_1, but while thread4 concurrently
// hammers encode() on the same shec instance — checks decode is safe under
// concurrent use. g_flag handshakes the worker: the worker sets it to 1 once
// its loop is running; the test resets it to 0 to stop the worker.
TEST(ErasureCodeShec, decode2_3)
{
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec->encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec->get_chunk_count(), encoded.size());
  EXPECT_EQ(shec->get_chunk_size(in.length()), encoded[0].length());
  // all chunks are available
  //decode
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  map<int, bufferlist> decoded;
  pthread_t tid;
  g_flag = 0;
  pthread_create(&tid, NULL, thread4, shec);
  while (g_flag == 0) {
    usleep(1);          // wait until the worker loop has started
  }
  sleep(1);
  printf("*** test start ***\n");
  r = shec->_decode(set<int>(want_to_decode, want_to_decode + 2), encoded,
                    &decoded);
  EXPECT_TRUE(shec->matrix != NULL);
  EXPECT_EQ(0, r);
  EXPECT_EQ(2u, decoded.size());
  printf("*** test end ***\n");
  g_flag = 0;           // tell thread4 to exit its loop
  pthread_join(tid, NULL);
  bufferlist out;
  shec->decode_concat(encoded, &out);
  bufferlist usable;
  usable.substr_of(out, 0, in.length());
  EXPECT_TRUE(usable == in);
  delete shec;
  delete profile;
}
// decode2_4: unrecoverable case — only chunk 0 is available, chunks {0,1}
// are wanted, so chunk 1 cannot be reconstructed and _decode must return -1.
TEST(ErasureCodeShec, decode2_4)
{
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
  );
  for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
    want_to_encode.insert(i);
  }
  int r = shec->encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, r);
  EXPECT_EQ(shec->get_chunk_count(), encoded.size());
  EXPECT_EQ(shec->get_chunk_size(in.length()), encoded[0].length());
  //decode
  int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  map<int, bufferlist> decoded;
  // cannot recover
  bufferlist out;
  map<int, bufferlist> degraded;
  degraded[0] = encoded[0];   // a single surviving chunk is not enough
  r = shec->_decode(set<int>(want_to_decode, want_to_decode + 2), degraded,
                    &decoded);
  EXPECT_EQ(-1, r);
  delete shec;
  delete profile;
}
// create_rule_1_2: create_rule() succeeds on a fresh CRUSH map (2 hosts x
// 5 osds) and returns -EEXIST when the same rule name is created twice.
TEST(ErasureCodeShec, create_rule_1_2)
{
  //create rule
  CrushWrapper *crush = new CrushWrapper;
  crush->create();
  crush->set_type_name(2, "root");
  crush->set_type_name(1, "host");
  crush->set_type_name(0, "osd");
  int rootno;
  crush->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1, 2, 0, NULL,
                    NULL, &rootno);
  crush->set_item_name(rootno, "default");
  map < string, string > loc;
  loc["root"] = "default";
  int num_host = 2;
  int num_osd = 5;
  int osd = 0;
  for (int h = 0; h < num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o = 0; o < num_osd; ++o, ++osd) {
      crush->insert_item(g_ceph_context, osd, 1.0,
                         string("osd.") + stringify(osd), loc);
    }
  }
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //create_rule
  stringstream ss;
  int r = shec->create_rule("myrule", *crush, &ss);
  EXPECT_EQ(0, r);
  EXPECT_STREQ("myrule", crush->rule_name_map[0].c_str());
  //reexecute create_rule
  r = shec->create_rule("myrule", *crush, &ss);
  EXPECT_EQ(-EEXIST, r);      // duplicate rule name must be rejected
  delete shec;
  delete profile;
  delete crush;
}
// create_rule_4: create_rule() must tolerate a NULL message stream (ss).
TEST(ErasureCodeShec, create_rule_4)
{
  //create rule
  CrushWrapper *crush = new CrushWrapper;
  crush->create();
  crush->set_type_name(2, "root");
  crush->set_type_name(1, "host");
  crush->set_type_name(0, "osd");
  int rootno;
  crush->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1, 2, 0, NULL,
                    NULL, &rootno);
  crush->set_item_name(rootno, "default");
  map < string, string > loc;
  loc["root"] = "default";
  int num_host = 2;
  int num_osd = 5;
  int osd = 0;
  for (int h = 0; h < num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o = 0; o < num_osd; ++o, ++osd) {
      crush->insert_item(g_ceph_context, osd, 1.0,
                         string("osd.") + stringify(osd), loc);
    }
  }
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //create_rule
  int r = shec->create_rule("myrule", *crush, NULL); //ss = NULL
  EXPECT_EQ(0, r);
  delete shec;
  delete profile;
  delete crush;
}
// create_rule2_1: plain success path for create_rule() — the new rule is
// registered under slot 0 of the CRUSH rule name map.
TEST(ErasureCodeShec, create_rule2_1)
{
  //create rule
  CrushWrapper *crush = new CrushWrapper;
  crush->create();
  crush->set_type_name(2, "root");
  crush->set_type_name(1, "host");
  crush->set_type_name(0, "osd");
  int rootno;
  crush->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1, 2, 0, NULL,
                    NULL, &rootno);
  crush->set_item_name(rootno, "default");
  map < string, string > loc;
  loc["root"] = "default";
  int num_host = 2;
  int num_osd = 5;
  int osd = 0;
  for (int h = 0; h < num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o = 0; o < num_osd; ++o, ++osd) {
      crush->insert_item(g_ceph_context, osd, 1.0,
                         string("osd.") + stringify(osd), loc);
    }
  }
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //create_rule
  stringstream ss;
  int r = shec->create_rule("myrule", *crush, &ss);
  EXPECT_EQ(0, r);
  EXPECT_STREQ("myrule", crush->rule_name_map[0].c_str());
  delete shec;
  delete profile;
  delete crush;
}
// Parameter bundle for passing a shec instance plus a CRUSH map to a worker
// thread. NOTE(review): not referenced in the visible code — the
// create_rule2_3 test passes `shec` directly to thread3; possibly vestigial.
struct CreateRuleset2_3_Param_d {
  ErasureCodeShec *shec;   // erasure-code instance under test (not owned)
  CrushWrapper *crush;     // CRUSH map to create rules in (not owned)
};
// create_rule2_3: call create_rule() while thread3 concurrently creates
// rules on its own private CRUSH map — checks for interference through the
// shared shec instance. g_flag handshakes worker start/stop as elsewhere.
TEST(ErasureCodeShec, create_rule2_3)
{
  //create rule
  CrushWrapper *crush = new CrushWrapper;
  crush->create();
  crush->set_type_name(2, "root");
  crush->set_type_name(1, "host");
  crush->set_type_name(0, "osd");
  int rootno;
  crush->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1, 2, 0, NULL,
                    NULL, &rootno);
  crush->set_item_name(rootno, "default");
  map < string, string > loc;
  loc["root"] = "default";
  int num_host = 2;
  int num_osd = 5;
  int osd = 0;
  for (int h = 0; h < num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o = 0; o < num_osd; ++o, ++osd) {
      crush->insert_item(g_ceph_context, osd, 1.0,
                         string("osd.") + stringify(osd), loc);
    }
  }
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //create_rule
  stringstream ss;
  pthread_t tid;
  g_flag = 0;
  pthread_create(&tid, NULL, thread3, shec);
  while (g_flag == 0) {
    usleep(1);          // wait until the worker loop has started
  }
  sleep(1);
  printf("*** test start ***\n");
  int r = (shec->create_rule("myrule", *crush, &ss));
  EXPECT_TRUE(r >= 0);  // any non-negative rule id is acceptable here
  printf("*** test end ***\n");
  g_flag = 0;           // tell thread3 to exit its loop
  pthread_join(tid, NULL);
  delete shec;
  delete profile;
  delete crush;
}
// get_chunk_count_1: with k=4, m=3 the total chunk count must be k+m = 7.
TEST(ErasureCodeShec, get_chunk_count_1)
{
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //get_chunk_count
  EXPECT_EQ(7u, shec->get_chunk_count());
  delete shec;
  delete profile;
}
// get_data_chunk_count_1: the data chunk count must equal k (= 4).
TEST(ErasureCodeShec, get_data_chunk_count_1)
{
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  shec->init(*profile, &cerr);
  //get_data_chunk_count
  EXPECT_EQ(4u, shec->get_data_chunk_count());
  delete shec;
  delete profile;
}
// get_chunk_size_1_2: with k=4, w=8 the alignment unit is k*w*4 = 128 bytes;
// both an exactly-aligned object (128) and one needing padding (126) must
// produce the same 32-byte chunk size.
TEST(ErasureCodeShec, get_chunk_size_1_2)
{
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = "4";
  (*profile)["m"] = "3";
  (*profile)["c"] = "2";
  (*profile)["w"] = "8";
  shec->init(*profile, &cerr);
  //when there is no padding(128=k*w*4)
  EXPECT_EQ(32u, shec->get_chunk_size(128));
  //when there is padding(126=k*w*4-2)
  EXPECT_EQ(32u, shec->get_chunk_size(126));
  delete shec;
  delete profile;
}
void* thread1(void* pParam)
{
ErasureCodeShec* shec = (ErasureCodeShec*) pParam;
set<int> want_to_decode;
set<int> available_chunks;
set<int> minimum_chunks;
want_to_decode.insert(0);
want_to_decode.insert(1);
available_chunks.insert(0);
available_chunks.insert(1);
available_chunks.insert(2);
printf("*** thread loop start ***\n");
g_flag = 1;
while (g_flag == 1) {
shec->_minimum_to_decode(want_to_decode, available_chunks, &minimum_chunks);
}
printf("*** thread loop end ***\n");
return NULL;
}
void* thread2(void* pParam)
{
ErasureCodeShec* shec = (ErasureCodeShec*) pParam;
set<int> want_to_decode;
map<int, int> available_chunks;
set<int> minimum_chunks;
want_to_decode.insert(0);
want_to_decode.insert(1);
available_chunks[0] = 0;
available_chunks[1] = 1;
available_chunks[2] = 2;
printf("*** thread loop start ***\n");
g_flag = 1;
while (g_flag == 1) {
shec->minimum_to_decode_with_cost(want_to_decode, available_chunks,
&minimum_chunks);
minimum_chunks.clear();
}
printf("*** thread loop end ***\n");
return NULL;
}
// Worker thread body: builds a private CRUSH map (2 hosts x 5 osds) and then
// creates rules ("myrule0", "myrule1", ...) in a loop until the spawning
// thread clears g_flag. pParam is the shared ErasureCodeShec instance;
// always returns NULL.
void* thread3(void* pParam)
{
  ErasureCodeShec* shec = (ErasureCodeShec*) pParam;
  std::unique_ptr<CrushWrapper> crush = std::make_unique<CrushWrapper>();
  crush->create();
  crush->set_type_name(2, "root");
  crush->set_type_name(1, "host");
  crush->set_type_name(0, "osd");
  int rootno;
  crush->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1, 2, 0, NULL,
                    NULL, &rootno);
  crush->set_item_name(rootno, "default");
  map < string, string > loc;
  loc["root"] = "default";
  int num_host = 2;
  int num_osd = 5;
  int osd = 0;
  for (int h = 0; h < num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o = 0; o < num_osd; ++o, ++osd) {
      crush->insert_item(g_ceph_context, osd, 1.0,
                         string("osd.") + stringify(osd), loc);
    }
  }
  stringstream ss;
  int i = 0;
  char name[30];
  printf("*** thread loop start ***\n");
  g_flag = 1;   // handshake: tell the spawner the loop is running
  while (g_flag == 1) {
    // snprintf (not sprintf): bound the write to the buffer even if i grows
    // beyond expectations.
    snprintf(name, sizeof(name), "myrule%d", i);
    shec->create_rule(name, *crush, &ss);
    ++i;
  }
  printf("*** thread loop end ***\n");
  return NULL;
}
void* thread4(void* pParam)
{
ErasureCodeShec* shec = (ErasureCodeShec*) pParam;
bufferlist in;
in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
);
set<int> want_to_encode;
for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
want_to_encode.insert(i);
}
map<int, bufferlist> encoded;
printf("*** thread loop start ***\n");
g_flag = 1;
while (g_flag == 1) {
shec->encode(want_to_encode, in, &encoded);
encoded.clear();
}
printf("*** thread loop end ***\n");
return NULL;
}
void* thread5(void* pParam)
{
ErasureCodeShec* shec = (ErasureCodeShec*) pParam;
bufferlist in;
in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//248
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//310
);
set<int> want_to_encode;
for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
want_to_encode.insert(i);
}
map<int, bufferlist> encoded;
shec->encode(want_to_encode, in, &encoded);
int want_to_decode[] = { 0, 1, 2, 3, 4, 5 };
map<int, bufferlist> decoded;
printf("*** thread loop start ***\n");
g_flag = 1;
while (g_flag == 1) {
shec->_decode(set<int>(want_to_decode, want_to_decode + 2), encoded,
&decoded);
decoded.clear();
}
printf("*** thread loop end ***\n");
return NULL;
}
| 76,300 | 26.018768 | 89 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeShec_all.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014,2015 FUJITSU LIMITED
*
* Author: Shotaro Kawaguchi <kawaguchi.s@jp.fujitsu.com>
* Author: Takanori Nakao <nakao.takanori@jp.fujitsu.com>
* Author: Takeshi Miyamae <miyamae.takeshi@jp.fujitsu.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// SUMMARY: TestErasureCodeShec combination of k,m,c by 301 patterns
#include <errno.h>
#include <stdlib.h>
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "include/stringify.h"
#include "global/global_init.h"
#include "erasure-code/shec/ErasureCodeShec.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "common/ceph_argparse.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
using namespace std;
// One (k,m,c) combination fed to the parameterized test, together with the
// expected chunk size for a 192-byte object. The k/m/c pointers alias the
// sk/sm/sc buffers, which main() fills with the decimal strings.
struct Param_d {
  char* k;        // data chunk count, as a string (points into sk)
  char* m;        // parity chunk count, as a string (points into sm)
  char* c;        // durability estimator, as a string (points into sc)
  int ch_size;    // expected chunk size for a 192-byte object
  char sk[16];    // backing storage for k
  char sm[16];    // backing storage for m
  char sc[16];    // backing storage for c
};
struct Param_d param[301];
unsigned int g_recover = 0;
unsigned int g_cannot_recover = 0;
struct Recover_d {
int k;
int m;
int c;
set<int> want;
set<int> avail;
};
struct std::vector<Recover_d> cannot_recover;
// Fixture for the value-parameterized test below; each instance receives one
// Param_d (a single k/m/c combination) via GetParam().
class ParameterTest : public ::testing::TestWithParam<struct Param_d> {
};
// End-to-end exercise of one (k,m,c) combination: profile parsing,
// _minimum_to_decode over every want/avail pattern with |want| <= c,
// minimum_to_decode_with_cost, an encode/decode round-trip, CRUSH rule
// creation, and the chunk-count/size getters. Recoverable and unrecoverable
// patterns are tallied in g_recover / g_cannot_recover (and cannot_recover)
// for the summary main() prints after RUN_ALL_TESTS.
TEST_P(ParameterTest, parameter_all)
{
  int result;
  //get parameters
  char* k = GetParam().k;
  char* m = GetParam().m;
  char* c = GetParam().c;
  unsigned c_size = GetParam().ch_size;
  int i_k = atoi(k);
  int i_m = atoi(m);
  int i_c = atoi(c);
  //init
  ErasureCodeShecTableCache tcache;
  ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
      tcache,
      ErasureCodeShec::MULTIPLE);
  ErasureCodeProfile *profile = new ErasureCodeProfile();
  (*profile)["plugin"] = "shec";
  (*profile)["technique"] = "";
  (*profile)["crush-failure-domain"] = "osd";
  (*profile)["k"] = k;
  (*profile)["m"] = m;
  (*profile)["c"] = c;
  result = shec->init(*profile, &cerr);
  //check profile
  EXPECT_EQ(i_k, shec->k);
  EXPECT_EQ(i_m, shec->m);
  EXPECT_EQ(i_c, shec->c);
  EXPECT_EQ(8, shec->w);      // default word size when "w" is not given
  EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
  EXPECT_STREQ("default", shec->rule_root.c_str());
  EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
  EXPECT_TRUE(shec->matrix != NULL);
  EXPECT_EQ(0, result);
  //minimum_to_decode
  //want_to_decode will be a combination that chooses 1~c from k+m
  set<int> want_to_decode, available_chunks, minimum_chunks;
  int array_want_to_decode[shec->get_chunk_count()];
  struct Recover_d comb;
  for (int w = 1; w <= i_c; w++) {
    const unsigned int r = w; // combination(k+m,r)
    // seed the selector array with r leading 1s; prev_permutation then walks
    // every r-element subset of the k+m chunks
    for (unsigned int i = 0; i < r; ++i) {
      array_want_to_decode[i] = 1;
    }
    for (unsigned int i = r; i < shec->get_chunk_count(); ++i) {
      array_want_to_decode[i] = 0;
    }
    do {
      for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
        available_chunks.insert(i);
      }
      for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
        if (array_want_to_decode[i]) {
          want_to_decode.insert(i);
          available_chunks.erase(i);   // wanted chunks are the missing ones
        }
      }
      result = shec->_minimum_to_decode(want_to_decode, available_chunks,
				       &minimum_chunks);
      if (result == 0){
	EXPECT_EQ(0, result);
	EXPECT_TRUE(minimum_chunks.size());
	g_recover++;
      } else {
	EXPECT_EQ(-EIO, result);
	EXPECT_EQ(0u, minimum_chunks.size());
	g_cannot_recover++;
	comb.k = shec->k;
	comb.m = shec->m;
	comb.c = shec->c;
	comb.want = want_to_decode;
	comb.avail = available_chunks;
	cannot_recover.push_back(comb);
      }
      want_to_decode.clear();
      available_chunks.clear();
      minimum_chunks.clear();
    } while (std::prev_permutation(
		 array_want_to_decode,
		 array_want_to_decode + shec->get_chunk_count()));
  }
  //minimum_to_decode_with_cost
  set<int> want_to_decode_with_cost, minimum_chunks_with_cost;
  map<int, int> available_chunks_with_cost;
  for (unsigned int i = 0; i < 1; i++) {
    want_to_decode_with_cost.insert(i);
  }
  for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
    available_chunks_with_cost[i] = i;
  }
  result = shec->minimum_to_decode_with_cost(
      want_to_decode_with_cost,
      available_chunks_with_cost,
      &minimum_chunks_with_cost);
  EXPECT_EQ(0, result);
  EXPECT_TRUE(minimum_chunks_with_cost.size());
  //encode
  bufferlist in;
  set<int> want_to_encode;
  map<int, bufferlist> encoded;
  in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
	    "012345"//192
  );
  for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
    want_to_encode.insert(i);
  }
  result = shec->encode(want_to_encode, in, &encoded);
  EXPECT_EQ(0, result);
  EXPECT_EQ(i_k+i_m, (int)encoded.size());
  EXPECT_EQ(c_size, encoded[0].length());
  //decode
  int want_to_decode2[i_k + i_m];
  map<int, bufferlist> decoded;
  for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
    want_to_decode2[i] = i;
  }
  // only chunks 0 and 1 are requested here
  result = shec->_decode(set<int>(want_to_decode2, want_to_decode2 + 2),
			 encoded, &decoded);
  EXPECT_EQ(0, result);
  EXPECT_EQ(2u, decoded.size());
  EXPECT_EQ(c_size, decoded[0].length());
  //check encoded,decoded
  bufferlist out1, out2, usable;
  //out1 is "encoded"
  for (unsigned int i = 0; i < encoded.size(); i++) {
    out1.append(encoded[i]);
  }
  //out2 is "decoded"
  shec->decode_concat(encoded, &out2);
  // decode output is padded; compare only the original in.length() bytes
  usable.substr_of(out2, 0, in.length());
  EXPECT_FALSE(out1 == in);
  EXPECT_TRUE(usable == in);
  //create_rule
  stringstream ss;
  CrushWrapper *crush = new CrushWrapper;
  crush->create();
  crush->set_type_name(2, "root");
  crush->set_type_name(1, "host");
  crush->set_type_name(0, "osd");
  int rootno;
  crush->add_bucket(0, CRUSH_BUCKET_STRAW, CRUSH_HASH_RJENKINS1, 2, 0, NULL,
		    NULL, &rootno);
  crush->set_item_name(rootno, "default");
  map < string, string > loc;
  loc["root"] = "default";
  int num_host = 2;
  int num_osd = 5;
  int osd = 0;
  for (int h = 0; h < num_host; ++h) {
    loc["host"] = string("host-") + stringify(h);
    for (int o = 0; o < num_osd; ++o, ++osd) {
      crush->insert_item(g_ceph_context, osd, 1.0,
			 string("osd.") + stringify(osd), loc);
    }
  }
  result = shec->create_rule("myrule", *crush, &ss);
  EXPECT_EQ(0, result);
  EXPECT_STREQ("myrule", crush->rule_name_map[0].c_str());
  //get_chunk_count
  EXPECT_EQ(i_k+i_m, (int)shec->get_chunk_count());
  //get_data_chunk_count
  EXPECT_EQ(i_k, (int)shec->get_data_chunk_count());
  //get_chunk_size
  EXPECT_EQ(c_size, shec->get_chunk_size(192));
  delete shec;
  delete profile;
  delete crush;
}
// Instantiate parameter_all once per entry of param[] (filled in main()).
INSTANTIATE_TEST_SUITE_P(Test, ParameterTest, ::testing::ValuesIn(param));
// Fills param[] with every valid (k,m,c) combination (k<=12, m<=k, k+m<=20,
// c<=m — 301 entries), runs the parameterized tests, then prints a recovery
// statistics summary including every pattern that could not be recovered.
int main(int argc, char **argv)
{
  int i = 0;
  int r;
  const int kObjectSize = 192;
  unsigned alignment, tail, padded_length;
  float recovery_percentage;
  //make_kmc
  for (unsigned int k = 1; k <= 12; k++) {
    for (unsigned int m = 1; (m <= k) && (k + m <= 20); m++) {
      for (unsigned int c = 1; c <= m; c++) {
	sprintf(param[i].sk, "%u", k);
	sprintf(param[i].sm, "%u", m);
	sprintf(param[i].sc, "%u", c);
	param[i].k = param[i].sk;
	param[i].m = param[i].sm;
	param[i].c = param[i].sc;
	// expected chunk size: pad the object to a multiple of k*w*sizeof(int)
	alignment = k * 8 * sizeof(int);
	tail = kObjectSize % alignment;
	padded_length = kObjectSize + (tail ? (alignment - tail) : 0);
	param[i].ch_size = padded_length / k;
	i++;
      }
    }
  }
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
			 CODE_ENVIRONMENT_UTILITY,
			 CINIT_FLAG_NO_MON_CONFIG);
  common_init_finish(g_ceph_context);
  ::testing::InitGoogleTest(&argc, argv);
  r = RUN_ALL_TESTS();
  // summary of the recovery tallies accumulated by parameter_all
  std::cout << "minimum_to_decode:recover_num = " << g_recover << std::endl;
  std::cout << "minimum_to_decode:cannot_recover_num = " << g_cannot_recover
	    << std::endl;
  recovery_percentage = 100.0
      - (float) (100.0 * g_cannot_recover / (g_recover + g_cannot_recover));
  printf("recovery_percentage:%f\n",recovery_percentage);
  if (recovery_percentage > 99.0) {
    std::cout << "[ OK ] Recovery percentage is more than 99.0%"
	      << std::endl;
  } else {
    std::cout << "[ NG ] Recovery percentage is less than 99.0%"
	      << std::endl;
  }
  std::cout << "cannot recovery patterns:" << std::endl;
  for (std::vector<Recover_d>::const_iterator i = cannot_recover.begin();
       i != cannot_recover.end(); ++i) {
    std::cout << "---" << std::endl;
    std::cout << "k = " << i->k << ", m = " << i->m << ", c = " << i->c
	      << std::endl;
    std::cout << "want_to_decode :" << i->want << std::endl;
    std::cout << "available_chunks:" << i->avail << std::endl;
  }
  std::cout << "---" << std::endl;
  return r;
}
| 9,076 | 26.258258 | 89 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeShec_arguments.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 FUJITSU LIMITED
*
* Author: Shotaro Kawaguchi <kawaguchi.s@jp.fujitsu.com>
* Author: Takanori Nakao <nakao.takanori@jp.fujitsu.com>
* Author: Takeshi Miyamae <miyamae.takeshi@jp.fujitsu.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// SUMMARY: shec's gtest for each argument of minimum_to_decode()/decode()
#include <algorithm>
#include <bit>
#include <cerrno>
#include <cstdlib>
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "include/stringify.h"
#include "global/global_init.h"
#include "erasure-code/shec/ErasureCodeShec.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "common/ceph_argparse.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
using namespace std;
// Number of want/avail combinations exercised so far.
unsigned int count_num = 0;
// Number of combinations whose result disagreed with the expectation.
unsigned int unexpected_count = 0;
// Size of the last recovery candidate set examined by search_table_shec432()
// (read by the caller after a successful lookup).
unsigned int value_count = 0;
// Precomputed table for shec(4,3,2): maps each want-set to the collection of
// minimal available-chunk sets that allow recovery.
map<set<int>,set<set<int> > > shec_table;
// Build a bitmask with bit `i` set for every element i of `is`.
constexpr int getint(std::initializer_list<int> is) {
  int mask = 0;
  for (const int bit : is) {
    mask |= (1 << bit);
  }
  return mask;
}
// Precompute shec_table for the (k=4, m=3, c=2) layout: for every non-empty
// want-set over the 7 chunks, record the minimal sets of available chunks
// from which those chunks can be recovered. Bitmask i represents chunk i.
// The candidate lists (the size-2 pairs and size-4 quadruples below) encode
// the recoverability structure of this specific parity layout.
void create_table_shec432() {
  set<int> table_key,vec_avails;
  set<set<int> > table_value;
  for (int want_count = 0; want_count < 7; ++want_count) {
    for (unsigned want = 1; want < (1<<7); ++want) {
      table_key.clear();
      table_value.clear();
      // process each want mask once, grouped by its population count
      if (std::popcount(want) != want_count) {
	continue;
      }
      {
	// expand the want bitmask into a set of chunk indices
	for (int i = 0; i < 7; ++i) {
	  if (want & (1 << i)) {
	    table_key.insert(i);
	  }
	}
      }
      vector<int> vec;
      for (unsigned avails = 0; avails < (1<<7); ++avails) {
	// an avail mask overlapping the want mask is meaningless
	if (want & avails) {
	  continue;
	}
	// single missing chunk recoverable from 2 others: only within the
	// local parity groups {0,1,5} and {2,3,6}
	if (std::popcount(avails) == 2 &&
	    std::popcount(want) == 1) {
	  if (std::cmp_equal(want | avails, getint({0,1,5})) ||
	      std::cmp_equal(want | avails, getint({2,3,6}))) {
	    vec.push_back(avails);
	  }
	}
      }
      for (unsigned avails = 0; avails < (1<<7); ++avails) {
	if (want & avails) {
	  continue;
	}
	// 4-chunk availability sets that suffice for recovery
	if (std::popcount(avails) == 4) {
	  auto a = to_array<std::initializer_list<int>>({
	      {0,1,2,3}, {0,1,2,4}, {0,1,2,6}, {0,1,3,4}, {0,1,3,6}, {0,1,4,6},
	      {0,2,3,4}, {0,2,3,5}, {0,2,4,5}, {0,2,4,6}, {0,2,5,6}, {0,3,4,5},
	      {0,3,4,6}, {0,3,5,6}, {0,4,5,6}, {1,2,3,4}, {1,2,3,5}, {1,2,4,5},
	      {1,2,4,6}, {1,2,5,6}, {1,3,4,5}, {1,3,4,6}, {1,3,5,6}, {1,4,5,6},
	      {2,3,4,5}, {2,4,5,6}, {3,4,5,6}});
	  if (ranges::any_of(a, std::bind_front(cmp_equal<uint, int>, avails),
			     getint)) {
	    vec.push_back(avails);
	  }
	}
      }
      // drop any candidate that is a superset of an earlier (smaller) one,
      // keeping only minimal availability sets
      for (int i = 0; i < (int)vec.size(); ++i) {
	for (int j = i + 1; j < (int)vec.size(); ++j) {
	  if ((vec[i] & vec[j]) == vec[i]) {
	    vec.erase(vec.begin() + j);
	    --j;
	  }
	}
      }
      // convert each surviving bitmask into a set<int> and store the entry
      for (int i = 0; i < (int)vec.size(); ++i) {
	vec_avails.clear();
	for (int j = 0; j < 7; ++j) {
	  if (vec[i] & (1 << j)) {
	    vec_avails.insert(j);
	  }
	}
	table_value.insert(vec_avails);
      }
      shec_table.insert(std::make_pair(table_key,table_value));
    }
  }
}
// Returns true if some precomputed recovery set for want_to_read is a subset
// of available_chunks. Side effect: value_count is left holding the size of
// the last candidate set examined (the matching one on success); the caller
// reads it after a true return.
bool search_table_shec432(set<int> want_to_read, set<int> available_chunks) {
  // Guard the lookup: the original dereferenced find()->second directly,
  // which is undefined behavior if the key is absent (e.g. a want-set the
  // table builder never emitted, such as all 7 chunks).
  auto entry = shec_table.find(want_to_read);
  if (entry == shec_table.end()) {
    return false;
  }
  const set<set<int> >& candidates = entry->second;
  for (set<set<int> >::const_iterator itr = candidates.begin();
       itr != candidates.end(); ++itr) {
    bool found = true;
    value_count = 0;
    for (set<int>::const_iterator setitr = itr->begin();
	 setitr != itr->end(); ++setitr) {
      if (!available_chunks.count(*setitr)) {
	found = false;
      }
      ++value_count;   // count every element, matching the original tally
    }
    if (found) {
      return true;
    }
  }
  return false;
}
TEST(ParameterTest, combination_all)
{
const unsigned int kObjectSize = 128;
//get profile
char* k = (char*)"4";
char* m = (char*)"3";
char* c = (char*)"2";
int i_k = atoi(k);
int i_m = atoi(m);
int i_c = atoi(c);
const unsigned alignment = i_k * 8 * sizeof(int);
const unsigned tail = kObjectSize % alignment;
const unsigned padded_length = kObjectSize + (tail ? (alignment - tail) : 0);
const unsigned c_size = padded_length / i_k;
//init
ErasureCodeShecTableCache tcache;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
map < std::string, std::string > *profile = new map<std::string,
std::string>();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = k;
(*profile)["m"] = m;
(*profile)["c"] = c;
int result = shec->init(*profile, &cerr);
//check profile
EXPECT_EQ(i_k, shec->k);
EXPECT_EQ(i_m, shec->m);
EXPECT_EQ(i_c, shec->c);
EXPECT_EQ(8, shec->w);
EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
EXPECT_STREQ("default", shec->rule_root.c_str());
EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
EXPECT_TRUE(shec->matrix != NULL);
EXPECT_EQ(0, result);
//encode
bufferlist in;
in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//length = 62
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
"0123"//128
);
set<int> want_to_encode;
for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
want_to_encode.insert(i);
}
map<int, bufferlist> encoded;
result = shec->encode(want_to_encode, in, &encoded);
EXPECT_EQ(0, result);
EXPECT_EQ(i_k+i_m, (int)encoded.size());
EXPECT_EQ(c_size, encoded[0].length());
bufferlist out1;
//out1 is "encoded"
for (unsigned int i = 0; i < encoded.size(); ++i) {
out1.append(encoded[i]);
}
EXPECT_FALSE(out1 == in);
for (unsigned int w1 = 0; w1 <= shec->get_chunk_count(); ++w1) {
// combination(k+m,w1)
int array_want_to_read[shec->get_chunk_count()];
for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
array_want_to_read[i] = i < w1 ? 1 : 0;
}
for (unsigned w2 = 0; w2 <= shec->get_chunk_count(); ++w2) {
// combination(k+m,w2)
int array_available_chunks[shec->get_chunk_count()];
for (unsigned int i = 0; i < shec->get_chunk_count(); ++i ) {
array_available_chunks[i] = i < w2 ? 1 : 0;
}
do {
do {
set<int> want_to_read, available_chunks;
map<int, bufferlist> inchunks;
for (unsigned int i = 0; i < shec->get_chunk_count(); ++i) {
if (array_want_to_read[i]) {
want_to_read.insert(i);
}
if (array_available_chunks[i]) {
available_chunks.insert(i);
inchunks.insert(make_pair(i,encoded[i]));
}
}
map<int, vector<pair<int,int>>> minimum_chunks;
map<int, bufferlist> decoded;
result = shec->minimum_to_decode(want_to_read, available_chunks,
&minimum_chunks);
int dresult = shec->decode(want_to_read, inchunks, &decoded,
shec->get_chunk_size(kObjectSize));
++count_num;
unsigned int minimum_count = 0;
if (want_to_read.size() == 0) {
EXPECT_EQ(0, result);
EXPECT_EQ(0u, minimum_chunks.size());
EXPECT_EQ(0, dresult);
EXPECT_EQ(0u, decoded.size());
EXPECT_EQ(0u, decoded[0].length());
if (result != 0 || dresult != 0) {
++unexpected_count;
}
} else {
// want - avail
set<int> want_to_read_without_avails;
for (auto chunk : want_to_read) {
if (!available_chunks.count(chunk)) {
want_to_read_without_avails.insert(chunk);
} else {
++minimum_count;
}
}
if (want_to_read_without_avails.size() == 0) {
EXPECT_EQ(0, result);
EXPECT_LT(0u, minimum_chunks.size());
EXPECT_GE(minimum_count, minimum_chunks.size());
EXPECT_EQ(0, dresult);
EXPECT_NE(0u, decoded.size());
for (unsigned int i = 0; i < shec->get_data_chunk_count(); ++i) {
if (array_want_to_read[i]) {
bufferlist usable;
usable.substr_of(in, c_size * i, c_size);
int cmp = memcmp(decoded[i].c_str(), usable.c_str(), c_size);
EXPECT_EQ(c_size, decoded[i].length());
EXPECT_EQ(0, cmp);
if (cmp != 0) {
++unexpected_count;
}
}
}
if (result != 0 || dresult != 0) {
++unexpected_count;
}
} else if (want_to_read_without_avails.size() > 3) {
EXPECT_EQ(-EIO, result);
EXPECT_EQ(0u, minimum_chunks.size());
EXPECT_EQ(-1, dresult);
if (result != -EIO || dresult != -1) {
++unexpected_count;
}
} else {
// search
if (search_table_shec432(want_to_read_without_avails,available_chunks)) {
EXPECT_EQ(0, result);
EXPECT_LT(0u, minimum_chunks.size());
EXPECT_GE(value_count + minimum_count, minimum_chunks.size());
EXPECT_EQ(0, dresult);
EXPECT_NE(0u, decoded.size());
for (unsigned int i = 0; i < shec->get_data_chunk_count(); ++i) {
if (array_want_to_read[i]) {
bufferlist usable;
usable.substr_of(in, c_size * i, c_size);
int cmp = memcmp(decoded[i].c_str(), usable.c_str(), c_size);
EXPECT_EQ(c_size, decoded[i].length());
EXPECT_EQ(0, cmp);
if (cmp != 0) {
++unexpected_count;
std::cout << "decoded[" << i << "] = " << decoded[i].c_str() << std::endl;
std::cout << "usable = " << usable.c_str() << std::endl;
std::cout << "want_to_read :" << want_to_read << std::endl;
std::cout << "available_chunks:" << available_chunks << std::endl;
std::cout << "minimum_chunks :" << minimum_chunks << std::endl;
}
}
}
if (result != 0 || dresult != 0) {
++unexpected_count;
}
} else {
EXPECT_EQ(-EIO, result);
EXPECT_EQ(0u, minimum_chunks.size());
EXPECT_EQ(-1, dresult);
if (result != -EIO || dresult != -1) {
++unexpected_count;
}
}
}
}
} while (std::prev_permutation(
array_want_to_read,
array_want_to_read + shec->get_chunk_count()));
} while (std::prev_permutation(
array_available_chunks,
array_available_chunks + shec->get_chunk_count()));
}
}
delete shec;
delete profile;
}
// Entry point: bring up a minimal client-side Ceph context (no monitor
// connection needed), run the gtest suite, then report the counters
// accumulated by the exhaustive recovery tests.
int main(int argc, char **argv)
{
  auto args = argv_to_vec(argc, argv);
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
			 CODE_ENVIRONMENT_UTILITY,
			 CINIT_FLAG_NO_MON_CONFIG);
  common_init_finish(g_ceph_context);
  ::testing::InitGoogleTest(&argc, argv);
  // Precompute the shec432 recovery lookup table consulted by the tests.
  create_table_shec432();
  const int rc = RUN_ALL_TESTS();
  std::cout << "minimum_to_decode:total_num = " << count_num << std::endl;
  std::cout << "minimum_to_decode:unexpected_num = " << unexpected_count << std::endl;
  return rc;
}
| 11,876 | 31.013477 | 96 | cc |
null | ceph-main/src/test/erasure-code/TestErasureCodeShec_thread.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014,2015 FUJITSU LIMITED
*
* Author: Shotaro Kawaguchi <kawaguchi.s@jp.fujitsu.com>
* Author: Takanori Nakao <nakao.takanori@jp.fujitsu.com>
* Author: Takeshi Miyamae <miyamae.takeshi@jp.fujitsu.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// SUMMARY: TestErasureCodeShec executes some threads at the same time
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "include/stringify.h"
#include "erasure-code/shec/ErasureCodeShec.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "global/global_context.h"
#include "gtest/gtest.h"
using namespace std;
void* thread1(void* pParam);
// Parameter set handed to each worker thread: the shec profile values
// k/m/c/w, kept as strings because they are stored verbatim into an
// ErasureCodeProfile by thread1().
class TestParam {
public:
  string k, m, c, w;
};
// Exercise five distinct shec profiles concurrently, each in its own
// thread running the same encode/decode loop (thread1).
TEST(ErasureCodeShec, thread)
{
  // k, m, c, w for each of the five worker threads.
  const char *settings[5][4] = {
    { "6",  "4", "3", "8"  },
    { "4",  "3", "2", "16" },
    { "10", "8", "4", "32" },
    { "5",  "5", "5", "8"  },
    { "9",  "9", "6", "16" },
  };
  TestParam params[5];
  for (int i = 0; i < 5; ++i) {
    params[i].k = settings[i][0];
    params[i].m = settings[i][1];
    params[i].c = settings[i][2];
    params[i].w = settings[i][3];
  }

  pthread_t tids[5];
  for (int i = 0; i < 5; ++i) {
    pthread_create(&tids[i], NULL, thread1, (void*) &params[i]);
    std::cout << "thread" << (i + 1) << " start " << std::endl;
  }
  // Wait for every worker to finish before the params go out of scope.
  for (int i = 0; i < 5; ++i) {
    pthread_join(tids[i], NULL);
  }
}
void* thread1(void* pParam)
{
TestParam* param = static_cast<TestParam*>(pParam);
time_t start, end;
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
instance.disable_dlclose = true;
{
std::lock_guard l{instance.lock};
__erasure_code_init((char*) "shec", (char*) "");
}
std::cout << "__erasure_code_init finish " << std::endl;
//encode
bufferlist in;
set<int> want_to_encode;
map<int, bufferlist> encoded;
in.append("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" //length = 62
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//124
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"//186
"012345"//192
);
//decode
int want_to_decode[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
map<int, bufferlist> decoded;
bufferlist out1, out2, usable;
time(&start);
time(&end);
const int kTestSec = 60;
ErasureCodeShecTableCache tcache;
while (kTestSec >= (end - start)) {
//init
int r;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);
ErasureCodeProfile *profile = new ErasureCodeProfile();
(*profile)["plugin"] = "shec";
(*profile)["technique"] = "multiple";
(*profile)["crush-failure-domain"] = "osd";
(*profile)["k"] = param->k;
(*profile)["m"] = param->m;
(*profile)["c"] = param->c;
(*profile)["w"] = param->w;
r = shec->init(*profile, &cerr);
int i_k = std::atoi(param->k.c_str());
int i_m = std::atoi(param->m.c_str());
int i_c = std::atoi(param->c.c_str());
int i_w = std::atoi(param->w.c_str());
EXPECT_EQ(0, r);
EXPECT_EQ(i_k, shec->k);
EXPECT_EQ(i_m, shec->m);
EXPECT_EQ(i_c, shec->c);
EXPECT_EQ(i_w, shec->w);
EXPECT_EQ(ErasureCodeShec::MULTIPLE, shec->technique);
EXPECT_STREQ("default", shec->rule_root.c_str());
EXPECT_STREQ("osd", shec->rule_failure_domain.c_str());
EXPECT_TRUE(shec->matrix != NULL);
if ((shec->matrix == NULL)) {
std::cout << "matrix is null" << std::endl;
// error
break;
}
//encode
for (unsigned int i = 0; i < shec->get_chunk_count(); i++) {
want_to_encode.insert(i);
}
r = shec->encode(want_to_encode, in, &encoded);
EXPECT_EQ(0, r);
EXPECT_EQ(shec->get_chunk_count(), encoded.size());
EXPECT_EQ(shec->get_chunk_size(in.length()), encoded[0].length());
if (r != 0) {
std::cout << "error in encode" << std::endl;
//error
break;
}
//decode
r = shec->_decode(set<int>(want_to_decode, want_to_decode + 2),
encoded,
&decoded);
EXPECT_EQ(0, r);
EXPECT_EQ(2u, decoded.size());
EXPECT_EQ(shec->get_chunk_size(in.length()), decoded[0].length());
if (r != 0) {
std::cout << "error in decode" << std::endl;
//error
break;
}
//out1 is "encoded"
for (unsigned int i = 0; i < encoded.size(); i++) {
out1.append(encoded[i]);
}
//out2 is "decoded"
shec->decode_concat(encoded, &out2);
usable.substr_of(out2, 0, in.length());
EXPECT_FALSE(out1 == in);
EXPECT_TRUE(usable == in);
if (out1 == in || !(usable == in)) {
std::cout << "encode(decode) result is not correct" << std::endl;
break;
}
delete shec;
delete profile;
want_to_encode.clear();
encoded.clear();
decoded.clear();
out1.clear();
out2.clear();
usable.clear();
time(&end);
}
return NULL;
}
| 5,809 | 25.409091 | 90 | cc |
null | ceph-main/src/test/erasure-code/ceph_erasure_code_benchmark.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <boost/scoped_ptr.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/program_options/option.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/cmdline.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/algorithm/string.hpp>
#include "global/global_context.h"
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/Clock.h"
#include "include/utime.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "erasure-code/ErasureCode.h"
#include "ceph_erasure_code_benchmark.h"
using std::endl;
using std::cerr;
using std::cout;
using std::map;
using std::set;
using std::string;
using std::stringstream;
using std::vector;
namespace po = boost::program_options;
// Parse the command line, initialize a minimal Ceph context and fill
// in the benchmark parameters (size, iterations, plugin, workload,
// erasure pattern and the erasure-code profile).
// Returns 0 on success, 1 after printing --help, -EINVAL on bad k/m.
int ErasureCodeBench::setup(int argc, char** argv) {

  po::options_description desc("Allowed options");
  desc.add_options()
    ("help,h", "produce help message")
    ("verbose,v", "explain what happens")
    ("size,s", po::value<int>()->default_value(1024 * 1024),
     "size of the buffer to be encoded")
    ("iterations,i", po::value<int>()->default_value(1),
     "number of encode/decode runs")
    ("plugin,p", po::value<string>()->default_value("jerasure"),
     "erasure code plugin name")
    ("workload,w", po::value<string>()->default_value("encode"),
     "run either encode or decode")
    ("erasures,e", po::value<int>()->default_value(1),
     "number of erasures when decoding")
    ("erased", po::value<vector<int> >(),
     "erased chunk (repeat if more than one chunk is erased)")
    ("erasures-generation,E", po::value<string>()->default_value("random"),
     "If set to 'random', pick the number of chunks to recover (as specified by "
     " --erasures) at random. If set to 'exhaustive' try all combinations of erasures "
     " (i.e. k=4,m=3 with one erasure will try to recover from the erasure of "
     " the first chunk, then the second etc.)")
    ("parameter,P", po::value<vector<string> >(),
     "add a parameter to the erasure code profile")
    ;

  po::variables_map vm;
  po::parsed_options parsed =
    po::command_line_parser(argc, argv).options(desc).allow_unregistered().run();
  po::store(
    parsed,
    vm);
  po::notify(vm);

  // Unrecognized options are collected and forwarded to global_init()
  // so the usual Ceph configuration options keep working.
  vector<const char *> ceph_options;
  vector<string> ceph_option_strings = po::collect_unrecognized(
    parsed.options, po::include_positional);
  ceph_options.reserve(ceph_option_strings.size());
  for (vector<string>::iterator i = ceph_option_strings.begin();
       i != ceph_option_strings.end();
       ++i) {
    ceph_options.push_back(i->c_str());
  }

  cct = global_init(
    NULL, ceph_options, CEPH_ENTITY_TYPE_CLIENT,
    CODE_ENVIRONMENT_UTILITY,
    CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);
  g_ceph_context->_conf.apply_changes(nullptr);

  if (vm.count("help")) {
    cout << desc << std::endl;
    return 1;
  }

  // Each --parameter key=value pair populates the erasure code profile.
  if (vm.count("parameter")) {
    const vector<string> &p = vm["parameter"].as< vector<string> >();
    for (vector<string>::const_iterator i = p.begin();
	 i != p.end();
	 ++i) {
      std::vector<std::string> strs;
      boost::split(strs, *i, boost::is_any_of("="));
      if (strs.size() != 2) {
	cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << endl;
      } else {
	profile[strs[0]] = strs[1];
      }
    }
  }

  in_size = vm["size"].as<int>();
  max_iterations = vm["iterations"].as<int>();
  plugin = vm["plugin"].as<string>();
  workload = vm["workload"].as<string>();
  erasures = vm["erasures"].as<int>();
  if (vm.count("erasures-generation") > 0 &&
      vm["erasures-generation"].as<string>() == "exhaustive")
    exhaustive_erasures = true;
  else
    exhaustive_erasures = false;
  if (vm.count("erased") > 0)
    erased = vm["erased"].as<vector<int> >();

  // k and m come from the profile (--parameter k=... m=...); stoi may
  // throw on missing or non-numeric values, reported here as -EINVAL.
  try {
    k = stoi(profile["k"]);
    m = stoi(profile["m"]);
  } catch (const std::logic_error& e) {
    cout << "Invalid k and/or m: k=" << profile["k"] << ", m=" << profile["m"]
	 << " (" << e.what() << ")" << endl;
    return -EINVAL;
  }
  if (k <= 0) {
    cout << "parameter k is " << k << ". But k needs to be > 0." << endl;
    return -EINVAL;
  } else if ( m < 0 ) {
    cout << "parameter m is " << m << ". But m needs to be >= 0." << endl;
    return -EINVAL;
  }
  verbose = vm.count("verbose") > 0 ? true : false;
  return 0;
}
int ErasureCodeBench::run() {
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
instance.disable_dlclose = true;
if (workload == "encode")
return encode();
else
return decode();
}
int ErasureCodeBench::encode()
{
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
ErasureCodeInterfaceRef erasure_code;
stringstream messages;
int code = instance.factory(plugin,
g_conf().get_val<std::string>("erasure_code_dir"),
profile, &erasure_code, &messages);
if (code) {
cerr << messages.str() << endl;
return code;
}
bufferlist in;
in.append(string(in_size, 'X'));
in.rebuild_aligned(ErasureCode::SIMD_ALIGN);
set<int> want_to_encode;
for (int i = 0; i < k + m; i++) {
want_to_encode.insert(i);
}
utime_t begin_time = ceph_clock_now();
for (int i = 0; i < max_iterations; i++) {
std::map<int,bufferlist> encoded;
code = erasure_code->encode(want_to_encode, in, &encoded);
if (code)
return code;
}
utime_t end_time = ceph_clock_now();
cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl;
return 0;
}
static void display_chunks(const map<int,bufferlist> &chunks,
unsigned int chunk_count) {
cout << "chunks ";
for (unsigned int chunk = 0; chunk < chunk_count; chunk++) {
if (chunks.count(chunk) == 0) {
cout << "(" << chunk << ")";
} else {
cout << " " << chunk << " ";
}
cout << " ";
}
cout << "(X) is an erased chunk" << endl;
}
// Recursively try every combination of 'want_erasures' chunk erasures,
// choosing erased chunk ids from [i, chunk_count).  At the bottom of
// the recursion (want_erasures == 0) the chunks missing from 'chunks'
// are decoded and compared against the pristine copies in
// 'all_chunks'.  Returns 0 when every recovered chunk matches, -1 on a
// mismatch, or the decode() error code.
int ErasureCodeBench::decode_erasures(const map<int,bufferlist> &all_chunks,
				      const map<int,bufferlist> &chunks,
				      unsigned i,
				      unsigned want_erasures,
				      ErasureCodeInterfaceRef erasure_code)
{
  int code = 0;

  if (want_erasures == 0) {
    if (verbose)
      display_chunks(chunks, erasure_code->get_chunk_count());
    // Whatever is absent from 'chunks' is what decode() must rebuild.
    set<int> want_to_read;
    for (unsigned int chunk = 0; chunk < erasure_code->get_chunk_count(); chunk++)
      if (chunks.count(chunk) == 0)
	want_to_read.insert(chunk);

    map<int,bufferlist> decoded;
    code = erasure_code->decode(want_to_read, chunks, &decoded, 0);
    if (code)
      return code;
    // Every recovered chunk must be byte identical to the original.
    for (set<int>::iterator chunk = want_to_read.begin();
	 chunk != want_to_read.end();
	 ++chunk) {
      if (all_chunks.find(*chunk)->second.length() != decoded[*chunk].length()) {
	cerr << "chunk " << *chunk << " length=" << all_chunks.find(*chunk)->second.length()
	     << " decoded with length=" << decoded[*chunk].length() << endl;
	return -1;
      }
      bufferlist tmp = all_chunks.find(*chunk)->second;
      if (!tmp.contents_equal(decoded[*chunk])) {
	cerr << "chunk " << *chunk
	     << " content and recovered content are different" << endl;
	return -1;
      }
    }
    return 0;
  }

  // Erase one more chunk (each candidate in turn) and recurse.
  for (; i < erasure_code->get_chunk_count(); i++) {
    map<int,bufferlist> one_less = chunks;
    one_less.erase(i);
    code = decode_erasures(all_chunks, one_less, i + 1, want_erasures - 1, erasure_code);
    if (code)
      return code;
  }
  return 0;
}
// Benchmark decode(): encode a buffer once, then repeatedly decode
// while simulating erasures.  Three modes:
//  - exhaustive (--erasures-generation=exhaustive): decode and verify
//    every combination of 'erasures' erased chunks,
//  - explicit (--erased ...): the listed chunks are removed once
//    before the timed loop,
//  - random (default): each iteration drops 'erasures' randomly chosen
//    chunks before decoding.
// Prints "<elapsed>\t<KiB processed>" and returns 0 on success.
int ErasureCodeBench::decode()
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  ErasureCodeInterfaceRef erasure_code;
  stringstream messages;
  int code = instance.factory(plugin,
			      g_conf().get_val<std::string>("erasure_code_dir"),
			      profile, &erasure_code, &messages);
  if (code) {
    cerr << messages.str() << endl;
    return code;
  }

  bufferlist in;
  in.append(string(in_size, 'X'));
  in.rebuild_aligned(ErasureCode::SIMD_ALIGN);

  set<int> want_to_encode;
  for (int i = 0; i < k + m; i++) {
    want_to_encode.insert(i);
  }

  map<int,bufferlist> encoded;
  code = erasure_code->encode(want_to_encode, in, &encoded);
  if (code)
    return code;

  set<int> want_to_read = want_to_encode;

  // Explicitly erased chunks are dropped once, outside the timed loop.
  if (erased.size() > 0) {
    for (vector<int>::const_iterator i = erased.begin();
	 i != erased.end();
	 ++i)
      encoded.erase(*i);
    display_chunks(encoded, erasure_code->get_chunk_count());
  }

  utime_t begin_time = ceph_clock_now();
  for (int i = 0; i < max_iterations; i++) {
    if (exhaustive_erasures) {
      code = decode_erasures(encoded, encoded, 0, erasures, erasure_code);
      if (code)
	return code;
    } else if (erased.size() > 0) {
      map<int,bufferlist> decoded;
      code = erasure_code->decode(want_to_read, encoded, &decoded, 0);
      if (code)
	return code;
    } else {
      // Drop 'erasures' distinct chunks chosen at random.
      map<int,bufferlist> chunks = encoded;
      for (int j = 0; j < erasures; j++) {
	int erasure;
	do {
	  erasure = rand() % ( k + m );
	} while(chunks.count(erasure) == 0);
	chunks.erase(erasure);
      }
      map<int,bufferlist> decoded;
      code = erasure_code->decode(want_to_read, chunks, &decoded, 0);
      if (code)
	return code;
    }
  }
  utime_t end_time = ceph_clock_now();
  cout << (end_time - begin_time) << "\t" << (max_iterations * (in_size / 1024)) << endl;
  return 0;
}
int main(int argc, char** argv) {
ErasureCodeBench ecbench;
try {
int err = ecbench.setup(argc, argv);
if (err)
return err;
return ecbench.run();
} catch(po::error &e) {
cerr << e.what() << endl;
return 1;
}
}
/*
* Local Variables:
* compile-command: "cd ../../../build ; make -j4 ceph_erasure_code_benchmark &&
* valgrind --tool=memcheck --leak-check=full \
* ./bin/ceph_erasure_code_benchmark \
* --plugin jerasure \
* --parameter directory=lib \
* --parameter technique=reed_sol_van \
* --parameter k=2 \
* --parameter m=2 \
* --iterations 1
* "
* End:
*/
| 10,659 | 29.028169 | 94 | cc |
null | ceph-main/src/test/erasure-code/ceph_erasure_code_benchmark.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
* Copyright (C) 2014 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_BENCHMARK_H
#define CEPH_ERASURE_CODE_BENCHMARK_H
#include <string>
#include <map>
#include <vector>
#include <boost/intrusive_ptr.hpp>
#include "include/buffer.h"
#include "common/ceph_context.h"
#include "erasure-code/ErasureCodeInterface.h"
// Command line driven erasure code encode/decode benchmark
// (implemented in ceph_erasure_code_benchmark.cc).
class ErasureCodeBench {
  int in_size;        // bytes in the buffer to encode (--size)
  int max_iterations; // number of encode/decode runs (--iterations)
  int erasures;       // chunks to erase when decoding (--erasures)
  int k;              // data chunks, taken from the profile
  int m;              // coding chunks, taken from the profile

  std::string plugin; // erasure code plugin name (--plugin)

  bool exhaustive_erasures;      // --erasures-generation=exhaustive
  std::vector<int> erased;       // explicit erased chunk ids (--erased)
  std::string workload;          // "encode" or "decode" (--workload)
  ceph::ErasureCodeProfile profile; // key=value pairs from --parameter

  bool verbose;                  // --verbose
  boost::intrusive_ptr<CephContext> cct; // keeps the Ceph context alive
public:
  // Parse argv, initialize the Ceph context and the fields above.
  int setup(int argc, char** argv);
  // Dispatch to encode() or decode() according to 'workload'.
  int run();
  // Decode every combination of 'want_erasures' erased chunks and
  // verify the recovered content against 'all_chunks'.
  int decode_erasures(const std::map<int, ceph::buffer::list> &all_chunks,
		      const std::map<int, ceph::buffer::list> &chunks,
		      unsigned i,
		      unsigned want_erasures,
		      ErasureCodeInterfaceRef erasure_code);
  int decode();
  int encode();
};
#endif
| 1,482 | 22.539683 | 74 | h |
null | ceph-main/src/test/erasure-code/ceph_erasure_code_non_regression.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Red Hat (C) 2014, 2015 Red Hat <contact@redhat.com>
*
* Author: Loic Dachary <loic@dachary.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <stdlib.h>
#include <boost/scoped_ptr.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/program_options/option.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/cmdline.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/algorithm/string.hpp>
#include "global/global_context.h"
#include "global/global_init.h"
#include "common/errno.h"
#include "common/ceph_context.h"
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "erasure-code/ErasureCodePlugin.h"
namespace po = boost::program_options;
using namespace std;
// Creates (--create) a reference payload plus its encoded chunks in a
// directory named after the encoding parameters, and later verifies
// (--check) that the same parameters still produce byte-identical
// chunks and can recover from erasures.
class ErasureCodeNonRegression {
  unsigned stripe_width;      // bytes in the buffer to encode (--stripe-width)
  string plugin;              // erasure code plugin name (--plugin)
  bool create;                // --create requested
  bool check;                 // --check requested
  string base;                // prefix for all paths (--base)
  string directory;           // <base>/plugin=<p> stripe-width=<w> [params...]
  ErasureCodeProfile profile; // key=value pairs from --parameter
  boost::intrusive_ptr<CephContext> cct; // keeps the Ceph context alive
 public:
  int setup(int argc, char** argv);
  int run();
  int run_create();
  int run_check();
  // Recover the chunks listed in 'erasures' from the others and
  // compare with the originals in 'chunks'.
  int decode_erasures(ErasureCodeInterfaceRef erasure_code,
		      set<int> erasures,
		      map<int,bufferlist> chunks);
  string content_path();                 // <directory>/content
  string chunk_path(unsigned int chunk); // <directory>/<chunk>
};
// Parse the command line and derive the working directory name from
// the plugin, stripe width and profile parameters so that different
// configurations never collide on disk.
// Returns 0 on success, 1 on --help or when neither --create nor
// --check was requested.
int ErasureCodeNonRegression::setup(int argc, char** argv) {

  po::options_description desc("Allowed options");
  desc.add_options()
    ("help,h", "produce help message")
    ("stripe-width,s", po::value<int>()->default_value(4 * 1024),
     "stripe_width, i.e. the size of the buffer to be encoded")
    ("plugin,p", po::value<string>()->default_value("jerasure"),
     "erasure code plugin name")
    ("base", po::value<string>()->default_value("."),
     "prefix all paths with base")
    ("parameter,P", po::value<vector<string> >(),
     "add a parameter to the erasure code profile")
    ("create", "create the erasure coded content in the directory")
    ("check", "check the content in the directory matches the chunks and vice versa")
    ;

  po::variables_map vm;
  po::parsed_options parsed =
    po::command_line_parser(argc, argv).options(desc).allow_unregistered().run();
  po::store(
    parsed,
    vm);
  po::notify(vm);

  // Unrecognized options are handed over to global_init() so standard
  // Ceph configuration options keep working.
  vector<const char *> ceph_options;
  vector<string> ceph_option_strings = po::collect_unrecognized(
    parsed.options, po::include_positional);
  ceph_options.reserve(ceph_option_strings.size());
  for (vector<string>::iterator i = ceph_option_strings.begin();
       i != ceph_option_strings.end();
       ++i) {
    ceph_options.push_back(i->c_str());
  }

  cct = global_init(NULL, ceph_options, CEPH_ENTITY_TYPE_CLIENT,
		    CODE_ENVIRONMENT_UTILITY,
		    CINIT_FLAG_NO_MON_CONFIG);
  common_init_finish(g_ceph_context);
  g_ceph_context->_conf.apply_changes(nullptr);

  if (vm.count("help")) {
    cout << desc << std::endl;
    return 1;
  }

  stripe_width = vm["stripe-width"].as<int>();
  plugin = vm["plugin"].as<string>();
  base = vm["base"].as<string>();
  check = vm.count("check") > 0;
  create = vm.count("create") > 0;

  if (!check && !create) {
    cerr << "must specifify either --check, or --create" << endl;
    return 1;
  }

  {
    stringstream path;
    path << base << "/" << "plugin=" << plugin << " stripe-width=" << stripe_width;
    directory = path.str();
  }

  // Each --parameter key=value pair both extends the profile and is
  // appended to the directory name.
  if (vm.count("parameter")) {
    const vector<string> &p = vm["parameter"].as< vector<string> >();
    for (vector<string>::const_iterator i = p.begin();
	 i != p.end();
	 ++i) {
      std::vector<std::string> strs;
      boost::split(strs, *i, boost::is_any_of("="));
      if (strs.size() != 2) {
	cerr << "--parameter " << *i << " ignored because it does not contain exactly one =" << endl;
      } else {
	profile[strs[0]] = strs[1];
      }
      directory += " " + *i;
    }
  }

  return 0;
}
// Run the requested phases in order: --create first, then --check.
// Stops at the first failing phase and propagates its return code.
int ErasureCodeNonRegression::run()
{
  if (create) {
    int rc = run_create();
    if (rc)
      return rc;
  }
  if (check) {
    int rc = run_check();
    if (rc)
      return rc;
  }
  return 0;
}
// Create the reference data set: write a pseudo-random payload of
// stripe_width bytes to <directory>/content, encode it and store each
// chunk in <directory>/<chunk id>.
// Returns non-zero on plugin, filesystem or encode error.
int ErasureCodeNonRegression::run_create()
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  ErasureCodeInterfaceRef erasure_code;
  stringstream messages;
  int code = instance.factory(plugin,
			      g_conf().get_val<std::string>("erasure_code_dir"),
			      profile, &erasure_code, &messages);
  if (code) {
    cerr << messages.str() << endl;
    return code;
  }
  if (::mkdir(directory.c_str(), 0755)) {
    cerr << "mkdir(" << directory << "): " << cpp_strerror(errno) << endl;
    return 1;
  }
  // Build the payload from a 37 byte random chunk repeated past
  // stripe_width, then trimmed to exactly stripe_width bytes.
  // NOTE(review): rand() is never seeded in this file, so the payload
  // looks reproducible from run to run — confirm that is intentional.
  unsigned payload_chunk_size = 37;
  string payload;
  for (unsigned j = 0; j < payload_chunk_size; ++j)
    payload.push_back('a' + (rand() % 26));
  bufferlist in;
  for (unsigned j = 0; j < stripe_width; j += payload_chunk_size)
    in.append(payload);
  if (stripe_width < in.length())
    in.splice(stripe_width, in.length() - stripe_width);
  if (in.write_file(content_path().c_str()))
    return 1;
  set<int> want_to_encode;
  for (unsigned int i = 0; i < erasure_code->get_chunk_count(); i++) {
    want_to_encode.insert(i);
  }
  map<int,bufferlist> encoded;
  code = erasure_code->encode(want_to_encode, in, &encoded);
  if (code)
    return code;
  // One file per chunk, named after the chunk id.
  for (map<int,bufferlist>::iterator chunk = encoded.begin();
       chunk != encoded.end();
       ++chunk) {
    if (chunk->second.write_file(chunk_path(chunk->first).c_str()))
      return 1;
  }
  return 0;
}
// Rebuild the chunks listed in 'erasures' from the remaining chunks
// and verify the recovered content is identical to the original in
// 'chunks'.  Returns 0 on success, 1 on content mismatch, or the
// decode() error code.
int ErasureCodeNonRegression::decode_erasures(ErasureCodeInterfaceRef erasure_code,
					      set<int> erasures,
					      map<int,bufferlist> chunks)
{
  // Feed decode() only the chunks that were not erased.
  map<int,bufferlist> available;
  for (map<int,bufferlist>::iterator chunk = chunks.begin();
       chunk != chunks.end();
       ++chunk) {
    if (erasures.count(chunk->first) == 0)
      available[chunk->first] = chunk->second;
  }
  map<int,bufferlist> decoded;
  int code = erasure_code->decode(erasures, available, &decoded, available.begin()->second.length());
  if (code)
    return code;
  for (set<int>::iterator erasure = erasures.begin();
       erasure != erasures.end();
       ++erasure) {
    if (!chunks[*erasure].contents_equal(decoded[*erasure])) {
      cerr << "chunk " << *erasure << " incorrectly recovered" << endl;
      return 1;
    }
  }
  return 0;
}
// Verify the data set written by run_create(): re-encode the stored
// payload and require every chunk to be byte identical to its stored
// counterpart, then check recovery from one erased chunk and, when the
// code has more than one coding chunk, from two erased chunks.
// Returns 0 when everything matches, non-zero otherwise.
int ErasureCodeNonRegression::run_check()
{
  ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
  ErasureCodeInterfaceRef erasure_code;
  stringstream messages;
  int code = instance.factory(plugin,
			      g_conf().get_val<std::string>("erasure_code_dir"),
			      profile, &erasure_code, &messages);
  if (code) {
    cerr << messages.str() << endl;
    return code;
  }
  string errors;
  bufferlist in;
  if (in.read_file(content_path().c_str(), &errors)) {
    cerr << errors << endl;
    return 1;
  }
  set<int> want_to_encode;
  for (unsigned int i = 0; i < erasure_code->get_chunk_count(); i++) {
    want_to_encode.insert(i);
  }

  map<int,bufferlist> encoded;
  code = erasure_code->encode(want_to_encode, in, &encoded);
  if (code)
    return code;

  // Compare each freshly encoded chunk with the stored chunk file.
  for (map<int,bufferlist>::iterator chunk = encoded.begin();
       chunk != encoded.end();
       ++chunk) {
    bufferlist existing;
    if (existing.read_file(chunk_path(chunk->first).c_str(), &errors)) {
      cerr << errors << endl;
      return 1;
    }
    bufferlist &old = chunk->second;
    if (existing.length() != old.length() ||
	memcmp(existing.c_str(), old.c_str(), old.length())) {
      cerr << "chunk " << chunk->first << " encodes differently" << endl;
      return 1;
    }
  }

  // erasing a single chunk is likely to use a specific code path in every plugin
  set<int> erasures;
  erasures.clear();
  erasures.insert(0);
  code = decode_erasures(erasure_code, erasures, encoded);
  if (code)
    return code;

  if (erasure_code->get_chunk_count() - erasure_code->get_data_chunk_count() > 1) {
    // erasing two chunks is likely to be the general case
    erasures.clear();
    erasures.insert(0);
    erasures.insert(erasure_code->get_chunk_count() - 1);
    code = decode_erasures(erasure_code, erasures, encoded);
    if (code)
      return code;
  }

  return 0;
}
// Path of the file holding the original (pre-encoding) payload.
string ErasureCodeNonRegression::content_path()
{
  return directory + "/content";
}
// Path of the file holding the encoded chunk with the given index.
string ErasureCodeNonRegression::chunk_path(unsigned int chunk)
{
  return directory + "/" + std::to_string(chunk);
}
int main(int argc, char** argv) {
ErasureCodeNonRegression non_regression;
int err = non_regression.setup(argc, argv);
if (err)
return err;
return non_regression.run();
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 &&
* make ceph_erasure_code_non_regression &&
* libtool --mode=execute valgrind --tool=memcheck --leak-check=full \
* ./ceph_erasure_code_non_regression \
* --plugin jerasure \
* --parameter technique=reed_sol_van \
* --parameter k=2 \
* --parameter m=2 \
* --directory /tmp/ceph_erasure_code_non_regression \
* --stripe-width 3181 \
* --create \
* --check
* "
* End:
*/
| 9,521 | 28.030488 | 101 | cc |
null | ceph-main/src/test/exporter/test_exporter.cc | #include "gtest/gtest.h"
#include "exporter/util.h"
#include <string>
#include <vector>
#include <utility>
// 17.2.6's memento mori:
// This data was gathered from the python implementation of the promethize method
// where we transform the path of a counter to a valid prometheus name.
static std::vector<std::pair<std::string, std::string>> promethize_data = {
{"bluefs.alloc_slow_fallback", "ceph_bluefs_alloc_slow_fallback"},
{"bluefs.alloc_slow_size_fallback", "ceph_bluefs_alloc_slow_size_fallback"},
{"bluefs.alloc_unit_db", "ceph_bluefs_alloc_unit_db"},
{"bluefs.alloc_unit_main", "ceph_bluefs_alloc_unit_main"},
{"bluefs.alloc_unit_wal", "ceph_bluefs_alloc_unit_wal"},
{"bluefs.bytes_written_slow", "ceph_bluefs_bytes_written_slow"},
{"bluefs.bytes_written_sst", "ceph_bluefs_bytes_written_sst"},
{"bluefs.bytes_written_wal", "ceph_bluefs_bytes_written_wal"},
{"bluefs.compact_lat_count", "ceph_bluefs_compact_lat_count"},
{"bluefs.compact_lat_sum", "ceph_bluefs_compact_lat_sum"},
{"bluefs.compact_lock_lat_count", "ceph_bluefs_compact_lock_lat_count"},
{"bluefs.compact_lock_lat_sum", "ceph_bluefs_compact_lock_lat_sum"},
{"bluefs.db_total_bytes", "ceph_bluefs_db_total_bytes"},
{"bluefs.db_used_bytes", "ceph_bluefs_db_used_bytes"},
{"bluefs.log_bytes", "ceph_bluefs_log_bytes"},
{"bluefs.logged_bytes", "ceph_bluefs_logged_bytes"},
{"bluefs.max_bytes_db", "ceph_bluefs_max_bytes_db"},
{"bluefs.max_bytes_slow", "ceph_bluefs_max_bytes_slow"},
{"bluefs.max_bytes_wal", "ceph_bluefs_max_bytes_wal"},
{"bluefs.num_files", "ceph_bluefs_num_files"},
{"bluefs.read_bytes", "ceph_bluefs_read_bytes"},
{"bluefs.read_count", "ceph_bluefs_read_count"},
{"bluefs.read_disk_bytes", "ceph_bluefs_read_disk_bytes"},
{"bluefs.read_disk_bytes_db", "ceph_bluefs_read_disk_bytes_db"},
{"bluefs.read_disk_bytes_slow", "ceph_bluefs_read_disk_bytes_slow"},
{"bluefs.read_disk_bytes_wal", "ceph_bluefs_read_disk_bytes_wal"},
{"bluefs.read_disk_count", "ceph_bluefs_read_disk_count"},
{"bluefs.read_prefetch_bytes", "ceph_bluefs_read_prefetch_bytes"},
{"bluefs.read_prefetch_count", "ceph_bluefs_read_prefetch_count"},
{"bluefs.read_random_buffer_bytes", "ceph_bluefs_read_random_buffer_bytes"},
{"bluefs.read_random_buffer_count", "ceph_bluefs_read_random_buffer_count"},
{"bluefs.read_random_bytes", "ceph_bluefs_read_random_bytes"},
{"bluefs.read_random_count", "ceph_bluefs_read_random_count"},
{"bluefs.read_random_disk_bytes", "ceph_bluefs_read_random_disk_bytes"},
{"bluefs.read_random_disk_bytes_db", "ceph_bluefs_read_random_disk_bytes_db"},
{"bluefs.read_random_disk_bytes_slow", "ceph_bluefs_read_random_disk_bytes_slow"},
{"bluefs.read_random_disk_bytes_wal", "ceph_bluefs_read_random_disk_bytes_wal"},
{"bluefs.read_random_disk_count", "ceph_bluefs_read_random_disk_count"},
{"bluefs.slow_total_bytes", "ceph_bluefs_slow_total_bytes"},
{"bluefs.slow_used_bytes", "ceph_bluefs_slow_used_bytes"},
{"bluefs.wal_total_bytes", "ceph_bluefs_wal_total_bytes"},
{"bluefs.wal_used_bytes", "ceph_bluefs_wal_used_bytes"},
{"bluestore-pricache.cache_bytes", "ceph_bluestore_pricache_cache_bytes"},
{"bluestore-pricache.heap_bytes", "ceph_bluestore_pricache_heap_bytes"},
{"bluestore-pricache.mapped_bytes", "ceph_bluestore_pricache_mapped_bytes"},
{"bluestore-pricache.target_bytes", "ceph_bluestore_pricache_target_bytes"},
{"bluestore-pricache.unmapped_bytes", "ceph_bluestore_pricache_unmapped_bytes"},
{"bluestore-pricache:data.committed_bytes", "ceph_bluestore_pricache:data_committed_bytes"},
{"bluestore-pricache:data.pri0_bytes", "ceph_bluestore_pricache:data_pri0_bytes"},
{"bluestore-pricache:data.pri10_bytes", "ceph_bluestore_pricache:data_pri10_bytes"},
{"bluestore-pricache:data.pri11_bytes", "ceph_bluestore_pricache:data_pri11_bytes"},
{"bluestore-pricache:data.pri1_bytes", "ceph_bluestore_pricache:data_pri1_bytes"},
{"bluestore-pricache:data.pri2_bytes", "ceph_bluestore_pricache:data_pri2_bytes"},
{"bluestore-pricache:data.pri3_bytes", "ceph_bluestore_pricache:data_pri3_bytes"},
{"bluestore-pricache:data.pri4_bytes", "ceph_bluestore_pricache:data_pri4_bytes"},
{"bluestore-pricache:data.pri5_bytes", "ceph_bluestore_pricache:data_pri5_bytes"},
{"bluestore-pricache:data.pri6_bytes", "ceph_bluestore_pricache:data_pri6_bytes"},
{"bluestore-pricache:data.pri7_bytes", "ceph_bluestore_pricache:data_pri7_bytes"},
{"bluestore-pricache:data.pri8_bytes", "ceph_bluestore_pricache:data_pri8_bytes"},
{"bluestore-pricache:data.pri9_bytes", "ceph_bluestore_pricache:data_pri9_bytes"},
{"bluestore-pricache:data.reserved_bytes", "ceph_bluestore_pricache:data_reserved_bytes"},
{"bluestore-pricache:kv.committed_bytes", "ceph_bluestore_pricache:kv_committed_bytes"},
{"bluestore-pricache:kv.pri0_bytes", "ceph_bluestore_pricache:kv_pri0_bytes"},
{"bluestore-pricache:kv.pri10_bytes", "ceph_bluestore_pricache:kv_pri10_bytes"},
{"bluestore-pricache:kv.pri11_bytes", "ceph_bluestore_pricache:kv_pri11_bytes"},
{"bluestore-pricache:kv.pri1_bytes", "ceph_bluestore_pricache:kv_pri1_bytes"},
{"bluestore-pricache:kv.pri2_bytes", "ceph_bluestore_pricache:kv_pri2_bytes"},
{"bluestore-pricache:kv.pri3_bytes", "ceph_bluestore_pricache:kv_pri3_bytes"},
{"bluestore-pricache:kv.pri4_bytes", "ceph_bluestore_pricache:kv_pri4_bytes"},
{"bluestore-pricache:kv.pri5_bytes", "ceph_bluestore_pricache:kv_pri5_bytes"},
{"bluestore-pricache:kv.pri6_bytes", "ceph_bluestore_pricache:kv_pri6_bytes"},
{"bluestore-pricache:kv.pri7_bytes", "ceph_bluestore_pricache:kv_pri7_bytes"},
{"bluestore-pricache:kv.pri8_bytes", "ceph_bluestore_pricache:kv_pri8_bytes"},
{"bluestore-pricache:kv.pri9_bytes", "ceph_bluestore_pricache:kv_pri9_bytes"},
{"bluestore-pricache:kv.reserved_bytes", "ceph_bluestore_pricache:kv_reserved_bytes"},
{"bluestore-pricache:kv_onode.committed_bytes", "ceph_bluestore_pricache:kv_onode_committed_bytes"},
{"bluestore-pricache:kv_onode.pri0_bytes", "ceph_bluestore_pricache:kv_onode_pri0_bytes"},
{"bluestore-pricache:kv_onode.pri10_bytes", "ceph_bluestore_pricache:kv_onode_pri10_bytes"},
{"bluestore-pricache:kv_onode.pri11_bytes", "ceph_bluestore_pricache:kv_onode_pri11_bytes"},
{"bluestore-pricache:kv_onode.pri1_bytes", "ceph_bluestore_pricache:kv_onode_pri1_bytes"},
{"bluestore-pricache:kv_onode.pri2_bytes", "ceph_bluestore_pricache:kv_onode_pri2_bytes"},
{"bluestore-pricache:kv_onode.pri3_bytes", "ceph_bluestore_pricache:kv_onode_pri3_bytes"},
{"bluestore-pricache:kv_onode.pri4_bytes", "ceph_bluestore_pricache:kv_onode_pri4_bytes"},
{"bluestore-pricache:kv_onode.pri5_bytes", "ceph_bluestore_pricache:kv_onode_pri5_bytes"},
{"bluestore-pricache:kv_onode.pri6_bytes", "ceph_bluestore_pricache:kv_onode_pri6_bytes"},
{"bluestore-pricache:kv_onode.pri7_bytes", "ceph_bluestore_pricache:kv_onode_pri7_bytes"},
{"bluestore-pricache:kv_onode.pri8_bytes", "ceph_bluestore_pricache:kv_onode_pri8_bytes"},
{"bluestore-pricache:kv_onode.pri9_bytes", "ceph_bluestore_pricache:kv_onode_pri9_bytes"},
{"bluestore-pricache:kv_onode.reserved_bytes", "ceph_bluestore_pricache:kv_onode_reserved_bytes"},
{"bluestore-pricache:meta.committed_bytes", "ceph_bluestore_pricache:meta_committed_bytes"},
{"bluestore-pricache:meta.pri0_bytes", "ceph_bluestore_pricache:meta_pri0_bytes"},
{"bluestore-pricache:meta.pri10_bytes", "ceph_bluestore_pricache:meta_pri10_bytes"},
{"bluestore-pricache:meta.pri11_bytes", "ceph_bluestore_pricache:meta_pri11_bytes"},
{"bluestore-pricache:meta.pri1_bytes", "ceph_bluestore_pricache:meta_pri1_bytes"},
{"bluestore-pricache:meta.pri2_bytes", "ceph_bluestore_pricache:meta_pri2_bytes"},
{"bluestore-pricache:meta.pri3_bytes", "ceph_bluestore_pricache:meta_pri3_bytes"},
{"bluestore-pricache:meta.pri4_bytes", "ceph_bluestore_pricache:meta_pri4_bytes"},
{"bluestore-pricache:meta.pri5_bytes", "ceph_bluestore_pricache:meta_pri5_bytes"},
{"bluestore-pricache:meta.pri6_bytes", "ceph_bluestore_pricache:meta_pri6_bytes"},
{"bluestore-pricache:meta.pri7_bytes", "ceph_bluestore_pricache:meta_pri7_bytes"},
{"bluestore-pricache:meta.pri8_bytes", "ceph_bluestore_pricache:meta_pri8_bytes"},
{"bluestore-pricache:meta.pri9_bytes", "ceph_bluestore_pricache:meta_pri9_bytes"},
{"bluestore-pricache:meta.reserved_bytes", "ceph_bluestore_pricache:meta_reserved_bytes"},
{"bluestore.alloc_unit", "ceph_bluestore_alloc_unit"},
{"bluestore.allocated", "ceph_bluestore_allocated"},
{"bluestore.clist_lat_count", "ceph_bluestore_clist_lat_count"},
{"bluestore.clist_lat_sum", "ceph_bluestore_clist_lat_sum"},
{"bluestore.compress_lat_count", "ceph_bluestore_compress_lat_count"},
{"bluestore.compress_lat_sum", "ceph_bluestore_compress_lat_sum"},
{"bluestore.compressed", "ceph_bluestore_compressed"},
{"bluestore.compressed_allocated", "ceph_bluestore_compressed_allocated"},
{"bluestore.compressed_original", "ceph_bluestore_compressed_original"},
{"bluestore.csum_lat_count", "ceph_bluestore_csum_lat_count"},
{"bluestore.csum_lat_sum", "ceph_bluestore_csum_lat_sum"},
{"bluestore.decompress_lat_count", "ceph_bluestore_decompress_lat_count"},
{"bluestore.decompress_lat_sum", "ceph_bluestore_decompress_lat_sum"},
{"bluestore.kv_commit_lat_count", "ceph_bluestore_kv_commit_lat_count"},
{"bluestore.kv_commit_lat_sum", "ceph_bluestore_kv_commit_lat_sum"},
{"bluestore.kv_final_lat_count", "ceph_bluestore_kv_final_lat_count"},
{"bluestore.kv_final_lat_sum", "ceph_bluestore_kv_final_lat_sum"},
{"bluestore.kv_flush_lat_count", "ceph_bluestore_kv_flush_lat_count"},
{"bluestore.kv_flush_lat_sum", "ceph_bluestore_kv_flush_lat_sum"},
{"bluestore.kv_sync_lat_count", "ceph_bluestore_kv_sync_lat_count"},
{"bluestore.kv_sync_lat_sum", "ceph_bluestore_kv_sync_lat_sum"},
{"bluestore.omap_get_keys_lat_count", "ceph_bluestore_omap_get_keys_lat_count"},
{"bluestore.omap_get_keys_lat_sum", "ceph_bluestore_omap_get_keys_lat_sum"},
{"bluestore.omap_get_values_lat_count", "ceph_bluestore_omap_get_values_lat_count"},
{"bluestore.omap_get_values_lat_sum", "ceph_bluestore_omap_get_values_lat_sum"},
{"bluestore.omap_lower_bound_lat_count", "ceph_bluestore_omap_lower_bound_lat_count"},
{"bluestore.omap_lower_bound_lat_sum", "ceph_bluestore_omap_lower_bound_lat_sum"},
{"bluestore.omap_next_lat_count", "ceph_bluestore_omap_next_lat_count"},
{"bluestore.omap_next_lat_sum", "ceph_bluestore_omap_next_lat_sum"},
{"bluestore.omap_seek_to_first_lat_count", "ceph_bluestore_omap_seek_to_first_lat_count"},
{"bluestore.omap_seek_to_first_lat_sum", "ceph_bluestore_omap_seek_to_first_lat_sum"},
{"bluestore.omap_upper_bound_lat_count", "ceph_bluestore_omap_upper_bound_lat_count"},
{"bluestore.omap_upper_bound_lat_sum", "ceph_bluestore_omap_upper_bound_lat_sum"},
{"bluestore.onode_hits", "ceph_bluestore_onode_hits"},
{"bluestore.onode_misses", "ceph_bluestore_onode_misses"},
{"bluestore.read_lat_count", "ceph_bluestore_read_lat_count"},
{"bluestore.read_lat_sum", "ceph_bluestore_read_lat_sum"},
{"bluestore.read_onode_meta_lat_count", "ceph_bluestore_read_onode_meta_lat_count"},
{"bluestore.read_onode_meta_lat_sum", "ceph_bluestore_read_onode_meta_lat_sum"},
{"bluestore.read_wait_aio_lat_count", "ceph_bluestore_read_wait_aio_lat_count"},
{"bluestore.read_wait_aio_lat_sum", "ceph_bluestore_read_wait_aio_lat_sum"},
{"bluestore.reads_with_retries", "ceph_bluestore_reads_with_retries"},
{"bluestore.remove_lat_count", "ceph_bluestore_remove_lat_count"},
{"bluestore.remove_lat_sum", "ceph_bluestore_remove_lat_sum"},
{"bluestore.state_aio_wait_lat_count", "ceph_bluestore_state_aio_wait_lat_count"},
{"bluestore.state_aio_wait_lat_sum", "ceph_bluestore_state_aio_wait_lat_sum"},
{"bluestore.state_deferred_aio_wait_lat_count", "ceph_bluestore_state_deferred_aio_wait_lat_count"},
{"bluestore.state_deferred_aio_wait_lat_sum", "ceph_bluestore_state_deferred_aio_wait_lat_sum"},
{"bluestore.state_deferred_cleanup_lat_count", "ceph_bluestore_state_deferred_cleanup_lat_count"},
{"bluestore.state_deferred_cleanup_lat_sum", "ceph_bluestore_state_deferred_cleanup_lat_sum"},
{"bluestore.state_deferred_queued_lat_count", "ceph_bluestore_state_deferred_queued_lat_count"},
{"bluestore.state_deferred_queued_lat_sum", "ceph_bluestore_state_deferred_queued_lat_sum"},
{"bluestore.state_done_lat_count", "ceph_bluestore_state_done_lat_count"},
{"bluestore.state_done_lat_sum", "ceph_bluestore_state_done_lat_sum"},
{"bluestore.state_finishing_lat_count", "ceph_bluestore_state_finishing_lat_count"},
{"bluestore.state_finishing_lat_sum", "ceph_bluestore_state_finishing_lat_sum"},
{"bluestore.state_io_done_lat_count", "ceph_bluestore_state_io_done_lat_count"},
{"bluestore.state_io_done_lat_sum", "ceph_bluestore_state_io_done_lat_sum"},
{"bluestore.state_kv_commiting_lat_count", "ceph_bluestore_state_kv_commiting_lat_count"},
{"bluestore.state_kv_commiting_lat_sum", "ceph_bluestore_state_kv_commiting_lat_sum"},
{"bluestore.state_kv_done_lat_count", "ceph_bluestore_state_kv_done_lat_count"},
{"bluestore.state_kv_done_lat_sum", "ceph_bluestore_state_kv_done_lat_sum"},
{"bluestore.state_kv_queued_lat_count", "ceph_bluestore_state_kv_queued_lat_count"},
{"bluestore.state_kv_queued_lat_sum", "ceph_bluestore_state_kv_queued_lat_sum"},
{"bluestore.state_prepare_lat_count", "ceph_bluestore_state_prepare_lat_count"},
{"bluestore.state_prepare_lat_sum", "ceph_bluestore_state_prepare_lat_sum"},
{"bluestore.stored", "ceph_bluestore_stored"},
{"bluestore.truncate_lat_count", "ceph_bluestore_truncate_lat_count"},
{"bluestore.truncate_lat_sum", "ceph_bluestore_truncate_lat_sum"},
{"bluestore.txc_commit_lat_count", "ceph_bluestore_txc_commit_lat_count"},
{"bluestore.txc_commit_lat_sum", "ceph_bluestore_txc_commit_lat_sum"},
{"bluestore.txc_submit_lat_count", "ceph_bluestore_txc_submit_lat_count"},
{"bluestore.txc_submit_lat_sum", "ceph_bluestore_txc_submit_lat_sum"},
{"bluestore.txc_throttle_lat_count", "ceph_bluestore_txc_throttle_lat_count"},
{"bluestore.txc_throttle_lat_sum", "ceph_bluestore_txc_throttle_lat_sum"},
{"cluster_by_class_total_bytes", "ceph_cluster_by_class_total_bytes"},
{"cluster_by_class_total_used_bytes", "ceph_cluster_by_class_total_used_bytes"},
{"cluster_by_class_total_used_raw_bytes", "ceph_cluster_by_class_total_used_raw_bytes"},
{"cluster_osd_blocklist_count", "ceph_cluster_osd_blocklist_count"},
{"cluster_total_bytes", "ceph_cluster_total_bytes"},
{"cluster_total_used_bytes", "ceph_cluster_total_used_bytes"},
{"cluster_total_used_raw_bytes", "ceph_cluster_total_used_raw_bytes"},
{"daemon_health_metrics", "ceph_daemon_health_metrics"},
{"disk_occupation", "ceph_disk_occupation"},
{"disk_occupation_human", "ceph_disk_occupation_human"},
{"fs_metadata", "ceph_fs_metadata"},
{"health_detail", "ceph_health_detail"},
{"health_status", "ceph_health_status"},
{"healthcheck_slow_ops", "ceph_healthcheck_slow_ops"},
{"mds.caps", "ceph_mds_caps"},
{"mds.ceph_cap_op_flush_ack", "ceph_mds_ceph_cap_op_flush_ack"},
{"mds.ceph_cap_op_flushsnap_ack", "ceph_mds_ceph_cap_op_flushsnap_ack"},
{"mds.ceph_cap_op_grant", "ceph_mds_ceph_cap_op_grant"},
{"mds.ceph_cap_op_revoke", "ceph_mds_ceph_cap_op_revoke"},
{"mds.ceph_cap_op_trunc", "ceph_mds_ceph_cap_op_trunc"},
{"mds.dir_commit", "ceph_mds_dir_commit"},
{"mds.dir_fetch_complete", "ceph_mds_dir_fetch_complete"},
{"mds.dir_fetch_keys", "ceph_mds_dir_fetch_keys"},
{"mds.dir_merge", "ceph_mds_dir_merge"},
{"mds.dir_split", "ceph_mds_dir_split"},
{"mds.exported_inodes", "ceph_mds_exported_inodes"},
{"mds.forward", "ceph_mds_forward"},
{"mds.handle_client_cap_release", "ceph_mds_handle_client_cap_release"},
{"mds.handle_client_caps", "ceph_mds_handle_client_caps"},
{"mds.handle_client_caps_dirty", "ceph_mds_handle_client_caps_dirty"},
{"mds.handle_inode_file_caps", "ceph_mds_handle_inode_file_caps"},
{"mds.imported_inodes", "ceph_mds_imported_inodes"},
{"mds.inodes", "ceph_mds_inodes"},
{"mds.inodes_expired", "ceph_mds_inodes_expired"},
{"mds.inodes_pinned", "ceph_mds_inodes_pinned"},
{"mds.inodes_with_caps", "ceph_mds_inodes_with_caps"},
{"mds.load_cent", "ceph_mds_load_cent"},
{"mds.openino_dir_fetch", "ceph_mds_openino_dir_fetch"},
{"mds.process_request_cap_release", "ceph_mds_process_request_cap_release"},
{"mds.reply_latency_count", "ceph_mds_reply_latency_count"},
{"mds.reply_latency_sum", "ceph_mds_reply_latency_sum"},
{"mds.request", "ceph_mds_request"},
{"mds.root_rbytes", "ceph_mds_root_rbytes"},
{"mds.root_rfiles", "ceph_mds_root_rfiles"},
{"mds.root_rsnaps", "ceph_mds_root_rsnaps"},
{"mds.slow_reply", "ceph_mds_slow_reply"},
{"mds.subtrees", "ceph_mds_subtrees"},
{"mds_cache.ireq_enqueue_scrub", "ceph_mds_cache_ireq_enqueue_scrub"},
{"mds_cache.ireq_exportdir", "ceph_mds_cache_ireq_exportdir"},
{"mds_cache.ireq_flush", "ceph_mds_cache_ireq_flush"},
{"mds_cache.ireq_fragmentdir", "ceph_mds_cache_ireq_fragmentdir"},
{"mds_cache.ireq_fragstats", "ceph_mds_cache_ireq_fragstats"},
{"mds_cache.ireq_inodestats", "ceph_mds_cache_ireq_inodestats"},
{"mds_cache.num_recovering_enqueued", "ceph_mds_cache_num_recovering_enqueued"},
{"mds_cache.num_recovering_prioritized", "ceph_mds_cache_num_recovering_prioritized"},
{"mds_cache.num_recovering_processing", "ceph_mds_cache_num_recovering_processing"},
{"mds_cache.num_strays", "ceph_mds_cache_num_strays"},
{"mds_cache.num_strays_delayed", "ceph_mds_cache_num_strays_delayed"},
{"mds_cache.num_strays_enqueuing", "ceph_mds_cache_num_strays_enqueuing"},
{"mds_cache.recovery_completed", "ceph_mds_cache_recovery_completed"},
{"mds_cache.recovery_started", "ceph_mds_cache_recovery_started"},
{"mds_cache.strays_created", "ceph_mds_cache_strays_created"},
{"mds_cache.strays_enqueued", "ceph_mds_cache_strays_enqueued"},
{"mds_cache.strays_migrated", "ceph_mds_cache_strays_migrated"},
{"mds_cache.strays_reintegrated", "ceph_mds_cache_strays_reintegrated"},
{"mds_log.ev", "ceph_mds_log_ev"},
{"mds_log.evadd", "ceph_mds_log_evadd"},
{"mds_log.evex", "ceph_mds_log_evex"},
{"mds_log.evexd", "ceph_mds_log_evexd"},
{"mds_log.evexg", "ceph_mds_log_evexg"},
{"mds_log.evtrm", "ceph_mds_log_evtrm"},
{"mds_log.jlat_count", "ceph_mds_log_jlat_count"},
{"mds_log.jlat_sum", "ceph_mds_log_jlat_sum"},
{"mds_log.replayed", "ceph_mds_log_replayed"},
{"mds_log.seg", "ceph_mds_log_seg"},
{"mds_log.segadd", "ceph_mds_log_segadd"},
{"mds_log.segex", "ceph_mds_log_segex"},
{"mds_log.segexd", "ceph_mds_log_segexd"},
{"mds_log.segexg", "ceph_mds_log_segexg"},
{"mds_log.segtrm", "ceph_mds_log_segtrm"},
{"mds_mem.cap", "ceph_mds_mem_cap"},
{"mds_mem.cap+", "ceph_mds_mem_cap_plus"},
{"mds_mem.cap-", "ceph_mds_mem_cap_minus"},
{"mds_mem.dir", "ceph_mds_mem_dir"},
{"mds_mem.dir+", "ceph_mds_mem_dir_plus"},
{"mds_mem.dir-", "ceph_mds_mem_dir_minus"},
{"mds_mem.dn", "ceph_mds_mem_dn"},
{"mds_mem.dn+", "ceph_mds_mem_dn_plus"},
{"mds_mem.dn-", "ceph_mds_mem_dn_minus"},
{"mds_mem.heap", "ceph_mds_mem_heap"},
{"mds_mem.ino", "ceph_mds_mem_ino"},
{"mds_mem.ino+", "ceph_mds_mem_ino_plus"},
{"mds_mem.ino-", "ceph_mds_mem_ino_minus"},
{"mds_metadata", "ceph_mds_metadata"},
{"mds_server.cap_acquisition_throttle", "ceph_mds_server_cap_acquisition_throttle"},
{"mds_server.cap_revoke_eviction", "ceph_mds_server_cap_revoke_eviction"},
{"mds_server.handle_client_request", "ceph_mds_server_handle_client_request"},
{"mds_server.handle_client_session", "ceph_mds_server_handle_client_session"},
{"mds_server.handle_peer_request", "ceph_mds_server_handle_peer_request"},
{"mds_server.req_create_latency_count", "ceph_mds_server_req_create_latency_count"},
{"mds_server.req_create_latency_sum", "ceph_mds_server_req_create_latency_sum"},
{"mds_server.req_getattr_latency_count", "ceph_mds_server_req_getattr_latency_count"},
{"mds_server.req_getattr_latency_sum", "ceph_mds_server_req_getattr_latency_sum"},
{"mds_server.req_getfilelock_latency_count", "ceph_mds_server_req_getfilelock_latency_count"},
{"mds_server.req_getfilelock_latency_sum", "ceph_mds_server_req_getfilelock_latency_sum"},
{"mds_server.req_getvxattr_latency_count", "ceph_mds_server_req_getvxattr_latency_count"},
{"mds_server.req_getvxattr_latency_sum", "ceph_mds_server_req_getvxattr_latency_sum"},
{"mds_server.req_link_latency_count", "ceph_mds_server_req_link_latency_count"},
{"mds_server.req_link_latency_sum", "ceph_mds_server_req_link_latency_sum"},
{"mds_server.req_lookup_latency_count", "ceph_mds_server_req_lookup_latency_count"},
{"mds_server.req_lookup_latency_sum", "ceph_mds_server_req_lookup_latency_sum"},
{"mds_server.req_lookuphash_latency_count", "ceph_mds_server_req_lookuphash_latency_count"},
{"mds_server.req_lookuphash_latency_sum", "ceph_mds_server_req_lookuphash_latency_sum"},
{"mds_server.req_lookupino_latency_count", "ceph_mds_server_req_lookupino_latency_count"},
{"mds_server.req_lookupino_latency_sum", "ceph_mds_server_req_lookupino_latency_sum"},
{"mds_server.req_lookupname_latency_count", "ceph_mds_server_req_lookupname_latency_count"},
{"mds_server.req_lookupname_latency_sum", "ceph_mds_server_req_lookupname_latency_sum"},
{"mds_server.req_lookupparent_latency_count", "ceph_mds_server_req_lookupparent_latency_count"},
{"mds_server.req_lookupparent_latency_sum", "ceph_mds_server_req_lookupparent_latency_sum"},
{"mds_server.req_lookupsnap_latency_count", "ceph_mds_server_req_lookupsnap_latency_count"},
{"mds_server.req_lookupsnap_latency_sum", "ceph_mds_server_req_lookupsnap_latency_sum"},
{"mds_server.req_lssnap_latency_count", "ceph_mds_server_req_lssnap_latency_count"},
{"mds_server.req_lssnap_latency_sum", "ceph_mds_server_req_lssnap_latency_sum"},
{"mds_server.req_mkdir_latency_count", "ceph_mds_server_req_mkdir_latency_count"},
{"mds_server.req_mkdir_latency_sum", "ceph_mds_server_req_mkdir_latency_sum"},
{"mds_server.req_mknod_latency_count", "ceph_mds_server_req_mknod_latency_count"},
{"mds_server.req_mknod_latency_sum", "ceph_mds_server_req_mknod_latency_sum"},
{"mds_server.req_mksnap_latency_count", "ceph_mds_server_req_mksnap_latency_count"},
{"mds_server.req_mksnap_latency_sum", "ceph_mds_server_req_mksnap_latency_sum"},
{"mds_server.req_open_latency_count", "ceph_mds_server_req_open_latency_count"},
{"mds_server.req_open_latency_sum", "ceph_mds_server_req_open_latency_sum"},
{"mds_server.req_readdir_latency_count", "ceph_mds_server_req_readdir_latency_count"},
{"mds_server.req_readdir_latency_sum", "ceph_mds_server_req_readdir_latency_sum"},
{"mds_server.req_rename_latency_count", "ceph_mds_server_req_rename_latency_count"},
{"mds_server.req_rename_latency_sum", "ceph_mds_server_req_rename_latency_sum"},
{"mds_server.req_renamesnap_latency_count", "ceph_mds_server_req_renamesnap_latency_count"},
{"mds_server.req_renamesnap_latency_sum", "ceph_mds_server_req_renamesnap_latency_sum"},
{"mds_server.req_rmdir_latency_count", "ceph_mds_server_req_rmdir_latency_count"},
{"mds_server.req_rmdir_latency_sum", "ceph_mds_server_req_rmdir_latency_sum"},
{"mds_server.req_rmsnap_latency_count", "ceph_mds_server_req_rmsnap_latency_count"},
{"mds_server.req_rmsnap_latency_sum", "ceph_mds_server_req_rmsnap_latency_sum"},
{"mds_server.req_rmxattr_latency_count", "ceph_mds_server_req_rmxattr_latency_count"},
{"mds_server.req_rmxattr_latency_sum", "ceph_mds_server_req_rmxattr_latency_sum"},
{"mds_server.req_setattr_latency_count", "ceph_mds_server_req_setattr_latency_count"},
{"mds_server.req_setattr_latency_sum", "ceph_mds_server_req_setattr_latency_sum"},
{"mds_server.req_setdirlayout_latency_count", "ceph_mds_server_req_setdirlayout_latency_count"},
{"mds_server.req_setdirlayout_latency_sum", "ceph_mds_server_req_setdirlayout_latency_sum"},
{"mds_server.req_setfilelock_latency_count", "ceph_mds_server_req_setfilelock_latency_count"},
{"mds_server.req_setfilelock_latency_sum", "ceph_mds_server_req_setfilelock_latency_sum"},
{"mds_server.req_setlayout_latency_count", "ceph_mds_server_req_setlayout_latency_count"},
{"mds_server.req_setlayout_latency_sum", "ceph_mds_server_req_setlayout_latency_sum"},
{"mds_server.req_setxattr_latency_count", "ceph_mds_server_req_setxattr_latency_count"},
{"mds_server.req_setxattr_latency_sum", "ceph_mds_server_req_setxattr_latency_sum"},
{"mds_server.req_symlink_latency_count", "ceph_mds_server_req_symlink_latency_count"},
{"mds_server.req_symlink_latency_sum", "ceph_mds_server_req_symlink_latency_sum"},
{"mds_server.req_unlink_latency_count", "ceph_mds_server_req_unlink_latency_count"},
{"mds_server.req_unlink_latency_sum", "ceph_mds_server_req_unlink_latency_sum"},
{"mds_sessions.average_load", "ceph_mds_sessions_average_load"},
{"mds_sessions.avg_session_uptime", "ceph_mds_sessions_avg_session_uptime"},
{"mds_sessions.session_add", "ceph_mds_sessions_session_add"},
{"mds_sessions.session_count", "ceph_mds_sessions_session_count"},
{"mds_sessions.session_remove", "ceph_mds_sessions_session_remove"},
{"mds_sessions.sessions_open", "ceph_mds_sessions_sessions_open"},
{"mds_sessions.sessions_stale", "ceph_mds_sessions_sessions_stale"},
{"mds_sessions.total_load", "ceph_mds_sessions_total_load"},
{"mgr_metadata", "ceph_mgr_metadata"},
{"mgr_module_can_run", "ceph_mgr_module_can_run"},
{"mgr_module_status", "ceph_mgr_module_status"},
{"mgr_status", "ceph_mgr_status"},
{"mon.election_call", "ceph_mon_election_call"},
{"mon.election_lose", "ceph_mon_election_lose"},
{"mon.election_win", "ceph_mon_election_win"},
{"mon.num_elections", "ceph_mon_num_elections"},
{"mon.num_sessions", "ceph_mon_num_sessions"},
{"mon.session_add", "ceph_mon_session_add"},
{"mon.session_rm", "ceph_mon_session_rm"},
{"mon.session_trim", "ceph_mon_session_trim"},
{"mon_metadata", "ceph_mon_metadata"},
{"mon_quorum_status", "ceph_mon_quorum_status"},
{"num_objects_degraded", "ceph_num_objects_degraded"},
{"num_objects_misplaced", "ceph_num_objects_misplaced"},
{"num_objects_unfound", "ceph_num_objects_unfound"},
{"objecter-0x5591781656c0.op_active", "ceph_objecter_0x5591781656c0_op_active"},
{"objecter-0x5591781656c0.op_r", "ceph_objecter_0x5591781656c0_op_r"},
{"objecter-0x5591781656c0.op_rmw", "ceph_objecter_0x5591781656c0_op_rmw"},
{"objecter-0x5591781656c0.op_w", "ceph_objecter_0x5591781656c0_op_w"},
{"objecter-0x559178165930.op_active", "ceph_objecter_0x559178165930_op_active"},
{"objecter-0x559178165930.op_r", "ceph_objecter_0x559178165930_op_r"},
{"objecter-0x559178165930.op_rmw", "ceph_objecter_0x559178165930_op_rmw"},
{"objecter-0x559178165930.op_w", "ceph_objecter_0x559178165930_op_w"},
{"objecter.op_active", "ceph_objecter_op_active"},
{"objecter.op_r", "ceph_objecter_op_r"},
{"objecter.op_rmw", "ceph_objecter_op_rmw"},
{"objecter.op_w", "ceph_objecter_op_w"},
{"osd.numpg", "ceph_osd_numpg"},
{"osd.numpg_removing", "ceph_osd_numpg_removing"},
{"osd.op", "ceph_osd_op"},
{"osd.op_in_bytes", "ceph_osd_op_in_bytes"},
{"osd.op_latency_count", "ceph_osd_op_latency_count"},
{"osd.op_latency_sum", "ceph_osd_op_latency_sum"},
{"osd.op_out_bytes", "ceph_osd_op_out_bytes"},
{"osd.op_prepare_latency_count", "ceph_osd_op_prepare_latency_count"},
{"osd.op_prepare_latency_sum", "ceph_osd_op_prepare_latency_sum"},
{"osd.op_process_latency_count", "ceph_osd_op_process_latency_count"},
{"osd.op_process_latency_sum", "ceph_osd_op_process_latency_sum"},
{"osd.op_r", "ceph_osd_op_r"},
{"osd.op_r_latency_count", "ceph_osd_op_r_latency_count"},
{"osd.op_r_latency_sum", "ceph_osd_op_r_latency_sum"},
{"osd.op_r_out_bytes", "ceph_osd_op_r_out_bytes"},
{"osd.op_r_prepare_latency_count", "ceph_osd_op_r_prepare_latency_count"},
{"osd.op_r_prepare_latency_sum", "ceph_osd_op_r_prepare_latency_sum"},
{"osd.op_r_process_latency_count", "ceph_osd_op_r_process_latency_count"},
{"osd.op_r_process_latency_sum", "ceph_osd_op_r_process_latency_sum"},
{"osd.op_rw", "ceph_osd_op_rw"},
{"osd.op_rw_in_bytes", "ceph_osd_op_rw_in_bytes"},
{"osd.op_rw_latency_count", "ceph_osd_op_rw_latency_count"},
{"osd.op_rw_latency_sum", "ceph_osd_op_rw_latency_sum"},
{"osd.op_rw_out_bytes", "ceph_osd_op_rw_out_bytes"},
{"osd.op_rw_prepare_latency_count", "ceph_osd_op_rw_prepare_latency_count"},
{"osd.op_rw_prepare_latency_sum", "ceph_osd_op_rw_prepare_latency_sum"},
{"osd.op_rw_process_latency_count", "ceph_osd_op_rw_process_latency_count"},
{"osd.op_rw_process_latency_sum", "ceph_osd_op_rw_process_latency_sum"},
{"osd.op_w", "ceph_osd_op_w"},
{"osd.op_w_in_bytes", "ceph_osd_op_w_in_bytes"},
{"osd.op_w_latency_count", "ceph_osd_op_w_latency_count"},
{"osd.op_w_latency_sum", "ceph_osd_op_w_latency_sum"},
{"osd.op_w_prepare_latency_count", "ceph_osd_op_w_prepare_latency_count"},
{"osd.op_w_prepare_latency_sum", "ceph_osd_op_w_prepare_latency_sum"},
{"osd.op_w_process_latency_count", "ceph_osd_op_w_process_latency_count"},
{"osd.op_w_process_latency_sum", "ceph_osd_op_w_process_latency_sum"},
{"osd.op_wip", "ceph_osd_op_wip"},
{"osd.recovery_bytes", "ceph_osd_recovery_bytes"},
{"osd.recovery_ops", "ceph_osd_recovery_ops"},
{"osd.stat_bytes", "ceph_osd_stat_bytes"},
{"osd.stat_bytes_used", "ceph_osd_stat_bytes_used"},
{"osd_apply_latency_ms", "ceph_osd_apply_latency_ms"},
{"osd_commit_latency_ms", "ceph_osd_commit_latency_ms"},
{"osd_flag_nobackfill", "ceph_osd_flag_nobackfill"},
{"osd_flag_nodeep-scrub", "ceph_osd_flag_nodeep_scrub"},
{"osd_flag_nodown", "ceph_osd_flag_nodown"},
{"osd_flag_noin", "ceph_osd_flag_noin"},
{"osd_flag_noout", "ceph_osd_flag_noout"},
{"osd_flag_norebalance", "ceph_osd_flag_norebalance"},
{"osd_flag_norecover", "ceph_osd_flag_norecover"},
{"osd_flag_noscrub", "ceph_osd_flag_noscrub"},
{"osd_flag_noup", "ceph_osd_flag_noup"},
{"osd_in", "ceph_osd_in"},
{"osd_metadata", "ceph_osd_metadata"},
{"osd_up", "ceph_osd_up"},
{"osd_weight", "ceph_osd_weight"},
{"paxos.accept_timeout", "ceph_paxos_accept_timeout"},
{"paxos.begin", "ceph_paxos_begin"},
{"paxos.begin_bytes_count", "ceph_paxos_begin_bytes_count"},
{"paxos.begin_bytes_sum", "ceph_paxos_begin_bytes_sum"},
{"paxos.begin_keys_count", "ceph_paxos_begin_keys_count"},
{"paxos.begin_keys_sum", "ceph_paxos_begin_keys_sum"},
{"paxos.begin_latency_count", "ceph_paxos_begin_latency_count"},
{"paxos.begin_latency_sum", "ceph_paxos_begin_latency_sum"},
{"paxos.collect", "ceph_paxos_collect"},
{"paxos.collect_bytes_count", "ceph_paxos_collect_bytes_count"},
{"paxos.collect_bytes_sum", "ceph_paxos_collect_bytes_sum"},
{"paxos.collect_keys_count", "ceph_paxos_collect_keys_count"},
{"paxos.collect_keys_sum", "ceph_paxos_collect_keys_sum"},
{"paxos.collect_latency_count", "ceph_paxos_collect_latency_count"},
{"paxos.collect_latency_sum", "ceph_paxos_collect_latency_sum"},
{"paxos.collect_timeout", "ceph_paxos_collect_timeout"},
{"paxos.collect_uncommitted", "ceph_paxos_collect_uncommitted"},
{"paxos.commit", "ceph_paxos_commit"},
{"paxos.commit_bytes_count", "ceph_paxos_commit_bytes_count"},
{"paxos.commit_bytes_sum", "ceph_paxos_commit_bytes_sum"},
{"paxos.commit_keys_count", "ceph_paxos_commit_keys_count"},
{"paxos.commit_keys_sum", "ceph_paxos_commit_keys_sum"},
{"paxos.commit_latency_count", "ceph_paxos_commit_latency_count"},
{"paxos.commit_latency_sum", "ceph_paxos_commit_latency_sum"},
{"paxos.lease_ack_timeout", "ceph_paxos_lease_ack_timeout"},
{"paxos.lease_timeout", "ceph_paxos_lease_timeout"},
{"paxos.new_pn", "ceph_paxos_new_pn"},
{"paxos.new_pn_latency_count", "ceph_paxos_new_pn_latency_count"},
{"paxos.new_pn_latency_sum", "ceph_paxos_new_pn_latency_sum"},
{"paxos.refresh", "ceph_paxos_refresh"},
{"paxos.refresh_latency_count", "ceph_paxos_refresh_latency_count"},
{"paxos.refresh_latency_sum", "ceph_paxos_refresh_latency_sum"},
{"paxos.restart", "ceph_paxos_restart"},
{"paxos.share_state", "ceph_paxos_share_state"},
{"paxos.share_state_bytes_count", "ceph_paxos_share_state_bytes_count"},
{"paxos.share_state_bytes_sum", "ceph_paxos_share_state_bytes_sum"},
{"paxos.share_state_keys_count", "ceph_paxos_share_state_keys_count"},
{"paxos.share_state_keys_sum", "ceph_paxos_share_state_keys_sum"},
{"paxos.start_leader", "ceph_paxos_start_leader"},
{"paxos.start_peon", "ceph_paxos_start_peon"},
{"paxos.store_state", "ceph_paxos_store_state"},
{"paxos.store_state_bytes_count", "ceph_paxos_store_state_bytes_count"},
{"paxos.store_state_bytes_sum", "ceph_paxos_store_state_bytes_sum"},
{"paxos.store_state_keys_count", "ceph_paxos_store_state_keys_count"},
{"paxos.store_state_keys_sum", "ceph_paxos_store_state_keys_sum"},
{"paxos.store_state_latency_count", "ceph_paxos_store_state_latency_count"},
{"paxos.store_state_latency_sum", "ceph_paxos_store_state_latency_sum"},
{"pg_activating", "ceph_pg_activating"},
{"pg_active", "ceph_pg_active"},
{"pg_backfill_toofull", "ceph_pg_backfill_toofull"},
{"pg_backfill_unfound", "ceph_pg_backfill_unfound"},
{"pg_backfill_wait", "ceph_pg_backfill_wait"},
{"pg_backfilling", "ceph_pg_backfilling"},
{"pg_clean", "ceph_pg_clean"},
{"pg_creating", "ceph_pg_creating"},
{"pg_deep", "ceph_pg_deep"},
{"pg_degraded", "ceph_pg_degraded"},
{"pg_down", "ceph_pg_down"},
{"pg_failed_repair", "ceph_pg_failed_repair"},
{"pg_forced_backfill", "ceph_pg_forced_backfill"},
{"pg_forced_recovery", "ceph_pg_forced_recovery"},
{"pg_incomplete", "ceph_pg_incomplete"},
{"pg_inconsistent", "ceph_pg_inconsistent"},
{"pg_laggy", "ceph_pg_laggy"},
{"pg_peered", "ceph_pg_peered"},
{"pg_peering", "ceph_pg_peering"},
{"pg_premerge", "ceph_pg_premerge"},
{"pg_recovering", "ceph_pg_recovering"},
{"pg_recovery_toofull", "ceph_pg_recovery_toofull"},
{"pg_recovery_unfound", "ceph_pg_recovery_unfound"},
{"pg_recovery_wait", "ceph_pg_recovery_wait"},
{"pg_remapped", "ceph_pg_remapped"},
{"pg_repair", "ceph_pg_repair"},
{"pg_scrubbing", "ceph_pg_scrubbing"},
{"pg_snaptrim", "ceph_pg_snaptrim"},
{"pg_snaptrim_error", "ceph_pg_snaptrim_error"},
{"pg_snaptrim_wait", "ceph_pg_snaptrim_wait"},
{"pg_stale", "ceph_pg_stale"},
{"pg_total", "ceph_pg_total"},
{"pg_undersized", "ceph_pg_undersized"},
{"pg_unknown", "ceph_pg_unknown"},
{"pg_wait", "ceph_pg_wait"},
{"pool_avail_raw", "ceph_pool_avail_raw"},
{"pool_bytes_used", "ceph_pool_bytes_used"},
{"pool_compress_bytes_used", "ceph_pool_compress_bytes_used"},
{"pool_compress_under_bytes", "ceph_pool_compress_under_bytes"},
{"pool_dirty", "ceph_pool_dirty"},
{"pool_max_avail", "ceph_pool_max_avail"},
{"pool_metadata", "ceph_pool_metadata"},
{"pool_num_bytes_recovered", "ceph_pool_num_bytes_recovered"},
{"pool_num_objects_recovered", "ceph_pool_num_objects_recovered"},
{"pool_objects", "ceph_pool_objects"},
{"pool_objects_repaired", "ceph_pool_objects_repaired"},
{"pool_percent_used", "ceph_pool_percent_used"},
{"pool_quota_bytes", "ceph_pool_quota_bytes"},
{"pool_quota_objects", "ceph_pool_quota_objects"},
{"pool_rd", "ceph_pool_rd"},
{"pool_rd_bytes", "ceph_pool_rd_bytes"},
{"pool_recovering_bytes_per_sec", "ceph_pool_recovering_bytes_per_sec"},
{"pool_recovering_keys_per_sec", "ceph_pool_recovering_keys_per_sec"},
{"pool_recovering_objects_per_sec", "ceph_pool_recovering_objects_per_sec"},
{"pool_stored", "ceph_pool_stored"},
{"pool_stored_raw", "ceph_pool_stored_raw"},
{"pool_wr", "ceph_pool_wr"},
{"pool_wr_bytes", "ceph_pool_wr_bytes"},
{"prioritycache.cache_bytes", "ceph_prioritycache_cache_bytes"},
{"prioritycache.heap_bytes", "ceph_prioritycache_heap_bytes"},
{"prioritycache.mapped_bytes", "ceph_prioritycache_mapped_bytes"},
{"prioritycache.target_bytes", "ceph_prioritycache_target_bytes"},
{"prioritycache.unmapped_bytes", "ceph_prioritycache_unmapped_bytes"},
{"prioritycache:full.committed_bytes", "ceph_prioritycache:full_committed_bytes"},
{"prioritycache:full.pri0_bytes", "ceph_prioritycache:full_pri0_bytes"},
{"prioritycache:full.pri10_bytes", "ceph_prioritycache:full_pri10_bytes"},
{"prioritycache:full.pri11_bytes", "ceph_prioritycache:full_pri11_bytes"},
{"prioritycache:full.pri1_bytes", "ceph_prioritycache:full_pri1_bytes"},
{"prioritycache:full.pri2_bytes", "ceph_prioritycache:full_pri2_bytes"},
{"prioritycache:full.pri3_bytes", "ceph_prioritycache:full_pri3_bytes"},
{"prioritycache:full.pri4_bytes", "ceph_prioritycache:full_pri4_bytes"},
{"prioritycache:full.pri5_bytes", "ceph_prioritycache:full_pri5_bytes"},
{"prioritycache:full.pri6_bytes", "ceph_prioritycache:full_pri6_bytes"},
{"prioritycache:full.pri7_bytes", "ceph_prioritycache:full_pri7_bytes"},
{"prioritycache:full.pri8_bytes", "ceph_prioritycache:full_pri8_bytes"},
{"prioritycache:full.pri9_bytes", "ceph_prioritycache:full_pri9_bytes"},
{"prioritycache:full.reserved_bytes", "ceph_prioritycache:full_reserved_bytes"},
{"prioritycache:inc.committed_bytes", "ceph_prioritycache:inc_committed_bytes"},
{"prioritycache:inc.pri0_bytes", "ceph_prioritycache:inc_pri0_bytes"},
{"prioritycache:inc.pri10_bytes", "ceph_prioritycache:inc_pri10_bytes"},
{"prioritycache:inc.pri11_bytes", "ceph_prioritycache:inc_pri11_bytes"},
{"prioritycache:inc.pri1_bytes", "ceph_prioritycache:inc_pri1_bytes"},
{"prioritycache:inc.pri2_bytes", "ceph_prioritycache:inc_pri2_bytes"},
{"prioritycache:inc.pri3_bytes", "ceph_prioritycache:inc_pri3_bytes"},
{"prioritycache:inc.pri4_bytes", "ceph_prioritycache:inc_pri4_bytes"},
{"prioritycache:inc.pri5_bytes", "ceph_prioritycache:inc_pri5_bytes"},
{"prioritycache:inc.pri6_bytes", "ceph_prioritycache:inc_pri6_bytes"},
{"prioritycache:inc.pri7_bytes", "ceph_prioritycache:inc_pri7_bytes"},
{"prioritycache:inc.pri8_bytes", "ceph_prioritycache:inc_pri8_bytes"},
{"prioritycache:inc.pri9_bytes", "ceph_prioritycache:inc_pri9_bytes"},
{"prioritycache:inc.reserved_bytes", "ceph_prioritycache:inc_reserved_bytes"},
{"prioritycache:kv.committed_bytes", "ceph_prioritycache:kv_committed_bytes"},
{"prioritycache:kv.pri0_bytes", "ceph_prioritycache:kv_pri0_bytes"},
{"prioritycache:kv.pri10_bytes", "ceph_prioritycache:kv_pri10_bytes"},
{"prioritycache:kv.pri11_bytes", "ceph_prioritycache:kv_pri11_bytes"},
{"prioritycache:kv.pri1_bytes", "ceph_prioritycache:kv_pri1_bytes"},
{"prioritycache:kv.pri2_bytes", "ceph_prioritycache:kv_pri2_bytes"},
{"prioritycache:kv.pri3_bytes", "ceph_prioritycache:kv_pri3_bytes"},
{"prioritycache:kv.pri4_bytes", "ceph_prioritycache:kv_pri4_bytes"},
{"prioritycache:kv.pri5_bytes", "ceph_prioritycache:kv_pri5_bytes"},
{"prioritycache:kv.pri6_bytes", "ceph_prioritycache:kv_pri6_bytes"},
{"prioritycache:kv.pri7_bytes", "ceph_prioritycache:kv_pri7_bytes"},
{"prioritycache:kv.pri8_bytes", "ceph_prioritycache:kv_pri8_bytes"},
{"prioritycache:kv.pri9_bytes", "ceph_prioritycache:kv_pri9_bytes"},
{"prioritycache:kv.reserved_bytes", "ceph_prioritycache:kv_reserved_bytes"},
{"prometheus_collect_duration_seconds_count", "ceph_prometheus_collect_duration_seconds_count"},
{"prometheus_collect_duration_seconds_sum", "ceph_prometheus_collect_duration_seconds_sum"},
{"purge_queue.pq_executed", "ceph_purge_queue_pq_executed"},
{"purge_queue.pq_executing", "ceph_purge_queue_pq_executing"},
{"purge_queue.pq_executing_high_water", "ceph_purge_queue_pq_executing_high_water"},
{"purge_queue.pq_executing_ops", "ceph_purge_queue_pq_executing_ops"},
{"purge_queue.pq_executing_ops_high_water", "ceph_purge_queue_pq_executing_ops_high_water"},
{"purge_queue.pq_item_in_journal", "ceph_purge_queue_pq_item_in_journal"},
{"rbd_mirror_metadata", "ceph_rbd_mirror_metadata"},
{"rgw.cache_hit", "ceph_rgw_cache_hit"},
{"rgw.cache_miss", "ceph_rgw_cache_miss"},
{"rgw.failed_req", "ceph_rgw_failed_req"},
{"rgw.gc_retire_object", "ceph_rgw_gc_retire_object"},
{"rgw.get", "ceph_rgw_get"},
{"rgw.get_b", "ceph_rgw_get_b"},
{"rgw.get_initial_lat_count", "ceph_rgw_get_initial_lat_count"},
{"rgw.get_initial_lat_sum", "ceph_rgw_get_initial_lat_sum"},
{"rgw.keystone_token_cache_hit", "ceph_rgw_keystone_token_cache_hit"},
{"rgw.keystone_token_cache_miss", "ceph_rgw_keystone_token_cache_miss"},
{"rgw.lc_abort_mpu", "ceph_rgw_lc_abort_mpu"},
{"rgw.lc_expire_current", "ceph_rgw_lc_expire_current"},
{"rgw.lc_expire_dm", "ceph_rgw_lc_expire_dm"},
{"rgw.lc_expire_noncurrent", "ceph_rgw_lc_expire_noncurrent"},
{"rgw.lc_transition_current", "ceph_rgw_lc_transition_current"},
{"rgw.lc_transition_noncurrent", "ceph_rgw_lc_transition_noncurrent"},
{"rgw.lua_current_vms", "ceph_rgw_lua_current_vms"},
{"rgw.lua_script_fail", "ceph_rgw_lua_script_fail"},
{"rgw.lua_script_ok", "ceph_rgw_lua_script_ok"},
{"rgw.pubsub_event_lost", "ceph_rgw_pubsub_event_lost"},
{"rgw.pubsub_event_triggered", "ceph_rgw_pubsub_event_triggered"},
{"rgw.pubsub_events", "ceph_rgw_pubsub_events"},
{"rgw.pubsub_missing_conf", "ceph_rgw_pubsub_missing_conf"},
{"rgw.pubsub_push_failed", "ceph_rgw_pubsub_push_failed"},
{"rgw.pubsub_push_ok", "ceph_rgw_pubsub_push_ok"},
{"rgw.pubsub_push_pending", "ceph_rgw_pubsub_push_pending"},
{"rgw.pubsub_store_fail", "ceph_rgw_pubsub_store_fail"},
{"rgw.pubsub_store_ok", "ceph_rgw_pubsub_store_ok"},
{"rgw.put", "ceph_rgw_put"},
{"rgw.put_b", "ceph_rgw_put_b"},
{"rgw.put_initial_lat_count", "ceph_rgw_put_initial_lat_count"},
{"rgw.put_initial_lat_sum", "ceph_rgw_put_initial_lat_sum"},
{"rgw.qactive", "ceph_rgw_qactive"},
{"rgw.qlen", "ceph_rgw_qlen"},
{"rgw.req", "ceph_rgw_req"},
{"rgw_metadata", "ceph_rgw_metadata"},
{"rocksdb.compact", "ceph_rocksdb_compact"},
{"rocksdb.compact_queue_len", "ceph_rocksdb_compact_queue_len"},
{"rocksdb.compact_queue_merge", "ceph_rocksdb_compact_queue_merge"},
{"rocksdb.compact_range", "ceph_rocksdb_compact_range"},
{"rocksdb.get_latency_count", "ceph_rocksdb_get_latency_count"},
{"rocksdb.get_latency_sum", "ceph_rocksdb_get_latency_sum"},
{"rocksdb.rocksdb_write_delay_time_count", "ceph_rocksdb_rocksdb_write_delay_time_count"},
{"rocksdb.rocksdb_write_delay_time_sum", "ceph_rocksdb_rocksdb_write_delay_time_sum"},
{"rocksdb.rocksdb_write_memtable_time_count", "ceph_rocksdb_rocksdb_write_memtable_time_count"},
{"rocksdb.rocksdb_write_memtable_time_sum", "ceph_rocksdb_rocksdb_write_memtable_time_sum"},
{"rocksdb.rocksdb_write_pre_and_post_time_count", "ceph_rocksdb_rocksdb_write_pre_and_post_time_count"},
{"rocksdb.rocksdb_write_pre_and_post_time_sum", "ceph_rocksdb_rocksdb_write_pre_and_post_time_sum"},
{"rocksdb.rocksdb_write_wal_time_count", "ceph_rocksdb_rocksdb_write_wal_time_count"},
{"rocksdb.rocksdb_write_wal_time_sum", "ceph_rocksdb_rocksdb_write_wal_time_sum"},
{"rocksdb.submit_latency_count", "ceph_rocksdb_submit_latency_count"},
{"rocksdb.submit_latency_sum", "ceph_rocksdb_submit_latency_sum"},
{"rocksdb.submit_sync_latency_count", "ceph_rocksdb_submit_sync_latency_count"},
{"rocksdb.submit_sync_latency_sum", "ceph_rocksdb_submit_sync_latency_sum"}
};
// Verify that promethize() rewrites every raw perf-counter path in
// promethize_data into its expected Prometheus-compatible metric name.
TEST(Exporter, promethize) {
  for (const auto& [raw_path, expected] : promethize_data) {
    std::string metric = raw_path;
    promethize(metric);
    ASSERT_EQ(metric, expected);
  }
}
| 43,530 | 64.46015 | 106 | cc |
null | ceph-main/src/test/fedora-33/install-deps.sh | ../../../install-deps.sh | 24 | 24 | 24 | sh |
null | ceph-main/src/test/fio/README.md | FIO
===
Ceph uses the fio workload generator and benchmarking utility.
(https://github.com/axboe/fio.git)
FIO tool is automatically fetched to build/src/fio, and build if necessary.
RBD
---
The fio engine for rbd is located in the fio tree itself, so you'll need to
build it from source.
If you install the ceph libraries to a location that isn't in your
LD_LIBRARY_PATH, be sure to add it:
export LD_LIBRARY_PATH=/path/to/install/lib
To build fio with rbd:
./configure --extra-cflags="-I/path/to/install/include -L/path/to/install/lib"
make
If configure fails with "Rados Block Device engine no", see config.log for
details and adjust the cflags as necessary.
If ceph was compiled with tcmalloc, it may be necessary to compile fio with:
make EXTLIBS=tcmalloc
Otherwise fio might crash in malloc_usable_size().
To view the fio options specific to the rbd engine:
./fio --enghelp=rbd
See examples/rbd.fio for an example job file. To run:
./fio examples/rbd.fio
ObjectStore
-----------
This fio engine allows you to mount and use a ceph object store directly,
without having to build a ceph cluster or start any daemons.
Because the ObjectStore is not a public-facing interface, we build it inside
of the ceph tree and load libfio_ceph_objectstore.so into fio as an external
engine.
To build fio_ceph_objectstore run:
```
./do_cmake.sh -DWITH_FIO=ON
cd build
make fio_ceph_objectstore
```
This will fetch FIO to build/src/fio directory,
compile fio tool and libfio_ceph_objectstore.so.
If you install the ceph libraries to a location that isn't in your
LD_LIBRARY_PATH, be sure to add it:
export LD_LIBRARY_PATH=/path/to/install/lib
To view the fio options specific to the objectstore engine:
./fio --enghelp=libfio_ceph_objectstore.so
The conf= option requires a ceph configuration file (ceph.conf). Example job
and conf files for each object store are provided in the same directory as
this README.
To run:
./fio /path/to/job.fio
RADOS
-----
By default FIO can be compiled with support for RADOS.
When ceph is installed in your system default compilation of FIO includes RADOS ioengine.
If you installed ceph in any other place (cmake -DCMAKE_INSTALL_PREFIX=${CEPH_INSTALL_ROOT} ..) you can build FIO following way:
LIBS="-lrados -ltcmalloc" LDFLAGS="-L${CEPH_INSTALL_ROOT}/lib" EXTFLAGS="-I${CEPH_INSTALL_ROOT}/include" \
rados=yes ./configure
LIBS="-lrados -ltcmalloc" LDFLAGS="-L${CEPH_INSTALL_ROOT}/lib" EXTFLAGS="-I${CEPH_INSTALL_ROOT}/include" \
rados=yes make
"-ltcmalloc" is necessary if ceph was compiled with tcmalloc.
Messenger
---------
This fio engine allows you to test CEPH messenger transport layer, without
any disk activities involved.
To build fio_ceph_messenger:
```
./do_cmake.sh -DWITH_FIO=ON
cd build
make fio_ceph_messenger
```
If you install the ceph libraries to a location that isn't in your
LD_LIBRARY_PATH, be sure to add it:
export LD_LIBRARY_PATH=/path/to/install/lib
To view the fio options specific to the messenger engine:
./fio --enghelp=libfio_ceph_messenger.so
The ceph_conf_file= option requires a ceph configuration file (ceph.conf),
see ceph-messenger.conf and ceph-messenger.fio for details.
To run:
./fio ./ceph-messenger.fio
| 3,284 | 27.076923 | 128 | md |
null | ceph-main/src/test/fio/fio_ceph_messenger.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* CEPH messenger engine
*
* FIO engine which uses ceph messenger as a transport. See corresponding
* FIO client and server jobs for details.
*/
#include "global/global_init.h"
#include "msg/Messenger.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "common/perf_counters.h"
#include "auth/DummyAuth.h"
#include "ring_buffer.h"
#include <fio.h>
#include <flist.h>
#include <optgroup.h>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_
using namespace std;
enum ceph_msgr_type {
CEPH_MSGR_TYPE_UNDEF,
CEPH_MSGR_TYPE_POSIX,
CEPH_MSGR_TYPE_DPDK,
CEPH_MSGR_TYPE_RDMA,
};
const char *ceph_msgr_types[] = { "undef", "async+posix",
"async+dpdk", "async+rdma" };
struct ceph_msgr_options {
struct thread_data *td__;
unsigned int is_receiver;
unsigned int is_single;
unsigned int port;
const char *hostname;
const char *conffile;
enum ceph_msgr_type ms_type;
};
class FioDispatcher;
struct ceph_msgr_data {
ceph_msgr_data(struct ceph_msgr_options *o_, unsigned iodepth) :
o(o_) {
INIT_FLIST_HEAD(&io_inflight_list);
INIT_FLIST_HEAD(&io_pending_list);
ring_buffer_init(&io_completed_q, iodepth);
pthread_spin_init(&spin, PTHREAD_PROCESS_PRIVATE);
}
struct ceph_msgr_options *o;
Messenger *msgr = NULL;
FioDispatcher *disp = NULL;
pthread_spinlock_t spin;
struct ring_buffer io_completed_q;
struct flist_head io_inflight_list;
struct flist_head io_pending_list;
unsigned int io_inflight_nr = 0;
unsigned int io_pending_nr = 0;
};
struct ceph_msgr_io {
struct flist_head list;
struct ceph_msgr_data *data;
struct io_u *io_u;
MOSDOp *req_msg; /** Cached request, valid only for sender */
};
struct ceph_msgr_reply_io {
struct flist_head list;
MOSDOpReply *rep;
};
static void *str_to_ptr(const std::string &str)
{
// str is assumed to be a valid ptr string
return reinterpret_cast<void*>(ceph::parse<uintptr_t>(str, 16).value());
}
static std::string ptr_to_str(void *ptr)
{
char buf[32];
snprintf(buf, sizeof(buf), "%llx", (unsigned long long)ptr);
return std::string(buf);
}
/*
* Used for refcounters print on the last context put, almost duplicates
* global context refcounter, sigh.
*/
static std::atomic<int> ctx_ref(1);
static DummyAuthClientServer *g_dummy_auth;
static void create_or_get_ceph_context(struct ceph_msgr_options *o)
{
if (g_ceph_context) {
g_ceph_context->get();
ctx_ref++;
return;
}
boost::intrusive_ptr<CephContext> cct;
vector<const char*> args;
if (o->conffile)
args = { "--conf", o->conffile };
cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
/* Will use g_ceph_context instead */
cct.detach();
common_init_finish(g_ceph_context);
g_ceph_context->_conf.apply_changes(NULL);
g_dummy_auth = new DummyAuthClientServer(g_ceph_context);
g_dummy_auth->auth_registry.refresh_config();
}
static void put_ceph_context(void)
{
if (--ctx_ref == 0) {
ostringstream ostr;
Formatter* f;
f = Formatter::create("json-pretty");
g_ceph_context->get_perfcounters_collection()->dump_formatted(f, false, false);
ostr << ">>>>>>>>>>>>> PERFCOUNTERS BEGIN <<<<<<<<<<<<" << std::endl;
f->flush(ostr);
ostr << ">>>>>>>>>>>>> PERFCOUNTERS END <<<<<<<<<<<<" << std::endl;
delete f;
delete g_dummy_auth;
dout(0) << ostr.str() << dendl;
}
g_ceph_context->put();
}
static void ceph_msgr_sender_on_reply(const object_t &oid)
{
struct ceph_msgr_data *data;
struct ceph_msgr_io *io;
/*
* Here we abuse object and use it as a raw pointer. Since this is
* only for benchmarks and testing we do not care about anything
* but performance. So no need to use global structure in order
* to search for reply, just send a pointer and get it back.
*/
io = (decltype(io))str_to_ptr(oid.name);
data = io->data;
ring_buffer_enqueue(&data->io_completed_q, (void *)io);
}
class ReplyCompletion : public Message::CompletionHook {
struct ceph_msgr_io *m_io;
public:
ReplyCompletion(MOSDOpReply *rep, struct ceph_msgr_io *io) :
Message::CompletionHook(rep),
m_io(io) {
}
void finish(int err) override {
struct ceph_msgr_data *data = m_io->data;
ring_buffer_enqueue(&data->io_completed_q, (void *)m_io);
}
};
static void ceph_msgr_receiver_on_request(struct ceph_msgr_data *data,
MOSDOp *req)
{
MOSDOpReply *rep;
rep = new MOSDOpReply(req, 0, 0, 0, false);
rep->set_connection(req->get_connection());
pthread_spin_lock(&data->spin);
if (data->io_inflight_nr) {
struct ceph_msgr_io *io;
data->io_inflight_nr--;
io = flist_first_entry(&data->io_inflight_list,
struct ceph_msgr_io, list);
flist_del(&io->list);
pthread_spin_unlock(&data->spin);
rep->set_completion_hook(new ReplyCompletion(rep, io));
rep->get_connection()->send_message(rep);
} else {
struct ceph_msgr_reply_io *rep_io;
rep_io = (decltype(rep_io))malloc(sizeof(*rep_io));
rep_io->rep = rep;
data->io_pending_nr++;
flist_add_tail(&rep_io->list, &data->io_pending_list);
pthread_spin_unlock(&data->spin);
}
}
class FioDispatcher : public Dispatcher {
struct ceph_msgr_data *m_data;
public:
FioDispatcher(struct ceph_msgr_data *data):
Dispatcher(g_ceph_context),
m_data(data) {
}
bool ms_can_fast_dispatch_any() const override {
return true;
}
bool ms_can_fast_dispatch(const Message *m) const override {
switch (m->get_type()) {
case CEPH_MSG_OSD_OP:
return m_data->o->is_receiver;
case CEPH_MSG_OSD_OPREPLY:
return !m_data->o->is_receiver;
default:
return false;
}
}
void ms_handle_fast_connect(Connection *con) override {
}
void ms_handle_fast_accept(Connection *con) override {
}
bool ms_dispatch(Message *m) override {
return true;
}
void ms_fast_dispatch(Message *m) override {
if (m_data->o->is_receiver) {
MOSDOp *req;
/*
* Server side, handle request.
*/
req = static_cast<MOSDOp*>(m);
req->finish_decode();
ceph_msgr_receiver_on_request(m_data, req);
} else {
MOSDOpReply *rep;
/*
* Client side, get reply, extract objid and mark
* IO as completed.
*/
rep = static_cast<MOSDOpReply*>(m);
ceph_msgr_sender_on_reply(rep->get_oid());
}
m->put();
}
bool ms_handle_reset(Connection *con) override {
return true;
}
void ms_handle_remote_reset(Connection *con) override {
}
bool ms_handle_refused(Connection *con) override {
return false;
}
int ms_handle_authentication(Connection *con) override {
return 1;
}
};
static entity_addr_t hostname_to_addr(struct ceph_msgr_options *o)
{
entity_addr_t addr;
addr.parse(o->hostname);
addr.set_port(o->port);
addr.set_nonce(0);
return addr;
}
static Messenger *create_messenger(struct ceph_msgr_options *o)
{
entity_name_t ename = o->is_receiver ?
entity_name_t::OSD(0) : entity_name_t::CLIENT(0);
std::string lname = o->is_receiver ?
"receiver" : "sender";
std::string ms_type = o->ms_type != CEPH_MSGR_TYPE_UNDEF ?
ceph_msgr_types[o->ms_type] :
g_ceph_context->_conf.get_val<std::string>("ms_type");
/* o->td__>pid doesn't set value, so use getpid() instead*/
auto nonce = o->is_receiver ? 0 : (getpid() + o->td__->thread_number);
Messenger *msgr = Messenger::create(g_ceph_context, ms_type.c_str(),
ename, lname, nonce);
if (o->is_receiver) {
msgr->set_default_policy(Messenger::Policy::stateless_server(0));
msgr->bind(hostname_to_addr(o));
} else {
msgr->set_default_policy(Messenger::Policy::lossless_client(0));
}
msgr->set_auth_client(g_dummy_auth);
msgr->set_auth_server(g_dummy_auth);
msgr->set_require_authorizer(false);
msgr->start();
return msgr;
}
static Messenger *single_msgr;
static std::atomic<int> single_msgr_ref;
static vector<FioDispatcher *> single_msgr_disps;
static void init_messenger(struct ceph_msgr_data *data)
{
struct ceph_msgr_options *o = data->o;
FioDispatcher *disp;
Messenger *msgr;
disp = new FioDispatcher(data);
if (o->is_single) {
/*
* Single messenger instance for the whole FIO
*/
if (!single_msgr) {
msgr = create_messenger(o);
single_msgr = msgr;
} else {
msgr = single_msgr;
}
single_msgr_disps.push_back(disp);
single_msgr_ref++;
} else {
/*
* Messenger instance per FIO thread
*/
msgr = create_messenger(o);
}
msgr->add_dispatcher_head(disp);
data->disp = disp;
data->msgr = msgr;
}
static void free_messenger(struct ceph_msgr_data *data)
{
data->msgr->shutdown();
data->msgr->wait();
delete data->msgr;
}
static void put_messenger(struct ceph_msgr_data *data)
{
struct ceph_msgr_options *o = data->o;
if (o->is_single) {
if (--single_msgr_ref == 0) {
free_messenger(data);
/*
* In case of a single messenger instance we have to
* free dispatchers after actual messenger destruction.
*/
for (auto disp : single_msgr_disps)
delete disp;
single_msgr = NULL;
}
} else {
free_messenger(data);
delete data->disp;
}
data->disp = NULL;
data->msgr = NULL;
}
static int fio_ceph_msgr_setup(struct thread_data *td)
{
struct ceph_msgr_options *o = (decltype(o))td->eo;
o->td__ = td;
ceph_msgr_data *data;
/* We have to manage global resources so we use threads */
td->o.use_thread = 1;
create_or_get_ceph_context(o);
if (!td->io_ops_data) {
data = new ceph_msgr_data(o, td->o.iodepth);
init_messenger(data);
td->io_ops_data = (void *)data;
}
return 0;
}
static void fio_ceph_msgr_cleanup(struct thread_data *td)
{
struct ceph_msgr_data *data;
unsigned nr;
data = (decltype(data))td->io_ops_data;
put_messenger(data);
nr = ring_buffer_used_size(&data->io_completed_q);
if (nr)
fprintf(stderr, "fio: io_completed_nr==%d, but should be zero\n",
nr);
if (data->io_inflight_nr)
fprintf(stderr, "fio: io_inflight_nr==%d, but should be zero\n",
data->io_inflight_nr);
if (data->io_pending_nr)
fprintf(stderr, "fio: io_pending_nr==%d, but should be zero\n",
data->io_pending_nr);
if (!flist_empty(&data->io_inflight_list))
fprintf(stderr, "fio: io_inflight_list is not empty\n");
if (!flist_empty(&data->io_pending_list))
fprintf(stderr, "fio: io_pending_list is not empty\n");
ring_buffer_deinit(&data->io_completed_q);
delete data;
put_ceph_context();
}
static int fio_ceph_msgr_io_u_init(struct thread_data *td, struct io_u *io_u)
{
struct ceph_msgr_options *o = (decltype(o))td->eo;
struct ceph_msgr_io *io;
MOSDOp *req_msg = NULL;
io = (decltype(io))malloc(sizeof(*io));
io->io_u = io_u;
io->data = (decltype(io->data))td->io_ops_data;
if (!o->is_receiver) {
object_t oid(ptr_to_str(io));
pg_t pgid;
object_locator_t oloc;
hobject_t hobj(oid, oloc.key, CEPH_NOSNAP, pgid.ps(),
pgid.pool(), oloc.nspace);
spg_t spgid(pgid);
entity_inst_t dest(entity_name_t::OSD(0), hostname_to_addr(o));
Messenger *msgr = io->data->msgr;
ConnectionRef con = msgr->connect_to(dest.name.type(),
entity_addrvec_t(dest.addr));
req_msg = new MOSDOp(0, 0, hobj, spgid, 0, 0, 0);
req_msg->set_connection(con);
}
io->req_msg = req_msg;
io_u->engine_data = (void *)io;
return 0;
}
static void fio_ceph_msgr_io_u_free(struct thread_data *td, struct io_u *io_u)
{
struct ceph_msgr_io *io;
io = (decltype(io))io_u->engine_data;
if (io) {
io_u->engine_data = NULL;
if (io->req_msg)
io->req_msg->put();
free(io);
}
}
static enum fio_q_status ceph_msgr_sender_queue(struct thread_data *td,
struct io_u *io_u)
{
struct ceph_msgr_data *data;
struct ceph_msgr_io *io;
bufferlist buflist = bufferlist::static_from_mem(
(char *)io_u->buf, io_u->buflen);
io = (decltype(io))io_u->engine_data;
data = (decltype(data))td->io_ops_data;
/* No handy method to clear ops before reusage? Ok */
io->req_msg->ops.clear();
/* Here we do not care about direction, always send as write */
io->req_msg->write(0, io_u->buflen, buflist);
/* Keep message alive */
io->req_msg->get();
io->req_msg->get_connection()->send_message(io->req_msg);
return FIO_Q_QUEUED;
}
static int fio_ceph_msgr_getevents(struct thread_data *td, unsigned int min,
unsigned int max, const struct timespec *ts)
{
struct ceph_msgr_data *data;
unsigned int nr;
data = (decltype(data))td->io_ops_data;
/*
* Check io_u.c : if min == 0 -> ts is valid and equal to zero,
* if min != 0 -> ts is NULL.
*/
assert(!min ^ !ts);
nr = ring_buffer_used_size(&data->io_completed_q);
if (nr >= min)
/* We got something */
return min(nr, max);
/* Here we are only if min != 0 and ts == NULL */
assert(min && !ts);
while ((nr = ring_buffer_used_size(&data->io_completed_q)) < min &&
!td->terminate) {
/* Poll, no disk IO, so we expect response immediately. */
usleep(10);
}
return min(nr, max);
}
static struct io_u *fio_ceph_msgr_event(struct thread_data *td, int event)
{
struct ceph_msgr_data *data;
struct ceph_msgr_io *io;
data = (decltype(data))td->io_ops_data;
io = (decltype(io))ring_buffer_dequeue(&data->io_completed_q);
return io->io_u;
}
static enum fio_q_status ceph_msgr_receiver_queue(struct thread_data *td,
struct io_u *io_u)
{
struct ceph_msgr_data *data;
struct ceph_msgr_io *io;
io = (decltype(io))io_u->engine_data;
data = io->data;
pthread_spin_lock(&data->spin);
if (data->io_pending_nr) {
struct ceph_msgr_reply_io *rep_io;
MOSDOpReply *rep;
data->io_pending_nr--;
rep_io = flist_first_entry(&data->io_pending_list,
struct ceph_msgr_reply_io,
list);
flist_del(&rep_io->list);
rep = rep_io->rep;
pthread_spin_unlock(&data->spin);
free(rep_io);
rep->set_completion_hook(new ReplyCompletion(rep, io));
rep->get_connection()->send_message(rep);
} else {
data->io_inflight_nr++;
flist_add_tail(&io->list, &data->io_inflight_list);
pthread_spin_unlock(&data->spin);
}
return FIO_Q_QUEUED;
}
static enum fio_q_status fio_ceph_msgr_queue(struct thread_data *td,
struct io_u *io_u)
{
struct ceph_msgr_options *o = (decltype(o))td->eo;
if (o->is_receiver)
return ceph_msgr_receiver_queue(td, io_u);
else
return ceph_msgr_sender_queue(td, io_u);
}
static int fio_ceph_msgr_open_file(struct thread_data *td, struct fio_file *f)
{
return 0;
}
static int fio_ceph_msgr_close_file(struct thread_data *, struct fio_file *)
{
return 0;
}
template <class Func>
fio_option make_option(Func&& func)
{
auto o = fio_option{};
o.category = FIO_OPT_C_ENGINE;
func(std::ref(o));
return o;
}
static std::vector<fio_option> options {
make_option([] (fio_option& o) {
o.name = "receiver";
o.lname = "CEPH messenger is receiver";
o.type = FIO_OPT_BOOL;
o.off1 = offsetof(struct ceph_msgr_options, is_receiver);
o.help = "CEPH messenger is sender or receiver";
o.def = "0";
}),
make_option([] (fio_option& o) {
o.name = "single_instance";
o.lname = "Single instance of CEPH messenger ";
o.type = FIO_OPT_BOOL;
o.off1 = offsetof(struct ceph_msgr_options, is_single);
o.help = "CEPH messenger is a created once for all threads";
o.def = "0";
}),
make_option([] (fio_option& o) {
o.name = "hostname";
o.lname = "CEPH messenger hostname";
o.type = FIO_OPT_STR_STORE;
o.off1 = offsetof(struct ceph_msgr_options, hostname);
o.help = "Hostname for CEPH messenger engine";
}),
make_option([] (fio_option& o) {
o.name = "port";
o.lname = "CEPH messenger engine port";
o.type = FIO_OPT_INT;
o.off1 = offsetof(struct ceph_msgr_options, port);
o.maxval = 65535;
o.minval = 1;
o.help = "Port to use for CEPH messenger";
}),
make_option([] (fio_option& o) {
o.name = "ms_type";
o.lname = "CEPH messenger transport type: async+posix, async+dpdk, async+rdma";
o.type = FIO_OPT_STR;
o.off1 = offsetof(struct ceph_msgr_options, ms_type);
o.help = "Transport type for CEPH messenger, see 'ms async transport type' corresponding CEPH documentation page";
o.def = "undef";
o.posval[0].ival = "undef";
o.posval[0].oval = CEPH_MSGR_TYPE_UNDEF;
o.posval[1].ival = "async+posix";
o.posval[1].oval = CEPH_MSGR_TYPE_POSIX;
o.posval[1].help = "POSIX API";
o.posval[2].ival = "async+dpdk";
o.posval[2].oval = CEPH_MSGR_TYPE_DPDK;
o.posval[2].help = "DPDK";
o.posval[3].ival = "async+rdma";
o.posval[3].oval = CEPH_MSGR_TYPE_RDMA;
o.posval[3].help = "RDMA";
}),
make_option([] (fio_option& o) {
o.name = "ceph_conf_file";
o.lname = "CEPH configuration file";
o.type = FIO_OPT_STR_STORE;
o.off1 = offsetof(struct ceph_msgr_options, conffile);
o.help = "Path to CEPH configuration file";
}),
{} /* Last NULL */
};
static struct ioengine_ops ioengine;
extern "C" {
void get_ioengine(struct ioengine_ops** ioengine_ptr)
{
/*
* Main ioengine structure
*/
ioengine.name = "ceph-msgr";
ioengine.version = FIO_IOOPS_VERSION;
ioengine.flags = FIO_DISKLESSIO | FIO_UNIDIR | FIO_PIPEIO;
ioengine.setup = fio_ceph_msgr_setup;
ioengine.queue = fio_ceph_msgr_queue;
ioengine.getevents = fio_ceph_msgr_getevents;
ioengine.event = fio_ceph_msgr_event;
ioengine.cleanup = fio_ceph_msgr_cleanup;
ioengine.open_file = fio_ceph_msgr_open_file;
ioengine.close_file = fio_ceph_msgr_close_file;
ioengine.io_u_init = fio_ceph_msgr_io_u_init;
ioengine.io_u_free = fio_ceph_msgr_io_u_free;
ioengine.option_struct_size = sizeof(struct ceph_msgr_options);
ioengine.options = options.data();
*ioengine_ptr = &ioengine;
}
} // extern "C"
| 18,120 | 24.850214 | 119 | cc |
null | ceph-main/src/test/fio/fio_ceph_objectstore.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph ObjectStore engine
*
* IO engine using Ceph's ObjectStore class to test low-level performance of
* Ceph OSDs.
*
*/
#include <memory>
#include <system_error>
#include <vector>
#include <fstream>
#include "os/ObjectStore.h"
#include "global/global_init.h"
#include "common/errno.h"
#include "include/intarith.h"
#include "include/stringify.h"
#include "include/random.h"
#include "include/str_list.h"
#include "common/perf_counters.h"
#include "common/TracepointProvider.h"
#include <fio.h>
#include <optgroup.h>
#include "include/ceph_assert.h" // fio.h clobbers our assert.h
#include <algorithm>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_
using namespace std;
namespace {
/// fio configuration options read from the job file
struct Options {
thread_data* td;
char* conf;
char* perf_output_file;
char* throttle_values;
char* deferred_throttle_values;
unsigned long long
cycle_throttle_period,
oi_attr_len_low,
oi_attr_len_high,
snapset_attr_len_low,
snapset_attr_len_high,
pglog_omap_len_low,
pglog_omap_len_high,
pglog_dup_omap_len_low,
pglog_dup_omap_len_high,
_fastinfo_omap_len_low,
_fastinfo_omap_len_high;
unsigned simulate_pglog;
unsigned single_pool_mode;
unsigned preallocate_files;
unsigned check_files;
};
template <class Func> // void Func(fio_option&)
fio_option make_option(Func&& func)
{
// zero-initialize and set common defaults
auto o = fio_option{};
o.category = FIO_OPT_C_ENGINE;
o.group = FIO_OPT_G_RBD;
func(std::ref(o));
return o;
}
// Engine options exposed in the fio job file.  The trailing
// value-initialized element is the 'null' terminator fio requires.
static std::vector<fio_option> ceph_options{
  make_option([] (fio_option& o) {
    o.name = "conf";
    o.lname = "ceph configuration file";
    o.type = FIO_OPT_STR_STORE;
    o.help = "Path to a ceph configuration file";
    o.off1 = offsetof(Options, conf);
  }),
  make_option([] (fio_option& o) {
    o.name = "perf_output_file";
    o.lname = "perf output target";
    o.type = FIO_OPT_STR_STORE;
    o.help = "Path to which to write json formatted perf output";
    o.off1 = offsetof(Options, perf_output_file);
    o.def = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "oi_attr_len";
    o.lname = "OI Attr length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set OI(aka '_') attribute to specified length";
    o.off1 = offsetof(Options, oi_attr_len_low);
    o.off2 = offsetof(Options, oi_attr_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "snapset_attr_len";
    o.lname = "Attr 'snapset' length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set 'snapset' attribute to specified length";
    o.off1 = offsetof(Options, snapset_attr_len_low);
    o.off2 = offsetof(Options, snapset_attr_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "_fastinfo_omap_len";
    o.lname = "'_fastinfo' omap entry length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set '_fastinfo' OMAP attribute to specified length";
    o.off1 = offsetof(Options, _fastinfo_omap_len_low);
    o.off2 = offsetof(Options, _fastinfo_omap_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "pglog_simulation";
    o.lname = "pglog behavior simulation";
    o.type = FIO_OPT_BOOL;
    o.help = "Enables PG Log simulation behavior";
    o.off1 = offsetof(Options, simulate_pglog);
    o.def = "0";
  }),
  make_option([] (fio_option& o) {
    o.name = "pglog_omap_len";
    o.lname = "pglog omap entry length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set pglog omap entry to specified length";
    o.off1 = offsetof(Options, pglog_omap_len_low);
    o.off2 = offsetof(Options, pglog_omap_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "pglog_dup_omap_len";
    // fixed typo: was "uplicate pglog omap entry length"
    o.lname = "duplicate pglog omap entry length";
    o.type = FIO_OPT_STR_VAL;
    o.help = "Set duplicate pglog omap entry to specified length";
    o.off1 = offsetof(Options, pglog_dup_omap_len_low);
    o.off2 = offsetof(Options, pglog_dup_omap_len_high);
    o.def = 0;
    o.minval = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "single_pool_mode";
    o.lname = "single(shared among jobs) pool mode";
    o.type = FIO_OPT_BOOL;
    o.help = "Enables the mode when all jobs run against the same pool";
    o.off1 = offsetof(Options, single_pool_mode);
    o.def = "0";
  }),
  make_option([] (fio_option& o) {
    o.name = "preallocate_files";
    o.lname = "preallocate files on init";
    o.type = FIO_OPT_BOOL;
    o.help = "Enables/disables file preallocation (touch and resize) on init";
    o.off1 = offsetof(Options, preallocate_files);
    o.def = "1";
  }),
  make_option([] (fio_option& o) {
    o.name = "check_files";
    o.lname = "ensure files exist and are correct on init";
    o.type = FIO_OPT_BOOL;
    o.help = "Enables/disables checking of files on init";
    o.off1 = offsetof(Options, check_files);
    o.def = "0";
  }),
  make_option([] (fio_option& o) {
    o.name = "bluestore_throttle";
    o.lname = "set bluestore throttle";
    o.type = FIO_OPT_STR_STORE;
    // was a comma-operator typo (`o.help = "...", o.off1 = ...;`)
    o.help = "comma delimited list of throttle values";
    o.off1 = offsetof(Options, throttle_values);
    o.def = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "bluestore_deferred_throttle";
    o.lname = "set bluestore deferred throttle";
    o.type = FIO_OPT_STR_STORE;
    // was a comma-operator typo (`o.help = "...", o.off1 = ...;`)
    o.help = "comma delimited list of throttle values";
    o.off1 = offsetof(Options, deferred_throttle_values);
    o.def = 0;
  }),
  make_option([] (fio_option& o) {
    o.name = "vary_bluestore_throttle_period";
    o.lname = "period between different throttle values";
    o.type = FIO_OPT_STR_VAL;
    o.help = "set to non-zero value to periodically cycle through throttle options";
    o.off1 = offsetof(Options, cycle_throttle_period);
    o.def = "0";
    o.minval = 0;
  }),
  {} // fio expects a 'null'-terminated list
};
// One PG-style collection plus the bookkeeping used by the pglog
// simulation in fio_ceph_os_queue().
struct Collection {
  spg_t pg;
  coll_t cid;
  ObjectStore::CollectionHandle ch;
  // Can't use mutex directly in vectors hence dynamic allocation
  std::unique_ptr<std::mutex> lock;
  // newest/oldest simulated pglog versions; updated under 'lock'
  uint64_t pglog_ver_head = 1;
  uint64_t pglog_ver_tail = 1;
  uint64_t pglog_dup_ver_tail = 1;
  // use big pool ids to avoid clashing with existing collections
  static constexpr int64_t MIN_POOL_ID = 0x0000ffffffffffff;
  Collection(const spg_t& pg, ObjectStore::CollectionHandle _ch)
    : pg(pg), cid(pg), ch(_ch),
      lock(new std::mutex) {
  }
};
// Remove every collection in 'collections' (and its pgmeta object) from
// the store.  Failures are logged (once) but the function always returns
// 0, matching the original best-effort cleanup contract of its callers.
int destroy_collections(
  std::unique_ptr<ObjectStore>& os,
  std::vector<Collection>& collections)
{
  bool failed = false;
  // remove our collections
  for (auto& coll : collections) {
    // Build a fresh Transaction each iteration: queue_transaction()
    // consumes it via std::move, so reusing a single instance across
    // iterations operated on a moved-from object.
    ObjectStore::Transaction t;
    ghobject_t pgmeta_oid(coll.pg.make_pgmeta_oid());
    t.remove(coll.cid, pgmeta_oid);
    t.remove_collection(coll.cid);
    int r = os->queue_transaction(coll.ch, std::move(t));
    if (r && !failed) {
      derr << "Engine cleanup failed with " << cpp_strerror(-r) << dendl;
      failed = true;
    }
  }
  return 0;
}
// Create (or reopen) 'count' collections for the given pool id.  Also
// writes an OSD superblock into the meta collection so external tools
// (e.g. ceph-objectstore-tool) can operate on the resulting store.
// Returns 0 on success or a negative error code; on failure any
// collections created so far are torn down via destroy_collections().
int init_collections(std::unique_ptr<ObjectStore>& os,
                      uint64_t pool,
                      std::vector<Collection>& collections,
                      uint64_t count)
{
  ceph_assert(count > 0);
  collections.reserve(count);
  // number of hash bits needed to address 'count' PGs
  const int split_bits = cbits(count - 1);
  {
    // propagate Superblock object to ensure proper functioning of tools that
    // need it. E.g. ceph-objectstore-tool
    coll_t cid(coll_t::meta());
    bool exists = os->collection_exists(cid);
    if (!exists) {
      auto ch = os->create_new_collection(cid);
      OSDSuperblock superblock;
      bufferlist bl;
      encode(superblock, bl);
      ObjectStore::Transaction t;
      t.create_collection(cid, split_bits);
      t.write(cid, OSD_SUPERBLOCK_GOBJECT, 0, bl.length(), bl);
      int r = os->queue_transaction(ch, std::move(t));
      if (r < 0) {
        derr << "Failure to write OSD superblock: " << cpp_strerror(-r) << dendl;
        return r;
      }
    }
  }
  // one collection per PG; reuse collections that already exist on disk
  for (uint32_t i = 0; i < count; i++) {
    auto pg = spg_t{pg_t{i, pool}};
    coll_t cid(pg);
    bool exists = os->collection_exists(cid);
    auto ch = exists ?
      os->open_collection(cid) :
      os->create_new_collection(cid) ;
    collections.emplace_back(pg, ch);
    ObjectStore::Transaction t;
    auto& coll = collections.back();
    if (!exists) {
      t.create_collection(coll.cid, split_bits);
      ghobject_t pgmeta_oid(coll.pg.make_pgmeta_oid());
      t.touch(coll.cid, pgmeta_oid);
      int r = os->queue_transaction(coll.ch, std::move(t));
      if (r) {
        derr << "Engine init failed with " << cpp_strerror(-r) << dendl;
        destroy_collections(os, collections);
        return r;
      }
    }
  }
  return 0;
}
/// global engine state shared between all jobs within the process. this
/// includes g_ceph_context and the ObjectStore instance
struct Engine {
  /// the initial g_ceph_context reference to be dropped on destruction
  boost::intrusive_ptr<CephContext> cct;
  std::unique_ptr<ObjectStore> os;
  std::vector<Collection> collections; //< shared collections to spread objects over
  std::mutex lock;
  int ref_count;
  const bool unlink; //< unlink objects on destruction
  // file to which to output formatted perf information
  const std::optional<std::string> perf_output_file;
  explicit Engine(thread_data* td);
  ~Engine();
  // Meyers singleton: constructed once, with the options of whichever
  // job calls first.
  static Engine* get_instance(thread_data* td) {
    // note: creates an Engine with the options associated with the first job
    static Engine engine(td);
    return &engine;
  }
  void ref() {
    std::lock_guard<std::mutex> l(lock);
    ++ref_count;
  }
  // Drop a reference; the last deref dumps perf counters / mempool /
  // db histogram, optionally destroys the collections, and unmounts
  // the ObjectStore.
  void deref() {
    std::lock_guard<std::mutex> l(lock);
    --ref_count;
    if (!ref_count) {
      ostringstream ostr;
      Formatter* f = Formatter::create(
        "json-pretty", "json-pretty", "json-pretty");
      f->open_object_section("perf_output");
      cct->get_perfcounters_collection()->dump_formatted(f, false, false);
      if (g_conf()->rocksdb_perf) {
        f->open_object_section("rocksdb_perf");
        os->get_db_statistics(f);
        f->close_section();
      }
      mempool::dump(f);
      {
        f->open_object_section("db_histogram");
        os->generate_db_histogram(f);
        f->close_section();
      }
      f->close_section();
      f->flush(ostr);
      delete f;
      if (unlink) {
        destroy_collections(os, collections);
      }
      os->umount();
      dout(0) << "FIO plugin perf dump:" << dendl;
      dout(0) << ostr.str() << dendl;
      // also mirror the perf dump to a file when requested
      if (perf_output_file) {
        try {
          std::ofstream foutput(*perf_output_file);
          foutput << ostr.str() << std::endl;
        } catch (std::exception &e) {
          std::cerr << "Unable to write formatted output to "
                    << *perf_output_file
                    << ", exception: " << e.what()
                    << std::endl;
        }
      }
    }
  }
};
TracepointProvider::Traits bluestore_tracepoint_traits("libbluestore_tp.so",
                                                       "bluestore_tracing");
// Bootstraps the shared Ceph context and ObjectStore from the job's
// engine options.  Throws (std::runtime_error / std::system_error) on
// any initialization failure; fio_ceph_os_setup() converts that into a
// setup error.
Engine::Engine(thread_data* td)
  : ref_count(0),
    unlink(td->o.unlink),
    perf_output_file(
      static_cast<Options*>(td->eo)->perf_output_file ?
      std::make_optional(static_cast<Options*>(td->eo)->perf_output_file) :
      std::nullopt)
{
  // add the ceph command line arguments
  auto o = static_cast<Options*>(td->eo);
  if (!o->conf) {
    throw std::runtime_error("missing conf option for ceph configuration file");
  }
  std::vector<const char*> args{
    "-i", "0", // identify as osd.0 for osd_data and osd_journal
    "--conf", o->conf, // use the requested conf file
  };
  if (td->o.directory) { // allow conf files to use ${fio_dir} for data
    args.emplace_back("--fio_dir");
    args.emplace_back(td->o.directory);
  }
  // claim the g_ceph_context reference and release it on destruction
  cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_OSD,
                    CODE_ENVIRONMENT_UTILITY,
                    CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
  common_init_finish(g_ceph_context);
  TracepointProvider::initialize<bluestore_tracepoint_traits>(g_ceph_context);
  // create the ObjectStore
  os = ObjectStore::create(g_ceph_context,
                           g_conf().get_val<std::string>("osd objectstore"),
                           g_conf().get_val<std::string>("osd data"),
                           g_conf().get_val<std::string>("osd journal"));
  if (!os)
    throw std::runtime_error("bad objectstore type " + g_conf()->osd_objectstore);
  // pick shard count the same way the OSD does: explicit setting wins,
  // otherwise use the hdd/ssd default matching the backing device
  unsigned num_shards;
  if(g_conf()->osd_op_num_shards)
    num_shards = g_conf()->osd_op_num_shards;
  else if(os->is_rotational())
    num_shards = g_conf()->osd_op_num_shards_hdd;
  else
    num_shards = g_conf()->osd_op_num_shards_ssd;
  os->set_cache_shards(num_shards);
  //normalize options: ensure each 'high' bound is >= its 'low' bound
  o->oi_attr_len_high = max(o->oi_attr_len_low, o->oi_attr_len_high);
  o->snapset_attr_len_high = max(o->snapset_attr_len_low,
                                 o->snapset_attr_len_high);
  o->pglog_omap_len_high = max(o->pglog_omap_len_low,
                               o->pglog_omap_len_high);
  o->pglog_dup_omap_len_high = max(o->pglog_dup_omap_len_low,
                                   o->pglog_dup_omap_len_high);
  o->_fastinfo_omap_len_high = max(o->_fastinfo_omap_len_low,
                                   o->_fastinfo_omap_len_high);
  int r = os->mkfs();
  if (r < 0)
    throw std::system_error(-r, std::system_category(), "mkfs failed");
  r = os->mount();
  if (r < 0)
    throw std::system_error(-r, std::system_category(), "mount failed");
  // create shared collections up to osd_pool_default_pg_num
  if (o->single_pool_mode) {
    uint64_t count = g_conf().get_val<uint64_t>("osd_pool_default_pg_num");
    if (count > td->o.nr_files)
      count = td->o.nr_files;
    init_collections(os, Collection::MIN_POOL_ID, collections, count);
  }
}
// All jobs must have deref'd (and thereby unmounted the store) before
// the singleton Engine is destroyed.
Engine::~Engine()
{
  ceph_assert(!ref_count);
}
// Binds a named object to the collection that stores it; the oid is
// derived from the collection's PG so it hashes into that PG.
struct Object {
  ghobject_t oid;
  Collection& coll;
  Object(const char* name, Collection& coll)
    : oid(hobject_t(name, "", CEPH_NOSNAP, coll.pg.ps(), coll.pg.pool(), "")),
      coll(coll) {}
};
/// treat each fio job either like a separate pool with its own collections and objects
/// or just a client using its own objects from the shared pool
struct Job {
  Engine* engine; //< shared ptr to the global Engine
  const unsigned subjob_number; //< subjob num
  std::vector<Collection> collections; //< job's private collections to spread objects over
  std::vector<Object> objects; //< associate an object with each fio_file
  std::vector<io_u*> events; //< completions for fio_ceph_os_event()
  const bool unlink; //< unlink objects on destruction
  bufferptr one_for_all_data; //< preallocated buffer long enough
                              //< to use for various operations
  // state for the bluestore throttle cycling in check_throttle()
  std::mutex throttle_lock;
  const vector<unsigned> throttle_values;
  const vector<unsigned> deferred_throttle_values;
  std::chrono::duration<double> cycle_throttle_period;
  mono_clock::time_point last = ceph::mono_clock::zero();
  unsigned index = 0;
  // Split a comma/quote-delimited list into unsigned values.
  // NOTE(review): std::stoul throws on non-numeric tokens — a malformed
  // job-file option would abort; confirm that is the intended behavior.
  static vector<unsigned> parse_throttle_str(const char *p) {
    vector<unsigned> ret;
    if (p == nullptr) {
      return ret;
    }
    ceph::for_each_substr(p, ",\"", [&ret] (auto &&s) mutable {
      if (s.size() > 0) {
        ret.push_back(std::stoul(std::string(s)));
      }
    });
    return ret;
  }
  void check_throttle();
  Job(Engine* engine, const thread_data* td);
  ~Job();
};
// Per-thread job setup: takes a reference on the shared Engine, sizes a
// scratch buffer large enough for the biggest configured attr/omap
// payload, creates (or adopts) collections, and associates one object
// with each fio file — optionally preallocating and/or validating them.
// Throws std::system_error on init failure (after dropping the Engine ref).
Job::Job(Engine* engine, const thread_data* td)
  : engine(engine),
    subjob_number(td->subjob_number),
    events(td->o.iodepth),
    unlink(td->o.unlink),
    throttle_values(
      parse_throttle_str(static_cast<Options*>(td->eo)->throttle_values)),
    deferred_throttle_values(
      parse_throttle_str(static_cast<Options*>(td->eo)->deferred_throttle_values)),
    cycle_throttle_period(
      static_cast<Options*>(td->eo)->cycle_throttle_period)
{
  engine->ref();
  auto o = static_cast<Options*>(td->eo);
  // size the shared scratch buffer for the largest configured payload
  unsigned long long max_data = max(o->oi_attr_len_high,
                                  o->snapset_attr_len_high);
  max_data = max(max_data, o->pglog_omap_len_high);
  max_data = max(max_data, o->pglog_dup_omap_len_high);
  max_data = max(max_data, o->_fastinfo_omap_len_high);
  one_for_all_data = buffer::create(max_data);
  std::vector<Collection>* colls;
  // create private collections up to osd_pool_default_pg_num
  if (!o->single_pool_mode) {
    uint64_t count = g_conf().get_val<uint64_t>("osd_pool_default_pg_num");
    if (count > td->o.nr_files)
      count = td->o.nr_files;
    // use the fio thread_number for our unique pool id
    const uint64_t pool = Collection::MIN_POOL_ID + td->thread_number + 1;
    init_collections(engine->os, pool, collections, count);
    colls = &collections;
  } else {
    colls = &engine->collections;
  }
  const uint64_t file_size = td->o.size / max(1u, td->o.nr_files);
  // NOTE(review): this single Transaction is std::move'd to
  // queue_transaction() inside the loop below and then reused on the
  // next iteration — this relies on the moved-from Transaction being
  // reusable; confirm against ObjectStore::Transaction's move semantics.
  ObjectStore::Transaction t;
  // create an object for each file in the job
  objects.reserve(td->o.nr_files);
  unsigned checked_or_preallocated = 0;
  for (uint32_t i = 0; i < td->o.nr_files; i++) {
    auto f = td->files[i];
    f->real_file_size = file_size;
    f->engine_pos = i;
    // associate each object with a collection in a round-robin fashion.
    auto& coll = (*colls)[i % colls->size()];
    objects.emplace_back(f->file_name, coll);
    if (o->preallocate_files) {
      auto& oid = objects.back().oid;
      t.touch(coll.cid, oid);
      t.truncate(coll.cid, oid, file_size);
      int r = engine->os->queue_transaction(coll.ch, std::move(t));
      if (r) {
        engine->deref();
        throw std::system_error(r, std::system_category(), "job init");
      }
    }
    if (o->check_files) {
      auto& oid = objects.back().oid;
      struct stat st;
      int r = engine->os->stat(coll.ch, oid, &st);
      // a pre-existing object must exist and already have the right size
      if (r || ((unsigned)st.st_size) != file_size) {
        derr << "Problem checking " << oid << ", r=" << r
             << ", st.st_size=" << st.st_size
             << ", file_size=" << file_size
             << ", nr_files=" << td->o.nr_files << dendl;
        engine->deref();
        throw std::system_error(
          r, std::system_category(), "job init -- cannot check file");
      }
    }
    if (o->check_files || o->preallocate_files) {
      ++checked_or_preallocated;
    }
  }
  if (o->check_files) {
    derr << "fio_ceph_objectstore checked " << checked_or_preallocated
         << " files"<< dendl;
  }
  if (o->preallocate_files ){
    derr << "fio_ceph_objectstore preallocated " << checked_or_preallocated
         << " files"<< dendl;
  }
}
// Per-thread job teardown: optionally removes the job's objects and
// private collections, then drops this job's reference on the Engine
// (the last deref unmounts the store).
Job::~Job()
{
  if (unlink) {
    bool failed = false;
    // remove our objects
    for (auto& obj : objects) {
      // Fresh Transaction per object: queue_transaction() consumes the
      // transaction via std::move, so the previous single-instance reuse
      // operated on a moved-from object.
      ObjectStore::Transaction t;
      t.remove(obj.coll.cid, obj.oid);
      int r = engine->os->queue_transaction(obj.coll.ch, std::move(t));
      if (r && !failed) {
        derr << "job cleanup failed with " << cpp_strerror(-r) << dendl;
        failed = true;
      }
    }
    destroy_collections(engine->os, collections);
  }
  engine->deref();
}
// Periodically cycles the bluestore (deferred) throttle settings.
// Only subjob 0 drives the cycling; 'index' walks the cartesian product
// of throttle_values x deferred_throttle_values, advancing once per
// cycle_throttle_period (or immediately on the first call).
void Job::check_throttle()
{
  if (subjob_number != 0)
    return;
  std::lock_guard<std::mutex> l(throttle_lock);
  if (throttle_values.empty() && deferred_throttle_values.empty())
    return;
  if (ceph::mono_clock::is_zero(last) ||
      ((cycle_throttle_period != cycle_throttle_period.zero()) &&
       (ceph::mono_clock::now() - last) > cycle_throttle_period)) {
    // treat an empty list as size 1 so the modular arithmetic still works
    unsigned tvals = throttle_values.size() ? throttle_values.size() : 1;
    unsigned dtvals = deferred_throttle_values.size() ? deferred_throttle_values.size() : 1;
    if (!throttle_values.empty()) {
      std::string val = std::to_string(throttle_values[index % tvals]);
      std::cerr << "Setting bluestore_throttle_bytes to " << val << std::endl;
      int r = engine->cct->_conf.set_val(
	"bluestore_throttle_bytes",
	val,
	nullptr);
      ceph_assert(r == 0);
    }
    if (!deferred_throttle_values.empty()) {
      std::string val = std::to_string(deferred_throttle_values[(index / tvals) % dtvals]);
      std::cerr << "Setting bluestore_deferred_throttle_bytes to " << val << std::endl;
      int r = engine->cct->_conf.set_val(
	"bluestore_throttle_deferred_bytes",
	val,
	nullptr);
      ceph_assert(r == 0);
    }
    engine->cct->_conf.apply_changes(nullptr);
    index++;
    index %= tvals * dtvals;
    last = ceph::mono_clock::now();
  }
}
// fio 'setup' hook: creates/attaches the shared Engine and a per-thread
// Job, stashing the Job in td->io_ops_data.  Returns 0 on success, -1
// if initialization threw.
int fio_ceph_os_setup(thread_data* td)
{
  // if there are multiple jobs, they must run in the same process against a
  // single instance of the ObjectStore. explicitly disable fio's default
  // job-per-process configuration
  td->o.use_thread = 1;
  try {
    // get or create the global Engine instance
    auto engine = Engine::get_instance(td);
    // create a Job for this thread
    td->io_ops_data = new Job(engine, td);
  } catch (std::exception& e) {
    std::cerr << "setup failed with " << e.what() << std::endl;
    return -1;
  }
  return 0;
}
// fio 'cleanup' hook: destroys the per-thread Job built in
// fio_ceph_os_setup(), detaching it from fio first.
void fio_ceph_os_cleanup(thread_data* td)
{
  Job* per_thread_job = static_cast<Job*>(td->io_ops_data);
  td->io_ops_data = nullptr;
  delete per_thread_job;
}
// fio 'event' hook: hand back the event-th completion recorded by the
// preceding fio_ceph_os_getevents() call.
io_u* fio_ceph_os_event(thread_data* td, int event)
{
  Job* per_thread_job = static_cast<Job*>(td->io_ops_data);
  return per_thread_job->events[event];
}
// fio 'getevents' hook: polls all inflight io_u's until at least 'min'
// have completed, stashing completions into job->events for later
// fio_ceph_os_event() calls.  Completion is signalled by UnitComplete
// setting u->engine_data non-null; it is cleared here when harvested.
// NOTE: the 'max' and timeout 't' parameters are ignored; the loop
// busy-waits with a 100us sleep between polls.
int fio_ceph_os_getevents(thread_data* td, unsigned int min,
                          unsigned int max, const timespec* t)
{
  auto job = static_cast<Job*>(td->io_ops_data);
  unsigned int events = 0;
  io_u* u = NULL;
  unsigned int i = 0;
  // loop through inflight ios until we find 'min' completions
  do {
    io_u_qiter(&td->io_u_all, u, i) {
      if (!(u->flags & IO_U_F_FLIGHT))
        continue;
      if (u->engine_data) {
        u->engine_data = nullptr;
        job->events[events] = u;
        events++;
      }
    }
    if (events >= min)
      break;
    usleep(100);
  } while (1);
  return events;
}
/// completion context for ObjectStore::queue_transaction()
class UnitComplete : public Context {
  io_u* u;
 public:
  explicit UnitComplete(io_u* u) : u(u) {}
  // Invoked when the transaction commits.  The completion status 'r' is
  // not propagated; engine_data is simply flipped to non-null.
  void finish(int r) {
    // mark the pointer to indicate completion for fio_ceph_os_getevents()
    u->engine_data = reinterpret_cast<void*>(1ull);
  }
};
// fio 'queue' hook.  Writes are queued asynchronously as ObjectStore
// transactions (completion signalled via UnitComplete) and optionally
// decorated with OI/snapset attrs, '_fastinfo' omap, and a simulated
// pglog (entries, dups, and trimming) to mimic OSD write amplification.
// Reads are synchronous and return FIO_Q_COMPLETED immediately.
enum fio_q_status fio_ceph_os_queue(thread_data* td, io_u* u)
{
  fio_ro_check(td, u);
  auto o = static_cast<const Options*>(td->eo);
  auto job = static_cast<Job*>(td->io_ops_data);
  auto& object = job->objects[u->file->engine_pos];
  auto& coll = object.coll;
  auto& os = job->engine->os;
  job->check_throttle();
  if (u->ddir == DDIR_WRITE) {
    // provide a hint if we're likely to read this data back
    const int flags = td_rw(td) ? CEPH_OSD_OP_FLAG_FADVISE_WILLNEED : 0;
    bufferlist bl;
    bl.push_back(buffer::copy(reinterpret_cast<char*>(u->xfer_buf),
                              u->xfer_buflen ) );
    map<string,bufferptr,less<>> attrset;
    map<string, bufferlist> omaps;
    // enqueue a write transaction on the collection's handle
    ObjectStore::Transaction t;
    char ver_key[64];
    // fill attrs if any
    if (o->oi_attr_len_high) {
      ceph_assert(o->oi_attr_len_high >= o->oi_attr_len_low);
      // fill with the garbage as we do not care of the actual content...
      job->one_for_all_data.set_length(
        ceph::util::generate_random_number(
	  o->oi_attr_len_low, o->oi_attr_len_high));
      attrset["_"] = job->one_for_all_data;
    }
    if (o->snapset_attr_len_high) {
      ceph_assert(o->snapset_attr_len_high >= o->snapset_attr_len_low);
      job->one_for_all_data.set_length(
        ceph::util::generate_random_number
	  (o->snapset_attr_len_low, o->snapset_attr_len_high));
      attrset["snapset"] = job->one_for_all_data;
    }
    if (o->_fastinfo_omap_len_high) {
      ceph_assert(o->_fastinfo_omap_len_high >= o->_fastinfo_omap_len_low);
      // fill with the garbage as we do not care of the actual content...
      job->one_for_all_data.set_length(
	ceph::util::generate_random_number(
	  o->_fastinfo_omap_len_low, o->_fastinfo_omap_len_high));
      omaps["_fastinfo"].append(job->one_for_all_data);
    }
    uint64_t pglog_trim_head = 0, pglog_trim_tail = 0;
    uint64_t pglog_dup_trim_head = 0, pglog_dup_trim_tail = 0;
    if (o->simulate_pglog) {
      uint64_t pglog_ver_cnt = 0;
      {
        // advance this collection's pglog head under its lock, and
        // decide whether a trim (and dup trim) is due this write
	std::lock_guard<std::mutex> l(*coll.lock);
	pglog_ver_cnt = coll.pglog_ver_head++;
	if (o->pglog_omap_len_high &&
	    pglog_ver_cnt >=
	      coll.pglog_ver_tail +
	        g_conf()->osd_min_pg_log_entries + g_conf()->osd_pg_log_trim_min) {
	  pglog_trim_tail = coll.pglog_ver_tail;
	  coll.pglog_ver_tail = pglog_trim_head =
	    pglog_trim_tail + g_conf()->osd_pg_log_trim_min;
	  if (o->pglog_dup_omap_len_high &&
	      pglog_ver_cnt >=
		coll.pglog_dup_ver_tail + g_conf()->osd_pg_log_dups_tracked +
		  g_conf()->osd_pg_log_trim_min) {
	    pglog_dup_trim_tail = coll.pglog_dup_ver_tail;
	    coll.pglog_dup_ver_tail = pglog_dup_trim_head =
	      pglog_dup_trim_tail + g_conf()->osd_pg_log_trim_min;
	  }
	}
      }
      if (o->pglog_omap_len_high) {
	ceph_assert(o->pglog_omap_len_high >= o->pglog_omap_len_low);
	snprintf(ver_key, sizeof(ver_key),
	  "0000000011.%020llu", (unsigned long long)pglog_ver_cnt);
	// fill with the garbage as we do not care of the actual content...
	job->one_for_all_data.set_length(
	  ceph::util::generate_random_number(
	    o->pglog_omap_len_low, o->pglog_omap_len_high));
	omaps[ver_key].append(job->one_for_all_data);
      }
      if (o->pglog_dup_omap_len_high) {
	//insert dup: trimmed entries are re-inserted as dup_* keys
	ceph_assert(o->pglog_dup_omap_len_high >= o->pglog_dup_omap_len_low);
        for( auto i = pglog_trim_tail; i < pglog_trim_head; ++i) {
	  snprintf(ver_key, sizeof(ver_key),
	    "dup_0000000011.%020llu", (unsigned long long)i);
	  // fill with the garbage as we do not care of the actual content...
	  job->one_for_all_data.set_length(
	    ceph::util::generate_random_number(
	      o->pglog_dup_omap_len_low, o->pglog_dup_omap_len_high));
	  omaps[ver_key].append(job->one_for_all_data);
	}
      }
    }
    if (!attrset.empty()) {
      t.setattrs(coll.cid, object.oid, attrset);
    }
    t.write(coll.cid, object.oid, u->offset, u->xfer_buflen, bl, flags);
    // remove trimmed pglog (and dup) keys from the pgmeta object
    set<string> rmkeys;
    for( auto i = pglog_trim_tail; i < pglog_trim_head; ++i) {
      snprintf(ver_key, sizeof(ver_key),
	"0000000011.%020llu", (unsigned long long)i);
      rmkeys.emplace(ver_key);
    }
    for( auto i = pglog_dup_trim_tail; i < pglog_dup_trim_head; ++i) {
      snprintf(ver_key, sizeof(ver_key),
	"dup_0000000011.%020llu", (unsigned long long)i);
      rmkeys.emplace(ver_key);
    }
    if (rmkeys.size()) {
      ghobject_t pgmeta_oid(coll.pg.make_pgmeta_oid());
      t.omap_rmkeys(coll.cid, pgmeta_oid, rmkeys);
    }
    if (omaps.size()) {
      ghobject_t pgmeta_oid(coll.pg.make_pgmeta_oid());
      t.omap_setkeys(coll.cid, pgmeta_oid, omaps);
    }
    t.register_on_commit(new UnitComplete(u));
    os->queue_transaction(coll.ch,
                          std::move(t));
    return FIO_Q_QUEUED;
  }
  if (u->ddir == DDIR_READ) {
    // ObjectStore reads are synchronous, so make the call and return COMPLETED
    bufferlist bl;
    int r = os->read(coll.ch, object.oid, u->offset, u->xfer_buflen, bl);
    if (r < 0) {
      u->error = r;
      td_verror(td, u->error, "xfer");
    } else {
      bl.begin().copy(bl.length(), static_cast<char*>(u->xfer_buf));
      u->resid = u->xfer_buflen - r;
    }
    return FIO_Q_COMPLETED;
  }
  derr << "WARNING: Only DDIR_READ and DDIR_WRITE are supported!" << dendl;
  u->error = -EINVAL;
  td_verror(td, u->error, "xfer");
  return FIO_Q_COMPLETED;
}
// fio 'commit' hook: intentionally a no-op (always returns 0).
int fio_ceph_os_commit(thread_data* td)
{
  // commit() allows the engine to batch up queued requests to be submitted all
  // at once. it would be natural for queue() to collect transactions in a list,
  // and use commit() to pass them all to ObjectStore::queue_transactions(). but
  // because we spread objects over multiple collections, we a) need to use a
  // different sequencer for each collection, and b) are less likely to see a
  // benefit from batching requests within a collection
  return 0;
}
// open/close are noops. we set the FIO_DISKLESSIO flag in ioengine_ops to
// prevent fio from creating the files
int fio_ceph_os_open(thread_data* td, fio_file* f) { return 0; }
int fio_ceph_os_close(thread_data* td, fio_file* f) { return 0; }
// per-io_u init: start every io_u in the "not completed" state
int fio_ceph_os_io_u_init(thread_data* td, io_u* u)
{
  // no data is allocated, we just use the pointer as a boolean 'completed' flag
  u->engine_data = nullptr;
  return 0;
}
// per-io_u teardown: nothing to free, just clear the completion flag
void fio_ceph_os_io_u_free(thread_data* td, io_u* u)
{
  u->engine_data = nullptr;
}
// ioengine_ops for get_ioengine()
// Wires the hooks above into fio's ioengine_ops table; FIO_DISKLESSIO
// tells fio not to create/open real files on disk.
struct ceph_ioengine : public ioengine_ops {
  ceph_ioengine() : ioengine_ops({}) {
    name        = "ceph-os";
    version     = FIO_IOOPS_VERSION;
    flags       = FIO_DISKLESSIO;
    setup       = fio_ceph_os_setup;
    queue       = fio_ceph_os_queue;
    commit      = fio_ceph_os_commit;
    getevents   = fio_ceph_os_getevents;
    event       = fio_ceph_os_event;
    cleanup     = fio_ceph_os_cleanup;
    open_file   = fio_ceph_os_open;
    close_file  = fio_ceph_os_close;
    io_u_init   = fio_ceph_os_io_u_init;
    io_u_free   = fio_ceph_os_io_u_free;
    options     = ceph_options.data();
    option_struct_size = sizeof(struct Options);
  }
};
} // anonymous namespace
extern "C" {
// the exported fio engine interface
// fio dlopen()s this plugin and calls get_ioengine() to obtain the
// (process-lifetime) ioengine_ops table.
void get_ioengine(struct ioengine_ops** ioengine_ptr) {
  static ceph_ioengine ioengine;
  *ioengine_ptr = &ioengine;
}
} // extern "C"
| 29,440 | 30.220573 | 92 | cc |
null | ceph-main/src/test/fio/fio_librgw.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <stdint.h>
#include <tuple>
#include <vector>
#include <functional>
#include <iostream>
#include <semaphore.h> // XXX kill this?
#include "fmt/include/fmt/format.h"
#include "include/rados/librgw.h"
#include "include/rados/rgw_file.h"
//#include "rgw/rgw_file.h"
//#include "rgw/rgw_lib_frontend.h" // direct requests
/* naughty fio.h leaks min and max as C macros--include it last */
#include <fio.h>
#include <optgroup.h>
#undef min
#undef max
namespace {
// Per-io_u bookkeeping pairing a fio io_u with its completion flag.
struct librgw_iou {
  struct io_u *io_u;
  int io_complete;
};
// Per-job engine state: the librgw handle, mounted fs view, the target
// bucket handle, and any object handles acquired during the run.
// NOTE(review): this struct owns a malloc'd aio_events array but does
// not delete its copy operations — copying an instance would double-free
// in ~librgw_data(); it appears to be used only via a single per-thread
// instance, but confirm.
struct librgw_data {
  io_u** aio_events;          // iodepth-sized completion table (malloc'd, zeroed)
  librgw_t rgw_h;
  rgw_fs* fs;
  rgw_file_handle* bucket_fh;
  std::vector<rgw_file_handle*> fh_vec;  // handles to release in release_handles()
  librgw_data(thread_data* td)
    : rgw_h(nullptr), fs(nullptr), bucket_fh(nullptr)
    {
      auto size = td->o.iodepth * sizeof(io_u*);
      aio_events = static_cast<io_u**>(malloc(size));
      memset(aio_events, 0, size);
    }
  // remember an object handle so it can be released later
  void save_handle(rgw_file_handle* fh) {
    fh_vec.push_back(fh);
  }
  // drop every saved object handle back to librgw
  void release_handles() {
    for (auto object_fh : fh_vec) {
      rgw_fh_rele(fs, object_fh, RGW_FH_RELE_FLAG_NONE);
    }
    fh_vec.clear();
  }
  ~librgw_data() {
    free(aio_events);
  }
};
// fio job-file options for the librgw engine (filled in by fio via the
// offsetof()s in 'options' below).
struct opt_struct {
  struct thread_data *td;
  const char* config; /* can these be std::strings? */
  const char* cluster;
  const char* name; // instance?
  const char* init_args;
  const char* access_key;
  const char* secret_key;
  const char* userid;
  const char* bucket_name;
  uint32_t owner_uid = 867;   // default owner for a bucket we create
  uint32_t owner_gid = 5309;
};
// attrs rgw_mkdir() should take from the supplied struct stat
uint32_t create_mask = RGW_SETATTR_UID | RGW_SETATTR_GID | RGW_SETATTR_MODE;
/* borrowed from fio_ceph_objectstore */
// Build a fio_option: value-initialize, apply engine-wide defaults,
// then let the supplied callable set the option-specific fields.
template <class F>
fio_option make_option(F&& func)
{
  fio_option opt{};              // zero-initialized
  opt.category = FIO_OPT_C_ENGINE;
  opt.group = FIO_OPT_G_INVALID;
  func(std::ref(opt));
  return opt;
}
// job-file options table handed to fio; 'null'-terminated per fio's API
static std::vector<fio_option> options = {
  make_option([] (fio_option& o) {
    o.name     = "ceph_conf";
    o.lname    = "ceph configuration file";
    o.type     = FIO_OPT_STR_STORE;
    o.help     = "Path to ceph.conf file";
    o.off1     = offsetof(opt_struct, config);
  }),
  make_option([] (fio_option& o) {
    o.name     = "ceph_name";
    o.lname    = "ceph instance name";
    o.type     = FIO_OPT_STR_STORE;
    o.help     = "Name of this program instance";
    o.off1     = offsetof(opt_struct, name);
    o.category = FIO_OPT_C_ENGINE;
    o.group    = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name     = "ceph_cluster";
    o.lname    = "ceph cluster name";
    o.type     = FIO_OPT_STR_STORE;
    o.help     = "Name of ceph cluster (default=ceph)";
    o.off1     = offsetof(opt_struct, cluster);
    o.category = FIO_OPT_C_ENGINE;
    o.group    = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name     = "ceph_init_args";
    o.lname    = "ceph init args";
    o.type     = FIO_OPT_STR_STORE;
    o.help     = "Extra ceph arguments (e.g., -d --debug-rgw=16)";
    o.off1     = offsetof(opt_struct, init_args);
    o.category = FIO_OPT_C_ENGINE;
    o.group    = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name     = "access_key";
    o.lname    = "AWS access key";
    o.type     = FIO_OPT_STR_STORE;
    o.help     = "AWS access key";
    o.off1     = offsetof(opt_struct, access_key);
    o.category = FIO_OPT_C_ENGINE;
    o.group    = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name     = "secret_key";
    o.lname    = "AWS secret key";
    o.type     = FIO_OPT_STR_STORE;
    o.help     = "AWS secret key";
    o.off1     = offsetof(opt_struct, secret_key);
    o.category = FIO_OPT_C_ENGINE;
    o.group    = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name     = "userid";
    o.lname    = "userid";
    o.type     = FIO_OPT_STR_STORE;
    o.help     = "userid corresponding to access key";
    o.off1     = offsetof(opt_struct, userid);
    o.category = FIO_OPT_C_ENGINE;
    o.group    = FIO_OPT_G_INVALID;
  }),
  make_option([] (fio_option& o) {
    o.name     = "bucket_name";
    o.lname    = "S3 bucket";
    o.type     = FIO_OPT_STR_STORE;
    o.help     = "S3 bucket to operate on";
    o.off1     = offsetof(opt_struct, bucket_name);
    o.category = FIO_OPT_C_ENGINE;
    o.group    = FIO_OPT_G_INVALID;
  }),
  {} // fio expects a 'null'-terminated list
};
// Builds an argv-style argument vector (at most 8 slots; slot 0 is fixed
// to "librgw") for handing to librgw_create().  Owns strdup'd copies of
// every argument and releases them on destruction.
struct save_args {
  int argc;
  char *argv[8];
  save_args() : argc(1)
  {
    argv[0] = strdup("librgw");
    for (int ix = 1; ix < 8; ++ix) {
      argv[ix] = nullptr;
    }
  }
  // append a copy of sarg; silently ignored once the fixed-size table is
  // full (previously this would have written past the end of argv)
  void push_arg(const std::string sarg) {
    if (argc < 8) {
      argv[argc++] = strdup(sarg.c_str());
    }
  }
  ~save_args() {
    for (int ix = 0; ix < argc; ++ix) {
      // free the strdup'd copy; the original only nulled the pointer,
      // leaking every argument string
      free(argv[ix]);
      argv[ix] = nullptr;
    }
  }
} args;
/*
* It looks like the setup function is called once, on module load.
* It's not documented in the skeleton driver.
*/
// Module-level setup: bootstraps librgw from the job options, mounts the
// RGW filesystem view, and looks up (creating if needed) the target
// bucket.  All state is stashed in td->io_ops_data for the other hooks.
// Returns 0 on success or a non-zero error.
static int fio_librgw_setup(struct thread_data* td)
{
  opt_struct& o = *(reinterpret_cast<opt_struct*>(td->eo));
  librgw_data* data = nullptr;
  int r = 0;
  dprint(FD_IO, "fio_librgw_setup\n");
  if (! td->io_ops_data) {
    data = new librgw_data(td);
    /* init args */
    std::string sopt;
    if (o.config) {
      sopt = fmt::format("--conf={}", o.config);
      args.push_arg(sopt);
    }
    if (o.name) {
      // debug print moved inside the null check: streaming a null
      // char* into std::cout is undefined behavior
      std::cout << o.name << std::endl;
      sopt = fmt::format("--name={}", o.name);
      args.push_arg(sopt);
    }
    if (o.cluster) {
      sopt = fmt::format("--cluster={}", o.cluster);
      args.push_arg(sopt);
    }
    if (o.init_args) {
      args.push_arg(std::string(o.init_args));
    }
    r = librgw_create(&data->rgw_h, args.argc, args.argv);
    if (!! r) {
      dprint(FD_IO, "librgw_create failed\n");
      return r;
    }
    r = rgw_mount2(data->rgw_h, o.userid, o.access_key, o.secret_key, "/",
                   &data->fs, RGW_MOUNT_FLAG_NONE);
    if (!! r) {
      dprint(FD_IO, "rgw_mount2 failed\n");
      return r;
    }
    /* go ahead and lookup the bucket as well */
    r = rgw_lookup(data->fs, data->fs->root_fh, o.bucket_name,
                   &data->bucket_fh, nullptr, 0, RGW_LOOKUP_FLAG_NONE);
    if (! data->bucket_fh) {
      dprint(FD_IO, "rgw_lookup on bucket %s failed, will create\n",
	     o.bucket_name);
      struct stat st;
      st.st_uid = o.owner_uid;
      st.st_gid = o.owner_gid;
      // octal mode bits; the previous decimal literal 755 (== 01363)
      // produced nonsense permission bits
      st.st_mode = 0755;
      r = rgw_mkdir(data->fs, data->fs->root_fh, o.bucket_name,
		    &st, create_mask, &data->bucket_fh, RGW_MKDIR_FLAG_NONE);
      if (! data->bucket_fh) {
	dprint(FD_IO, "rgw_mkdir for bucket %s failed\n", o.bucket_name);
	return EINVAL;
      }
    }
    td->io_ops_data = data;
  }
  // all jobs share this process; run them as threads
  td->o.use_thread = 1;
  if (r != 0) {
    abort();
  }
  return r;
}
/*
 * The init function is called once per thread/process, and should set up
 * any structures that this io engine requires to keep track of io. Not
 * required.
 */
static int fio_librgw_init(struct thread_data *td)
{
  // all per-process state was already created in fio_librgw_setup()
  dprint(FD_IO, "fio_librgw_init\n");
  return 0;
}
/*
 * This is paired with the ->init() function and is called when a thread is
 * done doing io. Should tear down anything setup by the ->init() function.
 * Not required.
 *
 * N.b., the cohort driver made this idempotent by allocating data in
 * setup, clearing data here if present, and doing nothing in the
 * subsequent per-thread invocations.
 */
static void fio_librgw_cleanup(struct thread_data *td)
{
  // NOTE(review): the return codes collected in 'r' are never inspected;
  // teardown is best-effort
  int r = 0;
  dprint(FD_IO, "fio_librgw_cleanup\n");
  /* cleanup specific data */
  librgw_data* data = static_cast<librgw_data*>(td->io_ops_data);
  if (data) {
    /* release active handles */
    data->release_handles();
    if (data->bucket_fh) {
      r = rgw_fh_rele(data->fs, data->bucket_fh, 0 /* flags */);
    }
    r = rgw_umount(data->fs, RGW_UMOUNT_FLAG_NONE);
    librgw_shutdown(data->rgw_h);
    td->io_ops_data = nullptr;
    delete data;
  }
}
/*
* The ->prep() function is called for each io_u prior to being submitted
* with ->queue(). This hook allows the io engine to perform any
* preparatory actions on the io_u, before being submitted. Not required.
*/
static int fio_librgw_prep(struct thread_data *td, struct io_u *io_u)
{
  /* no per-io preparation is needed for this engine */
  return 0;
}
/*
* The ->event() hook is called to match an event number with an io_u.
* After the core has called ->getevents() and it has returned eg 3,
* the ->event() hook must return the 3 events that have completed for
* subsequent calls to ->event() with [0-2]. Required.
*/
static struct io_u *fio_librgw_event(struct thread_data *td, int event)
{
  /* the engine completes everything synchronously in queue(), so there
   * are never any async events to hand back */
  return nullptr;
}
/*
* The ->getevents() hook is used to reap completion events from an async
* io engine. It returns the number of completed events since the last call,
* which may then be retrieved by calling the ->event() hook with the event
* numbers. Required.
*/
static int fio_librgw_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
  /* synchronous engine: no completion events are ever pending */
  return 0;
}
/*
* The ->cancel() hook attempts to cancel the io_u. Only relevant for
* async io engines, and need not be supported.
*/
static int fio_librgw_cancel(struct thread_data *td, struct io_u *io_u)
{
  /* nothing to cancel: io completes inside queue() */
  return 0;
}
/*
* The ->queue() hook is responsible for initiating io on the io_u
* being passed in. If the io engine is a synchronous one, io may complete
* before ->queue() returns. Required.
*
* The io engine must transfer in the direction noted by io_u->ddir
* to the buffer pointed to by io_u->xfer_buf for as many bytes as
* io_u->xfer_buflen. Residual data count may be set in io_u->resid
* for a short read/write.
*/
static enum fio_q_status fio_librgw_queue(struct thread_data *td,
					  struct io_u *io_u)
{
  librgw_data* data = static_cast<librgw_data*>(td->io_ops_data);
  const char* object = io_u->file->file_name;
  struct rgw_file_handle* object_fh = nullptr;
  size_t nbytes = 0;
  int r = 0;

  /*
   * Double sanity check to catch errant write on a readonly setup
   */
  fio_ro_check(td, io_u);

  if (io_u->ddir == DDIR_WRITE) {
    /* Do full write cycle: lookup(create) + open + write; the shared
     * close/save below finishes the cycle */
    r = rgw_lookup(data->fs, data->bucket_fh, object, &object_fh, nullptr, 0,
		  RGW_LOOKUP_FLAG_CREATE);
    if (!! r) {
      dprint(FD_IO, "rgw_lookup failed to create filehandle for %s\n",
	     object);
      goto out;
    }

    r = rgw_open(data->fs, object_fh, 0 /* posix flags */, 0 /* flags */);
    if (!! r) {
      dprint(FD_IO, "rgw_open failed to create filehandle for %s\n",
	     object);
      rgw_fh_rele(data->fs, object_fh, RGW_FH_RELE_FLAG_NONE);
      object_fh = nullptr;
      goto out;
    }

    /* librgw can write at any offset, but only sequentially
     * starting at 0, in one open/write/close cycle */
    r = rgw_write(data->fs, object_fh, 0, io_u->xfer_buflen, &nbytes,
		  (void*) io_u->xfer_buf, RGW_WRITE_FLAG_NONE);
    if (!! r) {
      dprint(FD_IO, "rgw_write failed for %s\n",
	     object);
    }
    /* NOTE: do NOT close/save here -- the shared path below does it.
     * Closing here as well closed the handle twice and queued it twice
     * for release, producing a double rgw_fh_rele at cleanup. */
  } else if (io_u->ddir == DDIR_READ) {

    r = rgw_lookup(data->fs, data->bucket_fh, object, &object_fh,
		  nullptr, 0, RGW_LOOKUP_FLAG_NONE);
    if (!! r) {
      dprint(FD_IO, "rgw_lookup failed to create filehandle for %s\n",
	     object);
      goto out;
    }

    r = rgw_open(data->fs, object_fh, 0 /* posix flags */, 0 /* flags */);
    if (!! r) {
      dprint(FD_IO, "rgw_open failed to create filehandle for %s\n",
	     object);
      rgw_fh_rele(data->fs, object_fh, RGW_FH_RELE_FLAG_NONE);
      object_fh = nullptr;
      goto out;
    }

    r = rgw_read(data->fs, object_fh, io_u->offset, io_u->xfer_buflen,
		&nbytes, io_u->xfer_buf, RGW_READ_FLAG_NONE);
    if (!! r) {
      dprint(FD_IO, "rgw_read failed for %s\n",
	     object);
    }
  } else {
    dprint(FD_IO, "%s: Warning: unhandled ddir: %d\n", __func__,
	   io_u->ddir);
  }

  /* shared close for both the read and the write cycle */
  if (object_fh) {
    r = rgw_close(data->fs, object_fh, 0 /* flags */);
    /* object_fh is closed but still reachable, save it */
    data->save_handle(object_fh);
  }

out:
  /*
   * Could return FIO_Q_QUEUED for a queued request,
   * FIO_Q_COMPLETED for a completed request, and FIO_Q_BUSY
   * if we could queue no more at this point (you'd have to
   * define ->commit() to handle that).
   */
  return FIO_Q_COMPLETED;
}
int fio_librgw_commit(thread_data* td)
{
  /* commit() would let the engine submit batches collected by queue().
   * This engine completes each io synchronously in queue(), so batching
   * buys nothing here; the hook is a no-op kept for interface
   * completeness. */
  return 0;
}
/*
* Hook for opening the given file. Unless the engine has special
* needs, it usually just provides generic_open_file() as the handler.
*/
static int fio_librgw_open(struct thread_data *td, struct fio_file *f)
{
  /* open/close per object is done inside queue(); keep this hook inert */
  return 0;
}
/*
* Hook for closing a file. See fio_librgw_open().
*/
static int fio_librgw_close(struct thread_data *td, struct fio_file *f)
{
  /* see fio_librgw_open(): file lifecycle is handled in queue() */
  return 0;
}
/* XXX next two probably not needed */
int fio_librgw_io_u_init(thread_data* td, io_u* u)
{
  /* nothing is allocated per-io; engine_data merely serves as a
   * boolean 'completed' marker, so start it out cleared */
  u->engine_data = nullptr;
  return 0;
}
void fio_librgw_io_u_free(thread_data* td, io_u* u)
{
  /* nothing was allocated in io_u_init, just reset the marker */
  u->engine_data = nullptr;
}
// Wires the static hooks above into fio's ioengine_ops vtable.
// FIO_DISKLESSIO tells fio not to expect real files on disk.
struct librgw_ioengine : public ioengine_ops
{
  librgw_ioengine() : ioengine_ops({}) {
    name        = "librgw";
    version     = FIO_IOOPS_VERSION;
    flags       = FIO_DISKLESSIO;
    setup       = fio_librgw_setup;
    init        = fio_librgw_init;
    queue       = fio_librgw_queue;
    commit      = fio_librgw_commit;
    getevents   = fio_librgw_getevents;
    event       = fio_librgw_event;
    cleanup     = fio_librgw_cleanup;
    open_file   = fio_librgw_open;
    close_file  = fio_librgw_close;
    io_u_init   = fio_librgw_io_u_init;
    io_u_free   = fio_librgw_io_u_free;
    // engine-specific option table and the size of the struct fio
    // fills in when parsing those options
    options     = ::options.data();
    option_struct_size = sizeof(opt_struct);
  }
};
} // namespace
extern "C" {
// the exported fio engine interface; fio dlopen()s the plugin and calls
// this to obtain the ops table (function-local static gives us a single
// lazily-constructed instance)
void get_ioengine(struct ioengine_ops** ioengine_ptr) {
  static librgw_ioengine ioengine;
  *ioengine_ptr = &ioengine;
}
} // extern "C"
| 15,229 | 27.151571 | 82 | cc |
null | ceph-main/src/test/fio/ring_buffer.h | /*
 * Very simple and fast lockless ring buffer implementation for
* one producer and one consumer.
*/
#include <stdint.h>
#include <stddef.h>
/* Do not overcomplicate, choose generic x86 case */
#define L1_CACHE_BYTES 64
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
/*
 * Single-producer/single-consumer ring of void* slots.  read_idx and
 * write_idx live on separate cache lines so the producer and consumer
 * cores do not false-share.  Indices are kept in BYTES and run over a
 * 2*size window (high_mask) so that full vs. empty can be told apart
 * without a separate counter.
 */
struct ring_buffer
{
	unsigned int read_idx  __cacheline_aligned;   /* consumer-owned */
	unsigned int write_idx __cacheline_aligned;   /* producer-owned */
	unsigned int size;        /* capacity in bytes (pow2 * sizeof(void*)) */
	unsigned int low_mask;    /* wraps an index into [0, size) */
	unsigned int high_mask;   /* wraps the 2*size virtual index space */
	unsigned int bit_shift;   /* log2(sizeof(void*)), bytes -> slots */
	void *data_ptr;           /* slot storage */
};
/*
 * Round v up to the next power of two (v itself if already a power of
 * two).  Classic bit-smearing: copy the highest set bit into every
 * lower position, then add one.  Assumes 32-bit unsigned int; an input
 * of 0 yields 0.
 */
static inline unsigned int upper_power_of_two(unsigned int v)
{
	unsigned int shift;

	v--;
	for (shift = 1; shift < 32; shift <<= 1)
		v |= v >> shift;
	v++;

	return v;
}
/*
 * Initialize @rbuf to hold @size pointers (rounded up to a power of
 * two -- the index masking requires it).  Returns 0 on success, -1 if
 * the slot array cannot be allocated; previously a malloc failure was
 * silently ignored and later dereferenced.
 */
static inline int ring_buffer_init(struct ring_buffer* rbuf, unsigned int size)
{
	/* Must be pow2 */
	if (((size-1) & size))
		size = upper_power_of_two(size);
	size *= sizeof(void *);

	rbuf->data_ptr = malloc(size);
	if (!rbuf->data_ptr)
		return -1;
	rbuf->size = size;
	rbuf->read_idx = 0;
	rbuf->write_idx = 0;
	/* log2(sizeof(void*)): converts byte deltas into slot counts */
	rbuf->bit_shift = __builtin_ffs(sizeof(void *))-1;
	rbuf->low_mask = rbuf->size - 1;
	rbuf->high_mask = rbuf->size * 2 - 1;

	return 0;
}
/* Release the slot storage allocated by ring_buffer_init(). */
static inline void ring_buffer_deinit(struct ring_buffer* rbuf)
{
	free(rbuf->data_ptr);
}
/*
 * Number of occupied slots.  The full barrier makes the most recent
 * producer/consumer index updates visible before the subtraction; the
 * wrap-safe byte delta is shifted down into a slot count.
 */
static inline unsigned int ring_buffer_used_size(const struct ring_buffer* rbuf)
{
	__sync_synchronize();
	return ((rbuf->write_idx - rbuf->read_idx) & rbuf->high_mask) >>
		rbuf->bit_shift;
}
/*
 * Producer side: store @ptr into the next free slot, then publish it by
 * advancing write_idx.  The barrier between the store and the index
 * update is what makes the pointer visible to the consumer before it
 * can observe the new write_idx.
 */
static inline void ring_buffer_enqueue(struct ring_buffer* rbuf, void *ptr)
{
	unsigned int idx;

	/*
	 * Be aware: we do not check that buffer can be full,
	 * assume user of the ring buffer can't submit more.
	 */
	idx = rbuf->write_idx & rbuf->low_mask;
	*(void **)((uintptr_t)rbuf->data_ptr + idx) = ptr;
	/* Barrier to be sure stored pointer will be seen properly */
	__sync_synchronize();
	rbuf->write_idx = (rbuf->write_idx + sizeof(ptr)) & rbuf->high_mask;
}
/*
 * Consumer side: load the pointer at read_idx and advance the index.
 * No barrier here -- the caller is expected to have called
 * ring_buffer_used_size() first, whose barrier orders the load.
 */
static inline void *ring_buffer_dequeue(struct ring_buffer* rbuf)
{
	unsigned idx;
	void *ptr;

	/*
	 * Be aware: we do not check that buffer can be empty,
	 * assume user of the ring buffer called ring_buffer_used_size(),
	 * which returns actual used size and introduces memory barrier
	 * explicitly.
	 */
	idx = rbuf->read_idx & rbuf->low_mask;
	ptr = *(void **)((uintptr_t)rbuf->data_ptr + idx);
	rbuf->read_idx = (rbuf->read_idx + sizeof(ptr)) & rbuf->high_mask;

	return ptr;
}
| 2,420 | 22.504854 | 80 | h |
null | ceph-main/src/test/fs/mds_types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* Author: Greg Farnum <greg@inktank.com>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "mds/mdstypes.h"
#include "mds/inode_backtrace.h"
// Two identical inode_t values must compare equal (0) in both
// directions and must not be flagged divergent -- both for
// default-constructed inodes and for populated copies.
TEST(inode_t, compare_equal)
{
  inode_t foo{};
  inode_t bar{};
  // default-constructed: trivially equal
  int compare_r;
  bool divergent;

  compare_r = foo.compare(bar, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_FALSE(divergent);
  compare_r = bar.compare(foo, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_FALSE(divergent);

  // populate one and copy it; equality must still hold symmetrically
  foo.ino = 1234;
  foo.ctime.set_from_double(10.0);
  foo.mode = 0777;
  foo.uid = 42;
  foo.gid = 43;
  foo.nlink = 3;
  foo.version = 3;

  bar = foo;

  compare_r = foo.compare(bar, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_FALSE(divergent);
  compare_r = bar.compare(foo, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_FALSE(divergent);
}
// A newer version of the same inode (higher version, same rstat
// lineage) compares as strictly newer/older without divergence.
TEST(inode_t, compare_aged)
{
  inode_t foo{};
  inode_t bar{};
  foo.ino = 1234;
  foo.ctime.set_from_double(10.0);
  foo.mode = 0777;
  foo.uid = 42;
  foo.gid = 43;
  foo.nlink = 3;
  foo.version = 3;
  foo.rstat.version = 1;

  bar = foo;
  bar.version = 2;  // bar is older (lower version) on the same history

  int compare_r;
  bool divergent;

  // newer.compare(older) == 1, older.compare(newer) == -1
  compare_r = foo.compare(bar, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_FALSE(divergent);
  compare_r = bar.compare(foo, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_FALSE(divergent);
}
// When the older-versioned inode carries a NEWER rstat version the two
// histories have forked: compare() must still order them by version but
// must set the divergent flag in both directions.
TEST(inode_t, compare_divergent)
{
  inode_t foo{};
  inode_t bar{};
  foo.ino = 1234;
  foo.ctime.set_from_double(10.0);
  foo.mode = 0777;
  foo.uid = 42;
  foo.gid = 43;
  foo.nlink = 3;
  foo.version = 3;
  foo.rstat.version = 1;

  bar = foo;
  bar.version = 2;        // older overall version...
  bar.rstat.version = 2;  // ...but newer rstat: divergent histories

  int compare_r;
  bool divergent;

  compare_r = foo.compare(bar, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_TRUE(divergent);
  compare_r = bar.compare(foo, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_TRUE(divergent);
}
// Identical backtraces (same ancestors, pools, and versions) compare
// as 0 and are both equivalent and non-divergent.
TEST(inode_backtrace_t, compare_equal)
{
  inode_backtrace_t foo;
  inode_backtrace_t bar;

  foo.ino = 1234;
  foo.pool = 12;
  foo.old_pools.push_back(10);
  foo.old_pools.push_back(5);

  // build a three-level ancestor chain: l3 -> l2 -> l1
  inode_backpointer_t foop;
  foop.dirino = 3;
  foop.dname = "l3";
  foop.version = 15;
  foo.ancestors.push_back(foop);

  foop.dirino = 2;
  foop.dname = "l2";
  foop.version = 10;
  foo.ancestors.push_back(foop);

  foop.dirino = 1;
  foop.dname = "l1";
  foop.version = 25;
  foo.ancestors.push_back(foop);

  bar = foo;

  int compare_r;
  bool equivalent;
  bool divergent;
  compare_r = foo.compare(bar, &equivalent, &divergent);
  EXPECT_EQ(0, compare_r);
  EXPECT_TRUE(equivalent);
  EXPECT_FALSE(divergent);
}
// A backtrace whose ancestor versions are uniformly newer orders as 1
// (and -1 the other way).  While the dentry names/inos still match the
// two are "equivalent"; once the oldest ancestor differs in identity
// (dirino/dname), equivalence is lost but they are still not divergent.
TEST(inode_backtrace_t, compare_newer)
{
  inode_backtrace_t foo;
  inode_backtrace_t bar;

  foo.ino = 1234;
  foo.pool = 12;
  foo.old_pools.push_back(10);
  foo.old_pools.push_back(5);

  bar.ino = 1234;
  bar.pool = 12;
  bar.old_pools.push_back(10);

  // same chain, but every bar ancestor carries an older (or equal) version
  inode_backpointer_t foop;
  foop.dirino = 3;
  foop.dname = "l3";
  foop.version = 15;
  foo.ancestors.push_back(foop);
  foop.version = 14;
  bar.ancestors.push_back(foop);

  foop.dirino = 2;
  foop.dname = "l2";
  foop.version = 10;
  foo.ancestors.push_back(foop);
  foop.version = 9;
  bar.ancestors.push_back(foop);

  foop.dirino = 1;
  foop.dname = "l1";
  foop.version = 25;
  foo.ancestors.push_back(foop);
  bar.ancestors.push_back(foop);

  int compare_r;
  bool equivalent;
  bool divergent;
  compare_r = foo.compare(bar, &equivalent, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_TRUE(equivalent);
  EXPECT_FALSE(divergent);

  compare_r = bar.compare(foo, &equivalent, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_TRUE(equivalent);
  EXPECT_FALSE(divergent);

  // change the identity of bar's deepest ancestor: ordering by version
  // is unchanged, but the chains are no longer equivalent
  bar.ancestors.back().dirino = 75;
  bar.ancestors.back().dname = "l1-old";
  bar.ancestors.back().version = 70;

  compare_r = foo.compare(bar, &equivalent, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_FALSE(equivalent);
  EXPECT_FALSE(divergent);

  compare_r = bar.compare(foo, &equivalent, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_FALSE(equivalent);
  EXPECT_FALSE(divergent);
}
// Mixed ancestor versions (bar newer at one level, older at another)
// mean neither backtrace strictly supersedes the other: compare() must
// report divergence in both directions.
TEST(inode_backtrace_t, compare_divergent)
{
  inode_backtrace_t foo;
  inode_backtrace_t bar;

  foo.ino = 1234;
  foo.pool = 12;
  foo.old_pools.push_back(10);
  foo.old_pools.push_back(5);

  bar.ino = 1234;
  bar.pool = 12;
  bar.old_pools.push_back(10);

  inode_backpointer_t foop;
  foop.dirino = 3;
  foop.dname = "l3";
  foop.version = 15;
  foo.ancestors.push_back(foop);
  foop.version = 17;   // bar newer at l3...
  bar.ancestors.push_back(foop);

  foop.dirino = 2;
  foop.dname = "l2";
  foop.version = 10;
  foo.ancestors.push_back(foop);
  foop.version = 9;    // ...but older at l2: divergent
  bar.ancestors.push_back(foop);

  foop.dirino = 1;
  foop.dname = "l1";
  foop.version = 25;
  foo.ancestors.push_back(foop);
  bar.ancestors.push_back(foop);

  int compare_r;
  bool equivalent;
  bool divergent;
  compare_r = foo.compare(bar, &equivalent, &divergent);
  EXPECT_EQ(1, compare_r);
  EXPECT_TRUE(divergent);

  compare_r = bar.compare(foo, &equivalent, &divergent);
  EXPECT_EQ(-1, compare_r);
  EXPECT_TRUE(divergent);
}
| 5,344 | 20.126482 | 70 | cc |
null | ceph-main/src/test/fs/test_ino_release_cb.cc | #include <string>
#include <unistd.h>
#include <include/fs_types.h>
#include <mds/mdstypes.h>
#include <include/cephfs/libcephfs.h>
#define MAX_CEPH_FILES 1000
#define DIRNAME "ino_release_cb"
using namespace std;
static std::atomic<bool> cb_done = false;
// Inode-release callback registered with libcephfs; merely records
// that the client asked us to release at least one inode.
static void cb(void *hdl, vinodeno_t vino)
{
  cb_done = true;
}
// Regression test for the ino_release_cb hook: create many files,
// remount with the callback registered, pin their inodes via
// ceph_ll_lookup_inode, and verify the client eventually invokes the
// callback to trim them.
int main(int argc, char *argv[])
{
  inodeno_t inos[MAX_CEPH_FILES];
  struct ceph_mount_info *cmount = NULL;

  ceph_create(&cmount, "admin");
  ceph_conf_read_file(cmount, NULL);

  ceph_init(cmount);

  [[maybe_unused]] int ret = ceph_mount(cmount, NULL);
  assert(ret >= 0);

  ret = ceph_mkdir(cmount, DIRNAME, 0755);
  assert(ret >= 0);

  ret = ceph_chdir(cmount, DIRNAME);
  assert(ret >= 0);

  /* Create a bunch of files, get their inode numbers and close them */
  int i;
  for (i = 0; i < MAX_CEPH_FILES; ++i) {
    int fd;
    struct ceph_statx stx;

    string name = std::to_string(i);

    fd = ceph_open(cmount, name.c_str(), O_RDWR|O_CREAT, 0644);
    assert(fd >= 0);

    ret = ceph_fstatx(cmount, fd, &stx, CEPH_STATX_INO, 0);
    assert(ret >= 0);
    inos[i] = stx.stx_ino;  // remember ino for the post-remount lookup

    ceph_close(cmount, fd);
  }

  /* Remount -- drops all caps so the lookups below repopulate the cache */
  ceph_unmount(cmount);
  ceph_release(cmount);
  ceph_create(&cmount, "admin");
  ceph_conf_read_file(cmount, NULL);
  ceph_init(cmount);

  // register the release callback BEFORE mounting
  struct ceph_client_callback_args args = { 0 };
  args.ino_release_cb = cb;
  ret = ceph_ll_register_callbacks2(cmount, &args);
  assert(ret == 0);

  ret = ceph_mount(cmount, NULL);
  assert(ret >= 0);

  Inode *inodes[MAX_CEPH_FILES];

  for (i = 0; i < MAX_CEPH_FILES; ++i) {
    /* We can stop if we got a callback */
    if (cb_done)
      break;

    ret = ceph_ll_lookup_inode(cmount, inos[i], &inodes[i]);
    assert(ret >= 0);
  }

  // give the MDS time to request cap trimming, which triggers cb()
  sleep(45);

  assert(cb_done);
  ceph_unmount(cmount);
  ceph_release(cmount);
  return 0;
}
null | ceph-main/src/test/fs/test_trim_caps.cc | #define _FILE_OFFSET_BITS 64
#if defined(__linux__)
#include <features.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <include/cephfs/libcephfs.h>
// Reproducer for a cap-trimming crash: a child process renames
// directories out from under the parent so that the parent's cached
// inode for "1" loses its parent dentry, then the parent keeps the
// session busy and waits to see whether the client crashes while
// trimming caps.  Parent and child synchronize over a pipe.
int main(int argc, char *argv[])
{
  char buf;
  int pipefd[2];
  int rc [[maybe_unused]] = pipe(pipefd);
  assert(rc >= 0);

  pid_t pid = fork();
  assert(pid >= 0);
  if (pid == 0)
    close(pipefd[1]);   // child reads, parent writes
  else
    close(pipefd[0]);

  struct ceph_mount_info *cmount = NULL;
  ceph_create(&cmount, "admin");
  ceph_conf_read_file(cmount, NULL);

  int ret [[maybe_unused]] = ceph_mount(cmount, NULL);
  assert(ret >= 0);

  if (pid == 0) {
    // child: wait until the parent has created "1/2" and "2", then
    // shuffle the names so the parent's "1" becomes unreachable
    ret = read(pipefd[0], &buf, 1);
    assert(ret == 1);

    ret = ceph_rename(cmount, "1", "3");
    assert(ret >= 0);

    ret = ceph_rename(cmount, "2", "1");
    assert(ret >= 0);

    ceph_unmount(cmount);
    printf("child exits\n");
  } else {
    ret = ceph_mkdirs(cmount, "1/2", 0755);
    assert(ret >= 0);

    struct ceph_statx stx;
    ret = ceph_statx(cmount, "1", &stx, 0, 0);
    assert(ret >= 0);
    uint64_t orig_ino [[maybe_unused]] = stx.stx_ino;

    ret = ceph_mkdir(cmount, "2", 0755);
    assert(ret >= 0);

    // signal the child to perform the renames, then wait for it
    ret = write(pipefd[1], &buf, 1);
    assert(ret == 1);

    int wstatus;
    ret = waitpid(pid, &wstatus, 0);
    assert(ret >= 0);
    assert(wstatus == 0);

    // make origin '1' no parent dentry
    ret = ceph_statx(cmount, "1", &stx, 0, 0);
    assert(ret >= 0);
    assert(orig_ino != stx.stx_ino);

    // move root inode's cap_item to tail of session->caps
    ret = ceph_statx(cmount, ".", &stx, 0, 0);
    assert(ret >= 0);

    printf("waiting for crash\n");
    sleep(60);
  }
  return 0;
}
null | ceph-main/src/test/immutable_object_cache/MockCacheDaemon.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
#define IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
#include <iostream>
#include <unistd.h>
#include "gmock/gmock.h"
#include "include/Context.h"
#include "tools/immutable_object_cache/CacheClient.h"
namespace ceph {
namespace immutable_obj_cache {
// gmock stand-in for ceph::immutable_obj_cache::CacheClient; mirrors
// the client's public surface so callers can set expectations on it.
class MockCacheClient {
public:
  // signature matches the real client's (socket path, cct) constructor
  MockCacheClient(const std::string& file, CephContext* ceph_ctx) {}
  MOCK_METHOD0(run, void());
  MOCK_METHOD0(is_session_work, bool());
  MOCK_METHOD0(close, void());
  MOCK_METHOD0(stop, void());
  MOCK_METHOD0(connect, int());
  MOCK_METHOD1(connect, void(Context*));
  MOCK_METHOD6(lookup_object, void(std::string, uint64_t, uint64_t, uint64_t,
                                   std::string, CacheGenContextURef));
  MOCK_METHOD1(register_client, int(Context*));
};
// gmock stand-in for the cache daemon's CacheServer.
class MockCacheServer {
public:
  // matches the real (cct, socket path, request handler) constructor
  MockCacheServer(CephContext* cct, const std::string& file,
                  ProcessMsg processmsg) {
  }
  MOCK_METHOD0(run, int());
  MOCK_METHOD0(start_accept, int());
  MOCK_METHOD0(stop, int());
};
} // namespace immutable_obj_cach3
} // namespace ceph
#endif // IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
| 1,237 | 25.913043 | 77 | h |
null | ceph-main/src/test/immutable_object_cache/test_DomainSocket.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <unistd.h>
#include "gtest/gtest.h"
#include "include/Context.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "test/immutable_object_cache/test_common.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "tools/immutable_object_cache/CacheServer.h"
using namespace ceph::immutable_obj_cache;
// Fixture wiring a real CacheServer (run on its own thread) to a real
// CacheClient over a unix domain socket, with a fake in-memory "LRU"
// (m_hit_entry_set) deciding whether lookups hit or miss.
class TestCommunication :public ::testing::Test {
public:
  CacheServer* m_cache_server;
  std::thread* srv_thd;                       // runs m_cache_server->run()
  CacheClient* m_cache_client;
  std::string m_local_path;                   // domain-socket path
  pthread_mutex_t m_mutex;
  pthread_cond_t m_cond;
  std::atomic<uint64_t> m_send_request_index; // requests issued
  std::atomic<uint64_t> m_recv_ack_index;     // acks received
  WaitEvent m_wait_event;
  unordered_set<std::string> m_hit_entry_set; // fake cache contents

  TestCommunication()
    : m_cache_server(nullptr), m_cache_client(nullptr),
      m_local_path("/tmp/ceph_test_domain_socket"),
      m_send_request_index(0), m_recv_ack_index(0)
    {}

  ~TestCommunication() {}

  static void SetUpTestCase() {}
  static void TearDownTestCase() {}

  void SetUp() override {
    // stale socket file from a previous run would break bind()
    std::remove(m_local_path.c_str());
    m_cache_server = new CacheServer(g_ceph_context, m_local_path,
      [this](CacheSession* sid, ObjectCacheRequest* req){
        handle_request(sid, req);
    });
    ASSERT_TRUE(m_cache_server != nullptr);
    srv_thd = new std::thread([this]() {m_cache_server->run();});

    m_cache_client = new CacheClient(m_local_path, g_ceph_context);
    ASSERT_TRUE(m_cache_client != nullptr);
    m_cache_client->run();

    // retry until the server thread is accepting connections
    while (true) {
      if (0 == m_cache_client->connect()) {
        break;
      }
    }

    auto ctx = new LambdaContext([](int reg) {
      ASSERT_TRUE(reg == 0);
    });
    m_cache_client->register_client(ctx);
    ASSERT_TRUE(m_cache_client->is_session_work());
  }

  void TearDown() override {
    delete m_cache_client;
    m_cache_server->stop();
    if (srv_thd->joinable()) {
      srv_thd->join();
    }
    delete m_cache_server;
    std::remove(m_local_path.c_str());
    delete srv_thd;
  }

  // Server-side dispatcher: answer REGISTER with a register-reply and
  // READ with either a cache-hit reply (oid present in the fake LRU)
  // or a read-from-rados reply.
  void handle_request(CacheSession* session_id, ObjectCacheRequest* req) {

    switch (req->get_request_type()) {
      case RBDSC_REGISTER: {
        ObjectCacheRequest* reply = new ObjectCacheRegReplyData(RBDSC_REGISTER_REPLY, req->seq);
        session_id->send(reply);
        break;
      }
      case RBDSC_READ: {
        ObjectCacheReadData* read_req = (ObjectCacheReadData*)req;
        ObjectCacheRequest* reply = nullptr;
        if (m_hit_entry_set.find(read_req->oid) == m_hit_entry_set.end()) {
          reply = new ObjectCacheReadRadosData(RBDSC_READ_RADOS, req->seq);
        } else {
          reply = new ObjectCacheReadReplyData(RBDSC_READ_REPLY, req->seq, "/fakepath");
        }
        session_id->send(reply);
        break;
      }
    }
  }

  // times: message number
  // queue_depth : imitate message queue depth
  // thinking : imitate handing message time
  void startup_pingpong_testing(uint64_t times, uint64_t queue_depth, int thinking) {
    m_send_request_index.store(0);
    m_recv_ack_index.store(0);
    for (uint64_t index = 0; index < times; index++) {
      auto ctx = make_gen_lambda_context<ObjectCacheRequest*, std::function<void(ObjectCacheRequest*)>>
        ([this, thinking, times](ObjectCacheRequest* ack){
          if (thinking != 0) {
            usleep(thinking); // handling message
          }
          m_recv_ack_index++;
          if (m_recv_ack_index == times) {
            // final ack: wake the driving thread below
            m_wait_event.signal();
          }
      });
      // simple queue depth
      while (m_send_request_index - m_recv_ack_index > queue_depth) {
        usleep(1);
      }

      m_cache_client->lookup_object("pool_nspace", 1, 2, 3, "object_name", std::move(ctx));
      m_send_request_index++;
    }
    m_wait_event.wait();
  }

  // Issue a single lookup and report whether the server answered with a
  // cache-hit (RBDSC_READ_REPLY).  Blocks until the ack arrives.
  bool startup_lookupobject_testing(std::string pool_nspace, std::string object_id) {
    bool hit;
    auto ctx = make_gen_lambda_context<ObjectCacheRequest*, std::function<void(ObjectCacheRequest*)>>
      ([this, &hit](ObjectCacheRequest* ack){
        hit = ack->type == RBDSC_READ_REPLY;
        m_wait_event.signal();
    });
    m_cache_client->lookup_object(pool_nspace, 1, 2, 3, object_id, std::move(ctx));
    m_wait_event.wait();
    return hit;
  }

  // Pretend the object named cache_file_name is cached.
  void set_hit_entry_in_fake_lru(std::string cache_file_name) {
    if (m_hit_entry_set.find(cache_file_name) == m_hit_entry_set.end()) {
      m_hit_entry_set.insert(cache_file_name);
    }
  }
};
// Round-trip stress: every request issued must be acknowledged, under
// two different load/queue-depth combinations.
TEST_F(TestCommunication, test_pingpong) {

  startup_pingpong_testing(64, 16, 0);
  ASSERT_TRUE(m_send_request_index == m_recv_ack_index);
  startup_pingpong_testing(200, 128, 0);
  ASSERT_TRUE(m_send_request_index == m_recv_ack_index);
}
// Seed a random subset of object names into the fake LRU and verify
// each lookup reports hit/miss exactly as seeded.
TEST_F(TestCommunication, test_lookup_object) {

  m_hit_entry_set.clear();

  srand(time(0));
  uint64_t random_hit = random();  // nondeterministic subset each run

  for (uint64_t i = 50; i < 100; i++) {
    if ((random_hit % i) == 0) {
      set_hit_entry_in_fake_lru(std::to_string(i));
    }
  }

  for (uint64_t i = 50; i < 100; i++) {
    if ((random_hit % i) != 0) {
      ASSERT_FALSE(startup_lookupobject_testing("test_nspace", std::to_string(i)));
    } else {
      ASSERT_TRUE(startup_lookupobject_testing("test_nspace", std::to_string(i)));
    }
  }
}
| 5,323 | 28.910112 | 103 | cc |
null | ceph-main/src/test/immutable_object_cache/test_SimplePolicy.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sstream>
#include <list>
#include <gtest/gtest.h>
#include "include/Context.h"
#include "tools/immutable_object_cache/SimplePolicy.h"
using namespace ceph::immutable_obj_cache;
// Build a deterministic cache-file name, "object_cache_file_<index>".
std::string generate_file_name(uint64_t index) {
  return "object_cache_file_" + std::to_string(index);
}
// Fixture around SimplePolicy (cache size 100, watermark 0.9).  SetUp
// promotes 50 entries; the fixture mirrors the policy's promoted and
// promoting LRUs in two vectors so every helper can cross-check the
// policy's counters against expected state.
class TestSimplePolicy :public ::testing::Test {
public:
  SimplePolicy* m_simple_policy;
  const uint64_t m_cache_size;     // total entry capacity (100)
  uint64_t m_entry_index;          // next unused file-name index
  std::vector<std::string> m_promoted_lru;   // shadow of promoted LRU
  std::vector<std::string> m_promoting_lru;  // shadow of promoting LRU

  TestSimplePolicy() : m_cache_size(100), m_entry_index(0) {}
  ~TestSimplePolicy() {}
  static void SetUpTestCase() {}
  static void TearDownTestCase() {}
  void SetUp() override {
    m_simple_policy = new SimplePolicy(g_ceph_context, m_cache_size, 128, 0.9);
    // populate 50 entries
    for (uint64_t i = 0; i < m_cache_size / 2; i++, m_entry_index++) {
      insert_entry_into_promoted_lru(generate_file_name(m_entry_index));
    }
  }
  void TearDown() override {
    // drain: eviction order must match our shadow LRU's front-to-back order
    while(m_promoted_lru.size()) {
      ASSERT_TRUE(m_simple_policy->get_evict_entry() == m_promoted_lru.front());
      m_simple_policy->evict_entry(m_simple_policy->get_evict_entry());
      m_promoted_lru.erase(m_promoted_lru.begin());
    }
    delete m_simple_policy;
  }

  // Drive one entry through NONE -> SKIP(promoting) -> PROMOTED,
  // asserting the policy's counters at every step.
  void insert_entry_into_promoted_lru(std::string cache_file_name) {
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size(), m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());
    ASSERT_EQ(OBJ_CACHE_NONE, m_simple_policy->get_status(cache_file_name));

    m_simple_policy->lookup_object(cache_file_name);
    ASSERT_EQ(OBJ_CACHE_SKIP, m_simple_policy->get_status(cache_file_name));
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size() + 1, m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());

    m_simple_policy->update_status(cache_file_name, OBJ_CACHE_PROMOTED, 1);
    m_promoted_lru.push_back(cache_file_name);
    ASSERT_EQ(OBJ_CACHE_PROMOTED, m_simple_policy->get_status(cache_file_name));

    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size(), m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());
  }

  // Like above but stop at the promoting stage (state stays SKIP).
  void insert_entry_into_promoting_lru(std::string cache_file_name) {
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size(), m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());
    ASSERT_EQ(OBJ_CACHE_NONE, m_simple_policy->get_status(cache_file_name));

    m_simple_policy->lookup_object(cache_file_name);
    m_promoting_lru.push_back(cache_file_name);
    ASSERT_EQ(OBJ_CACHE_SKIP, m_simple_policy->get_status(cache_file_name));
    ASSERT_EQ(m_cache_size - m_promoted_lru.size(), m_simple_policy->get_free_size());
    ASSERT_EQ(m_promoting_lru.size(), m_simple_policy->get_promoting_entry_num());
    ASSERT_EQ(m_promoted_lru.size(), m_simple_policy->get_promoted_entry_num());
  }
};
// With the cache completely full, a lookup miss must be answered with
// SKIP (nothing can be promoted).
TEST_F(TestSimplePolicy, test_lookup_miss_and_no_free) {
  // exhaust cache space
  uint64_t left_entry_num = m_cache_size - m_promoted_lru.size();
  for (uint64_t i = 0; i < left_entry_num; i++, ++m_entry_index) {
    insert_entry_into_promoted_lru(generate_file_name(m_entry_index));
  }
  ASSERT_TRUE(0 == m_simple_policy->get_free_size());
  ASSERT_TRUE(m_simple_policy->lookup_object("no_this_cache_file_name") == OBJ_CACHE_SKIP);
}
// A miss with free space returns NONE and starts promoting the entry
// (its status becomes SKIP).
TEST_F(TestSimplePolicy, test_lookup_miss_and_have_free) {
  ASSERT_TRUE(m_cache_size - m_promoted_lru.size() == m_simple_policy->get_free_size());
  ASSERT_TRUE(m_simple_policy->lookup_object("miss_but_have_free_space_file_name") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("miss_but_have_free_space_file_name") == OBJ_CACHE_SKIP);
}
// Lookups for entries still being promoted must keep answering SKIP
// and must not disturb the promoting count.
TEST_F(TestSimplePolicy, test_lookup_hit_and_promoting) {
  ASSERT_TRUE(m_cache_size - m_promoted_lru.size() == m_simple_policy->get_free_size());
  insert_entry_into_promoting_lru("promoting_file_1");
  insert_entry_into_promoting_lru("promoting_file_2");
  insert_entry_into_promoted_lru(generate_file_name(++m_entry_index));
  insert_entry_into_promoted_lru(generate_file_name(++m_entry_index));
  insert_entry_into_promoting_lru("promoting_file_3");
  insert_entry_into_promoting_lru("promoting_file_4");

  ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 4);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_file_1") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_file_2") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_file_3") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_file_4") == OBJ_CACHE_SKIP);

  ASSERT_TRUE(m_simple_policy->lookup_object("promoting_file_1") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->lookup_object("promoting_file_2") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->lookup_object("promoting_file_3") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->lookup_object("promoting_file_4") == OBJ_CACHE_SKIP);
}
// Every entry promoted in SetUp must report PROMOTED status.
TEST_F(TestSimplePolicy, test_lookup_hit_and_promoted) {
  ASSERT_TRUE(m_promoted_lru.size() == m_simple_policy->get_promoted_entry_num());
  for (uint64_t index = 0; index < m_entry_index; index++) {
    ASSERT_TRUE(m_simple_policy->get_status(generate_file_name(index)) == OBJ_CACHE_PROMOTED);
  }
}
// Cancelling a promotion (SKIP -> NONE) must decrement the promoting
// count one entry at a time and leave the other entries untouched.
TEST_F(TestSimplePolicy, test_update_state_from_promoting_to_none) {
  ASSERT_TRUE(m_cache_size - m_promoted_lru.size() == m_simple_policy->get_free_size());
  insert_entry_into_promoting_lru("promoting_to_none_file_1");
  insert_entry_into_promoting_lru("promoting_to_none_file_2");
  insert_entry_into_promoted_lru(generate_file_name(++m_entry_index));
  insert_entry_into_promoting_lru("promoting_to_none_file_3");
  insert_entry_into_promoting_lru("promoting_to_none_file_4");

  ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 4);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_1") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_2") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_3") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_4") == OBJ_CACHE_SKIP);

  // cancel them one by one, checking count and per-entry status each time
  m_simple_policy->update_status("promoting_to_none_file_1", OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 3);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_1") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_2") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_3") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_4") == OBJ_CACHE_SKIP);

  m_simple_policy->update_status("promoting_to_none_file_2", OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 2);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_1") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_2") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_3") == OBJ_CACHE_SKIP);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_4") == OBJ_CACHE_SKIP);

  m_simple_policy->update_status("promoting_to_none_file_3", OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 1);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_1") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_2") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_3") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_4") == OBJ_CACHE_SKIP);

  m_simple_policy->update_status("promoting_to_none_file_4", OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_promoting_entry_num() == 0);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_1") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_2") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_3") == OBJ_CACHE_NONE);
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_none_file_4") == OBJ_CACHE_NONE);
}
// Demoting each promoted entry to NONE must drop the promoted count
// by exactly one per demotion.
TEST_F(TestSimplePolicy, test_update_state_from_promoted_to_none) {
  ASSERT_TRUE(m_promoted_lru.size() == m_simple_policy->get_promoted_entry_num());
  for (uint64_t index = 0; index < m_entry_index; index++) {
    ASSERT_TRUE(m_simple_policy->get_status(generate_file_name(index)) == OBJ_CACHE_PROMOTED);
    m_simple_policy->update_status(generate_file_name(index), OBJ_CACHE_NONE);
    ASSERT_TRUE(m_simple_policy->get_status(generate_file_name(index)) == OBJ_CACHE_NONE);
    ASSERT_TRUE(m_simple_policy->get_promoted_entry_num() == m_promoted_lru.size() - index - 1);
  }
  // policy is empty now; clear the shadow so TearDown's drain matches
  m_promoted_lru.clear();
}
// Completing a promotion (SKIP -> PROMOTED) must decrement the
// promoting count per entry and flip the entry's status.
TEST_F(TestSimplePolicy, test_update_state_from_promoting_to_promoted) {
  ASSERT_TRUE(m_cache_size - m_promoted_lru.size() == m_simple_policy->get_free_size());
  insert_entry_into_promoting_lru("promoting_to_promoted_file_1");
  insert_entry_into_promoting_lru("promoting_to_promoted_file_2");
  insert_entry_into_promoting_lru("promoting_to_promoted_file_3");
  insert_entry_into_promoting_lru("promoting_to_promoted_file_4");
  ASSERT_TRUE(4 == m_simple_policy->get_promoting_entry_num());

  m_simple_policy->update_status("promoting_to_promoted_file_1", OBJ_CACHE_PROMOTED);
  ASSERT_TRUE(3 == m_simple_policy->get_promoting_entry_num());
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_promoted_file_1") == OBJ_CACHE_PROMOTED);

  m_simple_policy->update_status("promoting_to_promoted_file_2", OBJ_CACHE_PROMOTED);
  ASSERT_TRUE(2 == m_simple_policy->get_promoting_entry_num());
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_promoted_file_2") == OBJ_CACHE_PROMOTED);

  m_simple_policy->update_status("promoting_to_promoted_file_3", OBJ_CACHE_PROMOTED);
  ASSERT_TRUE(1 == m_simple_policy->get_promoting_entry_num());
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_promoted_file_3") == OBJ_CACHE_PROMOTED);

  m_simple_policy->update_status("promoting_to_promoted_file_4", OBJ_CACHE_PROMOTED);
  ASSERT_TRUE(0 == m_simple_policy->get_promoting_entry_num());
  ASSERT_TRUE(m_simple_policy->get_status("promoting_to_promoted_file_4") == OBJ_CACHE_PROMOTED);

  // record the new promoted entries so TearDown's drain stays in sync
  m_promoted_lru.push_back("promoting_to_promoted_file_1");
  m_promoted_lru.push_back("promoting_to_promoted_file_2");
  m_promoted_lru.push_back("promoting_to_promoted_file_3");
  m_promoted_lru.push_back("promoting_to_promoted_file_4");
}
// While free space is still above the (1 - watermark) threshold the
// policy must not propose any evictions.
TEST_F(TestSimplePolicy, test_evict_list_0) {
  std::list<std::string> evict_entry_list;
  // the default water mark is 0.9
  ASSERT_TRUE((float)m_simple_policy->get_free_size() > m_cache_size*0.1);
  m_simple_policy->get_evict_list(&evict_entry_list);
  ASSERT_TRUE(evict_entry_list.size() == 0);
}
TEST_F(TestSimplePolicy, test_evict_list_10) {
  // Fill the cache to capacity, then expect the policy to evict the
  // oldest 10% of entries (default water mark is 0.9).
  const uint64_t to_fill = m_cache_size - m_promoted_lru.size();
  for (uint64_t n = 0; n < to_fill; n++, ++m_entry_index) {
    insert_entry_into_promoted_lru(generate_file_name(m_entry_index));
  }
  ASSERT_TRUE(m_simple_policy->get_free_size() == 0);

  std::list<std::string> victims;
  m_simple_policy->get_evict_list(&victims);
  // evict 10% of old entries
  ASSERT_TRUE(m_cache_size*0.1 == victims.size());
  ASSERT_TRUE(m_cache_size - m_cache_size*0.1 == m_simple_policy->get_promoted_entry_num());

  // Eviction must proceed in LRU order (oldest first).
  for (auto &victim : victims) {
    ASSERT_TRUE(victim == m_promoted_lru.front());
    m_promoted_lru.erase(m_promoted_lru.begin());
  }
}
| 12,207 | 50.728814 | 102 | cc |
null | ceph-main/src/test/immutable_object_cache/test_common.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CACHE_TEST_COMMON_H
#define CACHE_TEST_COMMON_H
#include <pthread.h>
// Minimal one-shot event used to synchronize test threads: wait() blocks
// until signal() has been called; the signalled state is consumed by the
// waiter, so the same event can be reused for further rounds.
class WaitEvent {
 public:
  WaitEvent() : m_signaled(false) {
    pthread_mutex_init(&m_lock, NULL);
    pthread_cond_init(&m_cond, NULL);
  }

  ~WaitEvent() {
    pthread_mutex_destroy(&m_lock);
    pthread_cond_destroy(&m_cond);
  }

  // Copying would duplicate (and later double-destroy) the pthread
  // primitives, so forbid copy and assignment.
  WaitEvent(const WaitEvent&) = delete;
  WaitEvent& operator=(const WaitEvent&) = delete;

  // Block until the event is signalled; the predicate loop guards against
  // spurious wakeups. The signalled state is reset before returning.
  void wait() {
    pthread_mutex_lock(&m_lock);
    while (!m_signaled) {
      pthread_cond_wait(&m_cond, &m_lock);
    }
    m_signaled = false;
    pthread_mutex_unlock(&m_lock);
  }

  // Mark the event signalled and wake one waiter (if any).
  void signal() {
    pthread_mutex_lock(&m_lock);
    m_signaled = true;
    pthread_cond_signal(&m_cond);
    pthread_mutex_unlock(&m_lock);
  }

 private:
  pthread_mutex_t m_lock;   // protects m_signaled
  pthread_cond_t m_cond;
  bool m_signaled;          // true between signal() and the next wait()
};
#endif
| 846 | 19.166667 | 70 | h |
null | ceph-main/src/test/immutable_object_cache/test_main.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "global/global_context.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
#include <iostream>
#include <string>
int main(int argc, char **argv)
{
  ::testing::InitGoogleTest(&argc, argv);

  // All immutable-object-cache tests share a single cluster connection.
  librados::Rados rados;
  std::string err = connect_cluster_pp(rados);
  if (!err.empty()) {
    std::cerr << err << std::endl;
    return 1;
  }

  g_ceph_context = reinterpret_cast<CephContext*>(rados.cct());

  // Lockdep is best-effort here; a failure only produces a warning.
  if (rados.conf_set("lockdep", "true") < 0) {
    std::cerr << "warning: failed to enable lockdep" << std::endl;
  }
  return RUN_ALL_TESTS();
}
| 728 | 23.3 | 70 | cc |
null | ceph-main/src/test/immutable_object_cache/test_message.cc | #include "gtest/gtest.h"
#include "tools/immutable_object_cache/Types.h"
#include "tools/immutable_object_cache/SocketCommon.h"
using namespace ceph::immutable_obj_cache;
TEST(test_for_message, test_1)
{
  // Round-trip an ObjectCacheReadData request through encode() /
  // decode_object_cache_request() and verify every field survives.
  std::string pool_nspace("this is a pool namespace");
  std::string oid_name("this is a oid name");
  uint16_t type = RBDSC_READ;
  uint64_t seq = 123456UL;
  uint64_t read_offset = 222222UL;
  uint64_t read_len = 333333UL;
  uint64_t pool_id = 444444UL;
  uint64_t snap_id = 555555UL;
  uint64_t object_size = 666666UL;

  // ObjectRequest --> bufferlist
  ObjectCacheRequest* req = new ObjectCacheReadData(type, seq, read_offset, read_len,
                                                    pool_id, snap_id, object_size,
                                                    oid_name, pool_nspace);
  req->encode();

  auto payload_bl = req->get_payload_bufferlist();

  // The encoded buffer is header + payload.
  uint32_t data_len = get_data_len(payload_bl.c_str());
  ASSERT_EQ(payload_bl.length(), data_len + get_header_size());
  ASSERT_TRUE(payload_bl.c_str() != nullptr);

  // bufferlist --> ObjectCacheRequest
  ObjectCacheRequest* req_decode = decode_object_cache_request(payload_bl);
  auto* read_data = static_cast<ObjectCacheReadData*>(req_decode);

  ASSERT_EQ(req_decode->get_request_type(), RBDSC_READ);
  ASSERT_EQ(req_decode->type, RBDSC_READ);
  ASSERT_EQ(req_decode->seq, 123456UL);
  ASSERT_EQ(read_data->type, RBDSC_READ);
  ASSERT_EQ(read_data->seq, 123456UL);
  ASSERT_EQ(read_data->read_offset, 222222UL);
  ASSERT_EQ(read_data->read_len, 333333UL);
  ASSERT_EQ(read_data->pool_id, 444444UL);
  ASSERT_EQ(read_data->snap_id, 555555UL);
  ASSERT_EQ(read_data->oid, oid_name);
  ASSERT_EQ(read_data->pool_namespace, pool_nspace);
  ASSERT_EQ(read_data->object_size, 666666UL);

  delete req;
  delete req_decode;
}
| 1,963 | 37.509804 | 90 | cc |
null | ceph-main/src/test/immutable_object_cache/test_multi_session.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <unistd.h>
#include "gtest/gtest.h"
#include "include/Context.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "test/immutable_object_cache/test_common.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "tools/immutable_object_cache/CacheServer.h"
using namespace std;
using namespace ceph::immutable_obj_cache;
// Fixture that runs one CacheServer on a unix-domain socket and up to
// m_session_num CacheClient sessions against it, so tests can interleave
// client registration and object lookups concurrently.
class TestMultiSession : public ::testing::Test {
public:
  std::string m_local_path;                     // unix socket path
  CacheServer* m_cache_server;
  std::thread* m_cache_server_thread;           // runs m_cache_server->run()
  std::vector<CacheClient*> m_cache_client_vec; // index -> session (nullptr if absent)
  WaitEvent m_wait_event;
  std::atomic<uint64_t> m_send_request_index;   // lookups issued
  std::atomic<uint64_t> m_recv_ack_index;       // acks received
  uint64_t m_session_num = 110;

  TestMultiSession() : m_local_path("/tmp/ceph_test_multisession_socket"),
                       m_cache_server_thread(nullptr), m_send_request_index(0),
                       m_recv_ack_index(0) {
    m_cache_client_vec.resize(m_session_num + 1, nullptr);
  }

  ~TestMultiSession() {}

  static void SetUpTestCase() {}
  static void TearDownTestCase() {}

  // Start the server on a fresh socket and wait until its thread is up.
  void SetUp() override {
    std::remove(m_local_path.c_str());
    m_cache_server = new CacheServer(g_ceph_context, m_local_path,
      [this](CacheSession* session_id, ObjectCacheRequest* req){
        server_handle_request(session_id, req);
      });
    ASSERT_TRUE(m_cache_server != nullptr);

    m_cache_server_thread = new std::thread(([this]() {
      m_wait_event.signal();
      m_cache_server->run();
    }));
    // waiting for thread running.
    m_wait_event.wait();

    // waiting for io_service run.
    usleep(2);
  }

  // Close all client sessions before stopping the server, then join the
  // server thread and remove the socket file.
  void TearDown() override {
    for (uint64_t i = 0; i < m_session_num; i++) {
      if (m_cache_client_vec[i] != nullptr) {
        m_cache_client_vec[i]->close();
        delete m_cache_client_vec[i];
      }
    }
    m_cache_server->stop();
    if (m_cache_server_thread->joinable()) {
      m_cache_server_thread->join();
    }
    delete m_cache_server;
    delete m_cache_server_thread;
    std::remove(m_local_path.c_str());
  }

  // Create a client, retry connect() until it succeeds, and record it at
  // the given slot.
  CacheClient* create_session(uint64_t random_index) {
    CacheClient* cache_client = new CacheClient(m_local_path, g_ceph_context);
    cache_client->run();
    while (true) {
      if (0 == cache_client->connect()) {
        break;
      }
    }
    m_cache_client_vec[random_index] = cache_client;
    return cache_client;
  }

  // Server-side dispatcher: answer register and read requests with the
  // corresponding reply message.
  void server_handle_request(CacheSession* session_id, ObjectCacheRequest* req) {
    switch (req->get_request_type()) {
      case RBDSC_REGISTER: {
        ObjectCacheRequest* reply = new ObjectCacheRegReplyData(RBDSC_REGISTER_REPLY,
                                                                req->seq);
        session_id->send(reply);
        break;
      }
      case RBDSC_READ: {
        ObjectCacheRequest* reply = new ObjectCacheReadReplyData(RBDSC_READ_REPLY,
                                                                 req->seq);
        session_id->send(reply);
        break;
      }
    }
  }

  // Register a new client in an empty slot and check the session works.
  void test_register_client(uint64_t random_index) {
    ASSERT_TRUE(m_cache_client_vec[random_index] == nullptr);

    auto ctx = new LambdaContext([](int ret){
      ASSERT_TRUE(ret == 0);
    });
    auto session = create_session(random_index);
    session->register_client(ctx);

    ASSERT_TRUE(m_cache_client_vec[random_index] != nullptr);
    ASSERT_TRUE(session->is_session_work());
  }

  // Fire request_num async lookups on the given session; when is_last is
  // set, spin until every issued request has been acked, then signal the
  // test thread.
  void test_lookup_object(std::string pool_nspace, uint64_t index,
                          uint64_t request_num, bool is_last) {
    for (uint64_t i = 0; i < request_num; i++) {
      auto ctx = make_gen_lambda_context<ObjectCacheRequest*,
                 std::function<void(ObjectCacheRequest*)>>([this](ObjectCacheRequest* ack) {
        m_recv_ack_index++;
      });
      m_send_request_index++;
      // here just for concurrently testing register + lookup, so fix object id.
      m_cache_client_vec[index]->lookup_object(pool_nspace, 1, 2, 3, "1234", std::move(ctx));
    }

    if (is_last) {
      while(m_send_request_index != m_recv_ack_index) {
        usleep(1);
      }
      m_wait_event.signal();
    }
  }
};
// test concurrent : multi-session + register_client + lookup_request
TEST_F(TestMultiSession, test_multi_session) {
  const uint64_t test_times = 1000;
  const uint64_t test_session_num = 100;

  // Randomly interleave session registration and lookups; the final
  // iteration asks test_lookup_object to wait for all outstanding acks.
  for (uint64_t round = 0; round <= test_times; round++) {
    uint64_t idx = random() % test_session_num;
    if (m_cache_client_vec[idx] == nullptr) {
      test_register_client(idx);
    } else {
      test_lookup_object(string("test_nspace") + std::to_string(idx),
                         idx, 4, round == test_times);
    }
  }

  // make sure all ack will be received.
  m_wait_event.wait();

  ASSERT_TRUE(m_send_request_index == m_recv_ack_index);
}
| 4,948 | 29.361963 | 93 | cc |
null | ceph-main/src/test/immutable_object_cache/test_object_store.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <filesystem>
#include <iostream>
#include <unistd.h>
#include "gtest/gtest.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "test/librados/test.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "test/librados/test_cxx.h"
#include "tools/immutable_object_cache/ObjectCacheStore.h"
namespace fs = std::filesystem;
using namespace ceph::immutable_obj_cache;
std::string test_cache_path("/tmp/test_ceph_immutable_shared_cache");
// Fixture for ObjectCacheStore tests: connects to a cluster, creates a
// temporary pool, and drives the store through init/lookup/shutdown.
class TestObjectStore : public ::testing::Test {
public:
  ObjectCacheStore* m_object_cache_store;
  librados::Rados* m_test_rados;
  CephContext* m_ceph_context;
  librados::IoCtx m_local_io_ctx;
  std::string m_temp_pool_name;
  std::string m_temp_volume_name;

  TestObjectStore(): m_object_cache_store(nullptr), m_test_rados(nullptr), m_ceph_context(nullptr){}

  ~TestObjectStore(){}

  static void SetUpTestCase() {}
  static void TearDownTestCase() {}

  // Connect to the cluster and configure the cache for a small, local
  // test directory.
  void SetUp() override {
    m_test_rados = new librados::Rados();
    ASSERT_EQ("", connect_cluster_pp(*m_test_rados));
    ASSERT_EQ(0, m_test_rados->conf_set("rbd_cache", "false"));
    ASSERT_EQ(0, m_test_rados->conf_set("immutable_object_cache_max_size", "1024"));
    ASSERT_EQ(0, m_test_rados->conf_set("immutable_object_cache_path", test_cache_path.c_str()));
  }

  // Create a fresh pool/ioctx and an ObjectCacheStore instance.
  // NOTE(review): entry_num is currently unused — confirm whether the
  // store should be sized from it.
  void create_object_cache_store(uint64_t entry_num) {
    m_temp_pool_name = get_temp_pool_name("test_pool_");
    ASSERT_EQ(0, m_test_rados->pool_create(m_temp_pool_name.c_str()));
    ASSERT_EQ(0, m_test_rados->ioctx_create(m_temp_pool_name.c_str(), m_local_io_ctx));
    m_temp_volume_name = "test_volume";
    m_ceph_context = reinterpret_cast<CephContext*>(m_test_rados->cct());
    m_object_cache_store = new ObjectCacheStore(m_ceph_context);
  }

  // Initialize the store and its cache. The pool/volume parameters are
  // currently unused; only `reset` is forwarded.
  void init_object_cache_store(std::string pool_name, std::string vol_name,
                               uint64_t vol_size, bool reset) {
    ASSERT_EQ(0, m_object_cache_store->init(reset));
    ASSERT_EQ(0, m_object_cache_store->init_cache());
  }

  void shutdown_object_cache_store() {
    ASSERT_EQ(0, m_object_cache_store->shutdown());
  }

  // Look up an object in the store; the result code is returned via `ret`.
  void lookup_object_cache_store(std::string pool_name, std::string vol_name,
                                 std::string obj_name, int& ret) {
    std::string cache_path;
    ret = m_object_cache_store->lookup_object(pool_name, 1, 2, 3,
                                              obj_name, true, cache_path);
  }

  void TearDown() override {
    // delete on nullptr is a no-op, so no null checks are needed.
    delete m_test_rados;
    delete m_object_cache_store;
  }
};
TEST_F(TestObjectStore, test_1) {
  create_object_cache_store(1000);

  // Start from a clean cache directory so init() takes the fresh-setup
  // path. (Removed an unused local copy of the path.)
  fs::remove_all(test_cache_path);

  init_object_cache_store(m_temp_pool_name, m_temp_volume_name, 1000, true);

  // TODO add lookup interface testing

  shutdown_object_cache_store();
}
| 3,068 | 29.69 | 100 | cc |
null | ceph-main/src/test/journal/RadosTestFixture.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados/test_cxx.h"
#include "test/journal/RadosTestFixture.h"
#include "cls/journal/cls_journal_client.h"
#include "include/stringify.h"
#include "common/WorkQueue.h"
#include "journal/Settings.h"
using namespace std::chrono_literals;
// Construct the fixture: the timer lock is created here, the timer itself
// is created per-test in SetUp(). The listener is bound to this fixture.
RadosTestFixture::RadosTestFixture()
  : m_timer_lock(ceph::make_mutex("m_timer_lock")),
    m_listener(this) {
}
// Once per test binary: create a temporary pool and a one-thread pool
// shared by all tests.
void RadosTestFixture::SetUpTestCase() {
  _pool_name = get_temp_pool_name();
  ASSERT_EQ("", create_one_pool_pp(_pool_name, _rados));

  CephContext* cct = reinterpret_cast<CephContext*>(_rados.cct());
  _thread_pool = new ThreadPool(cct, "RadosTestFixture::_thread_pool",
                                "tp_test", 1);
  _thread_pool->start();
}
// Once per test binary: stop the shared thread pool, then destroy the
// temporary pool.
void RadosTestFixture::TearDownTestCase() {
  _thread_pool->stop();
  delete _thread_pool;

  ASSERT_EQ(0, destroy_one_pool_pp(_pool_name, _rados));
}
std::string RadosTestFixture::get_temp_oid() {
  // A monotonic counter keeps object names unique across the binary.
  return "oid" + stringify(++_oid_number);
}
// Per test: open an ioctx on the shared pool and create the work queue
// and safe timer used by journal metadata.
void RadosTestFixture::SetUp() {
  ASSERT_EQ(0, _rados.ioctx_create(_pool_name.c_str(), m_ioctx));

  CephContext* cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
  m_work_queue = new ContextWQ("RadosTestFixture::m_work_queue",
                               ceph::make_timespan(60),
                               _thread_pool);

  m_timer = new SafeTimer(cct, m_timer_lock, true);
  m_timer->init();
}
// Per test: shut down every metadata handed out during the test, then the
// timer (under its lock) and finally the drained work queue.
void RadosTestFixture::TearDown() {
  for (auto metadata : m_metadatas) {
    C_SaferCond ctx;
    metadata->shut_down(&ctx);
    ASSERT_EQ(0, ctx.wait());
  }

  {
    std::lock_guard locker{m_timer_lock};
    m_timer->shutdown();
  }
  delete m_timer;

  m_work_queue->drain();
  delete m_work_queue;
}
// Create a journal object with the given order/splay width and no pool
// for the journal data objects (-1).
int RadosTestFixture::create(const std::string &oid, uint8_t order,
                             uint8_t splay_width) {
  return cls::journal::client::create(m_ioctx, oid, order, splay_width, -1);
}
// Build a JournalMetadata with the given commit settings; the fixture
// remembers it so TearDown() can shut it down.
ceph::ref_t<journal::JournalMetadata> RadosTestFixture::create_metadata(
    const std::string &oid, const std::string &client_id,
    double commit_interval, int max_concurrent_object_sets) {
  journal::Settings settings;
  settings.commit_interval = commit_interval;
  settings.max_concurrent_object_sets = max_concurrent_object_sets;

  auto metadata = ceph::make_ref<journal::JournalMetadata>(
    m_work_queue, m_timer, &m_timer_lock, m_ioctx, oid, client_id, settings);
  m_metadatas.push_back(metadata);
  return metadata;
}
int RadosTestFixture::append(const std::string &oid, const bufferlist &bl) {
librados::ObjectWriteOperation op;
op.append(bl);
return m_ioctx.operate(oid, &op);
}
// Register a journal client; the free-form description is stored as the
// client's opaque data blob.
int RadosTestFixture::client_register(const std::string &oid,
                                      const std::string &id,
                                      const std::string &description) {
  bufferlist data;
  data.append(description);
  return cls::journal::client::client_register(m_ioctx, oid, id, data);
}
// Persist the given commit position for a registered client.
int RadosTestFixture::client_commit(const std::string &oid,
                                    const std::string &id,
                                    const cls::journal::ObjectSetPosition &commit_position) {
  librados::ObjectWriteOperation op;
  cls::journal::client::client_commit(&op, id, commit_position);
  return m_ioctx.operate(oid, &op);
}
bufferlist RadosTestFixture::create_payload(const std::string &payload) {
  // Wrap a string payload in a bufferlist for journal appends.
  bufferlist out;
  out.append(payload);
  return out;
}
int RadosTestFixture::init_metadata(const ceph::ref_t<journal::JournalMetadata>& metadata) {
  // Drive the asynchronous init synchronously and surface its result.
  C_SaferCond ctx;
  metadata->init(&ctx);
  return ctx.wait();
}
// Wait (up to 10s) for the listener to observe at least one update on the
// given metadata; consumes one update on success, returns false on timeout.
bool RadosTestFixture::wait_for_update(const ceph::ref_t<journal::JournalMetadata>& metadata) {
  std::unique_lock locker{m_listener.mutex};
  while (m_listener.updates[metadata.get()] == 0) {
    if (m_listener.cond.wait_for(locker, 10s) == std::cv_status::timeout) {
      return false;
    }
  }
  // Consume exactly one pending update notification.
  --m_listener.updates[metadata.get()];
  return true;
}
// Definitions for the fixture's static members (shared across all tests
// in the binary; set up in SetUpTestCase()).
std::string RadosTestFixture::_pool_name;
librados::Rados RadosTestFixture::_rados;
uint64_t RadosTestFixture::_oid_number = 0;
ThreadPool *RadosTestFixture::_thread_pool = nullptr;
| 4,199 | 29.882353 | 95 | cc |
null | ceph-main/src/test/journal/RadosTestFixture.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados/test.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "journal/JournalMetadata.h"
#include "cls/journal/cls_journal_types.h"
#include "gtest/gtest.h"
class ThreadPool;
// Shared gtest fixture for journal tests: owns a rados pool/ioctx, a work
// queue and timer, and tracks every JournalMetadata it creates so they can
// be shut down in TearDown().
class RadosTestFixture : public ::testing::Test {
public:
  // Pool and thread pool are created/destroyed once per test binary.
  static void SetUpTestCase();
  static void TearDownTestCase();

  // Returns a unique object name ("oidN").
  static std::string get_temp_oid();

  RadosTestFixture();
  void SetUp() override;
  void TearDown() override;

  // Create a journal object with the given order/splay width.
  int create(const std::string &oid, uint8_t order = 14,
             uint8_t splay_width = 2);
  // Build (and remember) a JournalMetadata for the journal object.
  ceph::ref_t<journal::JournalMetadata> create_metadata(const std::string &oid,
                                           const std::string &client_id = "client",
                                           double commit_internal = 0.1,
                                           int max_concurrent_object_sets = 0);
  // Append raw bytes to a rados object.
  int append(const std::string &oid, const bufferlist &bl);

  // Register a journal client / persist its commit position.
  int client_register(const std::string &oid, const std::string &id = "client",
                      const std::string &description = "");
  int client_commit(const std::string &oid, const std::string &id,
                    const cls::journal::ObjectSetPosition &commit_position);

  bufferlist create_payload(const std::string &payload);

  // Counts metadata update notifications so tests can wait for them.
  struct Listener : public journal::JournalMetadataListener {
    RadosTestFixture *test_fixture;
    ceph::mutex mutex = ceph::make_mutex("mutex");
    ceph::condition_variable cond;
    std::map<journal::JournalMetadata*, uint32_t> updates;

    Listener(RadosTestFixture *_test_fixture)
      : test_fixture(_test_fixture) {}

    void handle_update(journal::JournalMetadata *metadata) override {
      std::lock_guard locker{mutex};
      ++updates[metadata];
      cond.notify_all();
    }
  };

  // Synchronously init a metadata / consume one pending update (10s timeout).
  int init_metadata(const ceph::ref_t<journal::JournalMetadata>& metadata);
  bool wait_for_update(const ceph::ref_t<journal::JournalMetadata>& metadata);

  static std::string _pool_name;
  static librados::Rados _rados;
  static uint64_t _oid_number;
  static ThreadPool *_thread_pool;

  librados::IoCtx m_ioctx;
  ContextWQ *m_work_queue = nullptr;

  ceph::mutex m_timer_lock;
  SafeTimer *m_timer = nullptr;

  Listener m_listener;

  std::list<ceph::ref_t<journal::JournalMetadata>> m_metadatas;
};
null | ceph-main/src/test/journal/test_Entry.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Entry.h"
#include "gtest/gtest.h"
// Empty fixture for journal::Entry unit tests (no shared state needed).
class TestEntry : public ::testing::Test {
};
TEST_F(TestEntry, DefaultConstructor) {
  // A default-constructed entry has zeroed tids and an empty payload.
  journal::Entry entry;
  ASSERT_EQ(0U, entry.get_entry_tid());
  ASSERT_EQ(0U, entry.get_tag_tid());

  bufferlist payload(entry.get_data());
  bufferlist empty_bl;
  ASSERT_TRUE(payload.contents_equal(empty_bl));
}
TEST_F(TestEntry, Constructor) {
  // tag_tid / entry_tid / data supplied at construction are retrievable.
  bufferlist payload;
  payload.append("data");
  journal::Entry entry(234, 123, payload);

  ASSERT_EQ(123U, entry.get_entry_tid());
  ASSERT_EQ(234U, entry.get_tag_tid());

  bufferlist expected;
  expected.append("data");
  bufferlist actual(entry.get_data());
  ASSERT_TRUE(actual.contents_equal(expected));
}
TEST_F(TestEntry, IsReadable) {
  // is_readable() must reject every strict prefix of an encoded entry
  // (reporting a positive byte deficit) and accept the full encoding.
  bufferlist payload;
  payload.append("data");
  journal::Entry entry(234, 123, payload);

  bufferlist full_bl;
  encode(entry, full_bl);

  uint32_t bytes_needed;
  for (size_t len = 0; len < full_bl.length() - 1; ++len) {
    bufferlist partial_bl;
    if (len > 0) {
      partial_bl.substr_of(full_bl, 0, len);
    }
    ASSERT_FALSE(journal::Entry::is_readable(partial_bl.begin(),
                                             &bytes_needed));
    ASSERT_GT(bytes_needed, 0U);
  }

  ASSERT_TRUE(journal::Entry::is_readable(full_bl.begin(), &bytes_needed));
  ASSERT_EQ(0U, bytes_needed);
}
TEST_F(TestEntry, IsReadableBadPreamble) {
  // Bytes that do not start with the entry preamble are unreadable; once
  // the stray prefix is skipped, the real entry decodes normally.
  bufferlist payload;
  payload.append("data");
  journal::Entry entry(234, 123, payload);

  uint64_t stray_bytes = 0x1122334455667788;
  bufferlist full_bl;
  encode(stray_bytes, full_bl);
  encode(entry, full_bl);

  uint32_t bytes_needed;
  bufferlist::iterator it = full_bl.begin();
  ASSERT_FALSE(journal::Entry::is_readable(it, &bytes_needed));
  ASSERT_EQ(0U, bytes_needed);

  it += sizeof(stray_bytes);
  ASSERT_TRUE(journal::Entry::is_readable(it, &bytes_needed));
  ASSERT_EQ(0U, bytes_needed);
}
TEST_F(TestEntry, IsReadableBadCRC) {
  // Corrupting the trailing CRC must make an otherwise complete entry
  // unreadable.
  bufferlist payload;
  payload.append("data");
  journal::Entry entry(234, 123, payload);

  bufferlist full_bl;
  encode(entry, full_bl);

  bufferlist corrupt_bl;
  corrupt_bl.substr_of(full_bl, 0, full_bl.length() - 4);
  encode(full_bl.crc32c(1), corrupt_bl);

  uint32_t bytes_needed;
  ASSERT_FALSE(journal::Entry::is_readable(corrupt_bl.begin(), &bytes_needed));
  ASSERT_EQ(0U, bytes_needed);
}
null | ceph-main/src/test/journal/test_FutureImpl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/FutureImpl.h"
#include "common/Cond.h"
#include "gtest/gtest.h"
#include "test/journal/RadosTestFixture.h"
// Fixture for journal::FutureImpl tests. FlushHandler counts how many
// flush requests the attached futures raise.
class TestFutureImpl : public RadosTestFixture {
public:
  struct FlushHandler : public journal::FutureImpl::FlushHandler {
    uint64_t flushes = 0;  // number of flush() callbacks observed
    void flush(const ceph::ref_t<journal::FutureImpl>& future) override {
      ++flushes;
    }
    FlushHandler() = default;
  };

  TestFutureImpl() {
    m_flush_handler = std::make_shared<FlushHandler>();
  }

  // Build a future (optionally chained onto a predecessor) and init it.
  auto create_future(uint64_t tag_tid, uint64_t entry_tid,
                     uint64_t commit_tid,
                     ceph::ref_t<journal::FutureImpl> prev = nullptr) {
    auto future = ceph::make_ref<journal::FutureImpl>(tag_tid, entry_tid, commit_tid);
    future->init(prev);
    return future;
  }

  // Intentionally a no-op; tests drive flushing via the futures directly.
  void flush(const ceph::ref_t<journal::FutureImpl>& future) {
  }

  std::shared_ptr<FlushHandler> m_flush_handler;
};
TEST_F(TestFutureImpl, Getters) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // Accessors return exactly the values the future was constructed with.
  auto fut = create_future(234, 123, 456);
  ASSERT_EQ(234U, fut->get_tag_tid());
  ASSERT_EQ(123U, fut->get_entry_tid());
  ASSERT_EQ(456U, fut->get_commit_tid());
}
TEST_F(TestFutureImpl, Attach) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // With no pending flush, attach() returns false and holds a handler ref.
  auto fut = create_future(234, 123, 456);
  ASSERT_FALSE(fut->attach(m_flush_handler));
  ASSERT_EQ(2U, m_flush_handler.use_count());
}
TEST_F(TestFutureImpl, AttachWithPendingFlush) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // A flush requested before attach() makes attach() report the pending
  // flush via its return value.
  auto fut = create_future(234, 123, 456);
  fut->flush(NULL);

  ASSERT_TRUE(fut->attach(m_flush_handler));
  ASSERT_EQ(2U, m_flush_handler.use_count());
}
TEST_F(TestFutureImpl, Detach) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // Explicit detach releases the handler reference taken by attach().
  auto fut = create_future(234, 123, 456);
  ASSERT_FALSE(fut->attach(m_flush_handler));
  fut->detach();
  ASSERT_EQ(1U, m_flush_handler.use_count());
}
TEST_F(TestFutureImpl, DetachImplicit) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // Destroying the future must also drop its handler reference.
  auto fut = create_future(234, 123, 456);
  ASSERT_FALSE(fut->attach(m_flush_handler));
  fut.reset();
  ASSERT_EQ(1U, m_flush_handler.use_count());
}
TEST_F(TestFutureImpl, Flush) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  auto fut = create_future(234, 123, 456);
  ASSERT_FALSE(fut->attach(m_flush_handler));

  // flush() notifies the attached handler; the wait context completes with
  // the value passed to safe().
  C_SaferCond ctx;
  fut->flush(&ctx);

  ASSERT_EQ(1U, m_flush_handler->flushes);

  fut->safe(-EIO);
  ASSERT_EQ(-EIO, ctx.wait());
}
TEST_F(TestFutureImpl, FlushWithoutContext) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  auto fut = create_future(234, 123, 456);
  ASSERT_FALSE(fut->attach(m_flush_handler));

  // flush(NULL) still notifies the handler; completion state is observable
  // directly on the future.
  fut->flush(NULL);
  ASSERT_EQ(1U, m_flush_handler->flushes);

  fut->safe(-EIO);
  ASSERT_TRUE(fut->is_complete());
  ASSERT_EQ(-EIO, fut->get_return_value());
}
// Flushing the tail of a chained set of futures must propagate the flush
// through every predecessor, and the tail only completes once all prior
// futures have completed — preserving the first error in the chain.
TEST_F(TestFutureImpl, FlushChain) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto metadata = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(metadata));

  // future1 <- future2 <- future3, with two distinct flush handlers.
  auto future1 = create_future(234, 123, 456);
  auto future2 = create_future(234, 124, 457, future1);
  auto future3 = create_future(235, 1, 458, future2);

  auto flush_handler = std::make_shared<FlushHandler>();
  ASSERT_FALSE(future1->attach(m_flush_handler));
  ASSERT_FALSE(future2->attach(flush_handler));
  ASSERT_FALSE(future3->attach(m_flush_handler));

  // Flushing the tail must reach both handlers.
  C_SaferCond cond;
  future3->flush(&cond);

  ASSERT_EQ(1U, m_flush_handler->flushes);
  ASSERT_EQ(1U, flush_handler->flushes);

  // future3 stays incomplete until both predecessors have completed.
  future3->safe(0);
  ASSERT_FALSE(future3->is_complete());

  future1->safe(0);
  ASSERT_FALSE(future3->is_complete());

  // future2's error is the first failure in the chain and wins.
  future2->safe(-EIO);
  ASSERT_TRUE(future3->is_complete());
  ASSERT_EQ(-EIO, future3->get_return_value());
  ASSERT_EQ(-EIO, cond.wait());
  ASSERT_EQ(0, future1->get_return_value());
}
TEST_F(TestFutureImpl, FlushInProgress) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  auto fut1 = create_future(234, 123, 456);
  auto fut2 = create_future(234, 124, 457, fut1);
  ASSERT_FALSE(fut1->attach(m_flush_handler));
  ASSERT_FALSE(fut2->attach(m_flush_handler));

  // While a flush is already marked in progress, a second flush() request
  // must not re-notify the handler.
  fut1->set_flush_in_progress();
  ASSERT_TRUE(fut1->is_flush_in_progress());

  fut1->flush(NULL);
  ASSERT_EQ(0U, m_flush_handler->flushes);

  fut1->safe(0);
}
TEST_F(TestFutureImpl, FlushAlreadyComplete) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // Flushing a future that already completed fires the context at once
  // with the recorded result.
  auto fut = create_future(234, 123, 456);
  fut->safe(-EIO);

  C_SaferCond ctx;
  fut->flush(&ctx);
  ASSERT_EQ(-EIO, ctx.wait());
}
TEST_F(TestFutureImpl, Wait) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // A waiter registered before completion is fired by safe().
  auto fut = create_future(234, 1, 456);

  C_SaferCond ctx;
  fut->wait(&ctx);
  fut->safe(-EEXIST);
  ASSERT_EQ(-EEXIST, ctx.wait());
}
TEST_F(TestFutureImpl, WaitAlreadyComplete) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // A waiter registered after completion fires immediately with the
  // recorded result.
  auto fut = create_future(234, 1, 456);
  fut->safe(-EEXIST);

  C_SaferCond ctx;
  fut->wait(&ctx);
  ASSERT_EQ(-EEXIST, ctx.wait());
}
TEST_F(TestFutureImpl, SafePreservesError) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // When the predecessor fails first, its error propagates to the
  // dependent future, overriding the dependent's own result.
  auto fut1 = create_future(234, 123, 456);
  auto fut2 = create_future(234, 124, 457, fut1);

  fut1->safe(-EIO);
  fut2->safe(-EEXIST);
  ASSERT_TRUE(fut2->is_complete());
  ASSERT_EQ(-EIO, fut2->get_return_value());
}
TEST_F(TestFutureImpl, ConsistentPreservesError) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid));
  auto meta = create_metadata(oid);
  ASSERT_EQ(0, init_metadata(meta));

  // When the dependent fails before the predecessor completes, the
  // dependent keeps its own error.
  auto fut1 = create_future(234, 123, 456);
  auto fut2 = create_future(234, 124, 457, fut1);

  fut2->safe(-EEXIST);
  fut1->safe(-EIO);
  ASSERT_TRUE(fut2->is_complete());
  ASSERT_EQ(-EEXIST, fut2->get_return_value());
}
| 7,631 | 27.371747 | 86 | cc |
null | ceph-main/src/test/journal/test_JournalMetadata.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalMetadata.h"
#include "test/journal/RadosTestFixture.h"
#include "common/Cond.h"
#include <map>
// Fixture that tracks every JournalMetadata it creates so the update
// listener can be detached from each before the base fixture shuts them
// down.
class TestJournalMetadata : public RadosTestFixture {
public:
  void TearDown() override {
    // Remove the listener before RadosTestFixture::TearDown() shuts the
    // metadata objects down.
    for (MetadataList::iterator it = m_metadata_list.begin();
         it != m_metadata_list.end(); ++it) {
      (*it)->remove_listener(&m_listener);
    }
    m_metadata_list.clear();

    RadosTestFixture::TearDown();
  }

  // Wraps the base helper: remembers the metadata and attaches the
  // fixture's update listener to it.
  auto create_metadata(const std::string &oid,
                       const std::string &client_id,
                       double commit_interval = 0.1,
                       int max_concurrent_object_sets = 0) {
    auto metadata = RadosTestFixture::create_metadata(
      oid, client_id, commit_interval, max_concurrent_object_sets);
    m_metadata_list.push_back(metadata);
    metadata->add_listener(&m_listener);
    return metadata;
  }

  typedef std::list<ceph::ref_t<journal::JournalMetadata>> MetadataList;
  MetadataList m_metadata_list;
};
TEST_F(TestJournalMetadata, JournalDNE) {
  // Initializing metadata for a journal object that was never created
  // must fail with ENOENT.
  std::string oid = get_temp_oid();
  auto meta = create_metadata(oid, "client1");
  ASSERT_EQ(-ENOENT, init_metadata(meta));
}
TEST_F(TestJournalMetadata, ClientDNE) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 14, 2));
  ASSERT_EQ(0, client_register(oid, "client1", ""));

  // A registered client initializes fine ...
  auto meta1 = create_metadata(oid, "client1");
  ASSERT_EQ(0, init_metadata(meta1));

  // ... but an unregistered client id must fail with ENOENT.
  auto meta2 = create_metadata(oid, "client2");
  ASSERT_EQ(-ENOENT, init_metadata(meta2));
}
// Verifies out-of-order commit handling: a commit position is only
// advanced once all earlier tids are committed, and a newer in-flight
// commit supersedes (-ESTALE) an older pending one.
TEST_F(TestJournalMetadata, Committed) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 14, 2));
  ASSERT_EQ(0, client_register(oid, "client1", ""));

  // 600s commit interval: positions only persist on explicit flush.
  auto metadata1 = create_metadata(oid, "client1", 600);
  ASSERT_EQ(0, init_metadata(metadata1));

  // Second handle on the same client observes the persisted position.
  auto metadata2 = create_metadata(oid, "client1");
  ASSERT_EQ(0, init_metadata(metadata2));
  ASSERT_TRUE(wait_for_update(metadata2));

  journal::JournalMetadata::ObjectSetPosition expect_commit_position;
  journal::JournalMetadata::ObjectSetPosition read_commit_position;
  metadata1->get_commit_position(&read_commit_position);
  ASSERT_EQ(expect_commit_position, read_commit_position);

  uint64_t commit_tid1 = metadata1->allocate_commit_tid(0, 0, 0);
  uint64_t commit_tid2 = metadata1->allocate_commit_tid(0, 1, 0);
  uint64_t commit_tid3 = metadata1->allocate_commit_tid(1, 0, 1);
  uint64_t commit_tid4 = metadata1->allocate_commit_tid(0, 0, 2);

  // cannot commit until tid1 + 2 committed
  metadata1->committed(commit_tid2, []() { return nullptr; });
  metadata1->committed(commit_tid3, []() { return nullptr; });
  C_SaferCond cond1;
  metadata1->committed(commit_tid1, [&cond1]() { return &cond1; });

  // given our 10 minute commit internal, this should override the
  // in-flight commit
  C_SaferCond cond2;
  metadata1->committed(commit_tid4, [&cond2]() { return &cond2; });
  ASSERT_EQ(-ESTALE, cond1.wait());

  metadata1->flush_commit_position();
  ASSERT_EQ(0, cond2.wait());
  ASSERT_TRUE(wait_for_update(metadata2));

  // The persisted position reflects the highest fully committed entries.
  metadata2->get_commit_position(&read_commit_position);
  expect_commit_position = {{{0, 0, 2}, {1, 0, 1}}};
  ASSERT_EQ(expect_commit_position, read_commit_position);
}
TEST_F(TestJournalMetadata, UpdateActiveObject) {
  std::string oid = get_temp_oid();
  ASSERT_EQ(0, create(oid, 14, 2));
  ASSERT_EQ(0, client_register(oid, "client1", ""));

  auto meta = create_metadata(oid, "client1");
  ASSERT_EQ(0, init_metadata(meta));
  ASSERT_TRUE(wait_for_update(meta));

  // Setting the active set must be observable once the update notifies.
  ASSERT_EQ(0U, meta->get_active_set());
  ASSERT_EQ(0, meta->set_active_set(123));
  ASSERT_TRUE(wait_for_update(meta));
  ASSERT_EQ(123U, meta->get_active_set());
}
// A client that falls more than max_concurrent_object_sets behind the
// active set must be flipped to DISCONNECTED, while up-to-date clients
// stay CONNECTED.
TEST_F(TestJournalMetadata, DisconnectLaggyClient) {
  std::string oid = get_temp_oid();

  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid, "client1", ""));
  ASSERT_EQ(0, client_register(oid, "client2", "laggy"));

  int max_concurrent_object_sets = 100;
  auto metadata =
    create_metadata(oid, "client1", 0.1, max_concurrent_object_sets);
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_TRUE(wait_for_update(metadata));
  ASSERT_EQ(0U, metadata->get_active_set());

  journal::JournalMetadata::RegisteredClients clients;

// Assert the connection state of "client1" (s1) and "client2" (s2).
#define ASSERT_CLIENT_STATES(s1, s2) \
  ASSERT_EQ(2U, clients.size()); \
  for (auto &c : clients) { \
    if (c.id == "client1") { \
      ASSERT_EQ(c.state, s1); \
    } else if (c.id == "client2") { \
      ASSERT_EQ(c.state, s2); \
    } else { \
      ASSERT_TRUE(false); \
    } \
  }

  metadata->get_registered_clients(&clients);
  ASSERT_CLIENT_STATES(cls::journal::CLIENT_STATE_CONNECTED,
                       cls::journal::CLIENT_STATE_CONNECTED);

  // client2 is connected when active set <= max_concurrent_object_sets
  ASSERT_EQ(0, metadata->set_active_set(max_concurrent_object_sets));
  ASSERT_TRUE(wait_for_update(metadata));
  uint64_t commit_tid = metadata->allocate_commit_tid(0, 0, 0);
  C_SaferCond cond1;
  metadata->committed(commit_tid, [&cond1]() { return &cond1; });
  ASSERT_EQ(0, cond1.wait());
  metadata->flush_commit_position();
  ASSERT_TRUE(wait_for_update(metadata));
  ASSERT_EQ(100U, metadata->get_active_set());
  clients.clear();
  metadata->get_registered_clients(&clients);
  ASSERT_CLIENT_STATES(cls::journal::CLIENT_STATE_CONNECTED,
                       cls::journal::CLIENT_STATE_CONNECTED);

  // client2 is disconnected when active set > max_concurrent_object_sets
  ASSERT_EQ(0, metadata->set_active_set(max_concurrent_object_sets + 1));
  ASSERT_TRUE(wait_for_update(metadata));
  commit_tid = metadata->allocate_commit_tid(0, 0, 1);
  C_SaferCond cond2;
  metadata->committed(commit_tid, [&cond2]() { return &cond2; });
  ASSERT_EQ(0, cond2.wait());
  metadata->flush_commit_position();
  ASSERT_TRUE(wait_for_update(metadata));
  ASSERT_EQ(101U, metadata->get_active_set());
  clients.clear();
  metadata->get_registered_clients(&clients);
  ASSERT_CLIENT_STATES(cls::journal::CLIENT_STATE_CONNECTED,
                       cls::journal::CLIENT_STATE_DISCONNECTED);
}
// assert_active_tag() succeeds for the most recently allocated tag; once a
// newer tag has been allocated in the same tag class, asserting the older
// tag yields -ESTALE.
TEST_F(TestJournalMetadata, AssertActiveTag) {
  std::string oid = get_temp_oid();

  ASSERT_EQ(0, create(oid));
  ASSERT_EQ(0, client_register(oid, "client1", ""));

  auto metadata = create_metadata(oid, "client1");
  ASSERT_EQ(0, init_metadata(metadata));
  ASSERT_TRUE(wait_for_update(metadata));

  // allocate the first tag in a new tag class
  C_SaferCond ctx1;
  cls::journal::Tag tag1;
  metadata->allocate_tag(cls::journal::Tag::TAG_CLASS_NEW, {}, &tag1, &ctx1);
  ASSERT_EQ(0, ctx1.wait());

  // tag1 is currently the newest tag -> assertion succeeds
  C_SaferCond ctx2;
  metadata->assert_active_tag(tag1.tid, &ctx2);
  ASSERT_EQ(0, ctx2.wait());

  // allocate a newer tag in the same class
  C_SaferCond ctx3;
  cls::journal::Tag tag2;
  metadata->allocate_tag(tag1.tag_class, {}, &tag2, &ctx3);
  ASSERT_EQ(0, ctx3.wait());

  // tag1 is no longer active -> assertion fails with -ESTALE
  C_SaferCond ctx4;
  metadata->assert_active_tag(tag1.tid, &ctx4);
  ASSERT_EQ(-ESTALE, ctx4.wait());
}
| 6,965 | 32.014218 | 77 | cc |
null | ceph-main/src/test/journal/test_JournalPlayer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalPlayer.h"
#include "journal/Entry.h"
#include "journal/JournalMetadata.h"
#include "journal/ReplayHandler.h"
#include "include/stringify.h"
#include "common/ceph_mutex.h"
#include "gtest/gtest.h"
#include "test/journal/RadosTestFixture.h"
#include <list>
#include <boost/scope_exit.hpp>
using namespace std::chrono_literals;
typedef std::list<journal::Entry> Entries;
// Typed fixture exercising journal::JournalPlayer against a live RADOS
// test cluster.  The type parameter T supplies max_fetch_bytes so each test
// runs both with no per-fetch limit and with a small byte budget.
template <typename T>
class TestJournalPlayer : public RadosTestFixture {
public:
  typedef std::list<journal::JournalPlayer *> JournalPlayers;

  static const uint64_t max_fetch_bytes = T::max_fetch_bytes;

  // Captures the player's replay callbacks; test threads wait on 'cond'
  // under 'lock' for new entries or for replay completion.
  struct ReplayHandler : public journal::ReplayHandler {
    ceph::mutex lock = ceph::make_mutex("lock");
    ceph::condition_variable cond;
    bool entries_available;  // set when new entries can be popped
    bool complete;           // set when replay has finished
    int complete_result;     // result code passed to handle_complete()

    ReplayHandler()
      : entries_available(false), complete(false),
        complete_result(0) {}

    void handle_entries_available() override {
      std::lock_guard locker{lock};
      entries_available = true;
      cond.notify_all();
    }

    void handle_complete(int r) override {
      std::lock_guard locker{lock};
      complete = true;
      complete_result = r;
      cond.notify_all();
    }
  };

  // Delete every player handed out by create_player() before tearing down
  // the RADOS fixture.
  void TearDown() override {
    for (JournalPlayers::iterator it = m_players.begin();
         it != m_players.end(); ++it) {
      delete *it;
    }
    RadosTestFixture::TearDown();
  }

  // Create journal metadata for the fixed "client" id with a 0.1s commit
  // interval and the type-parameterized fetch limit.
  auto create_metadata(const std::string &oid) {
    return RadosTestFixture::create_metadata(oid, "client", 0.1,
                                             max_fetch_bytes);
  }

  // Persist a commit position for the fixed "client" id.
  int client_commit(const std::string &oid,
                    journal::JournalPlayer::ObjectSetPosition position) {
    return RadosTestFixture::client_commit(oid, "client", position);
  }

  // Build an entry with a fixed 128-byte payload of '0' characters.
  journal::Entry create_entry(uint64_t tag_tid, uint64_t entry_tid) {
    std::string payload(128, '0');
    bufferlist payload_bl;
    payload_bl.append(payload);
    return journal::Entry(tag_tid, entry_tid, payload_bl);
  }

  // Construct a player over objects prefixed "<oid>."; ownership is kept
  // in m_players for cleanup in TearDown().
  journal::JournalPlayer *create_player(const std::string &oid,
                                        const ceph::ref_t<journal::JournalMetadata>& metadata) {
    journal::JournalPlayer *player(new journal::JournalPlayer(
      m_ioctx, oid + ".", metadata, &m_replay_hander, nullptr));
    m_players.push_back(player);
    return player;
  }

  // Pop entries from the player until 'count' have been collected,
  // waiting up to 10s per round for the entries-available notification
  // whenever the player runs dry.  Returns true iff exactly 'count'
  // entries were gathered.
  bool wait_for_entries(journal::JournalPlayer *player, uint32_t count,
                        Entries *entries) {
    entries->clear();
    while (entries->size() < count) {
      journal::Entry entry;
      uint64_t commit_tid;
      while (entries->size() < count &&
             player->try_pop_front(&entry, &commit_tid)) {
        entries->push_back(entry);
      }
      if (entries->size() == count) {
        break;
      }

      std::unique_lock locker{m_replay_hander.lock};
      if (m_replay_hander.entries_available) {
        // consume the pending notification and retry popping
        m_replay_hander.entries_available = false;
      } else if (m_replay_hander.cond.wait_for(locker, 10s) ==
                 std::cv_status::timeout) {
        break;
      }
    }
    return entries->size() == count;
  }

  // Wait (up to 10s per wake-up) for handle_complete(), draining pending
  // entries so the player can make progress; clears the completion flag so
  // the helper can be reused within the same test.
  bool wait_for_complete(journal::JournalPlayer *player) {
    std::unique_lock locker{m_replay_hander.lock};
    while (!m_replay_hander.complete) {
      journal::Entry entry;
      uint64_t commit_tid;
      player->try_pop_front(&entry, &commit_tid);

      if (m_replay_hander.cond.wait_for(locker, 10s) ==
          std::cv_status::timeout) {
        return false;
      }
    }
    m_replay_hander.complete = false;
    return true;
  }

  // Append an encoded entry to the object "<oid>.<object_num>".
  int write_entry(const std::string &oid, uint64_t object_num,
                  uint64_t tag_tid, uint64_t entry_tid) {
    bufferlist bl;
    encode(create_entry(tag_tid, entry_tid), bl);
    return append(oid + "." + stringify(object_num), bl);
  }

  JournalPlayers m_players;
  ReplayHandler m_replay_hander;  // NOTE(review): "hander" typo is pre-existing
};
// Carrier type for the typed-test parameter: the maximum number of bytes
// the player may fetch per object request (0 == unlimited).
template <uint64_t _max_fetch_bytes>
class TestJournalPlayerParams {
public:
  static constexpr uint64_t max_fetch_bytes = _max_fetch_bytes;
};
// Instantiate the typed suite twice: with unlimited per-object fetches (0)
// and with a 16-byte fetch limit (smaller than the 128-byte test payload,
// forcing incremental fetches).
typedef ::testing::Types<TestJournalPlayerParams<0>,
                         TestJournalPlayerParams<16> > TestJournalPlayerTypes;
TYPED_TEST_SUITE(TestJournalPlayer, TestJournalPlayerTypes);
// Replay resumes after the committed position: entry 234/122 in object 0 is
// committed, so prefetch delivers 234/123..125 and records the last
// allocated entry tid.
TYPED_TEST(TestJournalPlayer, Prefetch) {
  std::string oid = this->get_temp_oid();

  journal::JournalPlayer::ObjectPositions positions;
  positions = {
    cls::journal::ObjectPosition(0, 234, 122) };
  cls::journal::ObjectSetPosition commit_position(positions);

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 125));

  player->prefetch();

  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));

  Entries expected_entries;
  expected_entries = {
    this->create_entry(234, 123),
    this->create_entry(234, 124),
    this->create_entry(234, 125)};
  ASSERT_EQ(expected_entries, entries);

  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(234, &last_tid));
  ASSERT_EQ(125U, last_tid);
}
// With the commit position already at the last entry of every object,
// replay has nothing to deliver: zero entries, but the last allocated tid
// is still recovered.
TYPED_TEST(TestJournalPlayer, PrefetchSkip) {
  std::string oid = this->get_temp_oid();

  journal::JournalPlayer::ObjectPositions positions;
  positions = {
    cls::journal::ObjectPosition(0, 234, 125),
    cls::journal::ObjectPosition(1, 234, 124) };
  cls::journal::ObjectSetPosition commit_position(positions);

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 125));

  player->prefetch();

  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 0, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));

  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(234, &last_tid));
  ASSERT_EQ(125U, last_tid);
}
// With an empty commit position, replay starts from the beginning of the
// journal and delivers every written entry.
TYPED_TEST(TestJournalPlayer, PrefetchWithoutCommit) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));

  player->prefetch();

  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 2, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));

  Entries expected_entries;
  expected_entries = {
    this->create_entry(234, 122),
    this->create_entry(234, 123)};
  ASSERT_EQ(expected_entries, entries);
}
// Replay across a tag boundary (splay width 3): after the per-object commit
// positions, entries 234/123, 234/124 and the first entry of tag 236 are
// delivered, and last-tids are tracked per tag.
TYPED_TEST(TestJournalPlayer, PrefetchMultipleTags) {
  std::string oid = this->get_temp_oid();

  journal::JournalPlayer::ObjectPositions positions;
  positions = {
    cls::journal::ObjectPosition(2, 234, 122),
    cls::journal::ObjectPosition(1, 234, 121),
    cls::journal::ObjectPosition(0, 234, 120)};
  cls::journal::ObjectSetPosition commit_position(positions);

  ASSERT_EQ(0, this->create(oid, 14, 3));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 120));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 121));
  ASSERT_EQ(0, this->write_entry(oid, 2, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 123));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 124));
  ASSERT_EQ(0, this->write_entry(oid, 0, 236, 0)); // new tag allocated

  player->prefetch();

  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));

  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(234, &last_tid));
  ASSERT_EQ(124U, last_tid);
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(236, &last_tid));
  ASSERT_EQ(0U, last_tid);
}
// A gap in the entry-tid sequence within the active set (120, 121, then
// 124) is treated as corruption: replay stops after the contiguous prefix
// and completes with -ENOMSG.
TYPED_TEST(TestJournalPlayer, PrefetchCorruptSequence) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 120));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 121));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));

  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 2, &entries));

  // entry 234/124 must not be delivered past the sequence break
  journal::Entry entry;
  uint64_t commit_tid;
  ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));
  ASSERT_TRUE(this->wait_for_complete(player));
  ASSERT_EQ(-ENOMSG, this->m_replay_hander.complete_result);
}
// With splay width 4 and the active set advanced, entries of tag 2 past the
// contiguous prefix (852..854) are abandoned, and replay resumes cleanly
// with the entries of the newer tag 3.
TYPED_TEST(TestJournalPlayer, PrefetchMissingSequence) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid, 14, 4));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, metadata->set_active_set(1));
  // tag 2 entries 855, 858, 859, ... were never written
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 852));
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 856));
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 860));
  ASSERT_EQ(0, this->write_entry(oid, 1, 2, 853));
  ASSERT_EQ(0, this->write_entry(oid, 1, 2, 857));
  ASSERT_EQ(0, this->write_entry(oid, 5, 2, 861));
  ASSERT_EQ(0, this->write_entry(oid, 2, 2, 854));
  ASSERT_EQ(0, this->write_entry(oid, 0, 3, 0));
  ASSERT_EQ(0, this->write_entry(oid, 5, 3, 1));
  ASSERT_EQ(0, this->write_entry(oid, 2, 3, 2));
  ASSERT_EQ(0, this->write_entry(oid, 3, 3, 3));

  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 7, &entries));

  Entries expected_entries = {
    this->create_entry(2, 852),
    this->create_entry(2, 853),
    this->create_entry(2, 854),
    this->create_entry(3, 0),
    this->create_entry(3, 1),
    this->create_entry(3, 2),
    this->create_entry(3, 3)};
  ASSERT_EQ(expected_entries, entries);

  ASSERT_TRUE(this->wait_for_complete(player));
  ASSERT_EQ(0, this->m_replay_hander.complete_result);
}
// A sequence hole spanning object sets (tag 0 entry 2 missing, entry 3 in a
// later set): replay delivers the contiguous tag-0 prefix and then jumps to
// the newer tag 1.
TYPED_TEST(TestJournalPlayer, PrefetchLargeMissingSequence) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, metadata->set_active_set(2));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 3, 0, 3));
  ASSERT_EQ(0, this->write_entry(oid, 4, 1, 0));

  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));

  Entries expected_entries = {
    this->create_entry(0, 0),
    this->create_entry(0, 1),
    this->create_entry(1, 0)};
  ASSERT_EQ(expected_entries, entries);
}
// A missing entry (0/3) blocks tag 0 after 0/2, but the appearance of a new
// tag (1/0) unblocks replay past the hole.
TYPED_TEST(TestJournalPlayer, PrefetchBlockedNewTag) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 2));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 4));
  ASSERT_EQ(0, this->write_entry(oid, 0, 1, 0));

  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 4, &entries));

  Entries expected_entries = {
    this->create_entry(0, 0),
    this->create_entry(0, 1),
    this->create_entry(0, 2),
    this->create_entry(1, 0)};
  ASSERT_EQ(expected_entries, entries);
}
// Entries of tag 0 that precede the committed position (1/0 in object 0)
// are stale and skipped; only the uncommitted entry 1/1 is replayed.
TYPED_TEST(TestJournalPlayer, PrefetchStaleEntries) {
  std::string oid = this->get_temp_oid();

  journal::JournalPlayer::ObjectPositions positions = {
    cls::journal::ObjectPosition(0, 1, 0) };
  cls::journal::ObjectSetPosition commit_position(positions);

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 3));
  ASSERT_EQ(0, this->write_entry(oid, 0, 1, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 1, 1));

  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));

  Entries expected_entries = {
    this->create_entry(1, 1)};
  ASSERT_EQ(expected_entries, entries);

  ASSERT_TRUE(this->wait_for_complete(player));
  ASSERT_EQ(0, this->m_replay_hander.complete_result);
}
// A different tag (235) interleaved mid-sequence halts replay of tag 234
// after its first entry; replay still completes successfully with no
// further entries delivered.
TYPED_TEST(TestJournalPlayer, PrefetchUnexpectedTag) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 120));
  ASSERT_EQ(0, this->write_entry(oid, 1, 235, 121));
  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));

  player->prefetch();
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));

  journal::Entry entry;
  uint64_t commit_tid;
  ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));
  ASSERT_TRUE(this->wait_for_complete(player));
  ASSERT_EQ(0, this->m_replay_hander.complete_result);
}
// Live replay: prefetch_and_watch() (0.25s poll interval) keeps delivering
// entries appended after the watch started.
TYPED_TEST(TestJournalPlayer, PrefetchAndWatch) {
  std::string oid = this->get_temp_oid();

  journal::JournalPlayer::ObjectPositions positions;
  positions = {
    cls::journal::ObjectPosition(0, 234, 122)};
  cls::journal::ObjectSetPosition commit_position(positions);

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));

  player->prefetch_and_watch(0.25);

  Entries entries;
  // entries appended after the watch started are picked up incrementally
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));

  Entries expected_entries;
  expected_entries = {this->create_entry(234, 123)};
  ASSERT_EQ(expected_entries, entries);

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 124));
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));

  expected_entries = {this->create_entry(234, 124)};
  ASSERT_EQ(expected_entries, entries);
}
// With splay width 3 and the active set at 2, object set 1 (objects 3-5,
// mostly empty) is skipped gracefully; entries from sets 0 and 2 are all
// replayed in order.
TYPED_TEST(TestJournalPlayer, PrefetchSkippedObject) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid, 14, 3));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  ASSERT_EQ(0, metadata->set_active_set(2));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 234, 122));
  ASSERT_EQ(0, this->write_entry(oid, 1, 234, 123));
  ASSERT_EQ(0, this->write_entry(oid, 5, 234, 124));
  ASSERT_EQ(0, this->write_entry(oid, 6, 234, 125));
  ASSERT_EQ(0, this->write_entry(oid, 7, 234, 126));

  player->prefetch();

  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 5, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));

  Entries expected_entries;
  expected_entries = {
    this->create_entry(234, 122),
    this->create_entry(234, 123),
    this->create_entry(234, 124),
    this->create_entry(234, 125),
    this->create_entry(234, 126)};
  ASSERT_EQ(expected_entries, entries);

  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(234, &last_tid));
  ASSERT_EQ(126U, last_tid);
}
// Objects within a set (8-11, splay width 4) committed at different tags:
// replay must skip everything up to the per-object commit positions and
// deliver only the remaining tag-301 entries in order.
TYPED_TEST(TestJournalPlayer, ImbalancedJournal) {
  std::string oid = this->get_temp_oid();

  journal::JournalPlayer::ObjectPositions positions = {
    cls::journal::ObjectPosition(9, 300, 1),
    cls::journal::ObjectPosition(8, 300, 0),
    cls::journal::ObjectPosition(10, 200, 4334),
    cls::journal::ObjectPosition(11, 200, 4331) };
  cls::journal::ObjectSetPosition commit_position(positions);

  ASSERT_EQ(0, this->create(oid, 14, 4));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));
  ASSERT_EQ(0, metadata->set_active_set(2));
  metadata->set_minimum_set(2);

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 8, 300, 0));
  ASSERT_EQ(0, this->write_entry(oid, 8, 301, 0));
  ASSERT_EQ(0, this->write_entry(oid, 9, 300, 1));
  ASSERT_EQ(0, this->write_entry(oid, 9, 301, 1));
  ASSERT_EQ(0, this->write_entry(oid, 10, 200, 4334));
  ASSERT_EQ(0, this->write_entry(oid, 10, 301, 2));
  ASSERT_EQ(0, this->write_entry(oid, 11, 200, 4331));
  ASSERT_EQ(0, this->write_entry(oid, 11, 301, 3));

  player->prefetch();

  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 4, &entries));
  ASSERT_TRUE(this->wait_for_complete(player));

  Entries expected_entries;
  expected_entries = {
    this->create_entry(301, 0),
    this->create_entry(301, 1),
    this->create_entry(301, 2),
    this->create_entry(301, 3)};
  ASSERT_EQ(expected_entries, entries);

  uint64_t last_tid;
  ASSERT_TRUE(metadata->get_last_allocated_entry_tid(301, &last_tid));
  ASSERT_EQ(3U, last_tid);
}
// Live replay blocks on a missing entry (0/3) written out of order; once
// the laggy append lands and the active set advances, replay resumes and
// delivers the rest of the sequence.
TYPED_TEST(TestJournalPlayer, LiveReplayLaggyAppend) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 2));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 4));
  ASSERT_EQ(0, this->write_entry(oid, 3, 0, 5)); // laggy entry 0/3 in object 1

  player->prefetch_and_watch(0.25);

  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));

  Entries expected_entries = {
    this->create_entry(0, 0),
    this->create_entry(0, 1),
    this->create_entry(0, 2)};
  ASSERT_EQ(expected_entries, entries);

  // replay is blocked at the 0/3 hole
  journal::Entry entry;
  uint64_t commit_tid;
  ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));

  // fill the hole and advance the active set to unblock replay
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 3));
  ASSERT_EQ(0, metadata->set_active_set(1));
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));

  expected_entries = {
    this->create_entry(0, 3),
    this->create_entry(0, 4),
    this->create_entry(0, 5)};
  ASSERT_EQ(expected_entries, entries);
}
// Live replay with splay width 4: tag 2 stops at the end of its contiguous
// prefix (852..854); appending a new tag 3 lets replay resume with the new
// tag's entries even though they arrive out of order.
TYPED_TEST(TestJournalPlayer, LiveReplayMissingSequence) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid, 14, 4));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 852));
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 856));
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 860));
  ASSERT_EQ(0, this->write_entry(oid, 1, 2, 853));
  ASSERT_EQ(0, this->write_entry(oid, 1, 2, 857));
  ASSERT_EQ(0, this->write_entry(oid, 2, 2, 854));
  // note: entry 2/856 is appended a second time here
  ASSERT_EQ(0, this->write_entry(oid, 0, 2, 856));

  player->prefetch_and_watch(0.25);

  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));

  Entries expected_entries = {
    this->create_entry(2, 852),
    this->create_entry(2, 853),
    this->create_entry(2, 854)};
  ASSERT_EQ(expected_entries, entries);

  // blocked: 2/855 never arrives
  journal::Entry entry;
  uint64_t commit_tid;
  ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));

  // new tag 3 (written in reverse order) unblocks replay
  ASSERT_EQ(0, this->write_entry(oid, 3, 3, 3));
  ASSERT_EQ(0, this->write_entry(oid, 2, 3, 2));
  ASSERT_EQ(0, this->write_entry(oid, 1, 3, 1));
  ASSERT_EQ(0, this->write_entry(oid, 0, 3, 0));
  ASSERT_TRUE(this->wait_for_entries(player, 4, &entries));

  expected_entries = {
    this->create_entry(3, 0),
    this->create_entry(3, 1),
    this->create_entry(3, 2),
    this->create_entry(3, 3)};
  ASSERT_EQ(expected_entries, entries);
}
// Live-replay variant of PrefetchLargeMissingSequence: a sequence hole
// spanning object sets is skipped and replay continues with the newer tag.
TYPED_TEST(TestJournalPlayer, LiveReplayLargeMissingSequence) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, metadata->set_active_set(2));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 3, 0, 3));
  ASSERT_EQ(0, this->write_entry(oid, 4, 1, 0));

  player->prefetch_and_watch(0.25);
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));

  Entries expected_entries = {
    this->create_entry(0, 0),
    this->create_entry(0, 1),
    this->create_entry(1, 0)};
  ASSERT_EQ(expected_entries, entries);
}
// Live replay blocked at a hole in tag1 (entry 3 missing); allocating a new
// tag in the same tag class and appending its first entry resumes replay.
TYPED_TEST(TestJournalPlayer, LiveReplayBlockedNewTag) {
  std::string oid = this->get_temp_oid();

  cls::journal::ObjectSetPosition commit_position;

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  C_SaferCond ctx1;
  cls::journal::Tag tag1;
  metadata->allocate_tag(cls::journal::Tag::TAG_CLASS_NEW, {}, &tag1, &ctx1);
  ASSERT_EQ(0, ctx1.wait());

  ASSERT_EQ(0, metadata->set_active_set(0));
  ASSERT_EQ(0, this->write_entry(oid, 0, tag1.tid, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, tag1.tid, 1));
  ASSERT_EQ(0, this->write_entry(oid, 0, tag1.tid, 2));
  ASSERT_EQ(0, this->write_entry(oid, 0, tag1.tid, 4));

  player->prefetch_and_watch(0.25);
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 3, &entries));

  Entries expected_entries = {
    this->create_entry(tag1.tid, 0),
    this->create_entry(tag1.tid, 1),
    this->create_entry(tag1.tid, 2)};
  ASSERT_EQ(expected_entries, entries);

  // blocked at the missing tag1 entry 3
  journal::Entry entry;
  uint64_t commit_tid;
  ASSERT_FALSE(player->try_pop_front(&entry, &commit_tid));

  // allocating a successor tag in the same class unblocks replay
  C_SaferCond ctx2;
  cls::journal::Tag tag2;
  metadata->allocate_tag(tag1.tag_class, {}, &tag2, &ctx2);
  ASSERT_EQ(0, ctx2.wait());

  ASSERT_EQ(0, this->write_entry(oid, 0, tag2.tid, 0));
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));

  expected_entries = {
    this->create_entry(tag2.tid, 0)};
  ASSERT_EQ(expected_entries, entries);
}
// Live-replay variant of PrefetchStaleEntries: tag-0 entries behind the
// committed position (1/0) are skipped; only 1/1 is delivered.
TYPED_TEST(TestJournalPlayer, LiveReplayStaleEntries) {
  std::string oid = this->get_temp_oid();

  journal::JournalPlayer::ObjectPositions positions = {
    cls::journal::ObjectPosition(0, 1, 0) };
  cls::journal::ObjectSetPosition commit_position(positions);

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 3));
  ASSERT_EQ(0, this->write_entry(oid, 0, 1, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 1, 1));

  player->prefetch_and_watch(0.25);
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));

  Entries expected_entries = {
    this->create_entry(1, 1)};
  ASSERT_EQ(expected_entries, entries);
}
// After advancing the active set, a refetch must drop the watcher for an
// object (offset 3) that stays empty, allowing replay to pick up newer
// entries in later sets.
TYPED_TEST(TestJournalPlayer, LiveReplayRefetchRemoveEmpty) {
  std::string oid = this->get_temp_oid();

  journal::JournalPlayer::ObjectPositions positions = {
    cls::journal::ObjectPosition(1, 0, 1),
    cls::journal::ObjectPosition(0, 0, 0)};
  cls::journal::ObjectSetPosition commit_position(positions);

  ASSERT_EQ(0, this->create(oid));
  ASSERT_EQ(0, this->client_register(oid));
  ASSERT_EQ(0, this->client_commit(oid, commit_position));

  auto metadata = this->create_metadata(oid);
  ASSERT_EQ(0, this->init_metadata(metadata));

  journal::JournalPlayer *player = this->create_player(oid, metadata);
  BOOST_SCOPE_EXIT_ALL( (player) ) {
    C_SaferCond unwatch_ctx;
    player->shut_down(&unwatch_ctx);
    ASSERT_EQ(0, unwatch_ctx.wait());
  };

  ASSERT_EQ(0, metadata->set_active_set(1));
  ASSERT_EQ(0, this->write_entry(oid, 0, 0, 0));
  ASSERT_EQ(0, this->write_entry(oid, 1, 0, 1));
  ASSERT_EQ(0, this->write_entry(oid, 3, 0, 3));
  ASSERT_EQ(0, this->write_entry(oid, 2, 1, 0));

  player->prefetch_and_watch(0.25);
  Entries entries;
  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));

  Entries expected_entries = {
    this->create_entry(1, 0)};
  ASSERT_EQ(expected_entries, entries);

  // should remove player for offset 3 after refetching
  ASSERT_EQ(0, metadata->set_active_set(3));
  ASSERT_EQ(0, this->write_entry(oid, 7, 1, 1));

  ASSERT_TRUE(this->wait_for_entries(player, 1, &entries));

  expected_entries = {
    this->create_entry(1, 1)};
  ASSERT_EQ(expected_entries, entries);
}
// Smoke test: shutting the player down immediately after prefetch() was
// started (the scope-exit guard runs before any entries are consumed) must
// be safe.
TYPED_TEST(TestJournalPlayer, PrefetchShutDown) {
  std::string journal_oid = this->get_temp_oid();
  ASSERT_EQ(0, this->create(journal_oid));
  ASSERT_EQ(0, this->client_register(journal_oid));
  ASSERT_EQ(0, this->client_commit(journal_oid, {}));

  auto journal_metadata = this->create_metadata(journal_oid);
  ASSERT_EQ(0, this->init_metadata(journal_metadata));

  auto *journal_player = this->create_player(journal_oid, journal_metadata);
  BOOST_SCOPE_EXIT_ALL( (journal_player) ) {
    C_SaferCond shut_down_ctx;
    journal_player->shut_down(&shut_down_ctx);
    ASSERT_EQ(0, shut_down_ctx.wait());
  };

  journal_player->prefetch();
}
// Smoke test: shutting the player down immediately after live replay
// (prefetch_and_watch) was started must be safe.
TYPED_TEST(TestJournalPlayer, LiveReplayShutDown) {
  std::string journal_oid = this->get_temp_oid();
  ASSERT_EQ(0, this->create(journal_oid));
  ASSERT_EQ(0, this->client_register(journal_oid));
  ASSERT_EQ(0, this->client_commit(journal_oid, {}));

  auto journal_metadata = this->create_metadata(journal_oid);
  ASSERT_EQ(0, this->init_metadata(journal_metadata));

  auto *journal_player = this->create_player(journal_oid, journal_metadata);
  BOOST_SCOPE_EXIT_ALL( (journal_player) ) {
    C_SaferCond shut_down_ctx;
    journal_player->shut_down(&shut_down_ctx);
    ASSERT_EQ(0, shut_down_ctx.wait());
  };

  journal_player->prefetch_and_watch(0.25);
}
| 31,562 | 30.689759 | 96 | cc |